python_code | repo_name | file_path
---|---|---|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
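# For illustration only (not an extra test case), one concrete instance of the URL pattern
# asserted above: a path containing spaces is percent-encoded by quote() and a None revision
# falls back to "main", e.g.
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"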
| datasets-main | tests/test_hub.py |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
def _create_dummy_dataset(self):
dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
return dset
def test_add_faiss_index(self):
import faiss
dset: Dataset = self._create_dummy_dataset()
dset = dset.map(
lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
)
dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples["filename"][0], "my_name-train_29")
dset.drop_index("vecs")
def test_add_faiss_index_from_external_arrays(self):
import faiss
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
index_name="vecs",
batch_size=100,
metric_type=faiss.METRIC_INNER_PRODUCT,
)
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples["filename"][0], "my_name-train_29")
def test_serialization(self):
import faiss
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
index_name="vecs",
metric_type=faiss.METRIC_INNER_PRODUCT,
)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
dset.save_faiss_index("vecs", tmp_file.name)
dset.load_faiss_index("vecs2", tmp_file.name)
os.unlink(tmp_file.name)
scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
self.assertEqual(examples["filename"][0], "my_name-train_29")
def test_drop_index(self):
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
)
dset.drop_index("vecs")
self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
def test_add_elasticsearch_index(self):
from elasticsearch import Elasticsearch
dset: Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
"elasticsearch.client.IndicesClient.create"
) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
mocked_index_create.return_value = {"acknowledged": True}
mocked_bulk.return_value = [(True, None)] * 30
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
es_client = Elasticsearch()
dset.add_elasticsearch_index("filename", es_client=es_client)
scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
def test_flat_ip(self):
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal, 5)
index.add_vectors(np.zeros((5, 5), dtype=np.float32))
self.assertEqual(index.faiss_index.ntotal, 10)
# single query
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
self.assertGreater(scores[0], 0)
self.assertEqual(indices[0], 1)
# batched queries
queries = np.eye(5, dtype=np.float32)[::-1]
total_scores, total_indices = index.search_batch(queries)
self.assertRaises(ValueError, index.search_batch, queries[0])
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([4, 3, 2, 1, 0], best_indices)
def test_factory(self):
import faiss
index = FaissIndex(string_factory="Flat")
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
index = FaissIndex(string_factory="LSH")
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
with self.assertRaises(ValueError):
_ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
def test_custom(self):
import faiss
custom_index = faiss.IndexFlat(5)
index = FaissIndex(custom_index=custom_index)
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
def test_serialization(self):
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
index.save(tmp_file.name)
index = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
self.assertGreater(scores[0], 0)
self.assertEqual(indices[0], 1)
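# A minimal sketch (not part of the test suite) of the same save/load round trip done via a
# temporary directory, which sidesteps the Windows-specific delete=False / os.unlink dance
# used above by writing to a plain path and letting the context manager handle cleanup:
def _faiss_round_trip_via_tmpdir_sketch(index):
    with tempfile.TemporaryDirectory() as tmp_dir:
        index_path = os.path.join(tmp_dir, "index.faiss")
        index.save(index_path)
        return FaissIndex.load(index_path)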
@require_faiss
def test_serialization_fs(mockfs):
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
index_name = "index.faiss"
path = f"mock://{index_name}"
index.save(path, storage_options=mockfs.storage_options)
index = FaissIndex.load(path, storage_options=mockfs.storage_options)
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def test_elasticsearch(self):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
"elasticsearch.client.IndicesClient.create"
) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
es_client = Elasticsearch()
mocked_index_create.return_value = {"acknowledged": True}
index = ElasticSearchIndex(es_client=es_client)
mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"])
# single query
query = "foo"
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
scores, indices = index.search(query)
self.assertEqual(scores[0], 1)
self.assertEqual(indices[0], 0)
# single query with timeout
query = "foo"
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
scores, indices = index.search(query, request_timeout=30)
self.assertEqual(scores[0], 1)
self.assertEqual(indices[0], 0)
# batched queries
queries = ["foo", "bar", "foobar"]
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
total_scores, total_indices = index.search_batch(queries)
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([1, 1, 1], best_indices)
# batched queries with timeout
queries = ["foo", "bar", "foobar"]
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
total_scores, total_indices = index.search_batch(queries, request_timeout=30)
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([1, 1, 1], best_indices)
| datasets-main | tests/test_search.py |
import copy
import pickle
import warnings
from typing import List, Union
import numpy as np
import pyarrow as pa
import pytest
import datasets
from datasets import Sequence, Value
from datasets.features.features import Array2DExtensionType, ClassLabel, Features, Image
from datasets.table import (
ConcatenationTable,
InMemoryTable,
MemoryMappedTable,
Table,
TableBlock,
_in_memory_arrow_table_from_buffer,
_in_memory_arrow_table_from_file,
_interpolation_search,
_is_extension_type,
_memory_mapped_arrow_table_from_file,
array_concat,
cast_array_to_feature,
concat_tables,
embed_array_storage,
embed_table_storage,
inject_arrow_table_documentation,
table_cast,
table_iter,
)
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, slow
@pytest.fixture(scope="session")
def in_memory_pa_table(arrow_file) -> pa.Table:
return pa.ipc.open_stream(arrow_file).read_all()
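# The `arrow_file` fixture itself comes from the shared test fixtures (conftest) and is not
# defined here. A minimal sketch of how such a file could be produced, assuming it is an
# Arrow IPC stream written to disk (illustration only, `filename` is a hypothetical path):
def _write_arrow_stream_file_sketch(pa_table: pa.Table, filename: str) -> str:
    with pa.OSFile(filename, "wb") as sink:
        with pa.RecordBatchStreamWriter(sink, schema=pa_table.schema) as writer:
            writer.write_table(pa_table)
    return filename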
def _to_testing_blocks(table: TableBlock) -> List[List[TableBlock]]:
assert len(table) > 2
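# Two block rows: the first holds rows 0-1 as a single block; the second holds the remaining
# rows split column-wise into a "tokens"-only block and a block with all the other columns.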
blocks = [
[table.slice(0, 2)],
[table.slice(2).drop([c for c in table.column_names if c != "tokens"]), table.slice(2).drop(["tokens"])],
]
return blocks
@pytest.fixture(scope="session")
def in_memory_blocks(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
return _to_testing_blocks(table)
@pytest.fixture(scope="session")
def memory_mapped_blocks(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
return _to_testing_blocks(table)
@pytest.fixture(scope="session")
def mixed_in_memory_and_memory_mapped_blocks(in_memory_blocks, memory_mapped_blocks):
return in_memory_blocks[:1] + memory_mapped_blocks[1:]
def assert_deepcopy_without_bringing_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_doesnt_increase():
copied_table = copy.deepcopy(table)
assert isinstance(copied_table, MemoryMappedTable)
assert copied_table.table == table.table
def assert_deepcopy_does_bring_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_increases():
copied_table = copy.deepcopy(table)
assert isinstance(copied_table, MemoryMappedTable)
assert copied_table.table == table.table
def assert_pickle_without_bringing_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_doesnt_increase():
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert isinstance(unpickled_table, MemoryMappedTable)
assert unpickled_table.table == table.table
def assert_pickle_does_bring_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_increases():
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert isinstance(unpickled_table, MemoryMappedTable)
assert unpickled_table.table == table.table
def assert_index_attributes_equal(table: Table, other: Table):
assert table._batches == other._batches
np.testing.assert_array_equal(table._offsets, other._offsets)
assert table._schema == other._schema
def add_suffix_to_column_names(table, suffix):
return table.rename_columns([f"{name}{suffix}" for name in table.column_names])
def test_inject_arrow_table_documentation(in_memory_pa_table):
method = pa.Table.slice
def function_to_wrap(*args):
return method(*args)
args = (0, 1)
wrapped_method = inject_arrow_table_documentation(method)(function_to_wrap)
assert method(in_memory_pa_table, *args) == wrapped_method(in_memory_pa_table, *args)
assert "pyarrow.Table" not in wrapped_method.__doc__
assert "Table" in wrapped_method.__doc__
def test_in_memory_arrow_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_increases():
pa_table = _in_memory_arrow_table_from_file(arrow_file)
assert in_memory_pa_table == pa_table
def test_in_memory_arrow_table_from_buffer(in_memory_pa_table):
with assert_arrow_memory_increases():
buf_writer = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema)
writer.write_table(in_memory_pa_table)
writer.close()
buf_writer.close()
pa_table = _in_memory_arrow_table_from_buffer(buf_writer.getvalue())
assert in_memory_pa_table == pa_table
def test_memory_mapped_arrow_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_doesnt_increase():
pa_table = _memory_mapped_arrow_table_from_file(arrow_file)
assert in_memory_pa_table == pa_table
def test_table_init(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.table == in_memory_pa_table
def test_table_validate(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.validate() == in_memory_pa_table.validate()
def test_table_equals(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.equals(in_memory_pa_table)
def test_table_to_batches(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_batches() == in_memory_pa_table.to_batches()
def test_table_to_pydict(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_pydict() == in_memory_pa_table.to_pydict()
def test_table_to_string(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_string() == in_memory_pa_table.to_string()
def test_table_field(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
table = Table(in_memory_pa_table)
assert table.field("tokens") == in_memory_pa_table.field("tokens")
def test_table_column(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
table = Table(in_memory_pa_table)
assert table.column("tokens") == in_memory_pa_table.column("tokens")
def test_table_itercolumns(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert isinstance(table.itercolumns(), type(in_memory_pa_table.itercolumns()))
assert list(table.itercolumns()) == list(in_memory_pa_table.itercolumns())
def test_table_getitem(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table[0] == in_memory_pa_table[0]
def test_table_len(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert len(table) == len(in_memory_pa_table)
def test_table_str(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert str(table) == str(in_memory_pa_table).replace("pyarrow.Table", "Table")
assert repr(table) == repr(in_memory_pa_table).replace("pyarrow.Table", "Table")
@pytest.mark.parametrize(
"attribute", ["schema", "columns", "num_columns", "num_rows", "shape", "nbytes", "column_names"]
)
def test_table_attributes(in_memory_pa_table, attribute):
table = Table(in_memory_pa_table)
assert getattr(table, attribute) == getattr(in_memory_pa_table, attribute)
def test_in_memory_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_increases():
table = InMemoryTable.from_file(arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_buffer(in_memory_pa_table):
with assert_arrow_memory_increases():
buf_writer = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema)
writer.write_table(in_memory_pa_table)
writer.close()
buf_writer.close()
table = InMemoryTable.from_buffer(buf_writer.getvalue())
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_pandas(in_memory_pa_table):
df = in_memory_pa_table.to_pandas()
with assert_arrow_memory_increases():
# with no schema it might infer another order of the fields in the schema
table = InMemoryTable.from_pandas(df)
assert isinstance(table, InMemoryTable)
# by specifying schema we get the same order of features, and so the exact same table
table = InMemoryTable.from_pandas(df, schema=in_memory_pa_table.schema)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_arrays(in_memory_pa_table):
arrays = list(in_memory_pa_table.columns)
names = list(in_memory_pa_table.column_names)
table = InMemoryTable.from_arrays(arrays, names=names)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_pydict(in_memory_pa_table):
pydict = in_memory_pa_table.to_pydict()
with assert_arrow_memory_increases():
table = InMemoryTable.from_pydict(pydict)
assert isinstance(table, InMemoryTable)
assert table.table == pa.Table.from_pydict(pydict)
def test_in_memory_table_from_pylist(in_memory_pa_table):
pylist = InMemoryTable(in_memory_pa_table).to_pylist()
table = InMemoryTable.from_pylist(pylist)
assert isinstance(table, InMemoryTable)
assert pylist == table.to_pylist()
def test_in_memory_table_from_batches(in_memory_pa_table):
batches = list(in_memory_pa_table.to_batches())
table = InMemoryTable.from_batches(batches)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_deepcopy(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
def test_in_memory_table_pickle(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert_index_attributes_equal(table, unpickled_table)
@slow
def test_in_memory_table_pickle_big_table():
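# (4 * 8 << 30) // 64 is 4 GiB expressed in bits divided by 64 bits per int64, i.e. 2**29 rows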
big_table_4GB = InMemoryTable.from_pydict({"col": [0] * ((4 * 8 << 30) // 64)})
length = len(big_table_4GB)
big_table_4GB = pickle.dumps(big_table_4GB)
big_table_4GB = pickle.loads(big_table_4GB)
assert len(big_table_4GB) == length
def test_in_memory_table_slice(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_filter(in_memory_pa_table):
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = InMemoryTable(in_memory_pa_table).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_flatten(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, InMemoryTable)
def test_in_memory_table_combine_chunks(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, InMemoryTable)
def test_in_memory_table_cast(in_memory_pa_table):
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = InMemoryTable(in_memory_pa_table).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_cast_reorder_struct():
table = InMemoryTable(
pa.Table.from_pydict(
{
"top": [
{
"foo": "a",
"bar": "b",
}
]
}
)
)
schema = pa.schema({"top": pa.struct({"bar": pa.string(), "foo": pa.string()})})
assert table.cast(schema).schema == schema
def test_in_memory_table_cast_with_hf_features():
table = InMemoryTable(pa.Table.from_pydict({"labels": [0, 1]}))
features = Features({"labels": ClassLabel(names=["neg", "pos"])})
schema = features.arrow_schema
assert table.cast(schema).schema == schema
assert Features.from_arrow_schema(table.cast(schema).schema) == features
def test_in_memory_table_replace_schema_metadata(in_memory_pa_table):
metadata = {"huggingface": "{}"}
table = InMemoryTable(in_memory_pa_table).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, InMemoryTable)
def test_in_memory_table_add_column(in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).add_column(i, field_, column)
assert table.table == in_memory_pa_table.add_column(i, field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_append_column(in_memory_pa_table):
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).append_column(field_, column)
assert table.table == in_memory_pa_table.append_column(field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_remove_column(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_set_column(in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).set_column(i, field_, column)
assert table.table == in_memory_pa_table.set_column(i, field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_rename_columns(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = InMemoryTable(in_memory_pa_table).rename_columns(names)
assert table.table == in_memory_pa_table.rename_columns(names)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_drop(in_memory_pa_table):
names = [in_memory_pa_table.column_names[0]]
table = InMemoryTable(in_memory_pa_table).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, InMemoryTable)
def test_memory_mapped_table_init(arrow_file, in_memory_pa_table):
table = MemoryMappedTable(_memory_mapped_arrow_table_from_file(arrow_file), arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, MemoryMappedTable)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, MemoryMappedTable)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_from_file_with_replay(arrow_file, in_memory_pa_table):
replays = [("slice", (0, 1), {}), ("flatten", (), {})]
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file, replays=replays)
assert len(table) == 1
for method, args, kwargs in replays:
in_memory_pa_table = getattr(in_memory_pa_table, method)(*args, **kwargs)
assert table.table == in_memory_pa_table
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_deepcopy(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert table.path == copied_table.path
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
def test_memory_mapped_table_pickle(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert unpickled_table.path == table.path
assert_index_attributes_equal(table, unpickled_table)
def test_memory_mapped_table_pickle_doesnt_fill_memory(arrow_file):
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_pickle_applies_replay(arrow_file):
replays = [("slice", (0, 1), {}), ("flatten", (), {})]
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file, replays=replays)
assert isinstance(table, MemoryMappedTable)
assert table.replays == replays
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_slice(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("slice", (1, 2), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_filter(arrow_file, in_memory_pa_table):
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = MemoryMappedTable.from_file(arrow_file).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("filter", (mask,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
# filter DOES increase memory
# assert_pickle_without_bringing_data_in_memory(table)
assert_pickle_does_bring_data_in_memory(table)
def test_memory_mapped_table_flatten(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("flatten", (), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_combine_chunks(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("combine_chunks", (), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_cast(arrow_file, in_memory_pa_table):
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = MemoryMappedTable.from_file(arrow_file).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("cast", (schema,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
# cast DOES increase memory when converting integers precision for example
# assert_pickle_without_bringing_data_in_memory(table)
assert_pickle_does_bring_data_in_memory(table)
def test_memory_mapped_table_replace_schema_metadata(arrow_file, in_memory_pa_table):
metadata = {"huggingface": "{}"}
table = MemoryMappedTable.from_file(arrow_file).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("replace_schema_metadata", (metadata,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_add_column(arrow_file, in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).add_column(i, field_, column)
assert table.table == in_memory_pa_table.add_column(i, field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("add_column", (i, field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_append_column(arrow_file, in_memory_pa_table):
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).append_column(field_, column)
assert table.table == in_memory_pa_table.append_column(field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("append_column", (field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_remove_column(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("remove_column", (0,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_set_column(arrow_file, in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).set_column(i, field_, column)
assert table.table == in_memory_pa_table.set_column(i, field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("set_column", (i, field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_rename_columns(arrow_file, in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = MemoryMappedTable.from_file(arrow_file).rename_columns(names)
assert table.table == in_memory_pa_table.rename_columns(names)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("rename_columns", (names,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_drop(arrow_file, in_memory_pa_table):
names = [in_memory_pa_table.column_names[0]]
table = MemoryMappedTable.from_file(arrow_file).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("drop", (names,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_init(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = (
in_memory_blocks
if blocks_type == "in_memory"
else memory_mapped_blocks
if blocks_type == "memory_mapped"
else mixed_in_memory_and_memory_mapped_blocks
)
table = ConcatenationTable(in_memory_pa_table, blocks)
assert table.table == in_memory_pa_table
assert table.blocks == blocks
def test_concatenation_table_from_blocks(in_memory_pa_table, in_memory_blocks):
assert len(in_memory_pa_table) > 2
in_memory_table = InMemoryTable(in_memory_pa_table)
t1, t2 = in_memory_table.slice(0, 2), in_memory_table.slice(2)
table = ConcatenationTable.from_blocks(in_memory_table)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks([t1, t2])
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks([[t1], [t2]])
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks(in_memory_blocks)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_from_blocks_doesnt_increase_memory(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
with assert_arrow_memory_doesnt_increase():
table = ConcatenationTable.from_blocks(blocks)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
if blocks_type == "in_memory":
assert table.blocks == [[InMemoryTable(in_memory_pa_table)]]
else:
assert table.blocks == blocks
@pytest.mark.parametrize("axis", [0, 1])
def test_concatenation_table_from_tables(axis, in_memory_pa_table, arrow_file):
in_memory_table = InMemoryTable(in_memory_pa_table)
concatenation_table = ConcatenationTable.from_blocks(in_memory_table)
memory_mapped_table = MemoryMappedTable.from_file(arrow_file)
tables = [in_memory_pa_table, in_memory_table, concatenation_table, memory_mapped_table]
if axis == 0:
expected_table = pa.concat_tables([in_memory_pa_table] * len(tables))
else:
# avoids error due to duplicate column names
tables[1:] = [add_suffix_to_column_names(table, i) for i, table in enumerate(tables[1:], 1)]
expected_table = in_memory_pa_table
for table in tables[1:]:
for name, col in zip(table.column_names, table.columns):
expected_table = expected_table.append_column(name, col)
with assert_arrow_memory_doesnt_increase():
table = ConcatenationTable.from_tables(tables, axis=axis)
assert isinstance(table, ConcatenationTable)
assert table.table == expected_table
# because of consolidation, we end up with 1 InMemoryTable and 1 MemoryMappedTable
assert len(table.blocks) == (1 if axis == 1 else 2)
assert len(table.blocks[0]) == (1 if axis == 0 else 2)
assert axis == 1 or len(table.blocks[1]) == 1
assert isinstance(table.blocks[0][0], InMemoryTable)
assert isinstance(table.blocks[1][0] if axis == 0 else table.blocks[0][1], MemoryMappedTable)
def test_concatenation_table_from_tables_axis1_misaligned_blocks(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
t1 = table.slice(0, 2)
t2 = table.slice(0, 3).rename_columns([col + "_1" for col in table.column_names])
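# Stacking t1 (2 rows) three times puts block boundaries at rows 2 and 4, while stacking t2
# (3 rows) twice puts one at row 3; concatenating along axis=1 has to realign both block grids
# on the union of these boundaries, hence row blocks of sizes 2, 1, 1 and 2 below.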
concatenated = ConcatenationTable.from_tables(
[
ConcatenationTable.from_blocks([[t1], [t1], [t1]]),
ConcatenationTable.from_blocks([[t2], [t2]]),
],
axis=1,
)
assert len(concatenated) == 6
assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2]
concatenated = ConcatenationTable.from_tables(
[
ConcatenationTable.from_blocks([[t2], [t2]]),
ConcatenationTable.from_blocks([[t1], [t1], [t1]]),
],
axis=1,
)
assert len(concatenated) == 6
assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2]
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_deepcopy(
blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert table.blocks == copied_table.blocks
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_pickle(
blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert unpickled_table.blocks == table.blocks
assert_index_attributes_equal(table, unpickled_table)
def test_concat_tables_with_features_metadata(arrow_file, in_memory_pa_table):
input_features = Features.from_arrow_schema(in_memory_pa_table.schema)
input_features["id"] = Value("int64", id="my_id")
input_schema = input_features.arrow_schema
t0 = in_memory_pa_table.replace_schema_metadata(input_schema.metadata)
t1 = MemoryMappedTable.from_file(arrow_file)
tables = [t0, t1]
concatenated_table = concat_tables(tables, axis=0)
output_schema = concatenated_table.schema
output_features = Features.from_arrow_schema(output_schema)
assert output_schema == input_schema
assert output_schema.metadata == input_schema.metadata
assert output_features == input_features
assert output_features["id"].id == "my_id"
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_slice(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_filter(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = ConcatenationTable.from_blocks(blocks).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_flatten(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_combine_chunks(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_cast(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
assert pa.int64() in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = ConcatenationTable.from_blocks(blocks).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, ConcatenationTable)
schema = pa.schema(
{
k: v if v != pa.int64() else pa.int32()
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = ConcatenationTable.from_blocks(blocks).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concat_tables_cast_with_features_metadata(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
input_features = Features.from_arrow_schema(in_memory_pa_table.schema)
input_features["id"] = Value("int64", id="my_id")
input_schema = input_features.arrow_schema
concatenated_table = ConcatenationTable.from_blocks(blocks).cast(input_schema)
output_schema = concatenated_table.schema
output_features = Features.from_arrow_schema(output_schema)
assert output_schema == input_schema
assert output_schema.metadata == input_schema.metadata
assert output_features == input_features
assert output_features["id"].id == "my_id"
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_replace_schema_metadata(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
metadata = {"huggingface": "{}"}
table = ConcatenationTable.from_blocks(blocks).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_add_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).add_column(i, field_, column)
# assert table.table == in_memory_pa_table.add_column(i, field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.add_column(i, field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_append_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).append_column(field_, column)
# assert table.table == in_memory_pa_table.append_column(field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.append_column(field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_remove_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_set_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).set_column(i, field_, column)
# assert table.table == in_memory_pa_table.set_column(i, field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.set_column(i, field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_rename_columns(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = ConcatenationTable.from_blocks(blocks).rename_columns(names)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table.rename_columns(names)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_drop(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
names = [in_memory_pa_table.column_names[0]]
table = ConcatenationTable.from_blocks(blocks).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, ConcatenationTable)
def test_concat_tables(arrow_file, in_memory_pa_table):
t0 = in_memory_pa_table
t1 = InMemoryTable(t0)
t2 = MemoryMappedTable.from_file(arrow_file)
t3 = ConcatenationTable.from_blocks(t1)
tables = [t0, t1, t2, t3]
concatenated_table = concat_tables(tables, axis=0)
assert concatenated_table.table == pa.concat_tables([t0] * 4)
assert concatenated_table.table.shape == (40, 4)
assert isinstance(concatenated_table, ConcatenationTable)
assert len(concatenated_table.blocks) == 3 # t0 and t1 are consolidated as a single InMemoryTable
assert isinstance(concatenated_table.blocks[0][0], InMemoryTable)
assert isinstance(concatenated_table.blocks[1][0], MemoryMappedTable)
assert isinstance(concatenated_table.blocks[2][0], InMemoryTable)
# add suffix to avoid error due to duplicate column names
concatenated_table = concat_tables(
[add_suffix_to_column_names(table, i) for i, table in enumerate(tables)], axis=1
)
assert concatenated_table.table.shape == (10, 16)
assert len(concatenated_table.blocks[0]) == 3 # t0 and t1 are consolidated as a single InMemoryTable
assert isinstance(concatenated_table.blocks[0][0], InMemoryTable)
assert isinstance(concatenated_table.blocks[0][1], MemoryMappedTable)
assert isinstance(concatenated_table.blocks[0][2], InMemoryTable)
def _interpolation_search_ground_truth(arr: List[int], x: int) -> Union[int, IndexError]:
for i in range(len(arr) - 1):
if arr[i] <= x < arr[i + 1]:
return i
return IndexError
class _ListWithGetitemCounter(list):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.unique_getitem_calls = set()
def __getitem__(self, i):
out = super().__getitem__(i)
self.unique_getitem_calls.add(i)
return out
@property
def getitem_unique_count(self):
return len(self.unique_getitem_calls)
@pytest.mark.parametrize(
"arr, x",
[(np.arange(0, 14, 3), x) for x in range(-1, 22)]
+ [(list(np.arange(-5, 5)), x) for x in range(-6, 6)]
+ [([0, 1_000, 1_001, 1_003], x) for x in [-1, 0, 2, 100, 999, 1_000, 1_001, 1_002, 1_003, 1_004]]
+ [(list(range(1_000)), x) for x in [-1, 0, 1, 10, 666, 999, 1_000, 10_001]],
)
def test_interpolation_search(arr, x):
ground_truth = _interpolation_search_ground_truth(arr, x)
if isinstance(ground_truth, int):
arr = _ListWithGetitemCounter(arr)
output = _interpolation_search(arr, x)
assert ground_truth == output
# 4 maximum unique getitem calls is expected for the cases of this test
# but it can be bigger for large and messy arrays.
assert arr.getitem_unique_count <= 4
else:
with pytest.raises(ground_truth):
_interpolation_search(arr, x)
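# A minimal sketch of the idea behind interpolation search over a sorted offsets array
# (illustration only -- not the implementation of datasets.table._interpolation_search):
# probe the position proportional to where x falls between the boundary values instead of
# bisecting, and narrow the window until arr[i] <= x < arr[i + 1] or the query is out of range.
def _interpolation_search_sketch(arr: List[int], x: int) -> int:
    low, high = 0, len(arr) - 1
    while low < high:
        if not arr[low] <= x < arr[high]:
            break
        i = low + (x - arr[low]) * (high - low) // (arr[high] - arr[low])
        if arr[i] <= x < arr[i + 1]:
            return i
        if arr[i] <= x:
            low = i + 1
        else:
            high = i
    raise IndexError(f"Invalid query '{x}' for array of size {len(arr)}.")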
def test_indexed_table_mixin():
n_rows_per_chunk = 10
n_chunks = 4
pa_table = pa.Table.from_pydict({"col": [0] * n_rows_per_chunk})
pa_table = pa.concat_tables([pa_table] * n_chunks)
table = Table(pa_table)
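# _offsets is expected to hold the cumulative row count at each chunk boundary, starting at 0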
assert all(table._offsets.tolist() == np.cumsum([0] + [n_rows_per_chunk] * n_chunks))
assert table.fast_slice(5) == pa_table.slice(5)
assert table.fast_slice(2, 13) == pa_table.slice(2, 13)
@pytest.mark.parametrize(
"arrays",
[
[pa.array([[1, 2, 3, 4]]), pa.array([[10, 2]])],
[
pa.array([[[1, 2], [3]]], pa.list_(pa.list_(pa.int32()), 2)),
pa.array([[[10, 2, 3], [2]]], pa.list_(pa.list_(pa.int32()), 2)),
],
[pa.array([[[1, 2, 3]], [[2, 3], [20, 21]], [[4]]]).slice(1), pa.array([[[1, 2, 3]]])],
],
)
def test_concat_arrays(arrays):
assert array_concat(arrays) == pa.concat_arrays(arrays)
def test_concat_arrays_nested_with_nulls():
arrays = [pa.array([{"a": 21, "b": [[1, 2], [3]]}]), pa.array([{"a": 100, "b": [[1], None]}])]
concatenated_arrays = array_concat(arrays)
assert concatenated_arrays == pa.array([{"a": 21, "b": [[1, 2], [3]]}, {"a": 100, "b": [[1], None]}])
def test_concat_extension_arrays():
arrays = [pa.array([[[1, 2], [3, 4]]]), pa.array([[[10, 2], [3, 4]]])]
extension_type = Array2DExtensionType((2, 2), "int64")
assert array_concat([extension_type.wrap_array(array) for array in arrays]) == extension_type.wrap_array(
pa.concat_arrays(arrays)
)
def test_cast_array_to_features():
arr = pa.array([[0, 1]])
assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string())
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("string")), allow_number_to_str=False)
def test_cast_array_to_features_nested():
arr = pa.array([[{"foo": [0]}]])
assert cast_array_to_feature(arr, [{"foo": Sequence(Value("string"))}]).type == pa.list_(
pa.struct({"foo": pa.list_(pa.string())})
)
def test_cast_array_to_features_to_nested_with_no_fields():
arr = pa.array([{}])
assert cast_array_to_feature(arr, {}).type == pa.struct({})
assert cast_array_to_feature(arr, {}).to_pylist() == arr.to_pylist()
def test_cast_array_to_features_nested_with_null_values():
# same type
arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int64")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})
assert casted_array.to_pylist() == arr.to_pylist()
# different type
arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
if datasets.config.PYARROW_VERSION.major < 10:
with pytest.warns(UserWarning, match="None values are converted to empty lists.+"):
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
assert casted_array.to_pylist() == [
{"foo": [[], [0]]}
] # empty list because of https://github.com/huggingface/datasets/issues/3676
else:
with warnings.catch_warnings():
warnings.simplefilter("error")
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
assert casted_array.to_pylist() == [{"foo": [None, [0]]}]
def test_cast_array_to_features_to_null_type():
# same type
arr = pa.array([[None, None]])
assert cast_array_to_feature(arr, Sequence(Value("null"))).type == pa.list_(pa.null())
# different type
arr = pa.array([[None, 1]])
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("null")))
def test_cast_array_to_features_sequence_classlabel():
arr = pa.array([[], [1], [0, 1]], pa.list_(pa.int64()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
arr = pa.array([[], ["bar"], ["foo", "bar"]], pa.list_(pa.string()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
# Test empty arrays
arr = pa.array([[], []], pa.list_(pa.int64()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
arr = pa.array([[], []], pa.list_(pa.string()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
# Test invalid class labels
arr = pa.array([[2]], pa.list_(pa.int64()))
with pytest.raises(ValueError):
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
arr = pa.array([["baz"]], pa.list_(pa.string()))
with pytest.raises(ValueError):
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
def test_cast_sliced_fixed_size_array_to_features():
arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
casted_array = cast_array_to_feature(arr[1:], Sequence(Value("int64"), length=3))
assert casted_array.type == pa.list_(pa.int64(), 3)
assert casted_array.to_pylist() == arr[1:].to_pylist()
def test_embed_array_storage(image_file):
array = pa.array([{"bytes": None, "path": image_file}], type=Image.pa_type)
embedded_images_array = embed_array_storage(array, Image())
assert isinstance(embedded_images_array.to_pylist()[0]["path"], str)
assert embedded_images_array.to_pylist()[0]["path"] == "test_image_rgb.jpg"
assert isinstance(embedded_images_array.to_pylist()[0]["bytes"], bytes)
def test_embed_array_storage_nested(image_file):
array = pa.array([[{"bytes": None, "path": image_file}]], type=pa.list_(Image.pa_type))
embedded_images_array = embed_array_storage(array, [Image()])
assert isinstance(embedded_images_array.to_pylist()[0][0]["path"], str)
assert isinstance(embedded_images_array.to_pylist()[0][0]["bytes"], bytes)
array = pa.array([{"foo": {"bytes": None, "path": image_file}}], type=pa.struct({"foo": Image.pa_type}))
embedded_images_array = embed_array_storage(array, {"foo": Image()})
assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["path"], str)
assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["bytes"], bytes)
def test_embed_table_storage(image_file):
features = Features({"image": Image()})
table = table_cast(pa.table({"image": [image_file]}), features.arrow_schema)
embedded_images_table = embed_table_storage(table)
assert isinstance(embedded_images_table.to_pydict()["image"][0]["path"], str)
assert isinstance(embedded_images_table.to_pydict()["image"][0]["bytes"], bytes)
@pytest.mark.parametrize(
"table",
[
InMemoryTable(pa.table({"foo": range(10)})),
InMemoryTable(pa.concat_tables([pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})])),
InMemoryTable(pa.concat_tables([pa.table({"foo": [i]}) for i in range(10)])),
],
)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_table_iter(table, batch_size, drop_last_batch):
num_rows = len(table) if not drop_last_batch else len(table) // batch_size * batch_size
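# ceil(num_rows / batch_size): a trailing partial batch counts as one extra batch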
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(table_iter(table, batch_size=batch_size, drop_last_batch=drop_last_batch))
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for subtable in subtables)
else:
assert all(len(subtable) == batch_size for subtable in subtables[:-1])
assert len(subtables[-1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables(subtables)
assert table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
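# _is_extension_type must detect extension types even when they are nested inside structs or lists.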
@pytest.mark.parametrize(
"pa_type, expected",
[
(pa.int8(), False),
(pa.struct({"col1": pa.int8(), "col2": pa.int64()}), False),
(pa.struct({"col1": pa.list_(pa.int8()), "col2": Array2DExtensionType((1, 3), "int64")}), True),
(pa.list_(pa.int8()), False),
(pa.list_(Array2DExtensionType((1, 3), "int64"), 4), True),
],
)
def test_is_extension_type(pa_type, expected):
assert _is_extension_type(pa_type) == expected
| datasets-main | tests/test_table.py |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
data = bytes(FILE_CONTENT, "utf-8")
with zstd.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture
def tmpfs_file(tmpfs):
with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
f.write(FILE_CONTENT)
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
input_path = input_paths[compression_format]
cache_dir = tmp_path / "cache"
download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
extracted_path = cached_path(input_path, download_config=download_config)
with open(extracted_path) as f:
extracted_file_content = f.read()
with open(text_file) as f:
expected_file_content = f.read()
assert extracted_file_content == expected_file_content
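# The parent directory of an extracted file depends on whether the default cache dir and the default extracted dir are used.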
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
custom_cache_dir = "custom_cache"
custom_extracted_dir = "custom_extracted_dir"
custom_extracted_path = tmp_path / "custom_extracted_path"
if default_extracted:
expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
filename = xz_file
download_config = (
DownloadConfig(extract_compressed_file=True)
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
)
extracted_file_path = cached_path(filename, download_config=download_config)
assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
# absolute path
text_file = str(Path(text_file).resolve())
assert cached_path(text_file) == text_file
# relative path
text_file = str(Path(__file__).resolve().relative_to(Path(os.getcwd())))
assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
# absolute path
missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
with pytest.raises(FileNotFoundError):
cached_path(missing_file)
# relative path
missing_file = "./__missing_file__.txt"
with pytest.raises(FileNotFoundError):
cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
output_path = get_from_cache(f"tmp://{tmpfs_file}")
with open(output_path) as f:
output_file_content = f.read()
assert output_file_content == FILE_CONTENT
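# With HF_DATASETS_OFFLINE patched to True, every network helper must raise OfflineModeIsEnabled instead of hitting the network.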
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
with pytest.raises(OfflineModeIsEnabled):
cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.html"
with pytest.raises(OfflineModeIsEnabled):
http_get("https://huggingface.co", temp_file=filename)
with pytest.raises(OfflineModeIsEnabled):
http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.html"
with pytest.raises(OfflineModeIsEnabled):
ftp_get("ftp://huggingface.co", temp_file=filename)
with pytest.raises(OfflineModeIsEnabled):
ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.html"
with pytest.raises(OfflineModeIsEnabled):
fsspec_get("s3://huggingface.co", temp_file=filename)
with pytest.raises(OfflineModeIsEnabled):
fsspec_head("s3://huggingface.co")
| datasets-main | tests/test_file_utils.py |
| datasets-main | tests/__init__.py |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
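# Build named test parameters: one per (dataset, config) pair, or one per unique dataset name.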
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = "/".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
config.DATASET_INFO_FILENAME,
]
)
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name="20220301.frr",
hash=dataset_module.hash,
)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_path,
config_name="20220301.frr",
hash=dataset_module.hash,
)
ds = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(ds, IterableDatasetDict)
assert "train" in ds
assert isinstance(ds["train"], IterableDataset)
assert next(iter(ds["train"]))
| datasets-main | tests/test_hf_gcp.py |
import fnmatch
import os
import tempfile
import time
import unittest
from io import BytesIO
from pathlib import Path
from unittest.mock import patch
import numpy as np
import pytest
from huggingface_hub import DatasetCard, HfApi
from datasets import (
Audio,
ClassLabel,
Dataset,
DatasetDict,
Features,
Image,
Value,
load_dataset,
load_dataset_builder,
)
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.utils.file_utils import cached_path
from datasets.utils.hub import hf_hub_url
from tests.fixtures.hub import CI_HUB_ENDPOINT, CI_HUB_USER, CI_HUB_USER_TOKEN
from tests.utils import for_all_test_methods, require_pil, require_sndfile, xfail_if_500_502_http_error
pytestmark = pytest.mark.integration
@for_all_test_methods(xfail_if_500_502_http_error)
@pytest.mark.usefixtures("ci_hub_config", "ci_hfh_hf_hub_url")
class TestPushToHub:
_api = HfApi(endpoint=CI_HUB_ENDPOINT)
_token = CI_HUB_USER_TOKEN
def test_push_dataset_dict_to_hub_no_token(self, temporary_repo, set_ci_hub_access_token):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
local_ds = DatasetDict({"train": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
# Ensure that there is a single file on the repository that has the correct name
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset"))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files, [".gitattributes", "README.md", "data/train-00000-of-00001-*.parquet"]
)
)
def test_push_dataset_dict_to_hub_name_without_namespace(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
local_ds = DatasetDict({"train": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
# Ensure that there is a single file on the repository that has the correct name
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset"))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files, [".gitattributes", "README.md", "data/train-00000-of-00001-*.parquet"]
)
)
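    # Splits with different features cannot be pushed to the same repo: push_to_hub is expected to raise a ValueError.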
def test_push_dataset_dict_to_hub_datasets_with_different_features(self, cleanup_repo):
ds_train = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_test = Dataset.from_dict({"x": [True, False, True], "y": ["a", "b", "c"]})
local_ds = DatasetDict({"train": ds_train, "test": ds_test})
ds_name = f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}"
try:
with pytest.raises(ValueError):
local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token)
except AssertionError:
cleanup_repo(ds_name)
raise
def test_push_dataset_dict_to_hub_private(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
local_ds = DatasetDict({"train": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name, token=self._token, private=True)
hub_ds = load_dataset(ds_name, download_mode="force_redownload", token=self._token)
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
# Ensure that there is a single file on the repository that has the correct name
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files, [".gitattributes", "README.md", "data/train-00000-of-00001-*.parquet"]
)
)
def test_push_dataset_dict_to_hub(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
local_ds = DatasetDict({"train": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name, token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
# Ensure that there is a single file on the repository that has the correct name
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files, [".gitattributes", "README.md", "data/train-00000-of-00001-*.parquet"]
)
)
def test_push_dataset_dict_to_hub_multiple_files(self, temporary_repo):
ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))})
local_ds = DatasetDict({"train": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
with patch("datasets.config.MAX_SHARD_SIZE", "16KB"):
local_ds.push_to_hub(ds_name, token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
# Ensure that there are two files on the repository that have the correct name
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files,
[
".gitattributes",
"README.md",
"data/train-00000-of-00002-*.parquet",
"data/train-00001-of-00002-*.parquet",
],
)
)
def test_push_dataset_dict_to_hub_multiple_files_with_max_shard_size(self, temporary_repo):
ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))})
local_ds = DatasetDict({"train": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name, token=self._token, max_shard_size="16KB")
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
# Ensure that there are two files on the repository that have the correct name
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files,
[
".gitattributes",
"README.md",
"data/train-00000-of-00002-*.parquet",
"data/train-00001-of-00002-*.parquet",
],
)
)
def test_push_dataset_dict_to_hub_multiple_files_with_num_shards(self, temporary_repo):
ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))})
local_ds = DatasetDict({"train": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name, token=self._token, num_shards={"train": 2})
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
# Ensure that there are two files on the repository that have the correct name
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files,
[
".gitattributes",
"README.md",
"data/train-00000-of-00002-*.parquet",
"data/train-00001-of-00002-*.parquet",
],
)
)
def test_push_dataset_dict_to_hub_overwrite_files(self, temporary_repo):
ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))})
ds2 = Dataset.from_dict({"x": list(range(100)), "y": list(range(100))})
local_ds = DatasetDict({"train": ds, "random": ds2})
ds_name = f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}"
        # Push to hub two times, but the second time with a larger number of files.
# Verify that the new files contain the correct dataset.
with temporary_repo(ds_name) as ds_name:
local_ds.push_to_hub(ds_name, token=self._token)
with tempfile.TemporaryDirectory() as tmp:
# Add a file starting with "data" to ensure it doesn't get deleted.
path = Path(tmp) / "datafile.txt"
with open(path, "w") as f:
f.write("Bogus file")
self._api.upload_file(
path_or_fileobj=str(path),
path_in_repo="datafile.txt",
repo_id=ds_name,
repo_type="dataset",
token=self._token,
)
local_ds.push_to_hub(ds_name, token=self._token, max_shard_size=500 << 5)
# Ensure that there are two files on the repository that have the correct name
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files,
[
".gitattributes",
"README.md",
"data/random-00000-of-00001-*.parquet",
"data/train-00000-of-00002-*.parquet",
"data/train-00001-of-00002-*.parquet",
"datafile.txt",
],
)
)
self._api.delete_file("datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
del hub_ds
# Push to hub two times, but the second time with fewer files.
# Verify that the new files contain the correct dataset and that non-necessary files have been deleted.
with temporary_repo(ds_name) as ds_name:
local_ds.push_to_hub(ds_name, token=self._token, max_shard_size=500 << 5)
with tempfile.TemporaryDirectory() as tmp:
# Add a file starting with "data" to ensure it doesn't get deleted.
path = Path(tmp) / "datafile.txt"
with open(path, "w") as f:
f.write("Bogus file")
self._api.upload_file(
path_or_fileobj=str(path),
path_in_repo="datafile.txt",
repo_id=ds_name,
repo_type="dataset",
token=self._token,
)
local_ds.push_to_hub(ds_name, token=self._token)
            # Ensure that the repository now contains a single train shard, the random shard and the untouched datafile.txt
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files,
[
".gitattributes",
"README.md",
"data/random-00000-of-00001-*.parquet",
"data/train-00000-of-00001-*.parquet",
"datafile.txt",
],
)
)
            # Keeping "datafile.txt" would make load_dataset treat the repo as a text-based dataset
self._api.delete_file("datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
def test_push_dataset_to_hub(self, temporary_repo):
local_ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name, split="train", token=self._token)
local_ds_dict = {"train": local_ds}
hub_ds_dict = load_dataset(ds_name, download_mode="force_redownload")
assert list(local_ds_dict.keys()) == list(hub_ds_dict.keys())
for ds_split_name in local_ds_dict.keys():
local_ds = local_ds_dict[ds_split_name]
hub_ds = hub_ds_dict[ds_split_name]
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds.features.keys()) == list(hub_ds.features.keys())
assert local_ds.features == hub_ds.features
def test_push_dataset_to_hub_custom_features(self, temporary_repo):
features = Features({"x": Value("int64"), "y": ClassLabel(names=["neg", "pos"])})
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [0, 0, 1]}, features=features)
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds.push_to_hub(ds_name, token=self._token)
hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload")
assert ds.column_names == hub_ds.column_names
assert list(ds.features.keys()) == list(hub_ds.features.keys())
assert ds.features == hub_ds.features
assert ds[:] == hub_ds[:]
@require_sndfile
def test_push_dataset_to_hub_custom_features_audio(self, temporary_repo):
audio_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_audio_44100.wav")
data = {"x": [audio_path, None], "y": [0, -1]}
features = Features({"x": Audio(), "y": Value("int32")})
ds = Dataset.from_dict(data, features=features)
for embed_external_files in [True, False]:
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token)
hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload")
assert ds.column_names == hub_ds.column_names
assert list(ds.features.keys()) == list(hub_ds.features.keys())
assert ds.features == hub_ds.features
np.testing.assert_equal(ds[0]["x"]["array"], hub_ds[0]["x"]["array"])
assert ds[1] == hub_ds[1] # don't test hub_ds[0] since audio decoding might be slightly different
hub_ds = hub_ds.cast_column("x", Audio(decode=False))
elem = hub_ds[0]["x"]
path, bytes_ = elem["path"], elem["bytes"]
assert isinstance(path, str)
assert os.path.basename(path) == "test_audio_44100.wav"
assert bool(bytes_) == embed_external_files
@require_pil
def test_push_dataset_to_hub_custom_features_image(self, temporary_repo):
image_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_image_rgb.jpg")
data = {"x": [image_path, None], "y": [0, -1]}
features = Features({"x": Image(), "y": Value("int32")})
ds = Dataset.from_dict(data, features=features)
for embed_external_files in [True, False]:
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token)
hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload")
assert ds.column_names == hub_ds.column_names
assert list(ds.features.keys()) == list(hub_ds.features.keys())
assert ds.features == hub_ds.features
assert ds[:] == hub_ds[:]
hub_ds = hub_ds.cast_column("x", Image(decode=False))
elem = hub_ds[0]["x"]
path, bytes_ = elem["path"], elem["bytes"]
assert isinstance(path, str)
assert bool(bytes_) == embed_external_files
@require_pil
def test_push_dataset_to_hub_custom_features_image_list(self, temporary_repo):
image_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_image_rgb.jpg")
data = {"x": [[image_path], [image_path, image_path]], "y": [0, -1]}
features = Features({"x": [Image()], "y": Value("int32")})
ds = Dataset.from_dict(data, features=features)
for embed_external_files in [True, False]:
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token)
hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload")
assert ds.column_names == hub_ds.column_names
assert list(ds.features.keys()) == list(hub_ds.features.keys())
assert ds.features == hub_ds.features
assert ds[:] == hub_ds[:]
hub_ds = hub_ds.cast_column("x", [Image(decode=False)])
elem = hub_ds[0]["x"][0]
path, bytes_ = elem["path"], elem["bytes"]
assert isinstance(path, str)
assert bool(bytes_) == embed_external_files
def test_push_dataset_dict_to_hub_custom_features(self, temporary_repo):
features = Features({"x": Value("int64"), "y": ClassLabel(names=["neg", "pos"])})
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [0, 0, 1]}, features=features)
local_ds = DatasetDict({"test": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name, token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["test"].features.keys()) == list(hub_ds["test"].features.keys())
assert local_ds["test"].features == hub_ds["test"].features
def test_push_dataset_to_hub_custom_splits(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds.push_to_hub(ds_name, split="random", token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert ds.column_names == hub_ds["random"].column_names
assert list(ds.features.keys()) == list(hub_ds["random"].features.keys())
assert ds.features == hub_ds["random"].features
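    # A re-push only uploads the shards that are missing from the repo, so it makes fewer upload_file calls than the initial push.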
def test_push_dataset_to_hub_skip_identical_files(self, temporary_repo):
ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
with patch("datasets.arrow_dataset.HfApi.upload_file", side_effect=self._api.upload_file) as mock_hf_api:
# Initial push
ds.push_to_hub(ds_name, token=self._token, max_shard_size="1KB")
call_count_old = mock_hf_api.call_count
mock_hf_api.reset_mock()
# Remove a data file
files = self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)
data_files = [f for f in files if f.startswith("data/")]
assert len(data_files) > 1
self._api.delete_file(data_files[0], repo_id=ds_name, repo_type="dataset", token=self._token)
# "Resume" push - push missing files
ds.push_to_hub(ds_name, token=self._token, max_shard_size="1KB")
call_count_new = mock_hf_api.call_count
assert call_count_old > call_count_new
hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload")
assert ds.column_names == hub_ds.column_names
assert list(ds.features.keys()) == list(hub_ds.features.keys())
assert ds.features == hub_ds.features
def test_push_dataset_to_hub_multiple_splits_one_by_one(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds.push_to_hub(ds_name, split="train", token=self._token)
ds.push_to_hub(ds_name, split="test", token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert sorted(hub_ds) == ["test", "train"]
assert ds.column_names == hub_ds["train"].column_names
assert list(ds.features.keys()) == list(hub_ds["train"].features.keys())
assert ds.features == hub_ds["train"].features
def test_push_dataset_dict_to_hub_custom_splits(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
local_ds = DatasetDict({"random": ds})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name, token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["random"].features.keys()) == list(hub_ds["random"].features.keys())
assert local_ds["random"].features == hub_ds["random"].features
@unittest.skip("This test cannot pass until iterable datasets have push to hub")
def test_push_streaming_dataset_dict_to_hub(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
local_ds = DatasetDict({"train": ds})
with tempfile.TemporaryDirectory() as tmp:
local_ds.save_to_disk(tmp)
local_ds = load_dataset(tmp, streaming=True)
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
local_ds.push_to_hub(ds_name, token=self._token)
hub_ds = load_dataset(ds_name, download_mode="force_redownload")
assert local_ds.column_names == hub_ds.column_names
assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
assert local_ds["train"].features == hub_ds["train"].features
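    # Each config is pushed to its own directory: "data/" for the default config, the config name for the others.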
def test_push_multiple_dataset_configs_to_hub_load_dataset_builder(self, temporary_repo):
ds_default = Dataset.from_dict({"a": [0], "b": [1]})
ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds_default.push_to_hub(ds_name, token=self._token)
ds_config1.push_to_hub(ds_name, "config1", token=self._token)
ds_config2.push_to_hub(ds_name, "config2", token=self._token)
ds_builder_default = load_dataset_builder(ds_name, download_mode="force_redownload") # default config
assert len(ds_builder_default.BUILDER_CONFIGS) == 3
assert len(ds_builder_default.config.data_files["train"]) == 1
assert fnmatch.fnmatch(
ds_builder_default.config.data_files["train"][0],
"*/data/train-*",
)
ds_builder_config1 = load_dataset_builder(ds_name, "config1", download_mode="force_redownload")
assert len(ds_builder_config1.BUILDER_CONFIGS) == 3
assert len(ds_builder_config1.config.data_files["train"]) == 1
assert fnmatch.fnmatch(
ds_builder_config1.config.data_files["train"][0],
"*/config1/train-*",
)
ds_builder_config2 = load_dataset_builder(ds_name, "config2", download_mode="force_redownload")
assert len(ds_builder_config2.BUILDER_CONFIGS) == 3
assert len(ds_builder_config2.config.data_files["train"]) == 1
assert fnmatch.fnmatch(
ds_builder_config2.config.data_files["train"][0],
"*/config2/train-*",
)
with pytest.raises(ValueError): # no config 'config3'
load_dataset_builder(ds_name, "config3", download_mode="force_redownload")
def test_push_multiple_dataset_configs_to_hub_load_dataset(self, temporary_repo):
ds_default = Dataset.from_dict({"a": [0], "b": [1]})
ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds_default.push_to_hub(ds_name, token=self._token)
ds_config1.push_to_hub(ds_name, "config1", token=self._token)
ds_config2.push_to_hub(ds_name, "config2", token=self._token)
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset"))
expected_files = sorted(
[
".gitattributes",
"README.md",
"config1/train-00000-of-00001-*.parquet",
"config2/train-00000-of-00001-*.parquet",
"data/train-00000-of-00001-*.parquet",
]
)
assert all(fnmatch.fnmatch(file, expected_file) for file, expected_file in zip(files, expected_files))
hub_ds_default = load_dataset(ds_name, download_mode="force_redownload")
hub_ds_config1 = load_dataset(ds_name, "config1", download_mode="force_redownload")
hub_ds_config2 = load_dataset(ds_name, "config2", download_mode="force_redownload")
# only "train" split
assert len(hub_ds_default) == len(hub_ds_config1) == len(hub_ds_config2) == 1
assert ds_default.column_names == hub_ds_default["train"].column_names == ["a", "b"]
assert ds_config1.column_names == hub_ds_config1["train"].column_names == ["x", "y"]
assert ds_config2.column_names == hub_ds_config2["train"].column_names == ["foo", "bar"]
assert ds_default.features == hub_ds_default["train"].features
assert ds_config1.features == hub_ds_config1["train"].features
assert ds_config2.features == hub_ds_config2["train"].features
assert ds_default.num_rows == hub_ds_default["train"].num_rows == 1
assert ds_config1.num_rows == hub_ds_config1["train"].num_rows == 3
assert ds_config2.num_rows == hub_ds_config2["train"].num_rows == 2
with pytest.raises(ValueError): # no config 'config3'
load_dataset(ds_name, "config3", download_mode="force_redownload")
def test_push_multiple_dataset_configs_to_hub_readme_metadata_content(self, temporary_repo):
ds_default = Dataset.from_dict({"a": [0], "b": [2]})
ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds_default.push_to_hub(ds_name, token=self._token)
ds_config1.push_to_hub(ds_name, "config1", token=self._token)
ds_config2.push_to_hub(ds_name, "config2", token=self._token)
            # check that the config args were correctly pushed to README.md
ds_readme_path = cached_path(hf_hub_url(ds_name, "README.md"))
dataset_card_data = DatasetCard.load(ds_readme_path).data
assert METADATA_CONFIGS_FIELD in dataset_card_data
assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list)
assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == [
{
"config_name": "config1",
"data_files": [
{"split": "train", "path": "config1/train-*"},
],
},
{
"config_name": "config2",
"data_files": [
{"split": "train", "path": "config2/train-*"},
],
},
{
"config_name": "default",
"data_files": [
{"split": "train", "path": "data/train-*"},
],
},
]
def test_push_multiple_dataset_dict_configs_to_hub_load_dataset_builder(self, temporary_repo):
ds_default = Dataset.from_dict({"a": [0], "b": [1]})
ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
ds_default = DatasetDict({"random": ds_default})
ds_config1 = DatasetDict({"random": ds_config1})
ds_config2 = DatasetDict({"random": ds_config2})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds_default.push_to_hub(ds_name, token=self._token)
ds_config1.push_to_hub(ds_name, "config1", token=self._token)
ds_config2.push_to_hub(ds_name, "config2", token=self._token)
ds_builder_default = load_dataset_builder(ds_name, download_mode="force_redownload") # default config
assert len(ds_builder_default.BUILDER_CONFIGS) == 3
assert len(ds_builder_default.config.data_files["random"]) == 1
assert fnmatch.fnmatch(
ds_builder_default.config.data_files["random"][0],
"*/data/random-*",
)
ds_builder_config1 = load_dataset_builder(ds_name, "config1", download_mode="force_redownload")
assert len(ds_builder_config1.BUILDER_CONFIGS) == 3
assert len(ds_builder_config1.config.data_files["random"]) == 1
assert fnmatch.fnmatch(
ds_builder_config1.config.data_files["random"][0],
"*/config1/random-*",
)
ds_builder_config2 = load_dataset_builder(ds_name, "config2", download_mode="force_redownload")
assert len(ds_builder_config2.BUILDER_CONFIGS) == 3
assert len(ds_builder_config2.config.data_files["random"]) == 1
assert fnmatch.fnmatch(
ds_builder_config2.config.data_files["random"][0],
"*/config2/random-*",
)
with pytest.raises(ValueError): # no config named 'config3'
load_dataset_builder(ds_name, "config3", download_mode="force_redownload")
def test_push_multiple_dataset_dict_configs_to_hub_load_dataset(self, temporary_repo):
ds_default = Dataset.from_dict({"a": [0], "b": [1]})
ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
ds_default = DatasetDict({"train": ds_default, "random": ds_default})
ds_config1 = DatasetDict({"train": ds_config1, "random": ds_config1})
ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds_default.push_to_hub(ds_name, token=self._token)
ds_config1.push_to_hub(ds_name, "config1", token=self._token)
ds_config2.push_to_hub(ds_name, "config2", token=self._token)
files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset"))
expected_files = sorted(
[
".gitattributes",
"README.md",
"config1/random-00000-of-00001-*.parquet",
"config1/train-00000-of-00001-*.parquet",
"config2/random-00000-of-00001-*.parquet",
"config2/train-00000-of-00001-*.parquet",
"data/random-00000-of-00001-*.parquet",
"data/train-00000-of-00001-*.parquet",
]
)
assert all(fnmatch.fnmatch(file, expected_file) for file, expected_file in zip(files, expected_files))
hub_ds_default = load_dataset(ds_name, download_mode="force_redownload")
hub_ds_config1 = load_dataset(ds_name, "config1", download_mode="force_redownload")
hub_ds_config2 = load_dataset(ds_name, "config2", download_mode="force_redownload")
# two splits
expected_splits = ["random", "train"]
assert len(hub_ds_default) == len(hub_ds_config1) == len(hub_ds_config2) == 2
assert sorted(hub_ds_default) == sorted(hub_ds_config1) == sorted(hub_ds_config2) == expected_splits
for split in expected_splits:
assert ds_default[split].column_names == hub_ds_default[split].column_names == ["a", "b"]
assert ds_config1[split].column_names == hub_ds_config1[split].column_names == ["x", "y"]
assert ds_config2[split].column_names == hub_ds_config2[split].column_names == ["foo", "bar"]
assert ds_default[split].features == hub_ds_default[split].features
assert ds_config1[split].features == hub_ds_config1[split].features
                assert ds_config2[split].features == hub_ds_config2[split].features
assert ds_default[split].num_rows == hub_ds_default[split].num_rows == 1
assert ds_config1[split].num_rows == hub_ds_config1[split].num_rows == 3
assert ds_config2[split].num_rows == hub_ds_config2[split].num_rows == 2
with pytest.raises(ValueError): # no config 'config3'
load_dataset(ds_name, "config3", download_mode="force_redownload")
def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content(self, temporary_repo):
ds_default = Dataset.from_dict({"a": [0], "b": [1]})
ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
ds_default = DatasetDict({"train": ds_default, "random": ds_default})
ds_config1 = DatasetDict({"train": ds_config1, "random": ds_config1})
ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
ds_default.push_to_hub(ds_name, token=self._token)
ds_config1.push_to_hub(ds_name, "config1", token=self._token)
ds_config2.push_to_hub(ds_name, "config2", token=self._token)
            # check that the config args were correctly pushed to README.md
ds_readme_path = cached_path(hf_hub_url(ds_name, "README.md"))
dataset_card_data = DatasetCard.load(ds_readme_path).data
assert METADATA_CONFIGS_FIELD in dataset_card_data
assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list)
assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == [
{
"config_name": "config1",
"data_files": [
{"split": "train", "path": "config1/train-*"},
{"split": "random", "path": "config1/random-*"},
],
},
{
"config_name": "config2",
"data_files": [
{"split": "train", "path": "config2/train-*"},
{"split": "random", "path": "config2/random-*"},
],
},
{
"config_name": "default",
"data_files": [
{"split": "train", "path": "data/train-*"},
{"split": "random", "path": "data/random-*"},
],
},
]
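    # Simulate a repo created by the legacy push_to_hub (parquet files but no metadata configs in the README), then push a new config next to it.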
def test_push_dataset_to_hub_with_config_no_metadata_configs(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_another_config = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
parquet_buf = BytesIO()
ds.to_parquet(parquet_buf)
parquet_content = parquet_buf.getvalue()
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
self._api.create_repo(ds_name, token=self._token, repo_type="dataset")
# old push_to_hub was uploading the parquet files only - without metadata configs
self._api.upload_file(
path_or_fileobj=parquet_content,
path_in_repo="data/train-00000-of-00001.parquet",
repo_id=ds_name,
repo_type="dataset",
token=self._token,
)
ds_another_config.push_to_hub(ds_name, "another_config", token=self._token)
ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload")
assert len(ds_builder.config.data_files) == 1
assert len(ds_builder.config.data_files["train"]) == 1
assert fnmatch.fnmatch(ds_builder.config.data_files["train"][0], "*/data/train-00000-of-00001.parquet")
ds_another_config_builder = load_dataset_builder(
ds_name, "another_config", download_mode="force_redownload"
)
assert len(ds_another_config_builder.config.data_files) == 1
assert len(ds_another_config_builder.config.data_files["train"]) == 1
assert fnmatch.fnmatch(
ds_another_config_builder.config.data_files["train"][0],
"*/another_config/train-00000-of-00001-*.parquet",
)
def test_push_dataset_dict_to_hub_with_config_no_metadata_configs(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_another_config = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
parquet_buf = BytesIO()
ds.to_parquet(parquet_buf)
parquet_content = parquet_buf.getvalue()
local_ds_another_config = DatasetDict({"random": ds_another_config})
with temporary_repo(f"{CI_HUB_USER}/test-{int(time.time() * 10e3)}") as ds_name:
self._api.create_repo(ds_name, token=self._token, repo_type="dataset")
# old push_to_hub was uploading the parquet files only - without metadata configs
self._api.upload_file(
path_or_fileobj=parquet_content,
path_in_repo="data/random-00000-of-00001.parquet",
repo_id=ds_name,
repo_type="dataset",
token=self._token,
)
local_ds_another_config.push_to_hub(ds_name, "another_config", token=self._token)
ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload")
assert len(ds_builder.config.data_files) == 1
assert len(ds_builder.config.data_files["random"]) == 1
assert fnmatch.fnmatch(ds_builder.config.data_files["random"][0], "*/data/random-00000-of-00001.parquet")
ds_another_config_builder = load_dataset_builder(
ds_name, "another_config", download_mode="force_redownload"
)
assert len(ds_another_config_builder.config.data_files) == 1
assert len(ds_another_config_builder.config.data_files["random"]) == 1
assert fnmatch.fnmatch(
ds_another_config_builder.config.data_files["random"][0],
"*/another_config/random-00000-of-00001-*.parquet",
)
| datasets-main | tests/test_upstream_hub.py |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
def test_no_type(self):
arr = pa.array(TypedSequence([1, 2, 3]))
self.assertEqual(arr.type, pa.int64())
def test_array_type_forbidden(self):
with self.assertRaises(ValueError):
_ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())
def test_try_type_and_type_forbidden(self):
with self.assertRaises(ValueError):
_ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))
def test_compatible_type(self):
arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
self.assertEqual(arr.type, pa.int32())
def test_incompatible_type(self):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
_ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))
def test_try_compatible_type(self):
arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
self.assertEqual(arr.type, pa.int32())
def test_try_incompatible_type(self):
arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
self.assertEqual(arr.type, pa.string())
def test_compatible_extension_type(self):
arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))
def test_incompatible_extension_type(self):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
_ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))
def test_try_compatible_extension_type(self):
arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))
def test_try_incompatible_extension_type(self):
arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
self.assertEqual(arr.type, pa.string())
@require_pil
def test_exhaustive_cast(self):
import PIL.Image
pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
with patch(
"datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
) as mock_cast_to_python_objects:
_ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting", kwargs)
self.assertFalse(kwargs["optimize_list_casting"])
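# Helper: read the written Arrow stream back and check the number of record batches and the content.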
def _check_output(output, expected_num_chunks: int):
stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
f = pa.ipc.open_stream(stream)
pa_table: pa.Table = f.read_all()
assert len(pa_table.to_batches()) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
output = pa.BufferOutputStream()
schema = pa.schema(fields) if fields else None
with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
writer.write({"col_1": "foo", "col_2": 1})
writer.write({"col_1": "bar", "col_2": 2})
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
fields = {"col_1": pa.string(), "col_2": pa.int64()}
assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
output = pa.BufferOutputStream()
features = Features({"labels": ClassLabel(names=["neg", "pos"])})
with ArrowWriter(stream=output, features=features) as writer:
writer.write({"labels": 0})
writer.write({"labels": 1})
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
stream = pa.BufferReader(output.getvalue())
f = pa.ipc.open_stream(stream)
pa_table: pa.Table = f.read_all()
schema = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
output = pa.BufferOutputStream()
with ArrowWriter(
stream=output,
writer_batch_size=writer_batch_size,
hash_salt="split_name",
check_duplicates=True,
) as writer:
with pytest.raises(InvalidKeyError):
writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
output = pa.BufferOutputStream()
with ArrowWriter(
stream=output,
writer_batch_size=writer_batch_size,
hash_salt="split_name",
check_duplicates=True,
) as writer:
with pytest.raises(DuplicatedKeysError):
writer.write({"col_1": "foo", "col_2": 1}, key=10)
writer.write({"col_1": "bar", "col_2": 2}, key=10)
num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
output = pa.BufferOutputStream()
with ArrowWriter(
stream=output,
writer_batch_size=writer_batch_size,
hash_salt="split_name",
check_duplicates=True,
) as writer:
writer.write({"col_1": "foo", "col_2": 1}, key=1)
writer.write({"col_1": "bar", "col_2": 2}, key=2)
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
output = pa.BufferOutputStream()
schema = pa.schema(fields) if fields else None
with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
writer.write_batch({"col_1": [], "col_2": []})
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
fields = {"col_1": pa.string(), "col_2": pa.int64()}
assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
output = pa.BufferOutputStream()
schema = pa.schema(fields) if fields else None
with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
fields = {"col_1": pa.string(), "col_2": pa.int64()}
assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
output = pa.BufferOutputStream()
schema = pa.schema(fields) if fields else None
with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
fields = {"col_1": pa.string(), "col_2": pa.int64()}
assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
with tempfile.TemporaryDirectory() as tmp_dir:
fields = {"col_1": pa.string(), "col_2": pa.int64()}
output = os.path.join(tmp_dir, "test.arrow")
with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
_check_output(output, 1)
def get_base_dtype(arr_type):
if pa.types.is_list(arr_type):
return get_base_dtype(arr_type.value_type)
else:
return arr_type
def change_first_primitive_element_in_list(lst, value):
if isinstance(lst[0], list):
change_first_primitive_element_in_list(lst[0], value)
else:
lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
assert get_base_dtype(arr.type) == expected_dtype
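# OptimizedTypedSequence picks a smaller integer type for well-known tokenizer columns and falls back to int64 when a value overflows it.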
@pytest.mark.parametrize(
"col, expected_dtype",
[
("attention_mask", pa.int8()),
("special_tokens_mask", pa.int8()),
("token_type_ids", pa.int8()),
("input_ids", pa.int32()),
("other", pa.int64()),
],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
# in range
arr = pa.array(OptimizedTypedSequence(sequence, col=col))
assert get_base_dtype(arr.type) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
sequence = copy.deepcopy(sequence)
value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
change_first_primitive_element_in_list(sequence, value)
arr = pa.array(OptimizedTypedSequence(sequence, col=col))
assert get_base_dtype(arr.type) == pa.int64()
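# The writer must close its output stream even when an exception is raised inside the context manager.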
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
path = str(tmp_path / "dataset-train.arrow")
try:
with ArrowWriter(path=path) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
path = "mock://dataset-train.arrow"
with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
assert isinstance(writer._fs, type(mockfs))
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1})
writer.write({"col_1": "bar", "col_2": 2})
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(path)
def test_parquet_writer_write():
output = pa.BufferOutputStream()
with ParquetWriter(stream=output) as writer:
writer.write({"col_1": "foo", "col_2": 1})
writer.write({"col_1": "bar", "col_2": 2})
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
stream = pa.BufferReader(output.getvalue())
pa_table: pa.Table = pq.read_table(stream)
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
import PIL.Image
image_path = str(tmp_path / "test_image_rgb.jpg")
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
output = pa.BufferOutputStream()
with ParquetWriter(
stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
) as writer:
writer.write({"image": image_path})
writer.finalize()
stream = pa.BufferReader(output.getvalue())
pa_table: pa.Table = pq.read_table(stream)
out = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"], str)
with open(image_path, "rb") as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
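# ArrowWriter builds its schema with all fields nullable, even if the inferred schema marks them as non-nullable.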
def test_always_nullable():
non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
output = pa.BufferOutputStream()
with ArrowWriter(stream=output) as writer:
writer._build_writer(inferred_schema=non_nullable_schema)
assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| datasets-main | tests/test_arrow_writer.py |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
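# Each compression filesystem exposes a single file named after the archive minus its compression extension.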
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
def test_fs_overwrites():
protocol = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol, None, clobber=True)
with pytest.warns(UserWarning) as warning_info:
importlib.reload(datasets.filesystems)
assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
| datasets-main | tests/test_filesystem.py |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
with parallel_backend("spark"):
assert ParallelBackendConfig.backend_name == "spark"
lst = [1, 2, 3]
with pytest.raises(ValueError):
with parallel_backend("unsupported backend"):
map_nested(add_one, lst, num_proc=2)
with pytest.raises(ValueError):
with parallel_backend("unsupported backend"):
map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
s1 = [1, 2]
s2 = {"a": 1, "b": 2}
s3 = {"a": [1, 2], "b": [3, 4]}
s4 = {"a": {"1": 1}, "b": 2}
s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
expected_map_nested_s1 = [2, 3]
expected_map_nested_s2 = {"a": 2, "b": 3}
expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark"):
assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| datasets-main | tests/test_parallel.py |
import asyncio
import importlib.metadata
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
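# Usage sketch (illustrative only; "RUN_CUSTOM" is a made-up variable name, not one this
# test suite defines): any extra yes/no environment switch can be parsed the same way as
# the flags below, e.g.
#   _run_custom_tests = parse_flag_from_env("RUN_CUSTOM", default=False)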
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def require_faiss(test_case):
"""
Decorator marking a test that requires Faiss.
These tests are skipped when Faiss isn't installed.
"""
try:
import faiss # noqa
except ImportError:
test_case = unittest.skip("test requires faiss")(test_case)
return test_case
def require_regex(test_case):
"""
Decorator marking a test that requires regex.
These tests are skipped when Regex isn't installed.
"""
try:
import regex # noqa
except ImportError:
test_case = unittest.skip("test requires regex")(test_case)
return test_case
def require_elasticsearch(test_case):
"""
Decorator marking a test that requires ElasticSearch.
These tests are skipped when ElasticSearch isn't installed.
"""
try:
import elasticsearch # noqa
except ImportError:
test_case = unittest.skip("test requires elasticsearch")(test_case)
return test_case
def require_sqlalchemy(test_case):
"""
Decorator marking a test that requires SQLAlchemy.
These tests are skipped when SQLAlchemy isn't installed.
"""
try:
import sqlalchemy # noqa
except ImportError:
test_case = unittest.skip("test requires sqlalchemy")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not config.TORCH_AVAILABLE:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not config.TF_AVAILABLE:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_jax(test_case):
"""
Decorator marking a test that requires JAX.
These tests are skipped when JAX isn't installed.
"""
if not config.JAX_AVAILABLE:
test_case = unittest.skip("test requires JAX")(test_case)
return test_case
def require_pil(test_case):
"""
Decorator marking a test that requires Pillow.
These tests are skipped when Pillow isn't installed.
"""
if not config.PIL_AVAILABLE:
test_case = unittest.skip("test requires Pillow")(test_case)
return test_case
def require_transformers(test_case):
"""
Decorator marking a test that requires transformers.
These tests are skipped when transformers isn't installed.
"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(test_case)
else:
return test_case
def require_tiktoken(test_case):
"""
Decorator marking a test that requires tiktoken.
    These tests are skipped when tiktoken isn't installed.
"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken")(test_case)
else:
return test_case
def require_spacy(test_case):
"""
Decorator marking a test that requires spacy.
    These tests are skipped when spacy isn't installed.
"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy")(test_case)
else:
return test_case
def require_spacy_model(model):
"""
Decorator marking a test that requires a spacy model.
    These tests are skipped when spacy or the requested spacy model isn't installed.
"""
def _require_spacy_model(test_case):
try:
import spacy # noqa F401
spacy.load(model)
except ImportError:
return unittest.skip("test requires spacy")(test_case)
except OSError:
return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
else:
return test_case
return _require_spacy_model
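# Usage sketch (illustrative only; the model name and test method below are assumptions,
# not code from this repository): the decorator factory is meant to be stacked on a test
# method, e.g.
#   @require_spacy_model("en_core_web_sm")
#   def test_tokenize_with_spacy(self):
#       ...
# The test is skipped unless spacy is installed and `spacy.load("en_core_web_sm")` succeeds.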
def require_pyspark(test_case):
"""
Decorator marking a test that requires pyspark.
These tests are skipped when pyspark isn't installed.
"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark")(test_case)
else:
return test_case
def require_joblibspark(test_case):
"""
Decorator marking a test that requires joblibspark.
    These tests are skipped when joblibspark isn't installed.
"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark")(test_case)
else:
return test_case
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests or _run_slow_tests == 0:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def local(test_case):
"""
    Decorator marking a test as local.
Local tests are run by default. Set the RUN_LOCAL environment variable
to a falsy value to not run them.
"""
if not _run_local_tests or _run_local_tests == 0:
test_case = unittest.skip("test is local")(test_case)
return test_case
def packaged(test_case):
"""
    Decorator marking a test as packaged.
Packaged tests are run by default. Set the RUN_PACKAGED environment variable
to a falsy value to not run them.
"""
if not _run_packaged_tests or _run_packaged_tests == 0:
test_case = unittest.skip("test is packaged")(test_case)
return test_case
def remote(test_case):
"""
Decorator marking a test as one that relies on GitHub or the Hugging Face Hub.
    Remote tests are skipped by default. Set the RUN_REMOTE environment variable
    to a truthy value to run them.
"""
if not _run_remote_tests or _run_remote_tests == 0:
test_case = unittest.skip("test requires remote")(test_case)
return test_case
def for_all_test_methods(*decorators):
def decorate(cls):
for name, fn in cls.__dict__.items():
if callable(fn) and name.startswith("test"):
for decorator in decorators:
fn = decorator(fn)
setattr(cls, name, fn)
return cls
return decorate
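# Usage sketch (illustrative only; the class and decorator combination shown are an
# assumption about typical usage, not code from this repository): `for_all_test_methods`
# applies each given decorator to every `test_*` method of a class, e.g.
#   @for_all_test_methods(slow, require_faiss)
#   class FaissSearchIntegrationTest(TestCase):
#       def test_add_and_query_index(self):
#           ...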
class RequestWouldHangIndefinitelyError(Exception):
pass
class OfflineSimulationMode(Enum):
CONNECTION_FAILS = 0
CONNECTION_TIMES_OUT = 1
HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
"""
Simulate offline mode.
    There are three offline simulation modes:
CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call.
Connection errors are created by mocking socket.socket
CONNECTION_TIMES_OUT: the connection hangs until it times out.
The default timeout value is low (1e-16) to speed up the tests.
Timeout errors are created by mocking requests.request
HF_DATASETS_OFFLINE_SET_TO_1: the HF_DATASETS_OFFLINE environment variable is set to 1.
        This makes the http/ftp calls of the library instantly fail and raise an OfflineModeIsEnabled error.
"""
online_request = requests.Session().request
def timeout_request(session, method, url, **kwargs):
# Change the url to an invalid url so that the connection hangs
invalid_url = "https://10.255.255.1"
if kwargs.get("timeout") is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
)
kwargs["timeout"] = timeout
try:
return online_request(method, invalid_url, **kwargs)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
e.request.url = url
max_retry_error = e.args[0]
max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
e.args = (max_retry_error,)
raise
def raise_connection_error(session, prepared_request, **kwargs):
raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send", raise_connection_error):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request", timeout_request):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE", True):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
original_working_dir = str(Path().resolve())
with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
try:
os.chdir(tmp_dir)
yield
finally:
os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
import gc
gc.collect()
previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
import gc
gc.collect()
previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
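# Usage sketch (illustrative only; the dataset loading calls are assumptions): these two
# context managers let a test assert whether a block allocated Arrow memory, e.g.
#   with assert_arrow_memory_doesnt_increase():
#       dset = Dataset.from_file(arrow_path, in_memory=False)  # memory-mapped, no Arrow allocation
#   with assert_arrow_memory_increases():
#       dset = Dataset.from_file(arrow_path, in_memory=True)  # table fully loaded in Arrow memory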
def is_rng_equal(rng1, rng2):
    """Check that two numpy random generators are in the same state by comparing draws from deep copies."""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    """Decorator that turns HTTP 500/502 errors raised by the wrapped test into expected failures (xfail)."""
    import decorator
    from requests.exceptions import HTTPError
def _wrapper(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if str(err).startswith("500") or str(err).startswith("502"):
pytest.xfail(str(err))
raise err
return decorator.decorator(_wrapper, func)
# --- distributed testing functions --- #
# copied from transformers
# originally adapted from https://stackoverflow.com/a/59041913/9201239
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output.")
return result
def pytest_xdist_worker_id():
"""
    Return the numerical id of the current worker under `pytest-xdist`'s concurrent workers regime (`pytest -n N`),
    or 0 if `-n 1` or `pytest-xdist` isn't being used.
"""
worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
worker = re.sub(r"^gw", "", worker, 0, re.M)
return int(worker)
def get_torch_dist_unique_port():
"""
Returns a port number that can be fed to `torchrun`'s `--master_port` argument.
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same
port at once.
"""
port = 29500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
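# Illustrative helper (not used by any test in this file; the command layout and default
# process count are assumptions): shows how the two helpers above are typically combined
# to launch a distributed run through `execute_subprocess_async`.
def _distributed_cmd_sketch(script_path, nproc_per_node=2):
    return [
        "torchrun",
        f"--nproc_per_node={nproc_per_node}",
        f"--master_port={get_torch_dist_unique_port()}",
        script_path,
    ]
# e.g. execute_subprocess_async(_distributed_cmd_sketch("run_dist_test.py"), env=os.environ.copy())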
| datasets-main | tests/utils.py |
import importlib
import os
import tempfile
import types
from contextlib import nullcontext as does_not_raise
from multiprocessing import Process
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from multiprocess.pool import Pool
from datasets.arrow_dataset import Dataset
from datasets.arrow_reader import DatasetNotOnHfGcsError
from datasets.arrow_writer import ArrowWriter
from datasets.builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.download.download_manager import DownloadMode
from datasets.features import Features, Value
from datasets.info import DatasetInfo, PostProcessedInfo
from datasets.iterable_dataset import IterableDataset
from datasets.splits import Split, SplitDict, SplitGenerator, SplitInfo
from datasets.streaming import xjoin
from datasets.utils.file_utils import is_local_path
from datasets.utils.info_utils import VerificationMode
from datasets.utils.logging import INFO, get_logger
from .utils import (
assert_arrow_memory_doesnt_increase,
assert_arrow_memory_increases,
require_beam,
require_faiss,
set_current_working_directory_to_temp_dir,
)
class DummyBuilder(DatasetBuilder):
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _prepare_split(self, split_generator, **kwargs):
fname = f"{self.dataset_name}-{split_generator.name}.arrow"
with ArrowWriter(features=self.info.features, path=os.path.join(self._output_dir, fname)) as writer:
writer.write_batch({"text": ["foo"] * 100})
num_examples, num_bytes = writer.finalize()
split_generator.split_info.num_examples = num_examples
split_generator.split_info.num_bytes = num_bytes
class DummyGeneratorBasedBuilder(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _generate_examples(self):
for i in range(100):
yield i, {"text": "foo"}
class DummyArrowBasedBuilder(ArrowBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _generate_tables(self):
for i in range(10):
yield i, pa.table({"text": ["foo"] * 10})
class DummyBeamBasedBuilder(BeamBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _build_pcollection(self, pipeline):
import apache_beam as beam
def _process(item):
for i in range(10):
yield f"{i}_{item}", {"text": "foo"}
return pipeline | "Initialize" >> beam.Create(range(10)) | "Extract content" >> beam.FlatMap(_process)
class DummyGeneratorBasedBuilderWithIntegers(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _generate_examples(self):
for i in range(100):
yield i, {"id": i}
class DummyGeneratorBasedBuilderConfig(BuilderConfig):
def __init__(self, content="foo", times=2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.content = content
self.times = times
class DummyGeneratorBasedBuilderWithConfig(GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = DummyGeneratorBasedBuilderConfig
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _generate_examples(self):
for i in range(100):
yield i, {"text": self.config.content * self.config.times}
class DummyBuilderWithMultipleConfigs(DummyBuilder):
BUILDER_CONFIGS = [
DummyGeneratorBasedBuilderConfig(name="a"),
DummyGeneratorBasedBuilderConfig(name="b"),
]
class DummyBuilderWithDefaultConfig(DummyBuilderWithMultipleConfigs):
DEFAULT_CONFIG_NAME = "a"
class DummyBuilderWithDownload(DummyBuilder):
def __init__(self, *args, rel_path=None, abs_path=None, **kwargs):
super().__init__(*args, **kwargs)
self._rel_path = rel_path
self._abs_path = abs_path
def _split_generators(self, dl_manager):
if self._rel_path is not None:
assert os.path.exists(dl_manager.download(self._rel_path)), "dl_manager must support relative paths"
if self._abs_path is not None:
assert os.path.exists(dl_manager.download(self._abs_path)), "dl_manager must support absolute paths"
return [SplitGenerator(name=Split.TRAIN)]
class DummyBuilderWithManualDownload(DummyBuilderWithMultipleConfigs):
@property
def manual_download_instructions(self):
return "To use the dataset you have to download some stuff manually and pass the data path to data_dir"
def _split_generators(self, dl_manager):
if not os.path.exists(self.config.data_dir):
raise FileNotFoundError(f"data_dir {self.config.data_dir} doesn't exist.")
return [SplitGenerator(name=Split.TRAIN)]
class DummyArrowBasedBuilderWithShards(ArrowBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})]
def _generate_tables(self, filepaths):
idx = 0
for filepath in filepaths:
for i in range(10):
yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10})
idx += 1
class DummyGeneratorBasedBuilderWithShards(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})]
def _generate_examples(self, filepaths):
idx = 0
for filepath in filepaths:
for i in range(100):
yield idx, {"id": i, "filepath": filepath}
idx += 1
class DummyArrowBasedBuilderWithAmbiguousShards(ArrowBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(
name=Split.TRAIN,
gen_kwargs={
"filepaths": [f"data{i}.txt" for i in range(4)],
"dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)],
},
)
]
def _generate_tables(self, filepaths, dummy_kwarg_with_different_length):
idx = 0
for filepath in filepaths:
for i in range(10):
yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10})
idx += 1
class DummyGeneratorBasedBuilderWithAmbiguousShards(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(
name=Split.TRAIN,
gen_kwargs={
"filepaths": [f"data{i}.txt" for i in range(4)],
"dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)],
},
)
]
def _generate_examples(self, filepaths, dummy_kwarg_with_different_length):
idx = 0
for filepath in filepaths:
for i in range(100):
yield idx, {"id": i, "filepath": filepath}
idx += 1
def _run_concurrent_download_and_prepare(tmp_dir):
builder = DummyBuilder(cache_dir=tmp_dir)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS)
return builder
def check_streaming(builder):
    """Check that the builder's module was patched for streaming, i.e. that os.path.join was replaced by xjoin."""
    builders_module = importlib.import_module(builder.__module__)
    assert builders_module._patched_for_streaming
    assert builders_module.os.path.join is xjoin
class BuilderTest(TestCase):
def test_download_and_prepare(self):
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
def test_download_and_prepare_checksum_computation(self):
with tempfile.TemporaryDirectory() as tmp_dir:
builder_no_verification = DummyBuilder(cache_dir=tmp_dir)
builder_no_verification.download_and_prepare(
try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD
)
self.assertTrue(
all(v["checksum"] is not None for _, v in builder_no_verification.info.download_checksums.items())
)
builder_with_verification = DummyBuilder(cache_dir=tmp_dir)
builder_with_verification.download_and_prepare(
try_from_hf_gcs=False,
download_mode=DownloadMode.FORCE_REDOWNLOAD,
verification_mode=VerificationMode.ALL_CHECKS,
)
self.assertTrue(
all(v["checksum"] is None for _, v in builder_with_verification.info.download_checksums.items())
)
def test_concurrent_download_and_prepare(self):
with tempfile.TemporaryDirectory() as tmp_dir:
processes = 2
with Pool(processes=processes) as pool:
jobs = [
pool.apply_async(_run_concurrent_download_and_prepare, kwds={"tmp_dir": tmp_dir})
for _ in range(processes)
]
builders = [job.get() for job in jobs]
for builder in builders:
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")
)
)
def test_download_and_prepare_with_base_path(self):
with tempfile.TemporaryDirectory() as tmp_dir:
rel_path = "dummy1.data"
abs_path = os.path.join(tmp_dir, "dummy2.data")
# test relative path is missing
builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path)
with self.assertRaises(FileNotFoundError):
builder.download_and_prepare(
try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
)
# test absolute path is missing
builder = DummyBuilderWithDownload(cache_dir=tmp_dir, abs_path=abs_path)
with self.assertRaises(FileNotFoundError):
builder.download_and_prepare(
try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
)
# test that they are both properly loaded when they exist
            open(os.path.join(tmp_dir, rel_path), "w").close()
            open(abs_path, "w").close()
builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path, abs_path=abs_path)
builder.download_and_prepare(
try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
)
def test_as_dataset_with_post_process(self):
def _post_process(self, dataset, resources_paths):
def char_tokenize(example):
return {"tokens": list(example["text"])}
return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"])
def _post_processing_resources(self, split):
return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"}
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder.info.post_processed = PostProcessedInfo(
features=Features({"text": Value("string"), "tokens": [Value("string")]})
)
builder._post_process = types.MethodType(_post_process, builder)
builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
os.makedirs(builder.cache_dir)
builder.info.splits = SplitDict()
builder.info.splits.add(SplitInfo("train", num_examples=10))
builder.info.splits.add(SplitInfo("test", num_examples=10))
for split in builder.info.splits:
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 10})
writer.finalize()
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"tokenized_dataset-{split}.arrow"),
features=Features({"text": Value("string"), "tokens": [Value("string")]}),
) as writer:
writer.write_batch({"text": ["foo"] * 10, "tokens": [list("foo")] * 10})
writer.finalize()
dsets = builder.as_dataset()
self.assertIsInstance(dsets, DatasetDict)
self.assertListEqual(list(dsets.keys()), ["train", "test"])
self.assertEqual(len(dsets["train"]), 10)
self.assertEqual(len(dsets["test"]), 10)
self.assertDictEqual(
dsets["train"].features, Features({"text": Value("string"), "tokens": [Value("string")]})
)
self.assertDictEqual(
dsets["test"].features, Features({"text": Value("string"), "tokens": [Value("string")]})
)
self.assertListEqual(dsets["train"].column_names, ["text", "tokens"])
self.assertListEqual(dsets["test"].column_names, ["text", "tokens"])
del dsets
dset = builder.as_dataset("train")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train")
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
self.assertListEqual(dset.column_names, ["text", "tokens"])
self.assertGreater(builder.info.post_processing_size, 0)
self.assertGreater(
builder.info.post_processed.resources_checksums["train"]["tokenized_dataset"]["num_bytes"], 0
)
del dset
dset = builder.as_dataset("train+test[:30%]")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train+test[:30%]")
self.assertEqual(len(dset), 13)
self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
self.assertListEqual(dset.column_names, ["text", "tokens"])
del dset
dset = builder.as_dataset("all")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train+test")
self.assertEqual(len(dset), 20)
self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
self.assertListEqual(dset.column_names, ["text", "tokens"])
del dset
def _post_process(self, dataset, resources_paths):
return dataset.select([0, 1], keep_in_memory=True)
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._post_process = types.MethodType(_post_process, builder)
os.makedirs(builder.cache_dir)
builder.info.splits = SplitDict()
builder.info.splits.add(SplitInfo("train", num_examples=10))
builder.info.splits.add(SplitInfo("test", num_examples=10))
for split in builder.info.splits:
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 10})
writer.finalize()
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 2})
writer.finalize()
dsets = builder.as_dataset()
self.assertIsInstance(dsets, DatasetDict)
self.assertListEqual(list(dsets.keys()), ["train", "test"])
self.assertEqual(len(dsets["train"]), 2)
self.assertEqual(len(dsets["test"]), 2)
self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")}))
self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")}))
self.assertListEqual(dsets["train"].column_names, ["text"])
self.assertListEqual(dsets["test"].column_names, ["text"])
del dsets
dset = builder.as_dataset("train")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train")
self.assertEqual(len(dset), 2)
self.assertDictEqual(dset.features, Features({"text": Value("string")}))
self.assertListEqual(dset.column_names, ["text"])
del dset
dset = builder.as_dataset("train+test[:30%]")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train+test[:30%]")
self.assertEqual(len(dset), 2)
self.assertDictEqual(dset.features, Features({"text": Value("string")}))
self.assertListEqual(dset.column_names, ["text"])
del dset
@require_faiss
def test_as_dataset_with_post_process_with_index(self):
def _post_process(self, dataset, resources_paths):
if os.path.exists(resources_paths["index"]):
dataset.load_faiss_index("my_index", resources_paths["index"])
return dataset
else:
dataset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index"
)
dataset.save_faiss_index("my_index", resources_paths["index"])
return dataset
def _post_processing_resources(self, split):
return {"index": f"Flat-{split}.faiss"}
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._post_process = types.MethodType(_post_process, builder)
builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
os.makedirs(builder.cache_dir)
builder.info.splits = SplitDict()
builder.info.splits.add(SplitInfo("train", num_examples=10))
builder.info.splits.add(SplitInfo("test", num_examples=10))
for split in builder.info.splits:
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 10})
writer.finalize()
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 2})
writer.finalize()
dsets = builder.as_dataset()
self.assertIsInstance(dsets, DatasetDict)
self.assertListEqual(list(dsets.keys()), ["train", "test"])
self.assertEqual(len(dsets["train"]), 10)
self.assertEqual(len(dsets["test"]), 10)
self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")}))
self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")}))
self.assertListEqual(dsets["train"].column_names, ["text"])
self.assertListEqual(dsets["test"].column_names, ["text"])
self.assertListEqual(dsets["train"].list_indexes(), ["my_index"])
self.assertListEqual(dsets["test"].list_indexes(), ["my_index"])
self.assertGreater(builder.info.post_processing_size, 0)
self.assertGreater(builder.info.post_processed.resources_checksums["train"]["index"]["num_bytes"], 0)
del dsets
dset = builder.as_dataset("train")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train")
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"text": Value("string")}))
self.assertListEqual(dset.column_names, ["text"])
self.assertListEqual(dset.list_indexes(), ["my_index"])
del dset
dset = builder.as_dataset("train+test[:30%]")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train+test[:30%]")
self.assertEqual(len(dset), 13)
self.assertDictEqual(dset.features, Features({"text": Value("string")}))
self.assertListEqual(dset.column_names, ["text"])
self.assertListEqual(dset.list_indexes(), ["my_index"])
del dset
def test_download_and_prepare_with_post_process(self):
def _post_process(self, dataset, resources_paths):
def char_tokenize(example):
return {"tokens": list(example["text"])}
return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"])
def _post_processing_resources(self, split):
return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"}
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder.info.post_processed = PostProcessedInfo(
features=Features({"text": Value("string"), "tokens": [Value("string")]})
)
builder._post_process = types.MethodType(_post_process, builder)
builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertDictEqual(
builder.info.post_processed.features,
Features({"text": Value("string"), "tokens": [Value("string")]}),
)
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
def _post_process(self, dataset, resources_paths):
return dataset.select([0, 1], keep_in_memory=True)
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._post_process = types.MethodType(_post_process, builder)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertIsNone(builder.info.post_processed)
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
def _post_process(self, dataset, resources_paths):
if os.path.exists(resources_paths["index"]):
dataset.load_faiss_index("my_index", resources_paths["index"])
return dataset
else:
dataset = dataset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index"
)
dataset.save_faiss_index("my_index", resources_paths["index"])
return dataset
def _post_processing_resources(self, split):
return {"index": f"Flat-{split}.faiss"}
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._post_process = types.MethodType(_post_process, builder)
builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertIsNone(builder.info.post_processed)
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
def test_error_download_and_prepare(self):
def _prepare_split(self, split_generator, **kwargs):
raise ValueError()
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._prepare_split = types.MethodType(_prepare_split, builder)
self.assertRaises(
ValueError,
builder.download_and_prepare,
try_from_hf_gcs=False,
download_mode=DownloadMode.FORCE_REDOWNLOAD,
)
self.assertRaises(FileNotFoundError, builder.as_dataset)
def test_generator_based_download_and_prepare(self):
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
# Test that duplicated keys are ignored if verification_mode is "no_checks"
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
with patch("datasets.builder.ArrowWriter", side_effect=ArrowWriter) as mock_arrow_writer:
builder.download_and_prepare(
download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS
)
mock_arrow_writer.assert_called_once()
args, kwargs = mock_arrow_writer.call_args_list[0]
self.assertFalse(kwargs["check_duplicates"])
mock_arrow_writer.reset_mock()
builder.download_and_prepare(
download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.BASIC_CHECKS
)
mock_arrow_writer.assert_called_once()
args, kwargs = mock_arrow_writer.call_args_list[0]
self.assertTrue(kwargs["check_duplicates"])
def test_cache_dir_no_args(self):
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_dir=None, data_files=None)
relative_cache_dir_parts = Path(builder._relative_data_dir()).parts
self.assertTupleEqual(relative_cache_dir_parts, (builder.dataset_name, "default", "0.0.0"))
def test_cache_dir_for_data_files(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dummy_data1 = os.path.join(tmp_dir, "dummy_data1.txt")
with open(dummy_data1, "w", encoding="utf-8") as f:
f.writelines("foo bar")
dummy_data2 = os.path.join(tmp_dir, "dummy_data2.txt")
with open(dummy_data2, "w", encoding="utf-8") as f:
f.writelines("foo bar\n")
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1])
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": dummy_data1})
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={Split.TRAIN: dummy_data1})
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": [dummy_data1]})
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"test": dummy_data1})
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data2)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2])
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2, dummy_data1])
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2}
)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2}
)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": [dummy_data1], "test": dummy_data2}
)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": dummy_data1, "validation": dummy_data2}
)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir,
data_files={"train": [dummy_data1, dummy_data2], "test": dummy_data2},
)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_cache_dir_for_features(self):
with tempfile.TemporaryDirectory() as tmp_dir:
f1 = Features({"id": Value("int8")})
f2 = Features({"id": Value("int32")})
builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1)
other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f2)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_cache_dir_for_config_kwargs(self):
with tempfile.TemporaryDirectory() as tmp_dir:
# create config on the fly
builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo", times=2)
other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, times=2, content="foo")
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
self.assertIn("content=foo", builder.cache_dir)
self.assertIn("times=2", builder.cache_dir)
other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="bar", times=2)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo")
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
with tempfile.TemporaryDirectory() as tmp_dir:
# overwrite an existing config
builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo", times=2)
other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", times=2, content="foo")
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
self.assertIn("content=foo", builder.cache_dir)
self.assertIn("times=2", builder.cache_dir)
other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="bar", times=2)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo")
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_config_names(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(ValueError) as error_context:
DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, data_files=None, data_dir=None)
self.assertIn("Please pick one among the available configs", str(error_context.exception))
builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a")
self.assertEqual(builder.config.name, "a")
builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="b")
self.assertEqual(builder.config.name, "b")
with self.assertRaises(ValueError):
DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir)
builder = DummyBuilderWithDefaultConfig(cache_dir=tmp_dir)
self.assertEqual(builder.config.name, "a")
def test_cache_dir_for_data_dir(self):
with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir:
builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir)
other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=tmp_dir)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_arrow_based_download_and_prepare(tmp_path):
builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare()
assert os.path.exists(
os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
    assert builder.info.features == Features({"text": Value("string")})
assert builder.info.splits["train"].num_examples == 100
assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
@require_beam
def test_beam_based_download_and_prepare(tmp_path):
builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner")
builder.download_and_prepare()
assert os.path.exists(
os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
    assert builder.info.features == Features({"text": Value("string")})
assert builder.info.splits["train"].num_examples == 100
assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
@require_beam
def test_beam_based_as_dataset(tmp_path):
builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner")
builder.download_and_prepare()
dataset = builder.as_dataset()
assert dataset
assert isinstance(dataset["train"], Dataset)
assert len(dataset["train"]) > 0
@pytest.mark.parametrize(
"split, expected_dataset_class, expected_dataset_length",
[
(None, DatasetDict, 10),
("train", Dataset, 10),
("train+test[:30%]", Dataset, 13),
],
)
@pytest.mark.parametrize("in_memory", [False, True])
def test_builder_as_dataset(split, expected_dataset_class, expected_dataset_length, in_memory, tmp_path):
cache_dir = str(tmp_path)
builder = DummyBuilder(cache_dir=cache_dir)
os.makedirs(builder.cache_dir)
builder.info.splits = SplitDict()
builder.info.splits.add(SplitInfo("train", num_examples=10))
builder.info.splits.add(SplitInfo("test", num_examples=10))
for info_split in builder.info.splits:
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{info_split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 10})
writer.finalize()
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
dataset = builder.as_dataset(split=split, in_memory=in_memory)
assert isinstance(dataset, expected_dataset_class)
if isinstance(dataset, DatasetDict):
assert list(dataset.keys()) == ["train", "test"]
datasets = dataset.values()
expected_splits = ["train", "test"]
elif isinstance(dataset, Dataset):
datasets = [dataset]
expected_splits = [split]
for dataset, expected_split in zip(datasets, expected_splits):
assert dataset.split == expected_split
assert len(dataset) == expected_dataset_length
assert dataset.features == Features({"text": Value("string")})
        assert dataset.column_names == ["text"]
@pytest.mark.parametrize("in_memory", [False, True])
def test_generator_based_builder_as_dataset(in_memory, tmp_path):
cache_dir = tmp_path / "data"
cache_dir.mkdir()
cache_dir = str(cache_dir)
builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
dataset = builder.as_dataset("train", in_memory=in_memory)
assert dataset.data.to_pydict() == {"text": ["foo"] * 100}
@pytest.mark.parametrize(
"writer_batch_size, default_writer_batch_size, expected_chunks", [(None, None, 1), (None, 5, 20), (10, None, 10)]
)
def test_custom_writer_batch_size(tmp_path, writer_batch_size, default_writer_batch_size, expected_chunks):
cache_dir = str(tmp_path)
if default_writer_batch_size:
DummyGeneratorBasedBuilder.DEFAULT_WRITER_BATCH_SIZE = default_writer_batch_size
builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir, writer_batch_size=writer_batch_size)
assert builder._writer_batch_size == (writer_batch_size or default_writer_batch_size)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
dataset = builder.as_dataset("train")
assert len(dataset.data[0].chunks) == expected_chunks
def test_builder_as_streaming_dataset(tmp_path):
dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path))
check_streaming(dummy_builder)
dsets = dummy_builder.as_streaming_dataset()
assert isinstance(dsets, IterableDatasetDict)
assert isinstance(dsets["train"], IterableDataset)
assert len(list(dsets["train"])) == 100
dset = dummy_builder.as_streaming_dataset(split="train")
assert isinstance(dset, IterableDataset)
assert len(list(dset)) == 100
@require_beam
def test_beam_based_builder_as_streaming_dataset(tmp_path):
builder = DummyBeamBasedBuilder(cache_dir=tmp_path)
check_streaming(builder)
with pytest.raises(DatasetNotOnHfGcsError):
builder.as_streaming_dataset()
def _run_test_builder_streaming_works_in_subprocesses(builder):
check_streaming(builder)
dset = builder.as_streaming_dataset(split="train")
assert isinstance(dset, IterableDataset)
assert len(list(dset)) == 100
def test_builder_streaming_works_in_subprocess(tmp_path):
dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path))
p = Process(target=_run_test_builder_streaming_works_in_subprocesses, args=(dummy_builder,))
p.start()
p.join()
class DummyBuilderWithVersion(GeneratorBasedBuilder):
VERSION = "2.0.0"
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
pass
def _generate_examples(self):
pass
class DummyBuilderWithBuilderConfigs(GeneratorBasedBuilder):
BUILDER_CONFIGS = [BuilderConfig(name="custom", version="2.0.0")]
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
pass
def _generate_examples(self):
pass
class CustomBuilderConfig(BuilderConfig):
def __init__(self, date=None, language=None, version="2.0.0", **kwargs):
name = f"{date}.{language}"
super().__init__(name=name, version=version, **kwargs)
self.date = date
self.language = language
class DummyBuilderWithCustomBuilderConfigs(GeneratorBasedBuilder):
BUILDER_CONFIGS = [CustomBuilderConfig(date="20220501", language="en")]
BUILDER_CONFIG_CLASS = CustomBuilderConfig
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
pass
def _generate_examples(self):
pass
@pytest.mark.parametrize(
"builder_class, kwargs",
[
(DummyBuilderWithVersion, {}),
(DummyBuilderWithBuilderConfigs, {"config_name": "custom"}),
(DummyBuilderWithCustomBuilderConfigs, {"config_name": "20220501.en"}),
(DummyBuilderWithCustomBuilderConfigs, {"date": "20220501", "language": "ca"}),
],
)
def test_builder_config_version(builder_class, kwargs, tmp_path):
cache_dir = str(tmp_path)
builder = builder_class(cache_dir=cache_dir, **kwargs)
assert builder.config.version == "2.0.0"
def test_builder_download_and_prepare_with_absolute_output_dir(tmp_path):
builder = DummyGeneratorBasedBuilder()
output_dir = str(tmp_path)
builder.download_and_prepare(output_dir)
assert builder._output_dir.startswith(tmp_path.resolve().as_posix())
assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow"))
assert not os.path.exists(os.path.join(output_dir + ".incomplete"))
def test_builder_download_and_prepare_with_relative_output_dir():
with set_current_working_directory_to_temp_dir():
builder = DummyGeneratorBasedBuilder()
output_dir = "test-out"
builder.download_and_prepare(output_dir)
assert Path(builder._output_dir).resolve().as_posix().startswith(Path(output_dir).resolve().as_posix())
assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow"))
assert not os.path.exists(os.path.join(output_dir + ".incomplete"))
def test_builder_with_filesystem_download_and_prepare(tmp_path, mockfs):
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options)
assert builder._output_dir.startswith("mock://my_dataset")
assert is_local_path(builder._cache_downloaded_dir)
assert isinstance(builder._fs, type(mockfs))
assert builder._fs.storage_options == mockfs.storage_options
assert mockfs.exists("my_dataset/dataset_info.json")
assert mockfs.exists(f"my_dataset/{builder.dataset_name}-train.arrow")
assert not mockfs.exists("my_dataset.incomplete")
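# Editor's note: hedged, editor-added usage sketch (not an upstream test). The fsspec-based
# test above mirrors how users can prepare a dataset directly onto a remote filesystem;
# the dataset name, bucket and credentials below are placeholders, and `s3fs` must be
# installed for the "s3://" protocol to resolve.
def _example_download_and_prepare_to_s3():
    from datasets import load_dataset_builder

    storage_options = {"key": "<aws_access_key_id>", "secret": "<aws_secret_access_key>"}
    builder = load_dataset_builder("imdb")
    builder.download_and_prepare("s3://my-bucket/imdb", storage_options=storage_options, file_format="parquet")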
def test_builder_with_filesystem_download_and_prepare_reload(tmp_path, mockfs, caplog):
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
mockfs.makedirs("my_dataset")
DatasetInfo().write_to_directory("mock://my_dataset", storage_options=mockfs.storage_options)
mockfs.touch(f"my_dataset/{builder.dataset_name}-train.arrow")
caplog.clear()
with caplog.at_level(INFO, logger=get_logger().name):
builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options)
assert "Found cached dataset" in caplog.text
def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path):
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare(file_format="parquet")
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
)
assert os.path.exists(parquet_path)
assert pq.ParquetFile(parquet_path) is not None
def test_generator_based_builder_download_and_prepare_sharded(tmp_path):
writer_batch_size = 25
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size)
with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard
builder.download_and_prepare(file_format="parquet")
expected_num_shards = 100 // writer_batch_size
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
)
assert os.path.exists(parquet_path)
parquet_files = [
pq.ParquetFile(parquet_path)
for parquet_path in Path(tmp_path).rglob(
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
)
]
assert len(parquet_files) == expected_num_shards
assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
def test_generator_based_builder_download_and_prepare_with_max_shard_size(tmp_path):
writer_batch_size = 25
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size)
builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one batch per shard
expected_num_shards = 100 // writer_batch_size
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
)
assert os.path.exists(parquet_path)
parquet_files = [
pq.ParquetFile(parquet_path)
for parquet_path in Path(tmp_path).rglob(
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
)
]
assert len(parquet_files) == expected_num_shards
assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
def test_generator_based_builder_download_and_prepare_with_num_proc(tmp_path):
builder = DummyGeneratorBasedBuilderWithShards(cache_dir=tmp_path)
builder.download_and_prepare(num_proc=2)
expected_num_shards = 2
assert builder.info.splits["train"].num_examples == 400
assert builder.info.splits["train"].shard_lengths == [200, 200]
arrow_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow",
)
assert os.path.exists(arrow_path)
ds = builder.as_dataset("train")
assert len(ds) == 400
assert ds.to_dict() == {
"id": [i for _ in range(4) for i in range(100)],
"filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)],
}
@pytest.mark.parametrize(
"num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))]
)
def test_generator_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path):
builder = DummyGeneratorBasedBuilderWithAmbiguousShards(cache_dir=tmp_path)
with expectation:
builder.download_and_prepare(num_proc=num_proc)
def test_arrow_based_builder_download_and_prepare_as_parquet(tmp_path):
builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare(file_format="parquet")
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
)
assert os.path.exists(parquet_path)
assert pq.ParquetFile(parquet_path) is not None
def test_arrow_based_builder_download_and_prepare_sharded(tmp_path):
builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard
builder.download_and_prepare(file_format="parquet")
expected_num_shards = 10
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
)
assert os.path.exists(parquet_path)
parquet_files = [
pq.ParquetFile(parquet_path)
for parquet_path in Path(tmp_path).rglob(
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
)
]
assert len(parquet_files) == expected_num_shards
assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
def test_arrow_based_builder_download_and_prepare_with_max_shard_size(tmp_path):
builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one table per shard
expected_num_shards = 10
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
)
assert os.path.exists(parquet_path)
parquet_files = [
pq.ParquetFile(parquet_path)
for parquet_path in Path(tmp_path).rglob(
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
)
]
assert len(parquet_files) == expected_num_shards
assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
def test_arrow_based_builder_download_and_prepare_with_num_proc(tmp_path):
builder = DummyArrowBasedBuilderWithShards(cache_dir=tmp_path)
builder.download_and_prepare(num_proc=2)
expected_num_shards = 2
assert builder.info.splits["train"].num_examples == 400
assert builder.info.splits["train"].shard_lengths == [200, 200]
arrow_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow",
)
assert os.path.exists(arrow_path)
ds = builder.as_dataset("train")
assert len(ds) == 400
assert ds.to_dict() == {
"id": [i for _ in range(4) for i in range(100)],
"filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)],
}
@pytest.mark.parametrize(
"num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))]
)
def test_arrow_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path):
builder = DummyArrowBasedBuilderWithAmbiguousShards(cache_dir=tmp_path)
with expectation:
builder.download_and_prepare(num_proc=num_proc)
@require_beam
def test_beam_based_builder_download_and_prepare_as_parquet(tmp_path):
builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner")
builder.download_and_prepare(file_format="parquet")
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
)
assert os.path.exists(parquet_path)
assert pq.ParquetFile(parquet_path) is not None
| datasets-main | tests/test_builder.py |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
status_code = 200
headers = {"Content-Length": "100"}
cookies = {}
def iter_content(self, **kwargs):
return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
import requests
monkeypatch.setattr(requests, "request", mock_request)
url = URL
if issubclass(urls_type, str):
urls = url
elif issubclass(urls_type, list):
urls = [url]
elif issubclass(urls_type, dict):
urls = {"train": url}
dataset_name = "dummy"
cache_subdir = "downloads"
cache_dir_root = tmp_path
download_config = DownloadConfig(
cache_dir=os.path.join(cache_dir_root, cache_subdir),
use_etag=False,
)
dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
downloaded_paths = dl_manager.download(urls)
input_urls = urls
    for downloaded_paths in [downloaded_paths]:  # single-iteration loop: the branches below rebind str/list/dict results to flat lists
if isinstance(urls, str):
downloaded_paths = [downloaded_paths]
input_urls = [urls]
elif isinstance(urls, dict):
assert "train" in downloaded_paths.keys()
downloaded_paths = downloaded_paths.values()
input_urls = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(downloaded_paths, input_urls):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
downloaded_path = Path(downloaded_path)
parts = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
content = downloaded_path.read_text()
assert content == CONTENT
metadata_downloaded_path = downloaded_path.with_suffix(".json")
assert metadata_downloaded_path.exists()
metadata_content = json.loads(metadata_downloaded_path.read_text())
assert metadata_content == {"url": URL, "etag": None}
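# Editor's note: hedged, editor-added sketch (not an upstream test) of how the download
# manager exercised above is typically used inside a dataset script's `_split_generators`;
# the URLs and split layout are hypothetical placeholders.
def _example_split_generators(dl_manager: DownloadManager):
    import datasets

    urls = {"train": "https://example.com/train.jsonl", "test": "https://example.com/test.jsonl"}
    downloaded = dl_manager.download_and_extract(urls)
    return [
        datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded["train"]}),
        datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded["test"]}),
    ]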
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
filename = str(xz_file)
if issubclass(paths_type, str):
paths = filename
elif issubclass(paths_type, list):
paths = [filename]
elif issubclass(paths_type, dict):
paths = {"train": filename}
dataset_name = "dummy"
cache_dir = xz_file.parent
extracted_subdir = "extracted"
download_config = DownloadConfig(
cache_dir=cache_dir,
use_etag=False,
)
dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
extracted_paths = dl_manager.extract(paths)
input_paths = paths
    for extracted_paths in [extracted_paths]:  # single-iteration loop: the branches below rebind str/list/dict results to flat lists
if isinstance(paths, str):
extracted_paths = [extracted_paths]
input_paths = [paths]
elif isinstance(paths, dict):
assert "train" in extracted_paths.keys()
extracted_paths = extracted_paths.values()
input_paths = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(extracted_paths, input_paths):
assert extracted_path == dl_manager.extracted_paths[input_path]
extracted_path = Path(extracted_path)
parts = extracted_path.parts
assert parts[-1] == hash_url_to_filename(input_path, etag=None)
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
extracted_file_content = extracted_path.read_text()
expected_file_content = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
assert path.endswith(".jsonl")
for num_items, line in enumerate(file, start=1):
item = json.loads(line.decode("utf-8"))
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
archive_jsonl_path = request.getfixturevalue(archive_jsonl)
dl_manager = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
_test_jsonl(path, file)
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
dl_manager = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
_test_jsonl(subpath, subfile)
assert num_tar == 1
assert num_jsonl == 2
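# Editor's note: hedged, editor-added sketch (not an upstream test) of the streaming-friendly
# pattern that `iter_archive` enables in dataset scripts: iterate over (path, file) pairs
# instead of extracting the archive to disk. The archive path is a placeholder.
def _example_generate_examples_from_archive(dl_manager: DownloadManager, archive_path: str):
    key = 0
    for path, file in dl_manager.iter_archive(archive_path):
        if path.endswith(".jsonl"):
            for line in file:
                yield key, json.loads(line.decode("utf-8"))
                key += 1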
def test_iter_files(data_dir_with_hidden_files):
dl_manager = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| datasets-main | tests/test_download_manager.py |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data):
info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token)
assert list(info.splits.keys()) == ["train"]
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("acronym_identification", ["default"]),
("squad", ["plain_text"]),
("hf-internal-testing/dataset_with_script", ["default"]),
("dalle-mini/wit", ["default"]),
("hf-internal-testing/librispeech_asr_dummy", ["clean", "other"]),
("hf-internal-testing/audiofolder_no_configs_in_metadata", ["default"]),
("hf-internal-testing/audiofolder_single_config_in_metadata", ["custom"]),
("hf-internal-testing/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert config_names == expected
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["default"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
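# Editor's note: hedged, editor-added usage sketch (not an upstream test) showing how the
# inspection helpers tested in this module are typically combined before `load_dataset`;
# "squad" is only an example and the call hits the Hugging Face Hub.
def _example_pick_config_and_split(path="squad"):
    configs = get_dataset_config_names(path)
    splits = get_dataset_split_names(path, config_name=configs[0])
    return configs[0], splits[0]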
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
| datasets-main | tests/test_inspect.py |
from copy import deepcopy
from unittest.case import TestCase
import pytest
from datasets.arrow_dataset import Dataset
from datasets.features import Audio, ClassLabel, Features, Image, Sequence, Value
from datasets.info import DatasetInfo
from datasets.tasks import (
AudioClassification,
AutomaticSpeechRecognition,
ImageClassification,
LanguageModeling,
QuestionAnsweringExtractive,
Summarization,
TextClassification,
task_template_from_dict,
)
from datasets.utils.py_utils import asdict
SAMPLE_QUESTION_ANSWERING_EXTRACTIVE = {
"id": "5733be284776f41900661182",
"title": "University_of_Notre_Dame",
"context": 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.',
"question": "To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?",
"answers": {"text": ["Saint Bernadette Soubirous"], "answer_start": [515]},
}
@pytest.mark.parametrize(
"task_cls",
[
AudioClassification,
AutomaticSpeechRecognition,
ImageClassification,
LanguageModeling,
QuestionAnsweringExtractive,
Summarization,
TextClassification,
],
)
def test_reload_task_from_dict(task_cls):
task = task_cls()
task_dict = asdict(task)
reloaded = task_template_from_dict(task_dict)
assert task == reloaded
class TestLanguageModeling:
def test_column_mapping(self):
task = LanguageModeling(text_column="input_text")
assert {"input_text": "text"} == task.column_mapping
def test_from_dict(self):
input_schema = Features({"text": Value("string")})
template_dict = {"text_column": "input_text"}
task = LanguageModeling.from_dict(template_dict)
assert "language-modeling" == task.task
assert input_schema == task.input_schema
class TextClassificationTest(TestCase):
def setUp(self):
self.labels = sorted(["pos", "neg"])
def test_column_mapping(self):
task = TextClassification(text_column="input_text", label_column="input_label")
self.assertDictEqual({"input_text": "text", "input_label": "labels"}, task.column_mapping)
def test_from_dict(self):
input_schema = Features({"text": Value("string")})
        # The default label schema holds the bare ClassLabel feature class; concrete label names are only filled in via `align_with_features`
label_schema = Features({"labels": ClassLabel})
template_dict = {"text_column": "input_text", "label_column": "input_labels"}
task = TextClassification.from_dict(template_dict)
self.assertEqual("text-classification", task.task)
self.assertEqual(input_schema, task.input_schema)
self.assertEqual(label_schema, task.label_schema)
def test_align_with_features(self):
task = TextClassification(text_column="input_text", label_column="input_label")
self.assertEqual(task.label_schema["labels"], ClassLabel)
task = task.align_with_features(Features({"input_label": ClassLabel(names=self.labels)}))
self.assertEqual(task.label_schema["labels"], ClassLabel(names=self.labels))
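# Editor's note: hedged, editor-added sketch (not an upstream test) of how a task template is
# consumed by end users: `Dataset.prepare_for_task` renames and casts columns according to the
# template's column mapping. Column names and label values below are placeholders.
def _example_prepare_for_text_classification():
    features = Features({"input_text": Value("string"), "input_label": ClassLabel(names=["neg", "pos"])})
    info = DatasetInfo(
        features=features,
        task_templates=[TextClassification(text_column="input_text", label_column="input_label")],
    )
    dset = Dataset.from_dict({"input_text": ["great"], "input_label": [1]}, info=info)
    return dset.prepare_for_task("text-classification")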
class QuestionAnsweringTest(TestCase):
def test_column_mapping(self):
task = QuestionAnsweringExtractive(
context_column="input_context", question_column="input_question", answers_column="input_answers"
)
self.assertDictEqual(
{"input_context": "context", "input_question": "question", "input_answers": "answers"}, task.column_mapping
)
def test_from_dict(self):
input_schema = Features({"question": Value("string"), "context": Value("string")})
label_schema = Features(
{
"answers": Sequence(
{
"text": Value("string"),
"answer_start": Value("int32"),
}
)
}
)
        template_dict = {
            "context_column": "input_context",
            "question_column": "input_question",
            "answers_column": "input_answers",
        }
task = QuestionAnsweringExtractive.from_dict(template_dict)
self.assertEqual("question-answering-extractive", task.task)
self.assertEqual(input_schema, task.input_schema)
self.assertEqual(label_schema, task.label_schema)
class SummarizationTest(TestCase):
def test_column_mapping(self):
task = Summarization(text_column="input_text", summary_column="input_summary")
self.assertDictEqual({"input_text": "text", "input_summary": "summary"}, task.column_mapping)
def test_from_dict(self):
input_schema = Features({"text": Value("string")})
label_schema = Features({"summary": Value("string")})
template_dict = {"text_column": "input_text", "summary_column": "input_summary"}
task = Summarization.from_dict(template_dict)
self.assertEqual("summarization", task.task)
self.assertEqual(input_schema, task.input_schema)
self.assertEqual(label_schema, task.label_schema)
class AutomaticSpeechRecognitionTest(TestCase):
def test_column_mapping(self):
task = AutomaticSpeechRecognition(audio_column="input_audio", transcription_column="input_transcription")
self.assertDictEqual({"input_audio": "audio", "input_transcription": "transcription"}, task.column_mapping)
def test_from_dict(self):
input_schema = Features({"audio": Audio()})
label_schema = Features({"transcription": Value("string")})
template_dict = {
"audio_column": "input_audio",
"transcription_column": "input_transcription",
}
task = AutomaticSpeechRecognition.from_dict(template_dict)
self.assertEqual("automatic-speech-recognition", task.task)
self.assertEqual(input_schema, task.input_schema)
self.assertEqual(label_schema, task.label_schema)
class AudioClassificationTest(TestCase):
def setUp(self):
self.labels = sorted(["pos", "neg"])
def test_column_mapping(self):
task = AudioClassification(audio_column="input_audio", label_column="input_label")
self.assertDictEqual({"input_audio": "audio", "input_label": "labels"}, task.column_mapping)
def test_from_dict(self):
input_schema = Features({"audio": Audio()})
label_schema = Features({"labels": ClassLabel})
        template_dict = {
            "audio_column": "input_audio",
            "label_column": "input_label",
        }
task = AudioClassification.from_dict(template_dict)
self.assertEqual("audio-classification", task.task)
self.assertEqual(input_schema, task.input_schema)
self.assertEqual(label_schema, task.label_schema)
def test_align_with_features(self):
task = AudioClassification(audio_column="input_audio", label_column="input_label")
self.assertEqual(task.label_schema["labels"], ClassLabel)
task = task.align_with_features(Features({"input_label": ClassLabel(names=self.labels)}))
self.assertEqual(task.label_schema["labels"], ClassLabel(names=self.labels))
class ImageClassificationTest(TestCase):
def setUp(self):
self.labels = sorted(["pos", "neg"])
def test_column_mapping(self):
task = ImageClassification(image_column="input_image", label_column="input_label")
self.assertDictEqual({"input_image": "image", "input_label": "labels"}, task.column_mapping)
def test_from_dict(self):
input_schema = Features({"image": Image()})
label_schema = Features({"labels": ClassLabel})
template_dict = {
"image_column": "input_image",
"label_column": "input_label",
}
task = ImageClassification.from_dict(template_dict)
self.assertEqual("image-classification", task.task)
self.assertEqual(input_schema, task.input_schema)
self.assertEqual(label_schema, task.label_schema)
def test_align_with_features(self):
task = ImageClassification(image_column="input_image", label_column="input_label")
self.assertEqual(task.label_schema["labels"], ClassLabel)
task = task.align_with_features(Features({"input_label": ClassLabel(names=self.labels)}))
self.assertEqual(task.label_schema["labels"], ClassLabel(names=self.labels))
class DatasetWithTaskProcessingTest(TestCase):
def test_map_on_task_template(self):
info = DatasetInfo(task_templates=QuestionAnsweringExtractive())
dataset = Dataset.from_dict({k: [v] for k, v in SAMPLE_QUESTION_ANSWERING_EXTRACTIVE.items()}, info=info)
assert isinstance(dataset.info.task_templates, list)
assert len(dataset.info.task_templates) == 1
def keep_task(x):
return x
def dont_keep_task(x):
out = deepcopy(SAMPLE_QUESTION_ANSWERING_EXTRACTIVE)
out["answers"]["foobar"] = 0
return out
mapped_dataset = dataset.map(keep_task)
assert mapped_dataset.info.task_templates == dataset.info.task_templates
# reload from cache
mapped_dataset = dataset.map(keep_task)
assert mapped_dataset.info.task_templates == dataset.info.task_templates
mapped_dataset = dataset.map(dont_keep_task)
assert mapped_dataset.info.task_templates == []
# reload from cache
mapped_dataset = dataset.map(dont_keep_task)
assert mapped_dataset.info.task_templates == []
def test_remove_and_map_on_task_template(self):
features = Features({"text": Value("string"), "label": ClassLabel(names=("pos", "neg"))})
task_templates = TextClassification(text_column="text", label_column="label")
info = DatasetInfo(features=features, task_templates=task_templates)
dataset = Dataset.from_dict({"text": ["A sentence."], "label": ["pos"]}, info=info)
def process(example):
return example
modified_dataset = dataset.remove_columns("label")
mapped_dataset = modified_dataset.map(process)
assert mapped_dataset.info.task_templates == []
| datasets-main | tests/test_tasks.py |
import datetime
from pathlib import Path
from unittest import TestCase
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from datasets import Audio, Features, Image, IterableDataset
from datasets.formatting import NumpyFormatter, PandasFormatter, PythonFormatter, query_table
from datasets.formatting.formatting import (
LazyBatch,
LazyRow,
NumpyArrowExtractor,
PandasArrowExtractor,
PythonArrowExtractor,
)
from datasets.table import InMemoryTable
from .utils import require_jax, require_pil, require_sndfile, require_tf, require_torch
class AnyArray:
def __init__(self, data) -> None:
self.data = data
def __array__(self) -> np.ndarray:
return np.asarray(self.data)
def _gen_any_arrays():
for _ in range(10):
yield {"array": AnyArray(list(range(10)))}
@pytest.fixture
def any_arrays_dataset():
return IterableDataset.from_generator(_gen_any_arrays)
_COL_A = [0, 1, 2]
_COL_B = ["foo", "bar", "foobar"]
_COL_C = [[[1.0, 0.0, 0.0]] * 2, [[0.0, 1.0, 0.0]] * 2, [[0.0, 0.0, 1.0]] * 2]
_COL_D = [datetime.datetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)] * 3
_INDICES = [1, 0]
IMAGE_PATH_1 = Path(__file__).parent / "features" / "data" / "test_image_rgb.jpg"
IMAGE_PATH_2 = Path(__file__).parent / "features" / "data" / "test_image_rgba.png"
AUDIO_PATH_1 = Path(__file__).parent / "features" / "data" / "test_audio_44100.wav"
class ArrowExtractorTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D})
def test_python_extractor(self):
pa_table = self._create_dummy_table()
extractor = PythonArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0], "d": _COL_D[0]})
col = extractor.extract_column(pa_table)
self.assertEqual(col, _COL_A)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D})
def test_numpy_extractor(self):
pa_table = self._create_dummy_table().drop(["c", "d"])
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0]})
col = extractor.extract_column(pa_table)
np.testing.assert_equal(col, np.array(_COL_A))
batch = extractor.extract_batch(pa_table)
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B)})
def test_numpy_extractor_nested(self):
pa_table = self._create_dummy_table().drop(["a", "b", "d"])
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row["c"][0].dtype, np.float64)
self.assertEqual(row["c"].dtype, object)
col = extractor.extract_column(pa_table)
self.assertEqual(col[0][0].dtype, np.float64)
self.assertEqual(col[0].dtype, object)
self.assertEqual(col.dtype, object)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch["c"][0][0].dtype, np.float64)
self.assertEqual(batch["c"][0].dtype, object)
self.assertEqual(batch["c"].dtype, object)
def test_numpy_extractor_temporal(self):
pa_table = self._create_dummy_table().drop(["a", "b", "c"])
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertTrue(np.issubdtype(row["d"].dtype, np.datetime64))
col = extractor.extract_column(pa_table)
self.assertTrue(np.issubdtype(col[0].dtype, np.datetime64))
self.assertTrue(np.issubdtype(col.dtype, np.datetime64))
batch = extractor.extract_batch(pa_table)
self.assertTrue(np.issubdtype(batch["d"][0].dtype, np.datetime64))
self.assertTrue(np.issubdtype(batch["d"].dtype, np.datetime64))
def test_pandas_extractor(self):
pa_table = self._create_dummy_table()
extractor = PandasArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1])
col = extractor.extract_column(pa_table)
pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a"))
batch = extractor.extract_batch(pa_table)
self.assertIsInstance(batch, pd.DataFrame)
pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a"))
pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b"))
def test_pandas_extractor_nested(self):
pa_table = self._create_dummy_table().drop(["a", "b", "d"])
extractor = PandasArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row["c"][0][0].dtype, np.float64)
self.assertEqual(row["c"].dtype, object)
col = extractor.extract_column(pa_table)
self.assertEqual(col[0][0].dtype, np.float64)
self.assertEqual(col[0].dtype, object)
self.assertEqual(col.dtype, object)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch["c"][0][0].dtype, np.float64)
self.assertEqual(batch["c"][0].dtype, object)
self.assertEqual(batch["c"].dtype, object)
def test_pandas_extractor_temporal(self):
pa_table = self._create_dummy_table().drop(["a", "b", "c"])
extractor = PandasArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertTrue(pd.api.types.is_datetime64_any_dtype(row["d"].dtype))
col = extractor.extract_column(pa_table)
self.assertTrue(isinstance(col[0], datetime.datetime))
self.assertTrue(pd.api.types.is_datetime64_any_dtype(col.dtype))
batch = extractor.extract_batch(pa_table)
self.assertTrue(isinstance(batch["d"][0], datetime.datetime))
self.assertTrue(pd.api.types.is_datetime64_any_dtype(batch["d"].dtype))
class LazyDictTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def _create_dummy_formatter(self):
return PythonFormatter(lazy=True)
def test_lazy_dict_copy(self):
pa_table = self._create_dummy_table()
formatter = self._create_dummy_formatter()
lazy_batch = formatter.format_batch(pa_table)
lazy_batch_copy = lazy_batch.copy()
self.assertEqual(type(lazy_batch), type(lazy_batch_copy))
self.assertEqual(lazy_batch.items(), lazy_batch_copy.items())
lazy_batch["d"] = [1, 2, 3]
self.assertNotEqual(lazy_batch.items(), lazy_batch_copy.items())
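# Editor's note: hedged, editor-added sketch (not an upstream test) of where lazy formatting
# matters in practice: depending on the `datasets` version, `Dataset.map` passes a LazyRow /
# LazyBatch to the mapped function, so columns that are never accessed are not decoded.
def _example_lazy_rows_in_map():
    from datasets import Dataset

    dset = Dataset.from_dict({"a": list(range(3)), "b": ["x", "y", "z"]})
    # only column "a" is accessed; column "b" can stay in Arrow form
    return dset.map(lambda row: {"a_plus_one": row["a"] + 1})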
class FormatterTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_python_formatter(self):
pa_table = self._create_dummy_table()
formatter = PythonFormatter()
row = formatter.format_row(pa_table)
self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]})
col = formatter.format_column(pa_table)
self.assertEqual(col, _COL_A)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_python_formatter_lazy(self):
pa_table = self._create_dummy_table()
formatter = PythonFormatter(lazy=True)
row = formatter.format_row(pa_table)
self.assertIsInstance(row, LazyRow)
self.assertEqual(row["a"], _COL_A[0])
self.assertEqual(row["b"], _COL_B[0])
self.assertEqual(row["c"], _COL_C[0])
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch, LazyBatch)
self.assertEqual(batch["a"], _COL_A)
self.assertEqual(batch["b"], _COL_B)
self.assertEqual(batch["c"], _COL_C)
def test_numpy_formatter(self):
pa_table = self._create_dummy_table()
formatter = NumpyFormatter()
row = formatter.format_row(pa_table)
np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])})
col = formatter.format_column(pa_table)
np.testing.assert_equal(col, np.array(_COL_A))
batch = formatter.format_batch(pa_table)
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)})
assert batch["c"].shape == np.array(_COL_C).shape
def test_numpy_formatter_np_array_kwargs(self):
pa_table = self._create_dummy_table().drop(["b"])
formatter = NumpyFormatter(dtype=np.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, np.dtype(np.float16))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, np.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, np.dtype(np.float16))
self.assertEqual(batch["c"].dtype, np.dtype(np.float16))
@require_pil
def test_numpy_formatter_image(self):
# same dimensions
pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2})
formatter = NumpyFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, np.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, np.uint8)
self.assertEqual(col.shape, (2, 480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["image"].dtype, np.uint8)
self.assertEqual(batch["image"].shape, (2, 480, 640, 3))
# different dimensions
pa_table = pa.table(
{"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]}
)
formatter = NumpyFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, np.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertIsInstance(col, np.ndarray)
self.assertEqual(col.dtype, object)
self.assertEqual(col[0].dtype, np.uint8)
self.assertEqual(col[0].shape, (480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch["image"], np.ndarray)
self.assertEqual(batch["image"].dtype, object)
self.assertEqual(batch["image"][0].dtype, np.uint8)
self.assertEqual(batch["image"][0].shape, (480, 640, 3))
@require_sndfile
def test_numpy_formatter_audio(self):
pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]})
formatter = NumpyFormatter(features=Features({"audio": Audio()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["audio"]["array"].dtype, np.dtype(np.float32))
col = formatter.format_column(pa_table)
self.assertEqual(col[0]["array"].dtype, np.float32)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["audio"][0]["array"].dtype, np.dtype(np.float32))
def test_pandas_formatter(self):
pa_table = self._create_dummy_table()
formatter = PandasFormatter()
row = formatter.format_row(pa_table)
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1])
col = formatter.format_column(pa_table)
pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a"))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch, pd.DataFrame)
pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a"))
pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b"))
@require_torch
def test_torch_formatter(self):
import torch
from datasets.formatting import TorchFormatter
pa_table = self._create_dummy_table()
formatter = TorchFormatter()
row = formatter.format_row(pa_table)
torch.testing.assert_close(row["a"], torch.tensor(_COL_A, dtype=torch.int64)[0])
assert row["b"] == _COL_B[0]
torch.testing.assert_close(row["c"], torch.tensor(_COL_C, dtype=torch.float32)[0])
col = formatter.format_column(pa_table)
torch.testing.assert_close(col, torch.tensor(_COL_A, dtype=torch.int64))
batch = formatter.format_batch(pa_table)
torch.testing.assert_close(batch["a"], torch.tensor(_COL_A, dtype=torch.int64))
assert batch["b"] == _COL_B
torch.testing.assert_close(batch["c"], torch.tensor(_COL_C, dtype=torch.float32))
assert batch["c"].shape == np.array(_COL_C).shape
@require_torch
def test_torch_formatter_torch_tensor_kwargs(self):
import torch
from datasets.formatting import TorchFormatter
pa_table = self._create_dummy_table().drop(["b"])
formatter = TorchFormatter(dtype=torch.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, torch.float16)
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, torch.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, torch.float16)
self.assertEqual(batch["c"].dtype, torch.float16)
@require_torch
@require_pil
def test_torch_formatter_image(self):
import torch
from datasets.formatting import TorchFormatter
# same dimensions
pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2})
formatter = TorchFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, torch.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, torch.uint8)
self.assertEqual(col.shape, (2, 480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["image"].dtype, torch.uint8)
self.assertEqual(batch["image"].shape, (2, 480, 640, 3))
# different dimensions
pa_table = pa.table(
{"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]}
)
formatter = TorchFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, torch.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertIsInstance(col, list)
self.assertEqual(col[0].dtype, torch.uint8)
self.assertEqual(col[0].shape, (480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch["image"], list)
self.assertEqual(batch["image"][0].dtype, torch.uint8)
self.assertEqual(batch["image"][0].shape, (480, 640, 3))
@require_torch
@require_sndfile
def test_torch_formatter_audio(self):
import torch
from datasets.formatting import TorchFormatter
pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]})
formatter = TorchFormatter(features=Features({"audio": Audio()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["audio"]["array"].dtype, torch.float32)
col = formatter.format_column(pa_table)
self.assertEqual(col[0]["array"].dtype, torch.float32)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["audio"][0]["array"].dtype, torch.float32)
@require_tf
def test_tf_formatter(self):
import tensorflow as tf
from datasets.formatting import TFFormatter
pa_table = self._create_dummy_table()
formatter = TFFormatter()
row = formatter.format_row(pa_table)
tf.debugging.assert_equal(row["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64)[0])
tf.debugging.assert_equal(row["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string)[0])
tf.debugging.assert_equal(row["c"], tf.convert_to_tensor(_COL_C, dtype=tf.float32)[0])
col = formatter.format_column(pa_table)
tf.debugging.assert_equal(col, tf.ragged.constant(_COL_A, dtype=tf.int64))
batch = formatter.format_batch(pa_table)
tf.debugging.assert_equal(batch["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64))
tf.debugging.assert_equal(batch["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string))
self.assertIsInstance(batch["c"], tf.Tensor)
self.assertEqual(batch["c"].dtype, tf.float32)
tf.debugging.assert_equal(
batch["c"].shape.as_list(), tf.convert_to_tensor(_COL_C, dtype=tf.float32).shape.as_list()
)
tf.debugging.assert_equal(tf.convert_to_tensor(batch["c"]), tf.convert_to_tensor(_COL_C, dtype=tf.float32))
@require_tf
def test_tf_formatter_tf_tensor_kwargs(self):
import tensorflow as tf
from datasets.formatting import TFFormatter
pa_table = self._create_dummy_table().drop(["b"])
formatter = TFFormatter(dtype=tf.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, tf.float16)
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, tf.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, tf.float16)
self.assertEqual(batch["c"].dtype, tf.float16)
@require_tf
@require_pil
def test_tf_formatter_image(self):
import tensorflow as tf
from datasets.formatting import TFFormatter
# same dimensions
pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2})
formatter = TFFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, tf.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, tf.uint8)
self.assertEqual(col.shape, (2, 480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["image"][0].dtype, tf.uint8)
self.assertEqual(batch["image"].shape, (2, 480, 640, 3))
# different dimensions
pa_table = pa.table(
{"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]}
)
formatter = TFFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, tf.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertIsInstance(col, list)
self.assertEqual(col[0].dtype, tf.uint8)
self.assertEqual(col[0].shape, (480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch["image"], list)
self.assertEqual(batch["image"][0].dtype, tf.uint8)
self.assertEqual(batch["image"][0].shape, (480, 640, 3))
@require_tf
@require_sndfile
def test_tf_formatter_audio(self):
import tensorflow as tf
from datasets.formatting import TFFormatter
pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]})
formatter = TFFormatter(features=Features({"audio": Audio()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["audio"]["array"].dtype, tf.float32)
col = formatter.format_column(pa_table)
self.assertEqual(col[0]["array"].dtype, tf.float32)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["audio"][0]["array"].dtype, tf.float32)
@require_jax
def test_jax_formatter(self):
import jax
import jax.numpy as jnp
from datasets.formatting import JaxFormatter
pa_table = self._create_dummy_table()
formatter = JaxFormatter()
row = formatter.format_row(pa_table)
        self.assertTrue(
            jnp.allclose(row["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)[0])
        )
        assert row["b"] == _COL_B[0]
        self.assertTrue(jnp.allclose(row["c"], jnp.array(_COL_C, dtype=jnp.float32)[0]))
        col = formatter.format_column(pa_table)
        self.assertTrue(
            jnp.allclose(col, jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32))
        )
        batch = formatter.format_batch(pa_table)
        self.assertTrue(
            jnp.allclose(batch["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32))
        )
        assert batch["b"] == _COL_B
        self.assertTrue(jnp.allclose(batch["c"], jnp.array(_COL_C, dtype=jnp.float32)))
assert batch["c"].shape == np.array(_COL_C).shape
@require_jax
def test_jax_formatter_jnp_array_kwargs(self):
import jax.numpy as jnp
from datasets.formatting import JaxFormatter
pa_table = self._create_dummy_table().drop(["b"])
formatter = JaxFormatter(dtype=jnp.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, jnp.float16)
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, jnp.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, jnp.float16)
self.assertEqual(batch["c"].dtype, jnp.float16)
@require_jax
@require_pil
def test_jax_formatter_image(self):
import jax.numpy as jnp
from datasets.formatting import JaxFormatter
# same dimensions
pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2})
formatter = JaxFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, jnp.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, jnp.uint8)
self.assertEqual(col.shape, (2, 480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["image"].dtype, jnp.uint8)
self.assertEqual(batch["image"].shape, (2, 480, 640, 3))
# different dimensions
pa_table = pa.table(
{"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]}
)
formatter = JaxFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, jnp.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertIsInstance(col, list)
self.assertEqual(col[0].dtype, jnp.uint8)
self.assertEqual(col[0].shape, (480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch["image"], list)
self.assertEqual(batch["image"][0].dtype, jnp.uint8)
self.assertEqual(batch["image"][0].shape, (480, 640, 3))
@require_jax
@require_sndfile
def test_jax_formatter_audio(self):
import jax.numpy as jnp
from datasets.formatting import JaxFormatter
pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]})
formatter = JaxFormatter(features=Features({"audio": Audio()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["audio"]["array"].dtype, jnp.float32)
col = formatter.format_column(pa_table)
self.assertEqual(col[0]["array"].dtype, jnp.float32)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["audio"][0]["array"].dtype, jnp.float32)
@require_jax
def test_jax_formatter_device(self):
import jax
from datasets.formatting import JaxFormatter
pa_table = self._create_dummy_table()
device = jax.devices()[0]
formatter = JaxFormatter(device=str(device))
row = formatter.format_row(pa_table)
assert row["a"].device() == device
assert row["c"].device() == device
col = formatter.format_column(pa_table)
assert col.device() == device
batch = formatter.format_batch(pa_table)
assert batch["a"].device() == device
assert batch["c"].device() == device
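# Editor's note: hedged, editor-added sketch (not an upstream test) of the user-facing entry
# point for the formatters tested above: `Dataset.with_format` (or `set_format`) selects which
# formatter is applied when rows, columns or batches are accessed.
def _example_with_format_numpy():
    from datasets import Dataset

    dset = Dataset.from_dict({"a": _COL_A, "c": _COL_C})
    np_dset = dset.with_format("numpy")
    return np_dset[0]["c"]  # returned as a numpy array instead of nested Python lists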
class QueryTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def _create_dummy_arrow_indices(self):
return pa.Table.from_arrays([pa.array(_INDICES, type=pa.uint64())], names=["indices"])
def assertTableEqual(self, first: pa.Table, second: pa.Table):
self.assertEqual(first.schema, second.schema)
for first_array, second_array in zip(first, second):
self.assertEqual(first_array, second_array)
self.assertEqual(first, second)
def test_query_table_int(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
# classical usage
subtable = query_table(table, 0)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]}))
subtable = query_table(table, 1)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]}))
subtable = query_table(table, -1)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
# raise an IndexError
with self.assertRaises(IndexError):
query_table(table, n)
with self.assertRaises(IndexError):
query_table(table, -(n + 1))
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, 0, indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
with self.assertRaises(IndexError):
assert len(indices) < n
query_table(table, len(indices), indices=indices)
def test_query_table_slice(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
# classical usage
subtable = query_table(table, slice(0, 1))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]}))
subtable = query_table(table, slice(1, 2))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]}))
subtable = query_table(table, slice(-2, -1))
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": _COL_A[-2:-1], "b": _COL_B[-2:-1], "c": _COL_C[-2:-1]})
)
# usage with None
subtable = query_table(table, slice(-1, None))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
subtable = query_table(table, slice(None, n + 1))
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": _COL_A[: n + 1], "b": _COL_B[: n + 1], "c": _COL_C[: n + 1]})
)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}))
subtable = query_table(table, slice(-(n + 1), None))
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": _COL_A[-(n + 1) :], "b": _COL_B[-(n + 1) :], "c": _COL_C[-(n + 1) :]})
)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}))
# usage with step
subtable = query_table(table, slice(None, None, 2))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[::2], "b": _COL_B[::2], "c": _COL_C[::2]}))
        # empty output but no errors
subtable = query_table(table, slice(-1, 0)) # usage with both negative and positive idx
assert len(_COL_A[-1:0]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
subtable = query_table(table, slice(2, 1))
assert len(_COL_A[2:1]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
subtable = query_table(table, slice(n, n))
assert len(_COL_A[n:n]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
subtable = query_table(table, slice(n, n + 1))
assert len(_COL_A[n : n + 1]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
# it's not possible to get an error with a slice
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, slice(0, 1), indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
subtable = query_table(table, slice(n - 1, n), indices=indices)
assert len(indices.column(0).to_pylist()[n - 1 : n]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
def test_query_table_range(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C)
# classical usage
subtable = query_table(table, range(0, 1))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[range(0, 1)], "b": np_B[range(0, 1)], "c": np_C[range(0, 1)].tolist()}),
)
subtable = query_table(table, range(1, 2))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[range(1, 2)], "b": np_B[range(1, 2)], "c": np_C[range(1, 2)].tolist()}),
)
subtable = query_table(table, range(-2, -1))
self.assertTableEqual(
subtable,
pa.Table.from_pydict(
{"a": np_A[range(-2, -1)], "b": np_B[range(-2, -1)], "c": np_C[range(-2, -1)].tolist()}
),
)
# usage with both negative and positive idx
subtable = query_table(table, range(-1, 0))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[range(-1, 0)], "b": np_B[range(-1, 0)], "c": np_C[range(-1, 0)].tolist()}),
)
subtable = query_table(table, range(-1, n))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[range(-1, n)], "b": np_B[range(-1, n)], "c": np_C[range(-1, n)].tolist()}),
)
# usage with step
subtable = query_table(table, range(0, n, 2))
self.assertTableEqual(
subtable,
pa.Table.from_pydict(
{"a": np_A[range(0, n, 2)], "b": np_B[range(0, n, 2)], "c": np_C[range(0, n, 2)].tolist()}
),
)
subtable = query_table(table, range(0, n + 1, 2 * n))
self.assertTableEqual(
subtable,
pa.Table.from_pydict(
{
"a": np_A[range(0, n + 1, 2 * n)],
"b": np_B[range(0, n + 1, 2 * n)],
"c": np_C[range(0, n + 1, 2 * n)].tolist(),
}
),
)
        # empty output but no errors
subtable = query_table(table, range(2, 1))
assert len(np_A[range(2, 1)]) == 0
self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
subtable = query_table(table, range(n, n))
assert len(np_A[range(n, n)]) == 0
self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
        # raise an IndexError; the nested assertRaises also documents that the equivalent numpy indexing is out of bounds
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[range(0, n + 1)]
query_table(table, range(0, n + 1))
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[range(-(n + 1), -1)]
query_table(table, range(-(n + 1), -1))
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[range(n, n + 1)]
query_table(table, range(n, n + 1))
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, range(0, 1), indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
with self.assertRaises(IndexError):
assert len(indices) < n
query_table(table, range(len(indices), len(indices) + 1), indices=indices)
def test_query_table_str(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
subtable = query_table(table, "a")
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A}))
with self.assertRaises(KeyError):
query_table(table, "z")
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, "a", indices=indices)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": [_COL_A[i] for i in _INDICES]}))
def test_query_table_iterable(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C)
# classical usage
subtable = query_table(table, [0])
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": np_A[[0]], "b": np_B[[0]], "c": np_C[[0]].tolist()})
)
subtable = query_table(table, [1])
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": np_A[[1]], "b": np_B[[1]], "c": np_C[[1]].tolist()})
)
subtable = query_table(table, [-1])
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": np_A[[-1]], "b": np_B[[-1]], "c": np_C[[-1]].tolist()})
)
subtable = query_table(table, [0, -1, 1])
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}),
)
# numpy iterable
subtable = query_table(table, np.array([0, -1, 1]))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}),
)
        # empty output but no errors
subtable = query_table(table, [])
assert len(np_A[[]]) == 0
self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
        # raise an IndexError; the nested assertRaises also documents that the equivalent numpy indexing is out of bounds
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[[n]]
query_table(table, [n])
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[[-(n + 1)]]
query_table(table, [-(n + 1)])
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, [0], indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
with self.assertRaises(IndexError):
assert len(indices) < n
query_table(table, [len(indices)], indices=indices)
def test_query_table_invalid_key_type(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
with self.assertRaises(TypeError):
query_table(table, 0.0)
with self.assertRaises(TypeError):
query_table(table, [0, "a"])
with self.assertRaises(TypeError):
query_table(table, int)
with self.assertRaises(TypeError):
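            # a generator (here even a potentially infinite one) is not an accepted key type and should also raise a TypeError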
def iter_to_inf(start=0):
while True:
yield start
start += 1
query_table(table, iter_to_inf())
@pytest.fixture(scope="session")
def arrow_table():
return pa.Table.from_pydict({"col_int": [0, 1, 2], "col_float": [0.0, 1.0, 2.0]})
@require_tf
@pytest.mark.parametrize(
"cast_schema",
[
None,
[("col_int", pa.int64()), ("col_float", pa.float64())],
[("col_int", pa.int32()), ("col_float", pa.float64())],
[("col_int", pa.int64()), ("col_float", pa.float32())],
],
)
def test_tf_formatter_sets_default_dtypes(cast_schema, arrow_table):
import tensorflow as tf
from datasets.formatting import TFFormatter
if cast_schema:
arrow_table = arrow_table.cast(pa.schema(cast_schema))
arrow_table_dict = arrow_table.to_pydict()
list_int = arrow_table_dict["col_int"]
list_float = arrow_table_dict["col_float"]
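    # regardless of how the arrow columns were cast above, the TF formatter is expected to default to
    # tf.int64 for integer columns and tf.float32 for float columns (as asserted below)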
formatter = TFFormatter()
row = formatter.format_row(arrow_table)
tf.debugging.assert_equal(row["col_int"], tf.ragged.constant(list_int, dtype=tf.int64)[0])
tf.debugging.assert_equal(row["col_float"], tf.ragged.constant(list_float, dtype=tf.float32)[0])
col = formatter.format_column(arrow_table)
tf.debugging.assert_equal(col, tf.ragged.constant(list_int, dtype=tf.int64))
batch = formatter.format_batch(arrow_table)
tf.debugging.assert_equal(batch["col_int"], tf.ragged.constant(list_int, dtype=tf.int64))
tf.debugging.assert_equal(batch["col_float"], tf.ragged.constant(list_float, dtype=tf.float32))
@require_torch
@pytest.mark.parametrize(
"cast_schema",
[
None,
[("col_int", pa.int64()), ("col_float", pa.float64())],
[("col_int", pa.int32()), ("col_float", pa.float64())],
[("col_int", pa.int64()), ("col_float", pa.float32())],
],
)
def test_torch_formatter_sets_default_dtypes(cast_schema, arrow_table):
import torch
from datasets.formatting import TorchFormatter
if cast_schema:
arrow_table = arrow_table.cast(pa.schema(cast_schema))
arrow_table_dict = arrow_table.to_pydict()
list_int = arrow_table_dict["col_int"]
list_float = arrow_table_dict["col_float"]
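    # same expectation as the TF test above: integers default to torch.int64, floats to torch.float32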
formatter = TorchFormatter()
row = formatter.format_row(arrow_table)
torch.testing.assert_close(row["col_int"], torch.tensor(list_int, dtype=torch.int64)[0])
torch.testing.assert_close(row["col_float"], torch.tensor(list_float, dtype=torch.float32)[0])
col = formatter.format_column(arrow_table)
torch.testing.assert_close(col, torch.tensor(list_int, dtype=torch.int64))
batch = formatter.format_batch(arrow_table)
torch.testing.assert_close(batch["col_int"], torch.tensor(list_int, dtype=torch.int64))
torch.testing.assert_close(batch["col_float"], torch.tensor(list_float, dtype=torch.float32))
def test_iterable_dataset_of_arrays_format_to_arrow(any_arrays_dataset: IterableDataset):
formatted = any_arrays_dataset.with_format("arrow")
assert all(isinstance(example, pa.Table) for example in formatted)
def test_iterable_dataset_of_arrays_format_to_numpy(any_arrays_dataset: IterableDataset):
formatted = any_arrays_dataset.with_format("np")
assert all(isinstance(example["array"], np.ndarray) for example in formatted)
@require_torch
def test_iterable_dataset_of_arrays_format_to_torch(any_arrays_dataset: IterableDataset):
import torch
formatted = any_arrays_dataset.with_format("torch")
assert all(isinstance(example["array"], torch.Tensor) for example in formatted)
@require_tf
def test_iterable_dataset_of_arrays_format_to_tf(any_arrays_dataset: IterableDataset):
import tensorflow as tf
formatted = any_arrays_dataset.with_format("tf")
assert all(isinstance(example["array"], tf.Tensor) for example in formatted)
@require_jax
def test_iterable_dataset_of_arrays_format_to_jax(any_arrays_dataset: IterableDataset):
import jax.numpy as jnp
formatted = any_arrays_dataset.with_format("jax")
assert all(isinstance(example["array"], jnp.ndarray) for example in formatted)
| datasets-main | tests/test_formatting.py |
import os
import tempfile
from pathlib import Path
from unittest import TestCase
import pyarrow as pa
import pytest
from datasets.arrow_dataset import Dataset
from datasets.arrow_reader import ArrowReader, BaseReader, FileInstructions, ReadInstruction, make_file_instructions
from datasets.info import DatasetInfo
from datasets.splits import NamedSplit, Split, SplitDict, SplitInfo
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
class ReaderTest(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This reader is made for testing. It mocks file reads.
"""
def _get_table_from_filename(self, filename_skip_take, in_memory=False):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
open(os.path.join(filename), "wb").close()
pa_table = pa.Table.from_pydict({"filename": [Path(filename).name] * 100})
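        # a take of -1 means 'read from `skip` to the end of the file', as produced for fully-read shards
        # by make_file_instructions (see test_make_file_instructions below)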
if take == -1:
take = len(pa_table) - skip
if skip is not None and take is not None:
pa_table = pa_table.slice(skip, take)
return pa_table
class BaseReaderTest(TestCase):
def test_read(self):
name = "my_name"
train_info = SplitInfo(name="train", num_examples=100)
test_info = SplitInfo(name="test", num_examples=100)
split_infos = [train_info, test_info]
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
instructions = "test[:33%]"
dset = Dataset(**reader.read(name, instructions, split_infos))
self.assertEqual(dset["filename"][0], f"{name}-test")
self.assertEqual(dset.num_rows, 33)
self.assertEqual(dset.num_columns, 1)
instructions1 = ["train", "test[:33%]"]
instructions2 = [Split.TRAIN, ReadInstruction.from_spec("test[:33%]")]
for instructions in [instructions1, instructions2]:
datasets_kwargs = [reader.read(name, instr, split_infos) for instr in instructions]
train_dset, test_dset = (Dataset(**dataset_kwargs) for dataset_kwargs in datasets_kwargs)
self.assertEqual(train_dset["filename"][0], f"{name}-train")
self.assertEqual(train_dset.num_rows, 100)
self.assertEqual(train_dset.num_columns, 1)
self.assertIsInstance(train_dset.split, NamedSplit)
self.assertEqual(str(train_dset.split), "train")
self.assertEqual(test_dset["filename"][0], f"{name}-test")
self.assertEqual(test_dset.num_rows, 33)
self.assertEqual(test_dset.num_columns, 1)
self.assertIsInstance(test_dset.split, NamedSplit)
self.assertEqual(str(test_dset.split), "test[:33%]")
del train_dset, test_dset
def test_read_sharded(self):
name = "my_name"
train_info = SplitInfo(name="train", num_examples=1000, shard_lengths=[100] * 10)
split_infos = [train_info]
split_dict = SplitDict()
split_dict.add(train_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
instructions = "train[:33%]"
dset = Dataset(**reader.read(name, instructions, split_infos))
self.assertEqual(dset["filename"][0], f"{name}-train-00000-of-00010")
self.assertEqual(dset["filename"][-1], f"{name}-train-00003-of-00010")
self.assertEqual(dset.num_rows, 330)
self.assertEqual(dset.num_columns, 1)
def test_read_files(self):
train_info = SplitInfo(name="train", num_examples=100)
test_info = SplitInfo(name="test", num_examples=100)
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
files = [
{"filename": os.path.join(tmp_dir, "train")},
{"filename": os.path.join(tmp_dir, "test"), "skip": 10, "take": 10},
]
dset = Dataset(**reader.read_files(files, original_instructions="train+test[10:20]"))
self.assertEqual(dset.num_rows, 110)
self.assertEqual(dset.num_columns, 1)
del dset
@pytest.mark.parametrize("in_memory", [False, True])
def test_read_table(in_memory, dataset, arrow_file):
filename = arrow_file
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
table = ArrowReader.read_table(filename, in_memory=in_memory)
assert table.shape == dataset.data.shape
assert set(table.column_names) == set(dataset.data.column_names)
assert dict(table.to_pydict()) == dict(dataset.data.to_pydict()) # to_pydict returns OrderedDict
@pytest.mark.parametrize("in_memory", [False, True])
def test_read_files(in_memory, dataset, arrow_file):
filename = arrow_file
reader = ArrowReader("", None)
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
dataset_kwargs = reader.read_files([{"filename": filename}], in_memory=in_memory)
assert dataset_kwargs.keys() == {"arrow_table", "info", "split"}
table = dataset_kwargs["arrow_table"]
assert table.shape == dataset.data.shape
assert set(table.column_names) == set(dataset.data.column_names)
assert dict(table.to_pydict()) == dict(dataset.data.to_pydict()) # to_pydict returns OrderedDict
def test_read_instruction_spec():
assert ReadInstruction("train", to=10, unit="abs").to_spec() == "train[:10]"
assert ReadInstruction("train", from_=-80, to=10, unit="%").to_spec() == "train[-80%:10%]"
spec_train_test = "train+test"
assert ReadInstruction.from_spec(spec_train_test).to_spec() == spec_train_test
spec_train_abs = "train[2:10]"
assert ReadInstruction.from_spec(spec_train_abs).to_spec() == spec_train_abs
spec_train_pct = "train[15%:-20%]"
assert ReadInstruction.from_spec(spec_train_pct).to_spec() == spec_train_pct
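    # '(closest)' is the default rounding and is therefore dropped when serializing back to a spec,
    # while 'pct1_dropremainder' must be kept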
spec_train_pct_rounding = "train[:10%](closest)"
assert ReadInstruction.from_spec(spec_train_pct_rounding).to_spec() == "train[:10%]"
spec_train_pct_rounding = "train[:10%](pct1_dropremainder)"
assert ReadInstruction.from_spec(spec_train_pct_rounding).to_spec() == spec_train_pct_rounding
spec_train_test_pct_rounding = "train[:10%](pct1_dropremainder)+test[-10%:](pct1_dropremainder)"
assert ReadInstruction.from_spec(spec_train_test_pct_rounding).to_spec() == spec_train_test_pct_rounding
def test_make_file_instructions():
name = "dummy"
split_infos = [SplitInfo(name="train", num_examples=100)]
instruction = "train[:33%]"
filetype_suffix = "arrow"
prefix_path = "prefix"
file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
assert isinstance(file_instructions, FileInstructions)
assert file_instructions.num_examples == 33
assert file_instructions.file_instructions == [
{"filename": os.path.join(prefix_path, f"{name}-train.arrow"), "skip": 0, "take": 33}
]
split_infos = [SplitInfo(name="train", num_examples=100, shard_lengths=[10] * 10)]
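    # 33% of 100 examples is 33 rows: with 10 shards of 10 rows each, the first 3 shards are read in full
    # (take=-1) and 3 more rows are taken from the 4th shard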
file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
assert isinstance(file_instructions, FileInstructions)
assert file_instructions.num_examples == 33
assert file_instructions.file_instructions == [
{"filename": os.path.join(prefix_path, f"{name}-train-00000-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00001-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00002-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00003-of-00010.arrow"), "skip": 0, "take": 3},
]
@pytest.mark.parametrize("name, expected_exception", [(None, TypeError), ("", ValueError)])
def test_make_file_instructions_raises(name, expected_exception):
split_infos = [SplitInfo(name="train", num_examples=100)]
instruction = "train"
filetype_suffix = "arrow"
prefix_path = "prefix_path"
with pytest.raises(expected_exception):
_ = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
| datasets-main | tests/test_arrow_reader.py |
import copy
import os
from pathlib import Path, PurePath
from typing import List
from unittest.mock import patch
import fsspec
import pytest
from fsspec.registry import _registry as _fsspec_registry
from fsspec.spec import AbstractFileSystem
from datasets.data_files import (
DataFilesDict,
DataFilesList,
_get_data_files_patterns,
_get_metadata_files_patterns,
_is_inside_unrequested_special_dir,
_is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir,
resolve_pattern,
)
from datasets.fingerprint import Hasher
_TEST_PATTERNS = ["*", "**", "**/*", "*.txt", "data/*", "**/*.txt", "**/train.txt"]
_FILES_TO_IGNORE = {".dummy", "README.md", "dummy_data.zip", "dataset_infos.json"}
_DIRS_TO_IGNORE = {"data/.dummy_subdir", "__pycache__"}
_TEST_PATTERNS_SIZES = {
"*": 0,
"**": 4,
"**/*": 4,
"*.txt": 0,
"data/*": 2,
"data/**": 4,
"**/*.txt": 4,
"**/train.txt": 2,
}
_TEST_URL = "https://raw.githubusercontent.com/huggingface/datasets/9675a5a1e7b99a86f9c250f6ea5fa5d1e6d5cc7d/setup.py"
@pytest.fixture
def complex_data_dir(tmp_path):
data_dir = tmp_path / "complex_data_dir"
data_dir.mkdir()
(data_dir / "data").mkdir()
with open(data_dir / "data" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "data" / "test.txt", "w") as f:
f.write("bar\n" * 10)
with open(data_dir / "README.md", "w") as f:
f.write("This is a readme")
with open(data_dir / ".dummy", "w") as f:
f.write("this is a dummy file that is not a data file")
(data_dir / "data" / "subdir").mkdir()
with open(data_dir / "data" / "subdir" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "data" / "subdir" / "test.txt", "w") as f:
f.write("bar\n" * 10)
(data_dir / "data" / ".dummy_subdir").mkdir()
with open(data_dir / "data" / ".dummy_subdir" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "data" / ".dummy_subdir" / "test.txt", "w") as f:
f.write("bar\n" * 10)
(data_dir / "__pycache__").mkdir()
with open(data_dir / "__pycache__" / "script.py", "w") as f:
f.write("foo\n" * 10)
return str(data_dir)
def is_relative_to(path, *other):
# A built-in method in Python 3.9+
try:
path.relative_to(*other)
return True
except ValueError:
return False
@pytest.fixture
def pattern_results(complex_data_dir):
# We use fsspec glob as a reference for data files resolution from patterns.
# This is the same as dask for example.
#
# /!\ Here are some behaviors specific to fsspec glob that are different from glob.glob, Path.glob, Path.match or fnmatch:
# - '*' matches only first level items
# - '**' matches all items
# - '**/*' matches all at least second level items
#
# More generally:
# - '*' matches any character except a forward-slash (to match just the file or directory name)
# - '**' matches any character including a forward-slash /
return {
pattern: sorted(
Path(os.path.abspath(path)).as_posix()
for path in fsspec.filesystem("file").glob(os.path.join(complex_data_dir, pattern))
if Path(path).name not in _FILES_TO_IGNORE
and not any(
is_relative_to(Path(path), os.path.join(complex_data_dir, dir_path)) for dir_path in _DIRS_TO_IGNORE
)
and Path(path).is_file()
)
for pattern in _TEST_PATTERNS
}
@pytest.fixture
def hub_dataset_repo_path(tmpfs, complex_data_dir):
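    # mirror the local complex_data_dir layout onto the in-memory 'tmp://' filesystem to emulate a Hub dataset repository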
for path in Path(complex_data_dir).rglob("*"):
if path.is_file():
with tmpfs.open(path.relative_to(complex_data_dir).as_posix(), "wb") as f:
f.write(path.read_bytes())
yield "tmp://"
@pytest.fixture
def hub_dataset_repo_patterns_results(hub_dataset_repo_path, complex_data_dir, pattern_results):
return {
pattern: [
hub_dataset_repo_path + Path(path).relative_to(complex_data_dir).as_posix()
for path in pattern_results[pattern]
]
for pattern in pattern_results
}
def test_is_inside_unrequested_special_dir(complex_data_dir, pattern_results):
# usual patterns outside special dir work fine
for pattern, result in pattern_results.items():
if result:
matched_rel_path = str(Path(result[0]).relative_to(complex_data_dir))
assert _is_inside_unrequested_special_dir(matched_rel_path, pattern) is False
# check behavior for special dir
f = _is_inside_unrequested_special_dir
assert f("__pycache__/b.txt", "**") is True
assert f("__pycache__/b.txt", "*/b.txt") is True
assert f("__pycache__/b.txt", "__pycache__/*") is False
assert f("__pycache__/__b.txt", "__pycache__/*") is False
assert f("__pycache__/__b.txt", "__*/*") is False
assert f("__b.txt", "*") is False
def test_is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(complex_data_dir, pattern_results):
# usual patterns outside hidden dir work fine
for pattern, result in pattern_results.items():
if result:
matched_rel_path = str(Path(result[0]).relative_to(complex_data_dir))
            assert _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path, pattern) is False
# check behavior for hidden dir and file
f = _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir
assert f(".hidden_file.txt", "**") is True
assert f(".hidden_file.txt", ".*") is False
assert f(".hidden_dir/a.txt", "**") is True
assert f(".hidden_dir/a.txt", ".*/*") is False
assert f(".hidden_dir/a.txt", ".hidden_dir/*") is False
assert f(".hidden_dir/.hidden_file.txt", "**") is True
assert f(".hidden_dir/.hidden_file.txt", ".*/*") is True
assert f(".hidden_dir/.hidden_file.txt", ".*/.*") is False
assert f(".hidden_dir/.hidden_file.txt", ".hidden_dir/*") is True
assert f(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*") is False
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_pattern_results_fixture(pattern_results, pattern):
assert len(pattern_results[pattern]) == _TEST_PATTERNS_SIZES[pattern]
assert all(Path(path).is_file() for path in pattern_results[pattern])
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_resolve_pattern_locally(complex_data_dir, pattern, pattern_results):
try:
resolved_data_files = resolve_pattern(pattern, complex_data_dir)
assert sorted(str(f) for f in resolved_data_files) == pattern_results[pattern]
except FileNotFoundError:
assert len(pattern_results[pattern]) == 0
def test_resolve_pattern_locally_with_dot_in_base_path(complex_data_dir):
base_path_with_dot = os.path.join(complex_data_dir, "data", ".dummy_subdir")
resolved_data_files = resolve_pattern(os.path.join(base_path_with_dot, "train.txt"), base_path_with_dot)
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_with_absolute_path(tmp_path, complex_data_dir):
abs_path = os.path.join(complex_data_dir, "data", "train.txt")
resolved_data_files = resolve_pattern(abs_path, str(tmp_path / "blabla"))
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_with_double_dots(tmp_path, complex_data_dir):
path_with_double_dots = os.path.join(complex_data_dir, "data", "subdir", "..", "train.txt")
resolved_data_files = resolve_pattern(path_with_double_dots, str(tmp_path / "blabla"))
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_returns_hidden_file_only_if_requested(complex_data_dir):
with pytest.raises(FileNotFoundError):
resolve_pattern("*dummy", complex_data_dir)
resolved_data_files = resolve_pattern(".dummy", complex_data_dir)
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_hidden_base_path(tmp_path):
hidden = tmp_path / ".test_hidden_base_path"
hidden.mkdir()
(tmp_path / ".test_hidden_base_path" / "a.txt").touch()
resolved_data_files = resolve_pattern("*", str(hidden))
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_returns_hidden_dir_only_if_requested(complex_data_dir):
with pytest.raises(FileNotFoundError):
resolve_pattern("data/*dummy_subdir/train.txt", complex_data_dir)
resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", complex_data_dir)
assert len(resolved_data_files) == 1
resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", complex_data_dir)
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_returns_special_dir_only_if_requested(complex_data_dir):
with pytest.raises(FileNotFoundError):
resolve_pattern("data/*dummy_subdir/train.txt", complex_data_dir)
resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", complex_data_dir)
assert len(resolved_data_files) == 1
resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", complex_data_dir)
assert len(resolved_data_files) == 1
def test_resolve_pattern_locally_special_base_path(tmp_path):
special = tmp_path / "__test_special_base_path__"
special.mkdir()
(tmp_path / "__test_special_base_path__" / "a.txt").touch()
resolved_data_files = resolve_pattern("*", str(special))
assert len(resolved_data_files) == 1
@pytest.mark.parametrize("pattern,size,extensions", [("**", 4, [".txt"]), ("**", 4, None), ("**", 0, [".blablabla"])])
def test_resolve_pattern_locally_with_extensions(complex_data_dir, pattern, size, extensions):
if size > 0:
resolved_data_files = resolve_pattern(pattern, complex_data_dir, allowed_extensions=extensions)
assert len(resolved_data_files) == size
else:
with pytest.raises(FileNotFoundError):
resolve_pattern(pattern, complex_data_dir, allowed_extensions=extensions)
def test_fail_resolve_pattern_locally(complex_data_dir):
with pytest.raises(FileNotFoundError):
        resolve_pattern("blablabla", complex_data_dir)
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support symlinks in the default mode")
def test_resolve_pattern_locally_does_not_resolve_symbolic_links(tmp_path, complex_data_dir):
(tmp_path / "train_data_symlink.txt").symlink_to(os.path.join(complex_data_dir, "data", "train.txt"))
resolved_data_files = resolve_pattern("train_data_symlink.txt", str(tmp_path))
assert len(resolved_data_files) == 1
assert Path(resolved_data_files[0]) == tmp_path / "train_data_symlink.txt"
def test_resolve_pattern_locally_sorted_files(tmp_path_factory):
path = str(tmp_path_factory.mktemp("unsorted_text_files"))
unsorted_names = ["0.txt", "2.txt", "3.txt"]
for name in unsorted_names:
with open(os.path.join(path, name), "w"):
pass
resolved_data_files = resolve_pattern("*", path)
resolved_names = [os.path.basename(data_file) for data_file in resolved_data_files]
assert resolved_names == sorted(unsorted_names)
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_resolve_pattern_in_dataset_repository(hub_dataset_repo_path, pattern, hub_dataset_repo_patterns_results):
try:
resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path)
assert sorted(str(f) for f in resolved_data_files) == hub_dataset_repo_patterns_results[pattern]
except FileNotFoundError:
assert len(hub_dataset_repo_patterns_results[pattern]) == 0
@pytest.mark.parametrize(
"pattern,size,base_path", [("**", 4, None), ("**", 4, "data"), ("**", 2, "data/subdir"), ("**", 0, "data/subdir2")]
)
def test_resolve_pattern_in_dataset_repository_with_base_path(hub_dataset_repo_path, pattern, size, base_path):
base_path = hub_dataset_repo_path + (base_path or "")
if size > 0:
resolved_data_files = resolve_pattern(pattern, base_path)
assert len(resolved_data_files) == size
else:
with pytest.raises(FileNotFoundError):
resolve_pattern(pattern, base_path)
@pytest.mark.parametrize("pattern,size,extensions", [("**", 4, [".txt"]), ("**", 4, None), ("**", 0, [".blablabla"])])
def test_resolve_pattern_in_dataset_repository_with_extensions(hub_dataset_repo_path, pattern, size, extensions):
if size > 0:
resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path, allowed_extensions=extensions)
assert len(resolved_data_files) == size
else:
with pytest.raises(FileNotFoundError):
resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path, allowed_extensions=extensions)
def test_fail_resolve_pattern_in_dataset_repository(hub_dataset_repo_path):
with pytest.raises(FileNotFoundError):
resolve_pattern("blablabla", hub_dataset_repo_path)
def test_resolve_pattern_in_dataset_repository_returns_hidden_file_only_if_requested(hub_dataset_repo_path):
with pytest.raises(FileNotFoundError):
resolve_pattern("*dummy", hub_dataset_repo_path)
resolved_data_files = resolve_pattern(".dummy", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
def test_resolve_pattern_in_dataset_repository_hidden_base_path(tmpfs):
tmpfs.touch(".hidden/a.txt")
resolved_data_files = resolve_pattern("*", base_path="tmp://.hidden")
assert len(resolved_data_files) == 1
def test_resolve_pattern_in_dataset_repository_returns_hidden_dir_only_if_requested(hub_dataset_repo_path):
with pytest.raises(FileNotFoundError):
resolve_pattern("data/*dummy_subdir/train.txt", hub_dataset_repo_path)
resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
def test_resolve_pattern_in_dataset_repository_returns_special_dir_only_if_requested(hub_dataset_repo_path):
with pytest.raises(FileNotFoundError):
resolve_pattern("data/*dummy_subdir/train.txt", hub_dataset_repo_path)
resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", hub_dataset_repo_path)
assert len(resolved_data_files) == 1
def test_resolve_pattern_in_dataset_repository_special_base_path(tmpfs):
tmpfs.touch("__special__/a.txt")
resolved_data_files = resolve_pattern("*", base_path="tmp://__special__")
assert len(resolved_data_files) == 1
@pytest.fixture
def dummy_fs():
DummyTestFS = mock_fs(["train.txt", "test.txt"])
_fsspec_registry["mock"] = DummyTestFS
_fsspec_registry["dummy"] = DummyTestFS
yield
del _fsspec_registry["mock"]
del _fsspec_registry["dummy"]
def test_resolve_pattern_fs(dummy_fs):
resolved_data_files = resolve_pattern("mock://train.txt", base_path="")
assert resolved_data_files == ["mock://train.txt"]
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_DataFilesList_from_patterns_in_dataset_repository(
hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern
):
try:
data_files_list = DataFilesList.from_patterns([pattern], hub_dataset_repo_path)
assert sorted(data_files_list) == hub_dataset_repo_patterns_results[pattern]
assert len(data_files_list.origin_metadata) == len(data_files_list)
except FileNotFoundError:
assert len(hub_dataset_repo_patterns_results[pattern]) == 0
def test_DataFilesList_from_patterns_locally_with_extra_files(complex_data_dir, text_file):
data_files_list = DataFilesList.from_patterns([_TEST_URL, text_file.as_posix()], complex_data_dir)
assert list(data_files_list) == [_TEST_URL, text_file.as_posix()]
assert len(data_files_list.origin_metadata) == 2
def test_DataFilesList_from_patterns_raises_FileNotFoundError(complex_data_dir):
with pytest.raises(FileNotFoundError):
DataFilesList.from_patterns(["file_that_doesnt_exist.txt"], complex_data_dir)
class TestDataFilesDict:
def test_key_order_after_copy(self):
data_files = DataFilesDict({"train": "train.csv", "test": "test.csv"})
copied_data_files = copy.deepcopy(data_files)
assert list(copied_data_files.keys()) == list(data_files.keys()) # test split order with list()
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_DataFilesDict_from_patterns_in_dataset_repository(
hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern
):
split_name = "train"
try:
data_files = DataFilesDict.from_patterns({split_name: [pattern]}, hub_dataset_repo_path)
assert all(isinstance(data_files_list, DataFilesList) for data_files_list in data_files.values())
assert sorted(data_files[split_name]) == hub_dataset_repo_patterns_results[pattern]
except FileNotFoundError:
assert len(hub_dataset_repo_patterns_results[pattern]) == 0
@pytest.mark.parametrize(
"pattern,size,base_path,split_name",
[
("**", 4, None, "train"),
("**", 4, "data", "train"),
("**", 2, "data/subdir", "train"),
("**train*", 1, "data/subdir", "train"),
("**test*", 1, "data/subdir", "test"),
("**", 0, "data/subdir2", "train"),
],
)
def test_DataFilesDict_from_patterns_in_dataset_repository_with_base_path(
hub_dataset_repo_path, pattern, size, base_path, split_name
):
base_path = hub_dataset_repo_path + (base_path or "")
if size > 0:
data_files = DataFilesDict.from_patterns({split_name: [pattern]}, base_path=base_path)
assert len(data_files[split_name]) == size
else:
with pytest.raises(FileNotFoundError):
resolve_pattern(pattern, base_path)
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_DataFilesDict_from_patterns_locally(complex_data_dir, pattern_results, pattern):
split_name = "train"
try:
data_files = DataFilesDict.from_patterns({split_name: [pattern]}, complex_data_dir)
assert all(isinstance(data_files_list, DataFilesList) for data_files_list in data_files.values())
assert sorted(data_files[split_name]) == pattern_results[pattern]
except FileNotFoundError:
assert len(pattern_results[pattern]) == 0
def test_DataFilesDict_from_patterns_in_dataset_repository_hashing(hub_dataset_repo_path):
patterns = {"train": ["**/train.txt"], "test": ["**/test.txt"]}
data_files1 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path)
data_files2 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path)
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True))
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
patterns2 = {"train": ["data/**train.txt"], "test": ["data/**test.txt"]}
data_files2 = DataFilesDict.from_patterns(patterns2, hub_dataset_repo_path)
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
patterns2 = {"train": ["data/**train.txt"], "test": ["data/**train.txt"]}
data_files2 = DataFilesDict.from_patterns(patterns2, hub_dataset_repo_path)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
# the tmpfs used to mock the hub repo is based on a local directory
# therefore os.stat is used to get the mtime of the data files
with patch("os.stat", return_value=os.stat(__file__)):
data_files2 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
def test_DataFilesDict_from_patterns_locally_or_remote_hashing(text_file):
patterns = {"train": [_TEST_URL], "test": [str(text_file)]}
data_files1 = DataFilesDict.from_patterns(patterns)
data_files2 = DataFilesDict.from_patterns(patterns)
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True))
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
patterns2 = {"train": [_TEST_URL], "test": [_TEST_URL]}
data_files2 = DataFilesDict.from_patterns(patterns2)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
with patch("fsspec.implementations.http._file_info", return_value={}):
data_files2 = DataFilesDict.from_patterns(patterns)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
with patch("os.stat", return_value=os.stat(__file__)):
data_files2 = DataFilesDict.from_patterns(patterns)
assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
def mock_fs(file_paths: List[str]):
"""
Set up a mock filesystem for fsspec containing the provided files
Example:
```py
    >>> DummyTestFS = mock_fs(["data/train.txt", "data.test.txt"])
    >>> fs = DummyTestFS()
    >>> assert fsspec.get_filesystem_class("mock").__name__ == "DummyTestFS"  # once DummyTestFS is registered under "mock"
    >>> assert type(fs).__name__ == "DummyTestFS"
>>> print(fs.glob("**"))
["data", "data/train.txt", "data.test.txt"]
```
"""
dir_paths = {file_path.rsplit("/")[0] for file_path in file_paths if "/" in file_path}
fs_contents = [{"name": dir_path, "type": "directory"} for dir_path in dir_paths] + [
{"name": file_path, "type": "file", "size": 10} for file_path in file_paths
]
class DummyTestFS(AbstractFileSystem):
protocol = ("mock", "dummy")
_fs_contents = fs_contents
def ls(self, path, detail=True, refresh=True, **kwargs):
if kwargs.pop("strip_proto", True):
path = self._strip_protocol(path)
files = not refresh and self._ls_from_cache(path)
if not files:
files = [file for file in self._fs_contents if path == self._parent(file["name"])]
files.sort(key=lambda file: file["name"])
self.dircache[path.rstrip("/")] = files
if detail:
return files
return [file["name"] for file in files]
return DummyTestFS
@pytest.mark.parametrize(
"data_file_per_split",
[
# === Main cases ===
# file named after split at the root
{"train": "train.txt", "validation": "valid.txt", "test": "test.txt"},
# file named after split in a directory
{
"train": "data/train.txt",
"validation": "data/valid.txt",
"test": "data/test.txt",
},
# directory named after split
{
"train": "train/split.txt",
"validation": "valid/split.txt",
"test": "test/split.txt",
},
# sharded splits
{
"train": [f"data/train_{i}.txt" for i in range(3)],
"validation": [f"data/validation_{i}.txt" for i in range(3)],
"test": [f"data/test_{i}.txt" for i in range(3)],
},
# sharded splits with standard format (+ custom split name)
{
"train": [f"data/train-0000{i}-of-00003.txt" for i in range(3)],
"validation": [f"data/validation-0000{i}-of-00003.txt" for i in range(3)],
"test": [f"data/test-0000{i}-of-00003.txt" for i in range(3)],
"random": [f"data/random-0000{i}-of-00003.txt" for i in range(3)],
},
# === Secondary cases ===
# Default to train split
{"train": "dataset.txt"},
{"train": "data/dataset.txt"},
{"train": ["data/image.jpg", "metadata.jsonl"]},
{"train": ["data/image.jpg", "metadata.csv"]},
# With prefix or suffix in directory or file names
{"train": "my_train_dir/dataset.txt"},
{"train": "data/my_train_file.txt"},
{"test": "my_test_dir/dataset.txt"},
{"test": "data/my_test_file.txt"},
{"validation": "my_validation_dir/dataset.txt"},
{"validation": "data/my_validation_file.txt"},
# With test<>eval aliases
{"test": "eval.txt"},
{"test": "data/eval.txt"},
{"test": "eval/dataset.txt"},
# With valid<>dev aliases
{"validation": "dev.txt"},
{"validation": "data/dev.txt"},
{"validation": "dev/dataset.txt"},
# With valid<>val aliases
{"validation": "val.txt"},
{"validation": "data/val.txt"},
# With other extensions
{"train": "train.parquet", "validation": "valid.parquet", "test": "test.parquet"},
# With "dev" or "eval" without separators
{"train": "developers_list.txt"},
{"train": "data/seqeval_results.txt"},
{"train": "contest.txt"},
# With supported separators
{"test": "my.test.file.txt"},
{"test": "my-test-file.txt"},
{"test": "my_test_file.txt"},
{"test": "my test file.txt"},
{"test": "test00001.txt"},
],
)
def test_get_data_files_patterns(data_file_per_split):
data_file_per_split = {k: v if isinstance(v, list) else [v] for k, v in data_file_per_split.items()}
file_paths = [file_path for split_file_paths in data_file_per_split.values() for file_path in split_file_paths]
DummyTestFS = mock_fs(file_paths)
fs = DummyTestFS()
def resolver(pattern):
return [file_path for file_path in fs.glob(pattern) if fs.isfile(file_path)]
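    # _get_data_files_patterns is expected to probe known split naming patterns (train/test/validation conventions)
    # against this resolver and return, per split, patterns matching exactly the files listed above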
patterns_per_split = _get_data_files_patterns(resolver)
assert list(patterns_per_split.keys()) == list(data_file_per_split.keys()) # Test split order with list()
for split, patterns in patterns_per_split.items():
matched = [file_path for pattern in patterns for file_path in resolver(pattern)]
assert matched == data_file_per_split[split]
@pytest.mark.parametrize(
"metadata_files",
[
# metadata files at the root
["metadata.jsonl"],
["metadata.csv"],
# nested metadata files
["data/metadata.jsonl", "data/train/metadata.jsonl"],
["data/metadata.csv", "data/train/metadata.csv"],
],
)
def test_get_metadata_files_patterns(metadata_files):
def resolver(pattern):
return [PurePath(path) for path in set(metadata_files) if PurePath(path).match(pattern)]
patterns = _get_metadata_files_patterns(resolver)
matched = [path for path in metadata_files for pattern in patterns if PurePath(path).match(pattern)]
    # Use a set to remove the difference in behavior between PurePath.match and matching via fsspec.glob
assert len(set(matched)) == len(metadata_files)
assert sorted(set(matched)) == sorted(metadata_files)
| datasets-main | tests/test_data_files.py |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
lock1 = FileLock(str(tmpdir / "foo.lock"))
lock2 = FileLock(str(tmpdir / "foo.lock"))
timeout = 0.01
with lock1.acquire():
with pytest.raises(Timeout):
_start = time.time()
lock2.acquire(timeout)
assert time.time() - _start > timeout
def test_long_filename(tmpdir):
filename = "a" * 1000 + ".lock"
lock1 = FileLock(str(tmpdir / filename))
assert lock1._lock_file.endswith(".lock")
assert not lock1._lock_file.endswith(filename)
assert len(os.path.basename(lock1._lock_file)) <= 255
lock2 = FileLock(tmpdir / filename)
with lock1.acquire():
with pytest.raises(Timeout):
lock2.acquire(0)
| datasets-main | tests/test_filelock.py |
# Copyright 2020 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"
REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
@wraps(test_case)
def wrapper(self, metric_name):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"')
else:
test_case(self, metric_name)
return wrapper
def skip_if_metric_requires_transformers(test_case):
@wraps(test_case)
def wrapper(self, metric_name):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"')
else:
test_case(self, metric_name)
return wrapper
def skip_on_windows_if_not_windows_compatible(test_case):
@wraps(test_case)
def wrapper(self, metric_name):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"')
else:
test_case(self, metric_name)
return wrapper
def get_local_metric_names():
metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
INTENSIVE_CALLS_PATCHER = {}
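    # maps a metric name to a context manager that patches its expensive model download / forward-pass calls,
    # populated via register_intensive_calls_patcher below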
metric_name = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
def test_load_metric(self, metric_name):
doctest.ELLIPSIS_MARKER = "[...]"
metric_module = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
)
metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
# check parameters
parameters = inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(metric_name, metric_module.__name__):
with self.use_local_metrics():
try:
results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed, 0)
self.assertGreater(results.attempted, 1)
@slow
def test_load_real_metric(self, metric_name):
doctest.ELLIPSIS_MARKER = "[...]"
metric_module = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
)
# run doctest
with self.use_local_metrics():
results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
self.assertEqual(results.failed, 0)
self.assertGreater(results.attempted, 1)
@contextmanager
def patch_intensive_calls(self, metric_name, module_name):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
yield
else:
yield
@contextmanager
def use_local_metrics(self):
def load_local_metric(metric_name, *args, **kwargs):
return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)
with patch("datasets.load_metric") as mock_load_metric:
mock_load_metric.side_effect = load_local_metric
yield
@classmethod
def register_intensive_calls_patcher(cls, metric_name):
def wrapper(patcher):
patcher = contextmanager(patcher)
cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
return patcher
return wrapper
# Metrics intensive calls patchers
# --------------------------------
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv", "", "") # handle pytest cli flags
class MockedPredictor(Predictor):
def predict(self, input_dict):
assert len(input_dict["input_ids"]) == 2
return np.array([1.03, 1.04])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor") as mock_create_predictor:
mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
import torch
def bert_cos_score_idf(model, refs, *args, **kwargs):
return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model"), patch(
"bert_score.scorer.bert_cos_score_idf"
) as mock_bert_cos_score_idf:
mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
def load_from_checkpoint(model_path):
class Model:
def predict(self, data, *args, **kwargs):
assert len(data) == 2
scores = [0.19, 0.92]
return scores, sum(scores) / len(scores)
return Model()
    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the comet model from a checkpoint
with patch("comet.download_model") as mock_download_model:
mock_download_model.return_value = None
with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
metric = load_metric(os.path.join("metrics", "seqeval"))
wrong_scheme = "ERROR"
error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(ValueError, match=re.escape(error_message)):
metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| datasets-main | tests/test_metric_common.py |
import os
import zipfile
import pytest
from datasets.utils.extract import (
Bzip2Extractor,
Extractor,
GzipExtractor,
Lz4Extractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive",
[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
],
)
def test_base_extractors(
compression_format,
is_archive,
bz2_file,
gz_file,
lz4_file,
seven_zip_file,
tar_file,
xz_file,
zip_file,
zstd_file,
tmp_path,
text_file,
):
input_paths_and_base_extractors = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bz2_file, Bzip2Extractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lz4_file, Lz4Extractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
input_path, base_extractor = input_paths_and_base_extractors[compression_format]
if input_path is None:
reason = f"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
assert base_extractor.is_extractable(input_path)
output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(input_path, output_path)
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
extracted_file_content = file_path.read_text(encoding="utf-8")
else:
extracted_file_content = output_path.read_text(encoding="utf-8")
expected_file_content = text_file.read_text(encoding="utf-8")
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive",
[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
],
)
def test_extractor(
compression_format,
is_archive,
bz2_file,
gz_file,
lz4_file,
seven_zip_file,
tar_file,
xz_file,
zip_file,
zstd_file,
tmp_path,
text_file,
):
input_paths = {
"7z": seven_zip_file,
"bz2": bz2_file,
"gzip": gz_file,
"lz4": lz4_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
input_path = input_paths[compression_format]
if input_path is None:
reason = f"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
extractor_format = Extractor.infer_extractor_format(input_path)
assert extractor_format is not None
output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(input_path, output_path, extractor_format)
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
extracted_file_content = file_path.read_text(encoding="utf-8")
else:
extracted_file_content = output_path.read_text(encoding="utf-8")
expected_file_content = text_file.read_text(encoding="utf-8")
assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
import tarfile
directory = tmp_path / "data_dot_dot"
directory.mkdir()
path = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(path, "w") as f:
f.add(text_file, arcname=os.path.join("..", text_file.name))
return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
import tarfile
directory = tmp_path / "data_sym_link"
directory.mkdir()
path = directory / "tar_file_with_sym_link.tar"
os.symlink("..", directory / "subdir", target_is_directory=True)
with tarfile.TarFile(path, "w") as f:
f.add(str(directory / "subdir"), arcname="subdir") # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log",
[("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
insecure_tar_files = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
input_path = insecure_tar_files[insecure_tar_file]
output_path = tmp_path / "extracted"
TarExtractor.extract(input_path, output_path)
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
not_a_zip_file = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
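    # a tiny PNG whose payload happens to contain the zip end-of-central-directory signature (PK\x05\x06),
    # which is enough to fool zipfile.is_zipfile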
data = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb") as f:
f.write(data)
assert zipfile.is_zipfile(str(not_a_zip_file)) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(not_a_zip_file) # but we're right
| datasets-main | tests/test_extract.py |
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
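    # with shards_per_node * world_size shards in total, each rank should get exactly shards_per_node shards
    # and no example should be duplicated across ranks (checked below)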
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_distributed_shuffle_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 2
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
ds_rank0 = split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle(seed=42)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle()
ds_rank0 = split_dataset_by_node(full_ds.shuffle(seed=42), rank=0, world_size=world_size)
assert len(list(ds_rank0)) == 1 + full_size // world_size
with pytest.raises(RuntimeError):
split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_run(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "run_torch_distributed.py"
distributed_args = f"""
-m torch.distributed.run
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
(2, 2), # each node has 2 shards and each worker has 1 shards
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_run_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "run_torch_distributed.py"
distributed_args = f"""
-m torch.distributed.run
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
| datasets-main | tests/test_distributed.py |
import importlib
import os
import pickle
import shutil
import tempfile
import time
from hashlib import sha256
from multiprocessing import Pool
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
import dill
import pyarrow as pa
import pytest
import requests
import datasets
from datasets import config, load_dataset, load_from_disk
from datasets.arrow_dataset import Dataset
from datasets.arrow_writer import ArrowWriter
from datasets.builder import DatasetBuilder
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.data_files import DataFilesDict
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.download.download_config import DownloadConfig
from datasets.features import Features, Value
from datasets.iterable_dataset import IterableDataset
from datasets.load import (
CachedDatasetModuleFactory,
CachedMetricModuleFactory,
GithubMetricModuleFactory,
HubDatasetModuleFactoryWithoutScript,
HubDatasetModuleFactoryWithScript,
LocalDatasetModuleFactoryWithoutScript,
LocalDatasetModuleFactoryWithScript,
LocalMetricModuleFactory,
PackagedDatasetModuleFactory,
infer_module_for_data_files_list,
infer_module_for_data_files_list_in_archives,
load_dataset_builder,
)
from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder, AudioFolderConfig
from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder, ImageFolderConfig
from datasets.utils.logging import INFO, get_logger
from .utils import (
OfflineSimulationMode,
assert_arrow_memory_doesnt_increase,
assert_arrow_memory_increases,
offline,
require_pil,
require_sndfile,
set_current_working_directory_to_temp_dir,
)
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = """
import os
import datasets
from datasets import DatasetInfo, Features, Split, SplitGenerator, Value
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self) -> DatasetInfo:
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "train.txt")}),
SplitGenerator(Split.TEST, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "test.txt")}),
]
def _generate_examples(self, filepath, **kwargs):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, {"text": line.strip()}
"""
SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script
SAMPLE_DATASET_IDENTIFIER2 = "hf-internal-testing/dataset_with_data_files" # only has data files
SAMPLE_DATASET_IDENTIFIER3 = "hf-internal-testing/multi_dir_dataset" # has multiple data directories
SAMPLE_DATASET_IDENTIFIER4 = "hf-internal-testing/imagefolder_with_metadata" # imagefolder with a metadata file outside of the train/test directories
SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER = "hf-internal-testing/_dummy"
SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST = "_dummy"
SAMPLE_DATASET_NO_CONFIGS_IN_METADATA = "hf-internal-testing/audiofolder_no_configs_in_metadata"
SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_single_config_in_metadata"
SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata"
SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT = (
"hf-internal-testing/audiofolder_two_configs_in_metadata_with_default"
)
METRIC_LOADING_SCRIPT_NAME = "__dummy_metric1__"
METRIC_LOADING_SCRIPT_CODE = """
import datasets
from datasets import MetricInfo, Features, Value
class __DummyMetric1__(datasets.Metric):
def _info(self):
        return MetricInfo(features=Features({"predictions": Value("int64"), "references": Value("int64")}))
def _compute(self, predictions, references):
return {"__dummy_metric1__": sum(int(p == r) for p, r in zip(predictions, references))}
"""
@pytest.fixture
def data_dir(tmp_path):
data_dir = tmp_path / "data_dir"
data_dir.mkdir()
with open(data_dir / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "test.txt", "w") as f:
f.write("bar\n" * 10)
return str(data_dir)
@pytest.fixture
def data_dir_with_arrow(tmp_path):
data_dir = tmp_path / "data_dir"
data_dir.mkdir()
output_train = os.path.join(data_dir, "train.arrow")
with ArrowWriter(path=output_train) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo"] * 10}))
num_examples, num_bytes = writer.finalize()
assert num_examples == 10
assert num_bytes > 0
output_test = os.path.join(data_dir, "test.arrow")
with ArrowWriter(path=output_test) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["bar"] * 10}))
num_examples, num_bytes = writer.finalize()
assert num_examples == 10
assert num_bytes > 0
return str(data_dir)
@pytest.fixture
def data_dir_with_metadata(tmp_path):
data_dir = tmp_path / "data_dir_with_metadata"
data_dir.mkdir()
with open(data_dir / "train.jpg", "wb") as f:
f.write(b"train_image_bytes")
with open(data_dir / "test.jpg", "wb") as f:
f.write(b"test_image_bytes")
with open(data_dir / "metadata.jsonl", "w") as f:
f.write(
"""\
{"file_name": "train.jpg", "caption": "Cool tran image"}
{"file_name": "test.jpg", "caption": "Cool test image"}
"""
)
return str(data_dir)
@pytest.fixture
def data_dir_with_single_config_in_metadata(tmp_path):
data_dir = tmp_path / "data_dir_with_one_default_config_in_metadata"
cats_data_dir = data_dir / "cats"
cats_data_dir.mkdir(parents=True)
dogs_data_dir = data_dir / "dogs"
dogs_data_dir.mkdir(parents=True)
with open(cats_data_dir / "cat.jpg", "wb") as f:
f.write(b"this_is_a_cat_image_bytes")
with open(dogs_data_dir / "dog.jpg", "wb") as f:
f.write(b"this_is_a_dog_image_bytes")
with open(data_dir / "README.md", "w") as f:
f.write(
f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: custom
drop_labels: true
---
"""
)
return str(data_dir)
@pytest.fixture
def data_dir_with_two_config_in_metadata(tmp_path):
data_dir = tmp_path / "data_dir_with_two_configs_in_metadata"
cats_data_dir = data_dir / "cats"
cats_data_dir.mkdir(parents=True)
dogs_data_dir = data_dir / "dogs"
dogs_data_dir.mkdir(parents=True)
with open(cats_data_dir / "cat.jpg", "wb") as f:
f.write(b"this_is_a_cat_image_bytes")
with open(dogs_data_dir / "dog.jpg", "wb") as f:
f.write(b"this_is_a_dog_image_bytes")
with open(data_dir / "README.md", "w") as f:
f.write(
f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: "v1"
drop_labels: true
default: true
- config_name: "v2"
drop_labels: false
---
"""
)
return str(data_dir)
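# In the README above, `default: true` marks the "v1" config as the default one; the module factory
# tests below assert that it is surfaced as `default_config_name`.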
@pytest.fixture
def data_dir_with_data_dir_configs_in_metadata(tmp_path):
data_dir = tmp_path / "data_dir_with_two_configs_in_metadata"
cats_data_dir = data_dir / "cats"
cats_data_dir.mkdir(parents=True)
dogs_data_dir = data_dir / "dogs"
dogs_data_dir.mkdir(parents=True)
with open(cats_data_dir / "cat.jpg", "wb") as f:
f.write(b"this_is_a_cat_image_bytes")
with open(dogs_data_dir / "dog.jpg", "wb") as f:
f.write(b"this_is_a_dog_image_bytes")
@pytest.fixture
def sub_data_dirs(tmp_path):
data_dir2 = tmp_path / "data_dir2"
relative_subdir1 = "subdir1"
sub_data_dir1 = data_dir2 / relative_subdir1
sub_data_dir1.mkdir(parents=True)
with open(sub_data_dir1 / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(sub_data_dir1 / "test.txt", "w") as f:
f.write("bar\n" * 10)
relative_subdir2 = "subdir2"
sub_data_dir2 = tmp_path / data_dir2 / relative_subdir2
sub_data_dir2.mkdir(parents=True)
with open(sub_data_dir2 / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(sub_data_dir2 / "test.txt", "w") as f:
f.write("bar\n" * 10)
return str(data_dir2), relative_subdir1
@pytest.fixture
def complex_data_dir(tmp_path):
data_dir = tmp_path / "complex_data_dir"
data_dir.mkdir()
(data_dir / "data").mkdir()
with open(data_dir / "data" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "data" / "test.txt", "w") as f:
f.write("bar\n" * 10)
with open(data_dir / "README.md", "w") as f:
f.write("This is a readme")
with open(data_dir / ".dummy", "w") as f:
f.write("this is a dummy file that is not a data file")
return str(data_dir)
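# This fixture presumably exercises data files resolution: "README.md" and the hidden ".dummy" file
# are not data files and are expected to be ignored in favor of the text files under "data/".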
@pytest.fixture
def dataset_loading_script_dir(tmp_path):
script_name = DATASET_LOADING_SCRIPT_NAME
script_dir = tmp_path / script_name
script_dir.mkdir()
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(DATASET_LOADING_SCRIPT_CODE)
return str(script_dir)
@pytest.fixture
def dataset_loading_script_dir_readonly(tmp_path):
script_name = DATASET_LOADING_SCRIPT_NAME
script_dir = tmp_path / "readonly" / script_name
script_dir.mkdir(parents=True)
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(DATASET_LOADING_SCRIPT_CODE)
dataset_loading_script_dir = str(script_dir)
# Make this directory readonly
os.chmod(dataset_loading_script_dir, 0o555)
os.chmod(os.path.join(dataset_loading_script_dir, f"{script_name}.py"), 0o555)
return dataset_loading_script_dir
@pytest.fixture
def metric_loading_script_dir(tmp_path):
script_name = METRIC_LOADING_SCRIPT_NAME
script_dir = tmp_path / script_name
script_dir.mkdir()
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(METRIC_LOADING_SCRIPT_CODE)
return str(script_dir)
@pytest.mark.parametrize(
"data_files, expected_module, expected_builder_kwargs",
[
(["train.csv"], "csv", {}),
(["train.tsv"], "csv", {"sep": "\t"}),
(["train.json"], "json", {}),
(["train.jsonl"], "json", {}),
(["train.parquet"], "parquet", {}),
(["train.arrow"], "arrow", {}),
(["train.txt"], "text", {}),
(["uppercase.TXT"], "text", {}),
(["unsupported.ext"], None, {}),
([""], None, {}),
],
)
def test_infer_module_for_data_files(data_files, expected_module, expected_builder_kwargs):
module, builder_kwargs = infer_module_for_data_files_list(data_files)
assert module == expected_module
assert builder_kwargs == expected_builder_kwargs
@pytest.mark.parametrize(
"data_file, expected_module",
[
("zip_csv_path", "csv"),
("zip_csv_with_dir_path", "csv"),
("zip_uppercase_csv_path", "csv"),
("zip_unsupported_ext_path", None),
],
)
def test_infer_module_for_data_files_in_archives(
data_file, expected_module, zip_csv_path, zip_csv_with_dir_path, zip_uppercase_csv_path, zip_unsupported_ext_path
):
data_file_paths = {
"zip_csv_path": zip_csv_path,
"zip_csv_with_dir_path": zip_csv_with_dir_path,
"zip_uppercase_csv_path": zip_uppercase_csv_path,
"zip_unsupported_ext_path": zip_unsupported_ext_path,
}
data_files = [str(data_file_paths[data_file])]
inferred_module, _ = infer_module_for_data_files_list_in_archives(data_files)
assert inferred_module == expected_module
class ModuleFactoryTest(TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(
self,
jsonl_path,
data_dir,
data_dir_with_metadata,
data_dir_with_single_config_in_metadata,
data_dir_with_two_config_in_metadata,
sub_data_dirs,
dataset_loading_script_dir,
metric_loading_script_dir,
):
self._jsonl_path = jsonl_path
self._data_dir = data_dir
self._data_dir_with_metadata = data_dir_with_metadata
self._data_dir_with_single_config_in_metadata = data_dir_with_single_config_in_metadata
self._data_dir_with_two_config_in_metadata = data_dir_with_two_config_in_metadata
self._data_dir2 = sub_data_dirs[0]
self._sub_data_dir = sub_data_dirs[1]
self._dataset_loading_script_dir = dataset_loading_script_dir
self._metric_loading_script_dir = metric_loading_script_dir
def setUp(self):
self.hf_modules_cache = tempfile.mkdtemp()
self.cache_dir = tempfile.mkdtemp()
self.download_config = DownloadConfig(cache_dir=self.cache_dir)
self.dynamic_modules_path = datasets.load.init_dynamic_modules(
name="test_datasets_modules_" + os.path.basename(self.hf_modules_cache),
hf_modules_cache=self.hf_modules_cache,
)
def test_HubDatasetModuleFactoryWithScript_with_github_dataset(self):
# "wmt_t2t" has additional imports (internal)
factory = HubDatasetModuleFactoryWithScript(
"wmt_t2t", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
def test_GithubMetricModuleFactory_with_internal_import(self):
# "squad_v2" requires additional imports (internal)
factory = GithubMetricModuleFactory(
"squad_v2", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
@pytest.mark.filterwarnings("ignore:GithubMetricModuleFactory is deprecated:FutureWarning")
def test_GithubMetricModuleFactory_with_external_import(self):
# "bleu" requires additional imports (external from github)
factory = GithubMetricModuleFactory(
"bleu", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
def test_LocalMetricModuleFactory(self):
path = os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py")
factory = LocalMetricModuleFactory(
path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
def test_LocalDatasetModuleFactoryWithScript(self):
path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py")
factory = LocalDatasetModuleFactoryWithScript(
path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert os.path.isdir(module_factory_result.builder_kwargs["base_path"])
def test_LocalDatasetModuleFactoryWithoutScript(self):
factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert os.path.isdir(module_factory_result.builder_kwargs["base_path"])
def test_LocalDatasetModuleFactoryWithoutScript_with_data_dir(self):
factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir2, data_dir=self._sub_data_dir)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert (
module_factory_result.builder_kwargs["data_files"] is not None
and len(module_factory_result.builder_kwargs["data_files"]["train"]) == 1
and len(module_factory_result.builder_kwargs["data_files"]["test"]) == 1
)
assert all(
self._sub_data_dir in Path(data_file).parts
for data_file in module_factory_result.builder_kwargs["data_files"]["train"]
+ module_factory_result.builder_kwargs["data_files"]["test"]
)
def test_LocalDatasetModuleFactoryWithoutScript_with_metadata(self):
factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir_with_metadata)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert (
module_factory_result.builder_kwargs["data_files"] is not None
and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0
and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0
)
assert any(
Path(data_file).name == "metadata.jsonl"
for data_file in module_factory_result.builder_kwargs["data_files"]["train"]
)
assert any(
Path(data_file).name == "metadata.jsonl"
for data_file in module_factory_result.builder_kwargs["data_files"]["test"]
)
def test_LocalDatasetModuleFactoryWithoutScript_with_single_config_in_metadata(self):
factory = LocalDatasetModuleFactoryWithoutScript(
self._data_dir_with_single_config_in_metadata,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
assert module_metadata_configs is not None
assert len(module_metadata_configs) == 1
assert next(iter(module_metadata_configs)) == "custom"
assert "drop_labels" in next(iter(module_metadata_configs.values()))
assert next(iter(module_metadata_configs.values()))["drop_labels"] is True
module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs
assert module_builder_configs is not None
assert len(module_builder_configs) == 1
assert isinstance(module_builder_configs[0], ImageFolderConfig)
assert module_builder_configs[0].name == "custom"
assert module_builder_configs[0].data_files is not None
assert isinstance(module_builder_configs[0].data_files, DataFilesDict)
assert len(module_builder_configs[0].data_files) == 1 # one train split
assert len(module_builder_configs[0].data_files["train"]) == 2 # two files
assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata
# config named "default" is automatically considered to be a default config
assert module_factory_result.builder_configs_parameters.default_config_name is None
# we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly
assert "drop_labels" not in module_factory_result.builder_kwargs
def test_LocalDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self):
factory = LocalDatasetModuleFactoryWithoutScript(
self._data_dir_with_two_config_in_metadata,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
assert module_metadata_configs is not None
assert len(module_metadata_configs) == 2
assert list(module_metadata_configs) == ["v1", "v2"]
assert "drop_labels" in module_metadata_configs["v1"]
assert module_metadata_configs["v1"]["drop_labels"] is True
assert "drop_labels" in module_metadata_configs["v2"]
assert module_metadata_configs["v2"]["drop_labels"] is False
module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs
assert module_builder_configs is not None
assert len(module_builder_configs) == 2
module_builder_config_v1, module_builder_config_v2 = module_builder_configs
assert module_builder_config_v1.name == "v1"
assert module_builder_config_v2.name == "v2"
assert isinstance(module_builder_config_v1, ImageFolderConfig)
assert isinstance(module_builder_config_v2, ImageFolderConfig)
assert isinstance(module_builder_config_v1.data_files, DataFilesDict)
assert isinstance(module_builder_config_v2.data_files, DataFilesDict)
assert sorted(module_builder_config_v1.data_files) == ["train"]
assert len(module_builder_config_v1.data_files["train"]) == 2
assert sorted(module_builder_config_v2.data_files) == ["train"]
assert len(module_builder_config_v2.data_files["train"]) == 2
assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata
assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata
assert (
module_factory_result.builder_configs_parameters.default_config_name == "v1"
) # it's marked as a default one in yaml
# we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly
assert "drop_labels" not in module_factory_result.builder_kwargs
def test_PackagedDatasetModuleFactory(self):
factory = PackagedDatasetModuleFactory(
"json", data_files=self._jsonl_path, download_config=self.download_config
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
def test_PackagedDatasetModuleFactory_with_data_dir(self):
factory = PackagedDatasetModuleFactory("json", data_dir=self._data_dir, download_config=self.download_config)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert (
module_factory_result.builder_kwargs["data_files"] is not None
and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0
and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0
)
assert Path(module_factory_result.builder_kwargs["data_files"]["train"][0]).parent.samefile(self._data_dir)
assert Path(module_factory_result.builder_kwargs["data_files"]["test"][0]).parent.samefile(self._data_dir)
def test_PackagedDatasetModuleFactory_with_data_dir_and_metadata(self):
factory = PackagedDatasetModuleFactory(
"imagefolder", data_dir=self._data_dir_with_metadata, download_config=self.download_config
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert (
module_factory_result.builder_kwargs["data_files"] is not None
and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0
and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0
)
assert Path(module_factory_result.builder_kwargs["data_files"]["train"][0]).parent.samefile(
self._data_dir_with_metadata
)
assert Path(module_factory_result.builder_kwargs["data_files"]["test"][0]).parent.samefile(
self._data_dir_with_metadata
)
assert any(
Path(data_file).name == "metadata.jsonl"
for data_file in module_factory_result.builder_kwargs["data_files"]["train"]
)
assert any(
Path(data_file).name == "metadata.jsonl"
for data_file in module_factory_result.builder_kwargs["data_files"]["test"]
)
@pytest.mark.integration
def test_HubDatasetModuleFactoryWithoutScript(self):
factory = HubDatasetModuleFactoryWithoutScript(
SAMPLE_DATASET_IDENTIFIER2, download_config=self.download_config
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
@pytest.mark.integration
def test_HubDatasetModuleFactoryWithoutScript_with_data_dir(self):
data_dir = "data2"
factory = HubDatasetModuleFactoryWithoutScript(
SAMPLE_DATASET_IDENTIFIER3, data_dir=data_dir, download_config=self.download_config
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
assert (
module_factory_result.builder_kwargs["data_files"] is not None
and len(module_factory_result.builder_kwargs["data_files"]["train"]) == 1
and len(module_factory_result.builder_kwargs["data_files"]["test"]) == 1
)
assert all(
data_dir in Path(data_file).parts
for data_file in module_factory_result.builder_kwargs["data_files"]["train"]
+ module_factory_result.builder_kwargs["data_files"]["test"]
)
@pytest.mark.integration
def test_HubDatasetModuleFactoryWithoutScript_with_metadata(self):
factory = HubDatasetModuleFactoryWithoutScript(
SAMPLE_DATASET_IDENTIFIER4, download_config=self.download_config
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
assert (
module_factory_result.builder_kwargs["data_files"] is not None
and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0
and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0
)
assert any(
Path(data_file).name == "metadata.jsonl"
for data_file in module_factory_result.builder_kwargs["data_files"]["train"]
)
assert any(
Path(data_file).name == "metadata.jsonl"
for data_file in module_factory_result.builder_kwargs["data_files"]["test"]
)
@pytest.mark.integration
def test_HubDatasetModuleFactoryWithoutScript_with_one_default_config_in_metadata(self):
factory = HubDatasetModuleFactoryWithoutScript(
SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA,
download_config=self.download_config,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
assert module_metadata_configs is not None
assert len(module_metadata_configs) == 1
assert next(iter(module_metadata_configs)) == "custom"
assert "drop_labels" in next(iter(module_metadata_configs.values()))
assert next(iter(module_metadata_configs.values()))["drop_labels"] is True
module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs
assert module_builder_configs is not None
assert len(module_builder_configs) == 1
assert isinstance(module_builder_configs[0], AudioFolderConfig)
assert module_builder_configs[0].name == "custom"
assert module_builder_configs[0].data_files is not None
assert isinstance(module_builder_configs[0].data_files, DataFilesDict)
assert sorted(module_builder_configs[0].data_files) == ["test", "train"]
assert len(module_builder_configs[0].data_files["train"]) == 3
assert len(module_builder_configs[0].data_files["test"]) == 3
assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata
# config named "default" is automatically considered to be a default config
assert module_factory_result.builder_configs_parameters.default_config_name is None
# we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly
assert "drop_labels" not in module_factory_result.builder_kwargs
@pytest.mark.integration
def test_HubDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self):
datasets_names = [SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT]
for dataset_name in datasets_names:
factory = HubDatasetModuleFactoryWithoutScript(dataset_name, download_config=self.download_config)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
assert module_metadata_configs is not None
assert len(module_metadata_configs) == 2
assert list(module_metadata_configs) == ["v1", "v2"]
assert "drop_labels" in module_metadata_configs["v1"]
assert module_metadata_configs["v1"]["drop_labels"] is True
assert "drop_labels" in module_metadata_configs["v2"]
assert module_metadata_configs["v2"]["drop_labels"] is False
module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs
assert module_builder_configs is not None
assert len(module_builder_configs) == 2
module_builder_config_v1, module_builder_config_v2 = module_builder_configs
assert module_builder_config_v1.name == "v1"
assert module_builder_config_v2.name == "v2"
assert isinstance(module_builder_config_v1, AudioFolderConfig)
assert isinstance(module_builder_config_v2, AudioFolderConfig)
assert isinstance(module_builder_config_v1.data_files, DataFilesDict)
assert isinstance(module_builder_config_v2.data_files, DataFilesDict)
assert sorted(module_builder_config_v1.data_files) == ["test", "train"]
assert len(module_builder_config_v1.data_files["train"]) == 3
assert len(module_builder_config_v1.data_files["test"]) == 3
assert sorted(module_builder_config_v2.data_files) == ["test", "train"]
assert len(module_builder_config_v2.data_files["train"]) == 2
assert len(module_builder_config_v2.data_files["test"]) == 1
assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata
assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata
# we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly
assert "drop_labels" not in module_factory_result.builder_kwargs
if dataset_name == SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT:
assert module_factory_result.builder_configs_parameters.default_config_name == "v1"
else:
assert module_factory_result.builder_configs_parameters.default_config_name is None
@pytest.mark.integration
def test_HubDatasetModuleFactoryWithScript(self):
factory = HubDatasetModuleFactoryWithScript(
SAMPLE_DATASET_IDENTIFIER,
download_config=self.download_config,
dynamic_modules_path=self.dynamic_modules_path,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
def test_CachedDatasetModuleFactory(self):
path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py")
factory = LocalDatasetModuleFactoryWithScript(
path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
for offline_mode in OfflineSimulationMode:
with offline(offline_mode):
factory = CachedDatasetModuleFactory(
DATASET_LOADING_SCRIPT_NAME,
dynamic_modules_path=self.dynamic_modules_path,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
@pytest.mark.filterwarnings("ignore:LocalMetricModuleFactory is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:CachedMetricModuleFactory is deprecated:FutureWarning")
def test_CachedMetricModuleFactory(self):
path = os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py")
factory = LocalMetricModuleFactory(
path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
)
module_factory_result = factory.get_module()
for offline_mode in OfflineSimulationMode:
with offline(offline_mode):
factory = CachedMetricModuleFactory(
METRIC_LOADING_SCRIPT_NAME,
dynamic_modules_path=self.dynamic_modules_path,
)
module_factory_result = factory.get_module()
assert importlib.import_module(module_factory_result.module_path) is not None
@pytest.mark.parametrize(
"factory_class",
[
CachedDatasetModuleFactory,
CachedMetricModuleFactory,
GithubMetricModuleFactory,
HubDatasetModuleFactoryWithoutScript,
HubDatasetModuleFactoryWithScript,
LocalDatasetModuleFactoryWithoutScript,
LocalDatasetModuleFactoryWithScript,
LocalMetricModuleFactory,
PackagedDatasetModuleFactory,
],
)
def test_module_factories(factory_class):
name = "dummy_name"
factory = factory_class(name)
assert factory.name == name
@pytest.mark.integration
class LoadTest(TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
def setUp(self):
self.hf_modules_cache = tempfile.mkdtemp()
self.dynamic_modules_path = datasets.load.init_dynamic_modules(
name="test_datasets_modules2", hf_modules_cache=self.hf_modules_cache
)
def tearDown(self):
shutil.rmtree(self.hf_modules_cache)
def _dummy_module_dir(self, modules_dir, dummy_module_name, dummy_code):
assert dummy_module_name.startswith("__")
module_dir = os.path.join(modules_dir, dummy_module_name)
os.makedirs(module_dir, exist_ok=True)
module_path = os.path.join(module_dir, dummy_module_name + ".py")
with open(module_path, "w") as f:
f.write(dummy_code)
return module_dir
def test_dataset_module_factory(self):
with tempfile.TemporaryDirectory() as tmp_dir:
# prepare module from directory path
dummy_code = "MY_DUMMY_VARIABLE = 'hello there'"
module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code)
dataset_module = datasets.load.dataset_module_factory(
module_dir, dynamic_modules_path=self.dynamic_modules_path
)
dummy_module = importlib.import_module(dataset_module.module_path)
self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "hello there")
self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest())
# prepare module from file path + check resolved_file_path
dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'"
module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code)
module_path = os.path.join(module_dir, "__dummy_module_name1__.py")
dataset_module = datasets.load.dataset_module_factory(
module_path, dynamic_modules_path=self.dynamic_modules_path
)
dummy_module = importlib.import_module(dataset_module.module_path)
self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "general kenobi")
self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest())
# missing module
for offline_simulation_mode in list(OfflineSimulationMode):
with offline(offline_simulation_mode):
with self.assertRaises((FileNotFoundError, ConnectionError, requests.exceptions.ConnectionError)):
datasets.load.dataset_module_factory(
"__missing_dummy_module_name__", dynamic_modules_path=self.dynamic_modules_path
)
def test_offline_dataset_module_factory(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dummy_code = "MY_DUMMY_VARIABLE = 'hello there'"
module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code)
dataset_module_1 = datasets.load.dataset_module_factory(
module_dir, dynamic_modules_path=self.dynamic_modules_path
)
time.sleep(0.1) # make sure there's a difference in the OS update time of the python file
dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'"
module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code)
dataset_module_2 = datasets.load.dataset_module_factory(
module_dir, dynamic_modules_path=self.dynamic_modules_path
)
for offline_simulation_mode in list(OfflineSimulationMode):
with offline(offline_simulation_mode):
self._caplog.clear()
                # the module can be resolved by name alone, without an explicit path to a remote or local file
dataset_module_3 = datasets.load.dataset_module_factory(
"__dummy_module_name2__", dynamic_modules_path=self.dynamic_modules_path
)
# it loads the most recent version of the module
self.assertEqual(dataset_module_2.module_path, dataset_module_3.module_path)
self.assertNotEqual(dataset_module_1.module_path, dataset_module_3.module_path)
self.assertIn("Using the latest cached version of the module", self._caplog.text)
def test_load_dataset_from_hub(self):
with self.assertRaises(FileNotFoundError) as context:
datasets.load_dataset("_dummy")
self.assertIn(
"Dataset '_dummy' doesn't exist on the Hub",
str(context.exception),
)
with self.assertRaises(FileNotFoundError) as context:
datasets.load_dataset("_dummy", revision="0.0.0")
self.assertIn(
"Dataset '_dummy' doesn't exist on the Hub",
str(context.exception),
)
self.assertIn(
"at revision '0.0.0'",
str(context.exception),
)
for offline_simulation_mode in list(OfflineSimulationMode):
with offline(offline_simulation_mode):
with self.assertRaises(ConnectionError) as context:
datasets.load_dataset("_dummy")
if offline_simulation_mode != OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
self.assertIn(
"Couldn't reach '_dummy' on the Hub",
str(context.exception),
)
def test_load_dataset_namespace(self):
with self.assertRaises(FileNotFoundError) as context:
datasets.load_dataset("hf-internal-testing/_dummy")
self.assertIn(
"hf-internal-testing/_dummy",
str(context.exception),
)
for offline_simulation_mode in list(OfflineSimulationMode):
with offline(offline_simulation_mode):
with self.assertRaises(ConnectionError) as context:
datasets.load_dataset("hf-internal-testing/_dummy")
self.assertIn("hf-internal-testing/_dummy", str(context.exception), msg=offline_simulation_mode)
@pytest.mark.integration
def test_load_dataset_builder_with_metadata():
builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4)
assert isinstance(builder, ImageFolder)
assert builder.config.name == "default"
assert builder.config.data_files is not None
assert builder.config.drop_metadata is None
builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, "non-existing-config")
assert isinstance(builder, ImageFolder)
assert builder.config.name == "non-existing-config"
@pytest.mark.integration
def test_load_dataset_builder_config_kwargs_passed_as_arguments():
builder_default = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4)
builder_custom = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True)
assert builder_custom.config.drop_metadata != builder_default.config.drop_metadata
assert builder_custom.config.drop_metadata is True
@pytest.mark.integration
def test_load_dataset_builder_with_two_configs_in_metadata():
builder = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1")
assert isinstance(builder, AudioFolder)
assert builder.config.name == "v1"
assert builder.config.data_files is not None
with pytest.raises(ValueError):
datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA)
with pytest.raises(ValueError):
datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "non-existing-config")
@pytest.mark.parametrize("serializer", [pickle, dill])
def test_load_dataset_builder_with_metadata_configs_pickable(serializer):
builder = datasets.load_dataset_builder(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA)
builder_unpickled = serializer.loads(serializer.dumps(builder))
assert builder.BUILDER_CONFIGS == builder_unpickled.BUILDER_CONFIGS
assert list(builder_unpickled.builder_configs) == ["custom"]
assert isinstance(builder_unpickled.builder_configs["custom"], AudioFolderConfig)
builder2 = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1")
builder2_unpickled = serializer.loads(serializer.dumps(builder2))
assert builder2.BUILDER_CONFIGS == builder2_unpickled.BUILDER_CONFIGS != builder_unpickled.BUILDER_CONFIGS
assert list(builder2_unpickled.builder_configs) == ["v1", "v2"]
assert isinstance(builder2_unpickled.builder_configs["v1"], AudioFolderConfig)
assert isinstance(builder2_unpickled.builder_configs["v2"], AudioFolderConfig)
def test_load_dataset_builder_for_absolute_script_dir(dataset_loading_script_dir, data_dir):
builder = datasets.load_dataset_builder(dataset_loading_script_dir, data_dir=data_dir)
assert isinstance(builder, DatasetBuilder)
assert builder.name == DATASET_LOADING_SCRIPT_NAME
assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME
assert builder.info.features == Features({"text": Value("string")})
def test_load_dataset_builder_for_relative_script_dir(dataset_loading_script_dir, data_dir):
with set_current_working_directory_to_temp_dir():
relative_script_dir = DATASET_LOADING_SCRIPT_NAME
shutil.copytree(dataset_loading_script_dir, relative_script_dir)
builder = datasets.load_dataset_builder(relative_script_dir, data_dir=data_dir)
assert isinstance(builder, DatasetBuilder)
assert builder.name == DATASET_LOADING_SCRIPT_NAME
assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME
assert builder.info.features == Features({"text": Value("string")})
def test_load_dataset_builder_for_script_path(dataset_loading_script_dir, data_dir):
builder = datasets.load_dataset_builder(
os.path.join(dataset_loading_script_dir, DATASET_LOADING_SCRIPT_NAME + ".py"), data_dir=data_dir
)
assert isinstance(builder, DatasetBuilder)
assert builder.name == DATASET_LOADING_SCRIPT_NAME
assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME
assert builder.info.features == Features({"text": Value("string")})
def test_load_dataset_builder_for_absolute_data_dir(complex_data_dir):
builder = datasets.load_dataset_builder(complex_data_dir)
assert isinstance(builder, DatasetBuilder)
assert builder.name == "text"
assert builder.dataset_name == Path(complex_data_dir).name
assert builder.config.name == "default"
assert isinstance(builder.config.data_files, DataFilesDict)
assert len(builder.config.data_files["train"]) > 0
assert len(builder.config.data_files["test"]) > 0
def test_load_dataset_builder_for_relative_data_dir(complex_data_dir):
with set_current_working_directory_to_temp_dir():
relative_data_dir = "relative_data_dir"
shutil.copytree(complex_data_dir, relative_data_dir)
builder = datasets.load_dataset_builder(relative_data_dir)
assert isinstance(builder, DatasetBuilder)
assert builder.name == "text"
assert builder.dataset_name == relative_data_dir
assert builder.config.name == "default"
assert isinstance(builder.config.data_files, DataFilesDict)
assert len(builder.config.data_files["train"]) > 0
assert len(builder.config.data_files["test"]) > 0
@pytest.mark.integration
def test_load_dataset_builder_for_community_dataset_with_script():
builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER)
assert isinstance(builder, DatasetBuilder)
assert builder.name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1]
assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1]
assert builder.config.name == "default"
assert builder.info.features == Features({"text": Value("string")})
namespace = SAMPLE_DATASET_IDENTIFIER[: SAMPLE_DATASET_IDENTIFIER.index("/")]
assert builder._relative_data_dir().startswith(namespace)
assert SAMPLE_DATASET_IDENTIFIER.replace("/", "--") in builder.__module__
@pytest.mark.integration
def test_load_dataset_builder_for_community_dataset_without_script():
builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER2)
assert isinstance(builder, DatasetBuilder)
assert builder.name == "text"
assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER2.split("/")[-1]
assert builder.config.name == "default"
assert isinstance(builder.config.data_files, DataFilesDict)
assert len(builder.config.data_files["train"]) > 0
assert len(builder.config.data_files["test"]) > 0
def test_load_dataset_builder_fail():
with pytest.raises(FileNotFoundError):
datasets.load_dataset_builder("blabla")
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_load_dataset_local(dataset_loading_script_dir, data_dir, keep_in_memory, caplog):
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=keep_in_memory)
assert isinstance(dataset, DatasetDict)
assert all(isinstance(d, Dataset) for d in dataset.values())
assert len(dataset) == 2
assert isinstance(next(iter(dataset["train"])), dict)
for offline_simulation_mode in list(OfflineSimulationMode):
with offline(offline_simulation_mode):
caplog.clear()
# Load dataset from cache
dataset = datasets.load_dataset(DATASET_LOADING_SCRIPT_NAME, data_dir=data_dir)
assert len(dataset) == 2
assert "Using the latest cached version of the module" in caplog.text
with pytest.raises(FileNotFoundError) as exc_info:
datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST)
assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value)
assert os.path.abspath(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST) in str(exc_info.value)
def test_load_dataset_streaming(dataset_loading_script_dir, data_dir):
dataset = load_dataset(dataset_loading_script_dir, streaming=True, data_dir=data_dir)
assert isinstance(dataset, IterableDatasetDict)
assert all(isinstance(d, IterableDataset) for d in dataset.values())
assert len(dataset) == 2
assert isinstance(next(iter(dataset["train"])), dict)
def test_load_dataset_streaming_gz_json(jsonl_gz_path):
data_files = jsonl_gz_path
ds = load_dataset("json", split="train", data_files=data_files, streaming=True)
assert isinstance(ds, IterableDataset)
ds_item = next(iter(ds))
assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}
@pytest.mark.integration
@pytest.mark.parametrize(
"path", ["sample.jsonl", "sample.jsonl.gz", "sample.tar", "sample.jsonl.xz", "sample.zip", "sample.jsonl.zst"]
)
def test_load_dataset_streaming_compressed_files(path):
repo_id = "hf-internal-testing/compressed_files"
data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path}"
    if data_files[-3:] in ("zip", "tar"):  # we need to glob "*" inside archives
        # i.e. build a chained URL of the form "zip://*::<url>" (or "tar://*::<url>")
        data_files = data_files[-3:] + "://*::" + data_files
        return  # TODO(QL, albert): re-add support for streaming ZIP and TAR archives
ds = load_dataset("json", split="train", data_files=data_files, streaming=True)
assert isinstance(ds, IterableDataset)
ds_item = next(iter(ds))
assert ds_item == {
"tokens": ["Ministeri", "de", "Justícia", "d'Espanya"],
"ner_tags": [1, 2, 2, 2],
"langs": ["ca", "ca", "ca", "ca"],
"spans": ["PER: Ministeri de Justícia d'Espanya"],
}
@pytest.mark.parametrize("path_extension", ["csv", "csv.bz2"])
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_streaming_csv(path_extension, streaming, csv_path, bz2_csv_path):
paths = {"csv": csv_path, "csv.bz2": bz2_csv_path}
data_files = str(paths[path_extension])
features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")})
ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming)
assert isinstance(ds, IterableDataset if streaming else Dataset)
ds_item = next(iter(ds))
assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}
@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("data_file", ["zip_csv_path", "zip_csv_with_dir_path", "csv_path"])
def test_load_dataset_zip_csv(data_file, streaming, zip_csv_path, zip_csv_with_dir_path, csv_path):
data_file_paths = {
"zip_csv_path": zip_csv_path,
"zip_csv_with_dir_path": zip_csv_with_dir_path,
"csv_path": csv_path,
}
data_files = str(data_file_paths[data_file])
expected_size = 8 if data_file.startswith("zip") else 4
features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")})
ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming)
if streaming:
ds_item_counter = 0
for ds_item in ds:
if ds_item_counter == 0:
assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}
ds_item_counter += 1
assert ds_item_counter == expected_size
else:
assert ds.shape[0] == expected_size
ds_item = next(iter(ds))
assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}
@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("data_file", ["zip_jsonl_path", "zip_jsonl_with_dir_path", "jsonl_path"])
def test_load_dataset_zip_jsonl(data_file, streaming, zip_jsonl_path, zip_jsonl_with_dir_path, jsonl_path):
data_file_paths = {
"zip_jsonl_path": zip_jsonl_path,
"zip_jsonl_with_dir_path": zip_jsonl_with_dir_path,
"jsonl_path": jsonl_path,
}
data_files = str(data_file_paths[data_file])
expected_size = 8 if data_file.startswith("zip") else 4
features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")})
ds = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming)
if streaming:
ds_item_counter = 0
for ds_item in ds:
if ds_item_counter == 0:
assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}
ds_item_counter += 1
assert ds_item_counter == expected_size
else:
assert ds.shape[0] == expected_size
ds_item = next(iter(ds))
assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}
@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("data_file", ["zip_text_path", "zip_text_with_dir_path", "text_path"])
def test_load_dataset_zip_text(data_file, streaming, zip_text_path, zip_text_with_dir_path, text_path):
data_file_paths = {
"zip_text_path": zip_text_path,
"zip_text_with_dir_path": zip_text_with_dir_path,
"text_path": text_path,
}
data_files = str(data_file_paths[data_file])
expected_size = 8 if data_file.startswith("zip") else 4
ds = load_dataset("text", split="train", data_files=data_files, streaming=streaming)
if streaming:
ds_item_counter = 0
for ds_item in ds:
if ds_item_counter == 0:
assert ds_item == {"text": "0"}
ds_item_counter += 1
assert ds_item_counter == expected_size
else:
assert ds.shape[0] == expected_size
ds_item = next(iter(ds))
assert ds_item == {"text": "0"}
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_arrow(streaming, data_dir_with_arrow):
ds = load_dataset("arrow", split="train", data_dir=data_dir_with_arrow, streaming=streaming)
expected_size = 10
if streaming:
ds_item_counter = 0
for ds_item in ds:
if ds_item_counter == 0:
assert ds_item == {"col_1": "foo"}
ds_item_counter += 1
assert ds_item_counter == 10
else:
assert ds.num_rows == 10
assert ds.shape[0] == expected_size
ds_item = next(iter(ds))
assert ds_item == {"col_1": "foo"}
def test_load_dataset_text_with_unicode_new_lines(text_path_with_unicode_new_lines):
data_files = str(text_path_with_unicode_new_lines)
ds = load_dataset("text", split="train", data_files=data_files)
assert ds.num_rows == 3
def test_load_dataset_with_unsupported_extensions(text_dir_with_unsupported_extension):
data_files = str(text_dir_with_unsupported_extension)
ds = load_dataset("text", split="train", data_files=data_files)
assert ds.num_rows == 4
@pytest.mark.integration
def test_loading_from_the_datasets_hub():
with tempfile.TemporaryDirectory() as tmp_dir:
dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=tmp_dir)
assert len(dataset["train"]) == 2
assert len(dataset["validation"]) == 3
del dataset
@pytest.mark.integration
def test_loading_from_the_datasets_hub_with_token():
true_request = requests.Session().request
def assert_auth(method, url, *args, headers, **kwargs):
assert headers["authorization"] == "Bearer foo"
return true_request(method, url, *args, headers=headers, **kwargs)
with patch("requests.Session.request") as mock_request:
mock_request.side_effect = assert_auth
with tempfile.TemporaryDirectory() as tmp_dir:
with offline():
with pytest.raises((ConnectionError, requests.exceptions.ConnectionError)):
load_dataset(SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER, cache_dir=tmp_dir, token="foo")
mock_request.assert_called()
@pytest.mark.integration
def test_load_streaming_private_dataset(hf_token, hf_private_dataset_repo_txt_data):
ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True, token=hf_token)
assert next(iter(ds)) is not None
@pytest.mark.integration
def test_load_dataset_builder_private_dataset(hf_token, hf_private_dataset_repo_txt_data):
builder = load_dataset_builder(hf_private_dataset_repo_txt_data, token=hf_token)
assert isinstance(builder, DatasetBuilder)
@pytest.mark.integration
def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data):
ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token)
assert next(iter(ds)) is not None
@pytest.mark.integration
def test_load_dataset_config_kwargs_passed_as_arguments():
ds_default = load_dataset(SAMPLE_DATASET_IDENTIFIER4)
ds_custom = load_dataset(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True)
assert list(ds_default["train"].features) == ["image", "caption"]
assert list(ds_custom["train"].features) == ["image"]
@require_sndfile
@pytest.mark.integration
def test_load_hub_dataset_without_script_with_single_config_in_metadata():
    # load a dataset with the same data but no configurations in its metadata (i.e. with default parameters)
ds = load_dataset(SAMPLE_DATASET_NO_CONFIGS_IN_METADATA)
assert list(ds["train"].features) == ["audio", "label"] # assert label feature is here as expected by default
assert len(ds["train"]) == 5 and len(ds["test"]) == 4
ds2 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) # single config -> no need to specify it
assert list(ds2["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed
assert len(ds2["train"]) == 3 and len(ds2["test"]) == 3
ds3 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "custom")
assert list(ds3["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed
assert len(ds3["train"]) == 3 and len(ds3["test"]) == 3
with pytest.raises(ValueError):
# no config named "default"
_ = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "default")
@require_sndfile
@pytest.mark.integration
def test_load_hub_dataset_without_script_with_two_config_in_metadata():
ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1")
assert list(ds["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed
assert len(ds["train"]) == 3 and len(ds["test"]) == 3
ds2 = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2")
assert list(ds2["train"].features) == [
"audio",
"label",
] # assert param `drop_labels=False` from metadata is passed
assert len(ds2["train"]) == 2 and len(ds2["test"]) == 1
with pytest.raises(ValueError):
# config is required but not specified
_ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA)
with pytest.raises(ValueError):
# no config named "default"
_ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "default")
ds_with_default = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT)
# it's a dataset with the same data but "v1" config is marked as a default one
assert list(ds_with_default["train"].features) == list(ds["train"].features)
assert len(ds_with_default["train"]) == len(ds["train"]) and len(ds_with_default["test"]) == len(ds["test"])
@require_sndfile
@pytest.mark.integration
def test_load_hub_dataset_without_script_with_metadata_config_in_parallel():
# assert it doesn't fail (pickling of dynamically created class works)
ds = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, num_proc=2)
assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed
assert len(ds["train"]) == 3 and len(ds["test"]) == 3
ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1", num_proc=2)
assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed
assert len(ds["train"]) == 3 and len(ds["test"]) == 3
ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2", num_proc=2)
assert "label" in ds["train"].features
assert len(ds["train"]) == 2 and len(ds["test"]) == 1
@require_pil
@pytest.mark.integration
@pytest.mark.parametrize("streaming", [True])
def test_load_dataset_private_zipped_images(hf_private_dataset_repo_zipped_img_data, hf_token, streaming):
ds = load_dataset(hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, token=hf_token)
assert isinstance(ds, IterableDataset if streaming else Dataset)
ds_items = list(ds)
assert len(ds_items) == 2
def test_load_dataset_then_move_then_reload(dataset_loading_script_dir, data_dir, tmp_path, caplog):
cache_dir1 = tmp_path / "cache1"
cache_dir2 = tmp_path / "cache2"
dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1)
fingerprint1 = dataset._fingerprint
del dataset
os.rename(cache_dir1, cache_dir2)
caplog.clear()
with caplog.at_level(INFO, logger=get_logger().name):
dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir2)
assert "Found cached dataset" in caplog.text
assert dataset._fingerprint == fingerprint1, "for the caching mechanism to work, fingerprint should stay the same"
dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="test", cache_dir=cache_dir2)
assert dataset._fingerprint != fingerprint1
def test_load_dataset_readonly(dataset_loading_script_dir, dataset_loading_script_dir_readonly, data_dir, tmp_path):
cache_dir1 = tmp_path / "cache1"
cache_dir2 = tmp_path / "cache2"
dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1)
fingerprint1 = dataset._fingerprint
del dataset
# Load readonly dataset and check that the fingerprint is the same.
dataset = load_dataset(dataset_loading_script_dir_readonly, data_dir=data_dir, split="train", cache_dir=cache_dir2)
assert dataset._fingerprint == fingerprint1, "Cannot load a dataset in a readonly folder."
@pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 50, 500])
def test_load_dataset_local_with_default_in_memory(
max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, monkeypatch
):
current_dataset_size = 148
if max_in_memory_dataset_size == "default":
max_in_memory_dataset_size = 0 # default
else:
monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size)
if max_in_memory_dataset_size:
expected_in_memory = current_dataset_size < max_in_memory_dataset_size
else:
expected_in_memory = False
with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase():
dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir)
assert (dataset["train"].dataset_size < max_in_memory_dataset_size) is expected_in_memory
@pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 100, 1000])
def test_load_from_disk_with_default_in_memory(
max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, tmp_path, monkeypatch
):
current_dataset_size = 512 # arrow file size = 512, in-memory dataset size = 148
if max_in_memory_dataset_size == "default":
max_in_memory_dataset_size = 0 # default
else:
monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size)
if max_in_memory_dataset_size:
expected_in_memory = current_dataset_size < max_in_memory_dataset_size
else:
expected_in_memory = False
dset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=True)
dataset_path = os.path.join(tmp_path, "saved_dataset")
dset.save_to_disk(dataset_path)
with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase():
_ = load_from_disk(dataset_path)
@pytest.mark.integration
def test_remote_data_files():
repo_id = "hf-internal-testing/raw_jsonl"
filename = "wikiann-bn-validation.jsonl"
data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{filename}"
ds = load_dataset("json", split="train", data_files=data_files, streaming=True)
assert isinstance(ds, IterableDataset)
ds_item = next(iter(ds))
assert ds_item.keys() == {"langs", "ner_tags", "spans", "tokens"}
@pytest.mark.parametrize("deleted", [False, True])
def test_load_dataset_deletes_extracted_files(deleted, jsonl_gz_path, tmp_path):
data_files = jsonl_gz_path
cache_dir = tmp_path / "cache"
if deleted:
download_config = DownloadConfig(delete_extracted=True, cache_dir=cache_dir / "downloads")
ds = load_dataset(
"json", split="train", data_files=data_files, cache_dir=cache_dir, download_config=download_config
)
else: # default
ds = load_dataset("json", split="train", data_files=data_files, cache_dir=cache_dir)
assert ds[0] == {"col_1": "0", "col_2": 0, "col_3": 0.0}
assert (
[path for path in (cache_dir / "downloads" / "extracted").iterdir() if path.suffix != ".lock"] == []
) is deleted
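# With `delete_extracted=True`, the extracted copy of the gzipped JSON Lines file is expected to be
# removed from the "extracted" cache once the dataset has been prepared; by default it is kept.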
def distributed_load_dataset(args):
data_name, tmp_dir, datafiles = args
dataset = load_dataset(data_name, cache_dir=tmp_dir, data_files=datafiles)
return dataset
def test_load_dataset_distributed(tmp_path, csv_path):
num_workers = 5
args = "csv", str(tmp_path), csv_path
with Pool(processes=num_workers) as pool: # start num_workers processes
datasets = pool.map(distributed_load_dataset, [args] * num_workers)
assert len(datasets) == num_workers
assert all(len(dataset) == len(datasets[0]) > 0 for dataset in datasets)
assert len(datasets[0].cache_files) > 0
assert all(dataset.cache_files == datasets[0].cache_files for dataset in datasets)
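# All workers point at the same cache_dir, and the assertions above check that they end up sharing
# the same cache files (i.e. the dataset is presumably prepared once and then reused).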
def test_load_dataset_with_storage_options(mockfs):
with mockfs.open("data.txt", "w") as f:
f.write("Hello there\n")
f.write("General Kenobi !")
data_files = {"train": ["mock://data.txt"]}
ds = load_dataset("text", data_files=data_files, storage_options=mockfs.storage_options)
assert list(ds["train"]) == [{"text": "Hello there"}, {"text": "General Kenobi !"}]
@require_pil
def test_load_dataset_with_storage_options_with_decoding(mockfs, image_file):
import PIL.Image
filename = os.path.basename(image_file)
with mockfs.open(filename, "wb") as fout:
with open(image_file, "rb") as fin:
fout.write(fin.read())
data_files = {"train": ["mock://" + filename]}
ds = load_dataset("imagefolder", data_files=data_files, storage_options=mockfs.storage_options)
assert len(ds["train"]) == 1
assert isinstance(ds["train"][0]["image"], PIL.Image.Image)
def test_load_dataset_without_script_with_zip(zip_csv_path):
path = str(zip_csv_path.parent)
ds = load_dataset(path)
assert list(ds.keys()) == ["train"]
assert ds["train"].column_names == ["col_1", "col_2", "col_3"]
assert ds["train"].num_rows == 8
assert ds["train"][0] == {"col_1": 0, "col_2": 0, "col_3": 0.0}
| datasets-main | tests/test_load.py |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
"""Dummy beam dataset."""
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string")}),
# No default supervised_keys.
supervised_keys=None,
)
def _split_generators(self, dl_manager, pipeline):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]
def _build_pcollection(self, pipeline, examples):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
"""Dummy beam dataset."""
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
# No default supervised_keys.
supervised_keys=None,
)
def _split_generators(self, dl_manager, pipeline):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
]
def _build_pcollection(self, pipeline, examples):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
@require_beam
def test_download_and_prepare(self):
expected_num_examples = len(get_test_dummy_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
)
)
self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows, expected_num_examples)
self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
self.assertDictEqual(
dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
)
self.assertTrue(
os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
)
del dset
@require_beam
def test_download_and_prepare_sharded(self):
import apache_beam as beam
original_write_parquet = beam.io.parquetio.WriteToParquet
expected_num_examples = len(get_test_dummy_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
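            # patch WriteToParquet so the pipeline writes 2 shards instead of the default single shard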
with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
)
)
)
self.assertTrue(
os.path.exists(
os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
)
)
)
self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows, expected_num_examples)
self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
self.assertTrue(
os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
)
del dset
@require_beam
def test_no_beam_options(self):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
@require_beam
def test_nested_features(self):
expected_num_examples = len(get_test_nested_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
)
)
self.assertDictEqual(
builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
)
dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows, expected_num_examples)
self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
self.assertDictEqual(
dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
)
self.assertTrue(
os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
)
del dset
| datasets-main | tests/test_beam.py |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files",
[
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
],
)
def test_from_dir(files, tmp_path_factory):
dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md", "w") as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---")
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md", "w") as f:
f.write("")
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
f.write('{"default": {"dataset_size": 42}}')
dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info",
[
DatasetInfo(),
DatasetInfo(
description="foo",
features=Features({"a": Value("int32")}),
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train"}],
download_size=42,
),
],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
tmp_path = str(tmp_path)
dataset_info.write_to_directory(tmp_path)
reloaded = DatasetInfo.from_directory(tmp_path)
assert dataset_info == reloaded
assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
dataset_info = DatasetInfo(
description="foo",
citation="bar",
homepage="https://foo.bar",
license="CC0",
features=Features({"a": Value("int32")}),
post_processed={},
supervised_keys=(),
task_templates=[],
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train", "num_examples": 42}],
download_checksums={},
download_size=1337,
post_processing_size=442,
dataset_size=1234,
size_in_bytes=1337 + 442 + 1234,
)
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
reloaded = yaml.safe_load(dataset_info_yaml)
assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
dataset_info = DatasetInfo()
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict",
[
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()}),
DatasetInfosDict({"my_config_name": DatasetInfo()}),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo",
features=Features({"a": Value("int32")}),
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train"}],
download_size=42,
)
}
),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42),
"v2": DatasetInfo(dataset_size=1337),
}
),
],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
tmp_path = str(tmp_path)
dataset_infos_dict.write_to_directory(tmp_path)
reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name keys of the dataset_infos_dict take precedence over the DatasetInfo.config_name attribute
for config_name, dataset_info in dataset_infos_dict.items():
dataset_info.config_name = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(tmp_path, "README.md"))
| datasets-main | tests/test_info.py |
import pickle
from copy import deepcopy
from itertools import chain, islice
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pytest
from datasets import Dataset, load_dataset
from datasets.combine import concatenate_datasets, interleave_datasets
from datasets.features import (
ClassLabel,
Features,
Image,
Value,
)
from datasets.formatting import get_format_type_from_alias
from datasets.info import DatasetInfo
from datasets.iterable_dataset import (
ArrowExamplesIterable,
BufferShuffledExamplesIterable,
CyclingMultiSourcesExamplesIterable,
ExamplesIterable,
FilteredExamplesIterable,
FormattingConfig,
HorizontallyConcatenatedMultiSourcesExamplesIterable,
IterableDataset,
MappedExamplesIterable,
RandomlyCyclingMultiSourcesExamplesIterable,
SelectColumnsIterable,
ShuffledDataSourcesArrowExamplesIterable,
ShuffledDataSourcesExamplesIterable,
ShufflingConfig,
SkipExamplesIterable,
StepExamplesIterable,
TakeExamplesIterable,
TypedExamplesIterable,
VerticallyConcatenatedMultiSourcesExamplesIterable,
_BaseExamplesIterable,
_batch_arrow_tables,
_batch_to_examples,
_convert_to_arrow,
_examples_to_batch,
)
from .utils import (
assert_arrow_memory_doesnt_increase,
is_rng_equal,
require_dill_gt_0_3_2,
require_not_windows,
require_pyspark,
require_tf,
require_torch,
)
DEFAULT_N_EXAMPLES = 20
DEFAULT_BATCH_SIZE = 4
DEFAULT_FILEPATH = "file.txt"
SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script
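# Helpers used throughout the tests: they yield (key, example) and (key, pa.Table) pairs respectively;
# the optional `filepaths` kwarg simulates sharded data sources.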
def generate_examples_fn(**kwargs):
kwargs = kwargs.copy()
n = kwargs.pop("n", DEFAULT_N_EXAMPLES)
filepaths = kwargs.pop("filepaths", None)
for filepath in filepaths or [DEFAULT_FILEPATH]:
if filepaths is not None:
kwargs["filepath"] = filepath
for i in range(n):
yield f"{filepath}_{i}", {"id": i, **kwargs}
def generate_tables_fn(**kwargs):
kwargs = kwargs.copy()
n = kwargs.pop("n", DEFAULT_N_EXAMPLES)
batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE)
filepaths = kwargs.pop("filepaths", None)
for filepath in filepaths or [DEFAULT_FILEPATH]:
buffer = []
batch_idx = 0
if filepaths is not None:
kwargs["filepath"] = filepath
for i in range(n):
buffer.append({"id": i, **kwargs})
if len(buffer) == batch_size:
yield f"{filepath}_{batch_idx}", pa.Table.from_pylist(buffer)
buffer = []
batch_idx += 1
yield batch_idx, pa.Table.from_pylist(buffer)
@pytest.fixture
def dataset():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
@pytest.fixture
def dataset_with_several_columns():
ex_iterable = ExamplesIterable(
generate_examples_fn,
{"filepath": ["data0.txt", "data1.txt", "data2.txt"], "metadata": {"sources": ["https://foo.bar"]}},
)
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
@pytest.fixture
def arrow_file(tmp_path_factory, dataset: IterableDataset):
filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
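    # materialize the iterable dataset into an arrow file via a no-op map with a fixed cache_file_name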
Dataset.from_generator(dataset.__iter__).map(cache_file_name=filename)
return filename
################################
#
# Utilities tests
#
################################
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_convert_to_arrow(batch_size, drop_last_batch):
examples = [{"foo": i} for i in range(10)]
full_table = pa.Table.from_pylist(examples)
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
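    # i.e. num_batches == ceil(num_rows / batch_size)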
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
_convert_to_arrow(
[(i, example) for i, example in enumerate(examples)],
batch_size=batch_size,
drop_last_batch=drop_last_batch,
)
)
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for _, subtable in subtables)
else:
assert all(len(subtable) == batch_size for _, subtable in subtables[:-1])
assert len(subtables[-1][1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables([subtable for _, subtable in subtables])
assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
@pytest.mark.parametrize(
"tables",
[
[pa.table({"foo": range(10)})],
[pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})],
[pa.table({"foo": [i]}) for i in range(10)],
],
)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_batch_arrow_tables(tables, batch_size, drop_last_batch):
full_table = pa.concat_tables(tables)
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
_batch_arrow_tables(
[(i, table) for i, table in enumerate(tables)], batch_size=batch_size, drop_last_batch=drop_last_batch
)
)
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for _, subtable in subtables)
else:
assert all(len(subtable) == batch_size for _, subtable in subtables[:-1])
assert len(subtables[-1][1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables([subtable for _, subtable in subtables])
assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
################################
#
# _BaseExamplesIterable tests
#
################################
def test_examples_iterable():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
expected = list(generate_examples_fn())
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert ex_iterable.iter_arrow is None
def test_examples_iterable_with_kwargs():
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"})
expected = list(generate_examples_fn(filepaths=["0.txt", "1.txt"], split="train"))
assert list(ex_iterable) == expected
assert all("split" in ex for _, ex in ex_iterable)
assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"]
def test_examples_iterable_shuffle_data_sources():
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]})
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40))
expected = list(generate_examples_fn(filepaths=["1.txt", "0.txt"])) # shuffle the filepaths
assert list(ex_iterable) == expected
def test_examples_iterable_shuffle_shards_and_metadata():
def gen(filepaths, all_metadata):
for i, (filepath, metadata) in enumerate(zip(filepaths, all_metadata)):
yield i, {"filepath": filepath, "metadata": metadata}
ex_iterable = ExamplesIterable(
gen,
{
"filepaths": [f"{i}.txt" for i in range(100)],
"all_metadata": [{"id": str(i)} for i in range(100)],
},
)
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(42))
out = list(ex_iterable)
filepaths_ids = [x["filepath"].split(".")[0] for _, x in out]
metadata_ids = [x["metadata"]["id"] for _, x in out]
assert filepaths_ids == metadata_ids, "entangled lists of shards/metadata should be shuffled the same way"
def test_arrow_examples_iterable():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {})
expected = sum([pa_table.to_pylist() for _, pa_table in generate_tables_fn()], [])
assert next(iter(ex_iterable))[1] == expected[0]
assert [example for _, example in ex_iterable] == expected
expected = list(generate_tables_fn())
assert list(ex_iterable.iter_arrow()) == expected
def test_arrow_examples_iterable_with_kwargs():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"})
expected = sum(
[pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")], []
)
assert [example for _, example in ex_iterable] == expected
assert all("split" in ex for _, ex in ex_iterable)
assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"]
expected = list(generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train"))
assert list(ex_iterable.iter_arrow()) == expected
def test_arrow_examples_iterable_shuffle_data_sources():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"]})
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40))
expected = sum(
[pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["1.txt", "0.txt"])], []
) # shuffle the filepaths
assert [example for _, example in ex_iterable] == expected
expected = list(generate_tables_fn(filepaths=["1.txt", "0.txt"]))
assert list(ex_iterable.iter_arrow()) == expected
@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
def test_buffer_shuffled_examples_iterable(seed):
n, buffer_size = 100, 30
generator = np.random.default_rng(seed)
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = BufferShuffledExamplesIterable(base_ex_iterable, buffer_size=buffer_size, generator=generator)
rng = deepcopy(generator)
expected_indices_used_for_shuffling = list(
islice(BufferShuffledExamplesIterable._iter_random_indices(rng, buffer_size=buffer_size), n - buffer_size)
)
# indices to pick in the shuffle buffer should all be in the right range
assert all(0 <= index_to_pick < buffer_size for index_to_pick in expected_indices_used_for_shuffling)
# it should be random indices
assert expected_indices_used_for_shuffling != list(range(buffer_size))
# The final order of examples is the result of a shuffle buffer.
all_examples = list(generate_examples_fn(n=n))
# We create a buffer and we pick random examples from it.
buffer, rest = all_examples[:buffer_size], all_examples[buffer_size:]
expected = []
for i, index_to_pick in enumerate(expected_indices_used_for_shuffling):
expected.append(buffer[index_to_pick])
# The picked examples are directly replaced by the next examples from the iterable.
buffer[index_to_pick] = rest.pop(0)
# Once we have reached the end of the iterable, we shuffle the buffer and return the remaining examples.
rng.shuffle(buffer)
expected += buffer
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert sorted(ex_iterable) == sorted(all_examples)
def test_cycling_multi_sources_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = CyclingMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = list(chain(*zip(generate_examples_fn(text="foo"), generate_examples_fn(text="bar"))))
    # The cycling stops as soon as one iterable is out of examples (here ex_iterable1), so the last sample from ex_iterable2 is unnecessary
expected = expected[:-1]
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert all((x["id"], x["text"]) == (i // 2, "bar" if i % 2 else "foo") for i, (_, x) in enumerate(ex_iterable))
@pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)])
def test_randomly_cycling_multi_sources_examples_iterable(probabilities):
seed = 42
generator = np.random.default_rng(seed)
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
[ex_iterable1, ex_iterable2], generator=generator, probabilities=probabilities
)
# The source used randomly changes at each example. It stops when one of the iterators is empty.
rng = deepcopy(generator)
iterators = (generate_examples_fn(text="foo"), generate_examples_fn(text="bar"))
indices_iterator = RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(
rng, len(iterators), p=probabilities
)
expected = []
lengths = [len(list(ex_iterable1)), len(list(ex_iterable2))]
for i in indices_iterator:
if lengths[0] == 0 or lengths[1] == 0:
break
for key, example in iterators[i]:
expected.append((key, example))
lengths[i] -= 1
break
else:
break
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id
(3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0
(3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [{**x, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id
(3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0
(3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_drop_last_batch(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
is_empty = False
if batched is False:
# `drop_last_batch` has no effect here
expected = [{**x, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
if len(examples) < batch_size: # ignore last batch
break
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
if all_examples:
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
else:
is_empty = True
if not is_empty:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
else:
with pytest.raises(StopIteration):
next(iter(ex_iterable))
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x, index: {"id+idx": x["id"] + index}, False, None), # add the index to the id
(
25,
lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]},
True,
10,
), # add the index to the id
(5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, None), # same with bs=None
(5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, -1), # same with bs<=0
],
)
def test_mapped_examples_iterable_with_indices(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [{**x, **func(x, idx)} for idx, x in enumerate(all_examples)]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
indices = list(range(batch_offset, batch_offset + len(examples)))
transformed_batch = func(batch, indices)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, remove_columns",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None, ["extra_column"]), # just add 1 to the id
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10, ["extra_column"]), # same with bs=10
(
50,
lambda x: {"foo": ["bar"] * np.random.default_rng(x["id"][0]).integers(0, 10)},
True,
8,
["extra_column", "id"],
        ), # return a variable number of rows per batch and drop the input columns
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None, ["extra_column"]), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1, ["extra_column"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_remove_columns(n, func, batched, batch_size, remove_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, remove_columns=remove_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
if batched is False:
expected = [{**{k: v for k, v in x.items() if k not in columns_to_remove}, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = {k: v for k, v in _examples_to_batch(all_examples).items() if k not in columns_to_remove}
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, fn_kwargs",
[
(3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, None),
(3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, {"y": 3}),
(25, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, 10, {"y": 3}),
(5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, None, {"y": 3}), # same with bs=None
(5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, -1, {"y": 3}), # same with bs<=0
],
)
def test_mapped_examples_iterable_fn_kwargs(n, func, batched, batch_size, fn_kwargs):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if fn_kwargs is None:
fn_kwargs = {}
if batched is False:
expected = [{**x, **func(x, **fn_kwargs)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch, **fn_kwargs)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: {"id+1": id_ + 1}, False, None, ["id"]), # just add 1 to the id
(25, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, 10, ["id"]), # same with bs=10
(5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, None, ["id"]), # same with bs=None
(5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, -1, ["id"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [{**x, **func(*[x[col] for col in columns_to_input])} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(*[batch[col] for col in columns_to_input])
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_drop_last_batch_and_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
drop_last_batch=True,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
is_empty = False
if batched is False:
# `drop_last_batch` has no effect here
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
if len(examples) < batch_size: # ignore last batch
break
batch = pa.Table.from_pylist(examples)
out = func(batch)
all_transformed_examples.extend(
out.to_pylist()
) # we don't merge with input since they're arrow tables and not dictionaries
all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
if all_examples:
expected = all_transformed_examples
else:
is_empty = True
if not is_empty:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
else:
with pytest.raises(StopIteration):
next(iter(ex_iterable))
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(
3,
lambda t, index: t.append_column("id+idx", pc.add(t["id"], index)),
False,
None,
), # add the index to the id
(
25,
lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)),
True,
10,
), # add the index to the id
(5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, None), # same with bs=None
(5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, -1), # same with bs<=0
],
)
def test_mapped_examples_iterable_with_indices_and_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
with_indices=True,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [func(pa.Table.from_pylist([x]), i).to_pylist()[0] for i, x in enumerate(all_examples)]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch, list(range(batch_offset, batch_offset + len(batch)))).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, remove_columns",
[
(
3,
lambda t: t.append_column("id+1", pc.add(t["id"], 1)),
False,
None,
["extra_column"],
), # just add 1 to the id
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10, ["extra_column"]), # same with bs=10
(
50,
lambda t: pa.table({"foo": ["bar"] * np.random.default_rng(t["id"][0].as_py()).integers(0, 10)}),
True,
8,
["extra_column", "id"],
        ), # return a variable number of rows per batch and drop the input columns
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None, ["extra_column"]), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1, ["extra_column"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_remove_columns_arrow_format(n, func, batched, batch_size, remove_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
remove_columns=remove_columns,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
if batched is False:
expected = [
{**{k: v for k, v in func(pa.Table.from_pylist([x])).to_pylist()[0].items() if k not in columns_to_remove}}
for x in all_examples
]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(
[{k: v for k, v in x.items() if k not in columns_to_remove} for x in func(batch).to_pylist()]
)
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, fn_kwargs",
[
(3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, None),
(3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, {"y": 3}),
(25, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, 10, {"y": 3}),
(5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, None, {"y": 3}), # same with bs=None
(5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, -1, {"y": 3}), # same with bs<=0
],
)
def test_mapped_examples_iterable_fn_kwargs_and_arrow_format(n, func, batched, batch_size, fn_kwargs):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if fn_kwargs is None:
fn_kwargs = {}
if batched is False:
expected = [func(pa.Table.from_pylist([x]), **fn_kwargs).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch, **fn_kwargs).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: pa.table({"id+1": pc.add(id_, 1)}), False, None, ["id"]), # just add 1 to the id
(25, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, 10, ["id"]), # same with bs=10
(5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, None, ["id"]), # same with bs=None
(5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, -1, ["id"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_input_columns_and_arrow_format(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
input_columns=input_columns,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [
func(*[pa.Table.from_pylist([x])[col] for col in columns_to_input]).to_pylist()[0] for x in all_examples
]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(*[batch[col] for col in columns_to_input]).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: x["id"] % 2 == 0, False, None), # keep even number
(3, lambda x: [x["id"][0] % 2 == 0], True, 1), # same with bs=1
(25, lambda x: [i % 2 == 0 for i in x["id"]], True, 10), # same with bs=10
(5, lambda x: [i % 2 == 0 for i in x["id"]], True, None), # same with bs=None
(5, lambda x: [i % 2 == 0 for i in x["id"]], True, -1), # same with bs<=0
(3, lambda x: False, False, None), # return 0 examples
(3, lambda x: [False] * len(x["id"]), True, 10), # same with bs=10
],
)
def test_filtered_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [x for x in all_examples if func(x)]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
mask = func(batch)
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
if expected:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x, index: index % 2 == 0, False, None), # keep even number
(25, lambda x, indices: [idx % 2 == 0 for idx in indices], True, 10), # same with bs=10
(5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, None), # same with bs=None
(5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, -1), # same with bs<=0
],
)
def test_filtered_examples_iterable_with_indices(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [x for idx, x in enumerate(all_examples) if func(x, idx)]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
indices = list(range(batch_offset, batch_offset + len(examples)))
mask = func(batch, indices)
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: id_ % 2 == 0, False, None, ["id"]), # keep even number
(25, lambda ids_: [i % 2 == 0 for i in ids_], True, 10, ["id"]), # same with bs=10
(3, lambda ids_: [i % 2 == 0 for i in ids_], True, None, ["id"]), # same with bs=None
        (3, lambda ids_: [i % 2 == 0 for i in ids_], True, -1, ["id"]), # same with bs<=0
],
)
def test_filtered_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [x for x in all_examples if func(*[x[col] for col in columns_to_input])]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
mask = func(*[batch[col] for col in columns_to_input])
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
def test_skip_examples_iterable():
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
skip_ex_iterable = SkipExamplesIterable(base_ex_iterable, n=count)
expected = list(generate_examples_fn(n=total))[count:]
assert list(skip_ex_iterable) == expected
assert (
skip_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is skip_ex_iterable
), "skip examples makes the shards order fixed"
def test_take_examples_iterable():
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
take_ex_iterable = TakeExamplesIterable(base_ex_iterable, n=count)
expected = list(generate_examples_fn(n=total))[:count]
assert list(take_ex_iterable) == expected
assert (
take_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is take_ex_iterable
), "skip examples makes the shards order fixed"
def test_vertically_concatenated_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
assert [x for _, x in concatenated_ex_iterable] == expected
def test_vertically_concatenated_examples_iterable_with_different_columns():
# having different columns is supported
# Though iterable datasets fill the missing data with nulls
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
assert [x for _, x in concatenated_ex_iterable] == expected
def test_vertically_concatenated_examples_iterable_shuffle_data_sources():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
rng = np.random.default_rng(42)
shuffled_ex_iterable = concatenated_ex_iterable.shuffle_data_sources(rng)
# make sure the list of examples iterables is shuffled, and each examples iterable is shuffled
expected = [x for _, x in ex_iterable2.shuffle_data_sources(rng)] + [
x for _, x in ex_iterable1.shuffle_data_sources(rng)
]
assert [x for _, x in shuffled_ex_iterable] == expected
def test_horizontally_concatenated_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
with pytest.raises(ValueError): # column "id" is duplicated -> raise an error
list(concatenated_ex_iterable)
ex_iterable2 = MappedExamplesIterable(ex_iterable2, lambda x: x, remove_columns=["id"])
concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [{**x, **y} for (_, x), (_, y) in zip(ex_iterable1, ex_iterable2)]
assert [x for _, x in concatenated_ex_iterable] == expected
assert (
concatenated_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is concatenated_ex_iterable
), "horizontally concatenated examples makes the shards order fixed"
@pytest.mark.parametrize(
"ex_iterable",
[
ExamplesIterable(generate_examples_fn, {}),
ShuffledDataSourcesExamplesIterable(generate_examples_fn, {}, np.random.default_rng(42)),
SelectColumnsIterable(ExamplesIterable(generate_examples_fn, {}), ["id"]),
StepExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 2, 0),
CyclingMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
VerticallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
HorizontallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
RandomlyCyclingMultiSourcesExamplesIterable(
[ExamplesIterable(generate_examples_fn, {})], np.random.default_rng(42)
),
MappedExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: x),
MappedExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: x),
FilteredExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: True),
FilteredExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: True),
BufferShuffledExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10, np.random.default_rng(42)),
SkipExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10),
TakeExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10),
TypedExamplesIterable(
ExamplesIterable(generate_examples_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={}
),
],
)
def test_no_iter_arrow(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is None
@pytest.mark.parametrize(
"ex_iterable",
[
ArrowExamplesIterable(generate_tables_fn, {}),
ShuffledDataSourcesArrowExamplesIterable(generate_tables_fn, {}, np.random.default_rng(42)),
SelectColumnsIterable(ArrowExamplesIterable(generate_tables_fn, {}), ["id"]),
# StepExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 2, 0), # not implemented
# CyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented
VerticallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]),
# HorizontallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented
# RandomlyCyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})], np.random.default_rng(42)), # not implemented
MappedExamplesIterable(
ExamplesIterable(generate_examples_fn, {}), lambda t: t, formatting=FormattingConfig(format_type="arrow")
),
MappedExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}),
lambda t: t,
formatting=FormattingConfig(format_type="arrow"),
),
FilteredExamplesIterable(
ExamplesIterable(generate_examples_fn, {}),
lambda t: True,
formatting=FormattingConfig(format_type="arrow"),
),
FilteredExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}),
lambda t: True,
formatting=FormattingConfig(format_type="arrow"),
),
# BufferShuffledExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10, np.random.default_rng(42)), # not implemented
# SkipExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented
# TakeExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented
TypedExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={}
),
],
)
def test_iter_arrow(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is not None
key, pa_table = next(ex_iterable.iter_arrow())
assert isinstance(pa_table, pa.Table)
############################
#
# IterableDataset tests
#
############################
def test_iterable_dataset():
dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {}))
expected = [x for _, x in generate_examples_fn()]
assert next(iter(dataset)) == expected[0]
assert list(dataset) == expected
def test_iterable_dataset_from_generator():
data = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
def gen():
yield from data
dataset = IterableDataset.from_generator(gen)
assert isinstance(dataset, IterableDataset)
assert list(dataset) == data
def test_iterable_dataset_from_generator_with_shards():
def gen(shard_names):
for shard_name in shard_names:
for i in range(10):
yield {"shard_name": shard_name, "i": i}
shard_names = [f"data{shard_idx}.txt" for shard_idx in range(4)]
dataset = IterableDataset.from_generator(gen, gen_kwargs={"shard_names": shard_names})
assert isinstance(dataset, IterableDataset)
assert dataset.n_shards == len(shard_names)
def test_iterable_dataset_from_file(dataset: IterableDataset, arrow_file: str):
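    # reading from an existing arrow file should not bring the data into memory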
with assert_arrow_memory_doesnt_increase():
dataset_from_file = IterableDataset.from_file(arrow_file)
expected_features = dataset._resolve_features().features
assert dataset_from_file.features.type == expected_features.type
assert dataset_from_file.features == expected_features
assert isinstance(dataset_from_file, IterableDataset)
assert list(dataset_from_file) == list(dataset)
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_streaming():
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [
("0", 0, 0.0),
("1", 1, 1.0),
("2", 2, 2.0),
("3", 3, 3.0),
]
df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float")
dataset = IterableDataset.from_spark(df)
assert isinstance(dataset, IterableDataset)
results = []
for ex in dataset:
results.append(ex)
assert results == [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_streaming_features():
import PIL.Image
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())]
df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
features = Features({"idx": Value("int64"), "image": Image()})
dataset = IterableDataset.from_spark(
df,
features=features,
)
assert isinstance(dataset, IterableDataset)
results = []
for ex in dataset:
results.append(ex)
assert len(results) == 1
    assert isinstance(results[0]["image"], PIL.Image.Image)
@require_torch
def test_iterable_dataset_torch_integration():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
import torch.utils.data
assert isinstance(dataset, torch.utils.data.IterableDataset)
assert isinstance(dataset, IterableDataset)
assert dataset._ex_iterable is ex_iterable
@require_torch
def test_iterable_dataset_torch_picklable():
import pickle
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable, formatting=FormattingConfig(format_type="torch"))
reloaded_dataset = pickle.loads(pickle.dumps(dataset))
import torch.utils.data
assert isinstance(reloaded_dataset, IterableDataset)
assert isinstance(reloaded_dataset, torch.utils.data.IterableDataset)
assert reloaded_dataset._formatting.format_type == "torch"
assert len(list(dataset)) == len(list(reloaded_dataset))
@require_torch
def test_iterable_dataset_with_format_torch():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
from torch.utils.data import DataLoader
dataloader = DataLoader(dataset)
assert len(list(dataloader)) == len(list(ex_iterable))
@require_torch
def test_iterable_dataset_torch_dataloader_parallel():
from torch.utils.data import DataLoader
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
dataloader = DataLoader(dataset, num_workers=2, batch_size=None)
result = list(dataloader)
expected = [example for _, example in ex_iterable]
assert len(result) == len(expected)
assert {str(x) for x in result} == {str(x) for x in expected}
@require_torch
@pytest.mark.filterwarnings("ignore:This DataLoader will create:UserWarning")
@pytest.mark.parametrize("n_shards, num_workers", [(2, 1), (2, 2), (3, 2), (2, 3)])
def test_sharded_iterable_dataset_torch_dataloader_parallel(n_shards, num_workers):
from torch.utils.data import DataLoader
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}.txt" for i in range(n_shards)]})
dataset = IterableDataset(ex_iterable)
dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
result = list(dataloader)
expected = [example for _, example in ex_iterable]
assert len(result) == len(expected)
assert {str(x) for x in result} == {str(x) for x in expected}
@require_torch
@pytest.mark.integration
@pytest.mark.parametrize("num_workers", [1, 2])
def test_iterable_dataset_from_hub_torch_dataloader_parallel(num_workers, tmp_path):
from torch.utils.data import DataLoader
dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=str(tmp_path), streaming=True, split="train")
dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
result = list(dataloader)
assert len(result) == 2
@pytest.mark.parametrize("batch_size", [4, 5])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_iterable_dataset_iter_batch(batch_size, drop_last_batch):
n = 25
dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {"n": n}))
all_examples = [ex for _, ex in generate_examples_fn(n=n)]
expected = []
for i in range(0, len(all_examples), batch_size):
if len(all_examples[i : i + batch_size]) < batch_size and drop_last_batch:
continue
expected.append(_examples_to_batch(all_examples[i : i + batch_size]))
assert next(iter(dataset.iter(batch_size, drop_last_batch=drop_last_batch))) == expected[0]
assert list(dataset.iter(batch_size, drop_last_batch=drop_last_batch)) == expected
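# Minimal sketch (illustration only): .iter(batch_size=...) yields column-oriented batches,
# i.e. dicts mapping each column name to a list of values, as exercised above.
def _sketch_iter_batch_yields_dict_of_lists():
    def gen():
        for i in range(5):
            yield {"id": i}
    ds = IterableDataset.from_generator(gen)
    first_batch = next(ds.iter(batch_size=2))
    assert first_batch == {"id": [0, 1]}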
def test_iterable_dataset_info():
info = DatasetInfo(description="desc", citation="@article{}", size_in_bytes=42)
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable, info=info)
assert dataset.info == info
assert dataset.description == info.description
assert dataset.citation == info.citation
assert dataset.size_in_bytes == info.size_in_bytes
def test_iterable_dataset_set_epoch(dataset: IterableDataset):
assert dataset._epoch == 0
dataset.set_epoch(42)
assert dataset._epoch == 42
@pytest.mark.parametrize("seed", [None, 42, 1337])
@pytest.mark.parametrize("epoch", [None, 0, 1, 10])
def test_iterable_dataset_set_epoch_of_shuffled_dataset(dataset: IterableDataset, seed, epoch):
buffer_size = 10
shuffled_dataset = dataset.shuffle(seed, buffer_size=buffer_size)
base_generator = shuffled_dataset._shuffling.generator
if epoch is not None:
shuffled_dataset.set_epoch(epoch)
effective_generator = shuffled_dataset._effective_generator()
assert effective_generator is not None
if epoch is None or epoch == 0:
assert is_rng_equal(base_generator, shuffled_dataset._effective_generator())
else:
assert not is_rng_equal(base_generator, shuffled_dataset._effective_generator())
effective_seed = deepcopy(base_generator).integers(0, 1 << 63) - epoch
assert is_rng_equal(np.random.default_rng(effective_seed), shuffled_dataset._effective_generator())
def test_iterable_dataset_map(
dataset: IterableDataset,
):
func = lambda x: {"id+1": x["id"] + 1} # noqa: E731
mapped_dataset = dataset.map(func)
assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
assert mapped_dataset._ex_iterable.function is func
assert mapped_dataset._ex_iterable.batched is False
assert next(iter(mapped_dataset)) == {**next(iter(dataset)), **func(next(iter(generate_examples_fn()))[1])}
def test_iterable_dataset_map_batched(
dataset: IterableDataset,
):
func = lambda x: {"id+1": [i + 1 for i in x["id"]]} # noqa: E731
batch_size = 3
dataset = dataset.map(func, batched=True, batch_size=batch_size)
assert isinstance(dataset._ex_iterable, MappedExamplesIterable)
assert dataset._ex_iterable.function is func
assert dataset._ex_iterable.batch_size == batch_size
assert next(iter(dataset)) == {"id": 0, "id+1": 1}
def test_iterable_dataset_map_complex_features(
dataset: IterableDataset,
):
# https://github.com/huggingface/datasets/issues/3505
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
features = Features(
{
"id": Value("int64"),
"label": Value("string"),
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
dataset = dataset.cast_column("label", ClassLabel(names=["negative", "positive"]))
dataset = dataset.map(lambda x: {"id+1": x["id"] + 1, **x})
assert isinstance(dataset._ex_iterable, MappedExamplesIterable)
features["label"] = ClassLabel(names=["negative", "positive"])
assert [{k: v for k, v in ex.items() if k != "id+1"} for ex in dataset] == [
features.encode_example(ex) for _, ex in ex_iterable
]
def test_iterable_dataset_map_with_features(dataset: IterableDataset) -> None:
# https://github.com/huggingface/datasets/issues/3888
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
features_before_map = Features(
{
"id": Value("int64"),
"label": Value("string"),
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features_before_map))
assert dataset.info.features is not None
assert dataset.info.features == features_before_map
features_after_map = Features(
{
"id": Value("int64"),
"label": Value("string"),
"target": Value("string"),
}
)
dataset = dataset.map(lambda x: {"target": x["label"]}, features=features_after_map)
assert dataset.info.features is not None
assert dataset.info.features == features_after_map
def test_iterable_dataset_map_with_fn_kwargs(dataset: IterableDataset) -> None:
fn_kwargs = {"y": 1}
mapped_dataset = dataset.map(lambda x, y: {"id+y": x["id"] + y}, fn_kwargs=fn_kwargs)
assert mapped_dataset._ex_iterable.batched is False
assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
batch_size = 3
mapped_dataset = dataset.map(
lambda x, y: {"id+y": [i + y for i in x["id"]]}, batched=True, batch_size=batch_size, fn_kwargs=fn_kwargs
)
assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
assert mapped_dataset._ex_iterable.batch_size == batch_size
assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
def test_iterable_dataset_filter(dataset: IterableDataset) -> None:
fn_kwargs = {"y": 1}
filtered_dataset = dataset.filter(lambda x, y: x["id"] == y, fn_kwargs=fn_kwargs)
assert filtered_dataset._ex_iterable.batched is False
assert next(iter(filtered_dataset)) == {"id": 1}
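# Minimal sketch (illustration only): like map(), filter() also accepts a batched predicate
# returning one boolean per example in the batch.
def _sketch_batched_filter():
    def gen():
        for i in range(6):
            yield {"id": i}
    ds = IterableDataset.from_generator(gen)
    even = ds.filter(lambda batch: [i % 2 == 0 for i in batch["id"]], batched=True)
    assert [x["id"] for x in even] == [0, 2, 4]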
@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
@pytest.mark.parametrize("epoch", [None, 0, 1])
def test_iterable_dataset_shuffle(dataset: IterableDataset, seed, epoch):
buffer_size = 3
dataset = deepcopy(dataset)
dataset._ex_iterable.kwargs["filepaths"] = ["0.txt", "1.txt"]
dataset = dataset.shuffle(seed, buffer_size=buffer_size)
assert isinstance(dataset._shuffling, ShufflingConfig)
assert isinstance(dataset._shuffling.generator, np.random.Generator)
assert is_rng_equal(dataset._shuffling.generator, np.random.default_rng(seed))
    # The effective seed is derived deterministically from the base seed and the epoch
if epoch is None or epoch == 0:
effective_seed = seed
else:
dataset.set_epoch(epoch)
effective_seed = np.random.default_rng(seed).integers(0, 1 << 63) - epoch
# Shuffling adds a shuffle buffer
expected_first_example_index = next(
iter(BufferShuffledExamplesIterable._iter_random_indices(np.random.default_rng(effective_seed), buffer_size))
)
assert isinstance(dataset._ex_iterable, BufferShuffledExamplesIterable)
# It also shuffles the underlying examples iterable
expected_ex_iterable = ExamplesIterable(
generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]}
).shuffle_data_sources(np.random.default_rng(effective_seed))
assert isinstance(dataset._ex_iterable.ex_iterable, ExamplesIterable)
assert next(iter(dataset)) == list(islice(expected_ex_iterable, expected_first_example_index + 1))[-1][1]
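# Minimal sketch (illustration only): shuffling only reorders the stream through the buffer,
# so the multiset of examples is unchanged.
def _sketch_shuffle_preserves_examples():
    def gen():
        for i in range(20):
            yield {"id": i}
    ds = IterableDataset.from_generator(gen)
    shuffled = ds.shuffle(seed=42, buffer_size=5)
    assert sorted(x["id"] for x in shuffled) == list(range(20))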
@pytest.mark.parametrize(
"features",
[
None,
Features(
{
"id": Value("int64"),
"label": Value("int64"),
}
),
Features(
{
"id": Value("int64"),
"label": ClassLabel(names=["negative", "positive"]),
}
),
],
)
def test_iterable_dataset_features(features):
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
if features:
expected = [features.encode_example(x) for _, x in ex_iterable]
else:
expected = [x for _, x in ex_iterable]
assert list(dataset) == expected
def test_iterable_dataset_features_cast_to_python():
ex_iterable = ExamplesIterable(
generate_examples_fn, {"timestamp": pd.Timestamp(2020, 1, 1), "array": np.ones(5), "n": 1}
)
features = Features(
{
"id": Value("int64"),
"timestamp": Value("timestamp[us]"),
"array": [Value("int64")],
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
assert list(dataset) == [{"timestamp": pd.Timestamp(2020, 1, 1).to_pydatetime(), "array": [1] * 5, "id": 0}]
@pytest.mark.parametrize("format_type", [None, "torch", "python", "tf", "tensorflow", "np", "numpy", "jax"])
def test_iterable_dataset_with_format(dataset: IterableDataset, format_type):
formatted_dataset = dataset.with_format(format_type)
assert formatted_dataset._formatting.format_type == get_format_type_from_alias(format_type)
@require_torch
def test_iterable_dataset_is_torch_iterable_dataset(dataset: IterableDataset):
from torch.utils.data import DataLoader, _DatasetKind
dataloader = DataLoader(dataset)
assert dataloader._dataset_kind == _DatasetKind.Iterable
out = list(dataloader)
assert len(out) == DEFAULT_N_EXAMPLES
@pytest.mark.parametrize("n", [0, 2, int(1e10)])
def test_iterable_dataset_skip(dataset: IterableDataset, n):
skip_dataset = dataset.skip(n)
assert isinstance(skip_dataset._ex_iterable, SkipExamplesIterable)
assert skip_dataset._ex_iterable.n == n
assert list(skip_dataset) == list(dataset)[n:]
@pytest.mark.parametrize("n", [0, 2, int(1e10)])
def test_iterable_dataset_take(dataset: IterableDataset, n):
take_dataset = dataset.take(n)
assert isinstance(take_dataset._ex_iterable, TakeExamplesIterable)
assert take_dataset._ex_iterable.n == n
assert list(take_dataset) == list(dataset)[:n]
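# Minimal sketch (illustration only): skip() and take() are lazy and compose, selecting a
# contiguous slice of the underlying stream.
def _sketch_skip_then_take():
    def gen():
        for i in range(10):
            yield {"id": i}
    ds = IterableDataset.from_generator(gen)
    assert [x["id"] for x in ds.skip(2).take(3)] == [2, 3, 4]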
@pytest.mark.parametrize("method", ["skip", "take"])
def test_iterable_dataset_shuffle_after_skip_or_take(method):
seed = 42
n, n_shards = 3, 10
count = 7
ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(n_shards)]})
dataset = IterableDataset(ex_iterable)
dataset = dataset.skip(n) if method == "skip" else dataset.take(count)
shuffled_dataset = dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES)
    # shuffling a skip/take dataset should keep the same examples and should not shuffle the shards
key = lambda x: f"{x['filepath']}_{x['id']}" # noqa: E731
assert sorted(dataset, key=key) == sorted(shuffled_dataset, key=key)
def test_iterable_dataset_add_column(dataset_with_several_columns):
new_column = list(range(DEFAULT_N_EXAMPLES))
new_dataset = dataset_with_several_columns.add_column("new_column", new_column)
assert list(new_dataset) == [
{**example, "new_column": idx} for idx, example in enumerate(dataset_with_several_columns)
]
new_dataset = new_dataset._resolve_features()
assert "new_column" in new_dataset.column_names
def test_iterable_dataset_rename_column(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.rename_column("id", "new_id")
assert list(new_dataset) == [
{("new_id" if k == "id" else k): v for k, v in example.items()} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
    # also rename the column when ds.features is not None
new_dataset = dataset_with_several_columns._resolve_features().rename_column("id", "new_id")
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert "id" not in new_dataset.column_names
assert "new_id" in new_dataset.column_names
def test_iterable_dataset_rename_columns(dataset_with_several_columns):
column_mapping = {"id": "new_id", "filepath": "filename"}
new_dataset = dataset_with_several_columns.rename_columns(column_mapping)
assert list(new_dataset) == [
{column_mapping.get(k, k): v for k, v in example.items()} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
    # also rename the columns when ds.features is not None
new_dataset = dataset_with_several_columns._resolve_features().rename_columns(column_mapping)
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c not in new_dataset.column_names for c in ["id", "filepath"])
assert all(c in new_dataset.column_names for c in ["new_id", "filename"])
def test_iterable_dataset_remove_columns(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.remove_columns("id")
assert list(new_dataset) == [
{k: v for k, v in example.items() if k != "id"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
new_dataset = dataset_with_several_columns.remove_columns(["id", "filepath"])
assert list(new_dataset) == [
{k: v for k, v in example.items() if k != "id" and k != "filepath"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
    # also remove the columns when ds.features is not None
new_dataset = dataset_with_several_columns._resolve_features().remove_columns(["id", "filepath"])
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c not in new_dataset.features for c in ["id", "filepath"])
assert all(c not in new_dataset.column_names for c in ["id", "filepath"])
def test_iterable_dataset_select_columns(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.select_columns("id")
assert list(new_dataset) == [
{k: v for k, v in example.items() if k == "id"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
new_dataset = dataset_with_several_columns.select_columns(["id", "filepath"])
assert list(new_dataset) == [
{k: v for k, v in example.items() if k in ("id", "filepath")} for example in dataset_with_several_columns
]
assert new_dataset.features is None
    # also select the columns when ds.features is not None
new_dataset = dataset_with_several_columns._resolve_features().select_columns(["id", "filepath"])
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c in new_dataset.features for c in ["id", "filepath"])
assert all(c in new_dataset.column_names for c in ["id", "filepath"])
def test_iterable_dataset_cast_column():
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
casted_dataset = dataset.cast_column("label", Value("bool"))
casted_features = features.copy()
casted_features["label"] = Value("bool")
assert list(casted_dataset) == [casted_features.encode_example(ex) for _, ex in ex_iterable]
def test_iterable_dataset_cast():
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
new_features = Features({"id": Value("int64"), "label": Value("bool")})
casted_dataset = dataset.cast(new_features)
assert list(casted_dataset) == [new_features.encode_example(ex) for _, ex in ex_iterable]
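# Minimal sketch (illustration only): casting swaps the declared feature type, and the values
# are re-encoded accordingly when the dataset is iterated.
def _sketch_cast_column_reencodes_on_iteration():
    def gen():
        for i in range(3):
            yield {"id": i}
    ds = IterableDataset.from_generator(gen, features=Features({"id": Value("int64")}))
    assert [x["id"] for x in ds.cast_column("id", Value("string"))] == ["0", "1", "2"]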
def test_iterable_dataset_resolve_features():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
assert dataset.features is None
assert dataset.column_names is None
dataset = dataset._resolve_features()
assert dataset.features == Features(
{
"id": Value("int64"),
}
)
assert dataset.column_names == ["id"]
def test_iterable_dataset_resolve_features_keep_order():
def gen():
yield from zip(range(3), [{"a": 1}, {"c": 1}, {"b": 1}])
ex_iterable = ExamplesIterable(gen, {})
dataset = IterableDataset(ex_iterable)._resolve_features()
# columns appear in order of appearance in the dataset
assert list(dataset.features) == ["a", "c", "b"]
assert dataset.column_names == ["a", "c", "b"]
def test_iterable_dataset_with_features_fill_with_none():
def gen():
yield from zip(range(2), [{"a": 1}, {"b": 1}])
ex_iterable = ExamplesIterable(gen, {})
info = DatasetInfo(features=Features({"a": Value("int32"), "b": Value("int32")}))
dataset = IterableDataset(ex_iterable, info=info)
assert list(dataset) == [{"a": 1, "b": None}, {"b": 1, "a": None}]
def test_concatenate_datasets():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
dataset2 = IterableDataset(ex_iterable2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + list(dataset2)
def test_concatenate_datasets_resolves_features():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
dataset2 = IterableDataset(ex_iterable2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert concatenated_dataset.features is not None
assert sorted(concatenated_dataset.features) == ["id", "label"]
def test_concatenate_datasets_with_different_columns():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
dataset2 = IterableDataset(ex_iterable2)
# missing column "label" -> it should be replaced with nulls
extended_dataset2_list = [{"label": None, **x} for x in dataset2]
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1])
assert list(concatenated_dataset) == extended_dataset2_list + list(dataset1)
def test_concatenate_datasets_axis_1():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
dataset2 = IterableDataset(ex_iterable2)
with pytest.raises(ValueError): # column "id" is duplicated -> raise an error
concatenate_datasets([dataset1, dataset2], axis=1)
concatenated_dataset = concatenate_datasets([dataset1, dataset2.remove_columns("id")], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, dataset2)]
def test_concatenate_datasets_axis_1_resolves_features():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
dataset2 = IterableDataset(ex_iterable2).remove_columns("id")
concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1)
assert concatenated_dataset.features is not None
assert sorted(concatenated_dataset.features) == ["id", "label1", "label2"]
def test_concatenate_datasets_axis_1_with_different_lengths():
n1 = 10
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10, "n": n1})
dataset1 = IterableDataset(ex_iterable1)
n2 = 5
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5, "n": n2})
dataset2 = IterableDataset(ex_iterable2).remove_columns("id")
# missing rows -> they should be replaced with nulls
extended_dataset2_list = list(dataset2) + [{"label2": None}] * (n1 - n2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, extended_dataset2_list)]
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(extended_dataset2_list, dataset1)]
@pytest.mark.parametrize(
"probas, seed, expected_length, stopping_strategy",
[
(None, None, 3 * (DEFAULT_N_EXAMPLES - 1) + 1, "first_exhausted"),
([1, 0, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"),
([0, 1, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"),
([0.2, 0.5, 0.3], 42, None, "first_exhausted"),
([0.1, 0.1, 0.8], 1337, None, "first_exhausted"),
([0.5, 0.2, 0.3], 101010, None, "first_exhausted"),
(None, None, 3 * DEFAULT_N_EXAMPLES, "all_exhausted"),
([0.2, 0.5, 0.3], 42, None, "all_exhausted"),
([0.1, 0.1, 0.8], 1337, None, "all_exhausted"),
([0.5, 0.2, 0.3], 101010, None, "all_exhausted"),
],
)
def test_interleave_datasets(dataset: IterableDataset, probas, seed, expected_length, stopping_strategy):
d1 = dataset
d2 = dataset.map(lambda x: {"id+1": x["id"] + 1, **x})
d3 = dataset.with_format("python")
datasets = [d1, d2, d3]
merged_dataset = interleave_datasets(
datasets, probabilities=probas, seed=seed, stopping_strategy=stopping_strategy
)
def fill_default(example):
return {"id": None, "id+1": None, **example}
# Check the examples iterable
assert isinstance(
merged_dataset._ex_iterable, (CyclingMultiSourcesExamplesIterable, RandomlyCyclingMultiSourcesExamplesIterable)
)
# Check that it is deterministic
if seed is not None:
merged_dataset2 = interleave_datasets(
[d1, d2, d3], probabilities=probas, seed=seed, stopping_strategy=stopping_strategy
)
assert list(merged_dataset) == list(merged_dataset2)
# Check features
assert merged_dataset.features == Features({"id": Value("int64"), "id+1": Value("int64")})
# Check first example
if seed is not None:
rng = np.random.default_rng(seed)
i = next(iter(RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas)))
assert next(iter(merged_dataset)) == fill_default(next(iter(datasets[i])))
else:
assert any(next(iter(merged_dataset)) == fill_default(next(iter(dataset))) for dataset in datasets)
    # Compute the expected length in case it's random
if expected_length is None:
expected_length = 0
counts = np.array([len(list(d)) for d in datasets])
bool_strategy_func = np.all if stopping_strategy == "all_exhausted" else np.any
rng = np.random.default_rng(seed)
for i in RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas):
counts[i] -= 1
expected_length += 1
if bool_strategy_func(counts <= 0):
break
# Check length
assert len(list(merged_dataset)) == expected_length
def test_interleave_datasets_with_features(
dataset: IterableDataset,
):
features = Features(
{
"id": Value("int64"),
"label": ClassLabel(names=["negative", "positive"]),
}
)
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0})
dataset_with_features = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
merged_dataset = interleave_datasets([dataset, dataset_with_features])
assert merged_dataset.features == features
def test_interleave_datasets_with_oversampling():
# Test hardcoded results
d1 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [0, 1, 2]])), {}))
d2 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [10, 11, 12, 13]])), {}))
d3 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [20, 21, 22, 23, 24]])), {}))
expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
# Check oversampling strategy without probabilities
assert [x["a"] for x in interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")] == expected_values
# Check oversampling strategy with probabilities
expected_values = [20, 0, 21, 10, 1, 22, 23, 24, 2, 0, 1, 20, 11, 21, 2, 0, 12, 1, 22, 13]
values = [
x["a"]
for x in interleave_datasets(
[d1, d2, d3], probabilities=[0.5, 0.2, 0.3], seed=42, stopping_strategy="all_exhausted"
)
]
assert values == expected_values
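# Minimal sketch (illustration only): "first_exhausted" stops as soon as the shortest source
# runs out, while "all_exhausted" keeps cycling (oversampling the short sources) until every
# source has been fully consumed at least once.
def _sketch_interleave_stopping_strategies():
    def gen_short():
        for i in range(2):
            yield {"a": i}
    def gen_long():
        for i in range(10, 15):
            yield {"a": i}
    short_ds = IterableDataset.from_generator(gen_short)
    long_ds = IterableDataset.from_generator(gen_long)
    undersampled = [x["a"] for x in interleave_datasets([short_ds, long_ds], stopping_strategy="first_exhausted")]
    oversampled = [x["a"] for x in interleave_datasets([short_ds, long_ds], stopping_strategy="all_exhausted")]
    assert len(undersampled) <= len(oversampled)
    assert set(oversampled) == set(range(2)) | set(range(10, 15))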
@require_torch
def test_with_format_torch(dataset_with_several_columns: IterableDataset):
import torch
dset = dataset_with_several_columns.with_format(type="torch")
example = next(iter(dset))
batch = next(iter(dset.iter(batch_size=3)))
assert len(example) == 3
assert isinstance(example["id"], torch.Tensor)
assert list(example["id"].shape) == []
assert example["id"].item() == 0
assert isinstance(batch["id"], torch.Tensor)
assert isinstance(example["filepath"], list)
assert isinstance(example["filepath"][0], str)
assert example["filepath"][0] == "data0.txt"
assert isinstance(batch["filepath"], list)
assert isinstance(example["metadata"], dict)
assert isinstance(example["metadata"]["sources"], list)
assert isinstance(example["metadata"]["sources"][0], str)
assert isinstance(batch["metadata"], list)
@require_tf
def test_with_format_tf(dataset_with_several_columns: IterableDataset):
import tensorflow as tf
dset = dataset_with_several_columns.with_format(type="tensorflow")
example = next(iter(dset))
batch = next(iter(dset.iter(batch_size=3)))
assert isinstance(example["id"], tf.Tensor)
assert list(example["id"].shape) == []
assert example["id"].numpy().item() == 0
assert isinstance(batch["id"], tf.Tensor)
assert isinstance(example["filepath"], tf.Tensor)
assert example["filepath"][0] == b"data0.txt"
assert isinstance(batch["filepath"], tf.Tensor)
assert isinstance(example["metadata"], dict)
assert isinstance(example["metadata"]["sources"], tf.Tensor)
assert isinstance(batch["metadata"], list)
def test_map_array_are_not_converted_back_to_lists(dataset: IterableDataset):
def func(example):
return {"array": np.array([1, 2, 3])}
dset_test = dataset.map(func)
example = next(iter(dset_test))
# not aligned with Dataset.map because we don't convert back to lists after map()
assert isinstance(example["array"], np.ndarray)
def test_formatted_map(dataset: IterableDataset):
dataset = dataset.with_format("np")
assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray)
dataset = dataset.with_format(None)
assert isinstance(next(dataset.iter(batch_size=3))["id"], list)
def add_one_numpy(example):
assert isinstance(example["id"], np.ndarray)
return {"id": example["id"] + 1}
dataset = dataset.with_format("np")
dataset = dataset.map(add_one_numpy, batched=True)
assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray)
dataset = dataset.with_format(None)
assert isinstance(next(dataset.iter(batch_size=3))["id"], list)
@pytest.mark.parametrize("n_shards1, n_shards2, num_workers", [(2, 1, 1), (2, 2, 2), (1, 3, 1), (4, 3, 3)])
def test_interleave_dataset_with_sharding(n_shards1, n_shards2, num_workers):
from torch.utils.data import DataLoader
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-1.txt" for i in range(n_shards1)]})
dataset1 = IterableDataset(ex_iterable1).with_format("torch")
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-2.txt" for i in range(n_shards2)]})
dataset2 = IterableDataset(ex_iterable2).with_format("torch")
dataset_merged = interleave_datasets([dataset1, dataset2], stopping_strategy="first_exhausted")
assert dataset_merged.n_shards == min(n_shards1, n_shards2)
dataloader = DataLoader(dataset_merged, batch_size=None, num_workers=num_workers)
result = list(dataloader)
expected_length = 2 * min(
len([example for _, example in ex_iterable1]), len([example for _, example in ex_iterable2])
)
# some samples may be missing because the stopping strategy is applied per process
assert expected_length - num_workers <= len(result) <= expected_length
assert len(result) == len({str(x) for x in result})
def filter_func(batch):
return batch["id"] == 4
def map_func(batch):
batch["id"] *= 2
return batch
def test_pickle_after_many_transforms(dataset_with_several_columns):
dataset = dataset_with_several_columns
dataset = dataset.remove_columns(["filepath"])
dataset = dataset.take(5)
dataset = dataset.map(map_func)
dataset = dataset.shuffle()
dataset = dataset.skip(1)
dataset = dataset.filter(filter_func)
dataset = dataset.add_column("additional_col", ["something"])
dataset = dataset.rename_column("metadata", "metadata1")
dataset = dataset.rename_columns({"id": "id1", "metadata1": "metadata2"})
dataset = dataset.select_columns(["id1", "additional_col"])
unpickled_dataset = pickle.loads(pickle.dumps(dataset))
assert list(unpickled_dataset) == list(dataset)
| datasets-main | tests/test_iterable_dataset.py |
import os
import tempfile
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
from datasets import load_from_disk
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.features import ClassLabel, Features, Sequence, Value
from datasets.iterable_dataset import IterableDataset
from datasets.splits import NamedSplit
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_tf, require_torch
class DatasetDictTest(TestCase):
def _create_dummy_dataset(self, multiple_columns=False):
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
dset = Dataset.from_dict(data)
else:
dset = Dataset.from_dict(
{"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]}
)
return dset
def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict:
return DatasetDict(
{
"train": self._create_dummy_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_dataset(multiple_columns=multiple_columns),
}
)
def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset:
def gen():
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
for v1, v2 in zip(data["col_1"], data["col_2"]):
yield {"col_1": v1, "col_2": v2}
else:
for x in range(30):
yield {"filename": "my_name-train" + "_" + f"{x:03d}"}
return IterableDataset.from_generator(gen)
def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict:
return IterableDatasetDict(
{
"train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
}
)
def test_flatten(self):
dset_split = Dataset.from_dict(
{"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
)
dset = DatasetDict({"train": dset_split, "test": dset_split})
dset = dset.flatten()
self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]})
self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"])
self.assertDictEqual(
dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")})
)
del dset
def test_set_format_numpy(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="numpy", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.reset_format()
with dset.formatted_as(type="numpy", columns=["col_1"]):
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], None)
self.assertEqual(dset_split.format["format_kwargs"], {})
self.assertEqual(dset_split.format["columns"], dset_split.column_names)
self.assertEqual(dset_split.format["output_all_columns"], False)
dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="numpy", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], np.str_)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
@require_torch
def test_set_format_torch(self):
import torch
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="torch", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="torch", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="torch")
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
del dset
@require_tf
def test_set_format_tf(self):
import tensorflow as tf
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="tensorflow", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3)
dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="tensorflow", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a")
del dset
def test_set_format_pandas(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="pandas", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 1)
self.assertIsInstance(dset_split[0], pd.DataFrame)
self.assertListEqual(list(dset_split[0].shape), [1, 1])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="pandas", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 2)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
def test_set_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_transform(transform=transform, columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], "custom")
self.assertEqual(len(dset_split[0].keys()), 1)
self.assertEqual(dset_split[0]["col_1"], "3")
self.assertEqual(dset_split[:2]["col_1"], ["3", "2"])
self.assertEqual(dset_split["col_1"][:2], ["3", "2"])
prev_format = dset[list(dset.keys())[0]].format
for dset_split in dset.values():
dset_split.set_format(**dset_split.format)
self.assertEqual(prev_format, dset_split.format)
dset.set_transform(transform=transform, columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].keys()), 2)
self.assertEqual(dset_split[0]["col_2"], "A")
del dset
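    # Minimal sketch (illustration only): a transform registered with set_transform() is applied
    # lazily, on access, and always receives a batch (a dict of lists), even for single-row access.
    def _sketch_set_transform_is_lazy(self):
        dset = Dataset.from_dict({"x": [1, 2, 3]})
        dset.set_transform(lambda batch: {"x": [v * 10 for v in batch["x"]]})
        self.assertEqual(dset[0]["x"], 10)
        self.assertEqual(dset[:2]["x"], [10, 20])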
def test_with_format(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_format("numpy", columns=["col_1"])
dset.set_format("numpy", columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_with_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_transform(transform, columns=["col_1"])
dset.set_transform(transform, columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_cast(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
features = dset["train"].features
features["col_1"] = Value("float64")
dset = dset.cast(features)
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertEqual(dset_split.features["col_1"], Value("float64"))
self.assertIsInstance(dset_split[0]["col_1"], float)
del dset
def test_remove_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.remove_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertListEqual(dset_split._format_columns, ["col_2"])
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
del dset
def test_rename_column(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"])
del dset
def test_select_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=[])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.select_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
self.assertListEqual(dset_split._format_columns, ["col_1"])
def test_map(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys()))
self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"])
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
mapped_dsets_2: DatasetDict = mapped_dsets_1.map(
lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys()))
self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"]))
del dsets, mapped_dsets_1, mapped_dsets_2
def test_iterable_map(self):
dsets = self._create_dummy_iterable_dataset_dict()
fn_kwargs = {"n": 3}
mapped_dsets: IterableDatasetDict = dsets.map(
lambda x, n: {"foo": [n] * len(x["filename"])},
batched=True,
fn_kwargs=fn_kwargs,
)
mapped_example = next(iter(mapped_dsets["train"]))
self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"]))
self.assertLessEqual(mapped_example["foo"], 3)
del dsets, mapped_dsets
def test_filter(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys()))
self.assertEqual(len(filtered_dsets_1["train"]), 10)
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
filtered_dsets_2: DatasetDict = filtered_dsets_1.filter(
lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys()))
self.assertEqual(len(filtered_dsets_2["train"]), 5)
filtered_dsets_3: DatasetDict = dsets.filter(
lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys()))
self.assertEqual(len(filtered_dsets_3["train"]), 10)
del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3
def test_iterable_filter(self):
dsets = self._create_dummy_iterable_dataset_dict()
example = next(iter(dsets["train"]))
fn_kwargs = {"n": 3}
filtered_dsets: IterableDatasetDict = dsets.filter(
lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs
)
filtered_example = next(iter(filtered_dsets["train"]))
self.assertListEqual(list(example.keys()), list(filtered_example.keys()))
        self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4)  # ids greater than n=3 are kept, so the first one is 4
del dsets, filtered_dsets
def test_sort(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
sorted_dsets_1: DatasetDict = dsets.sort("filename")
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]],
sorted(f"{x:03d}" for x in range(30)),
)
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
sorted_dsets_2: DatasetDict = sorted_dsets_1.sort(
"filename", indices_cache_file_names=indices_cache_file_names, reverse=True
)
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]],
sorted((f"{x:03d}" for x in range(30)), reverse=True),
)
del dsets, sorted_dsets_1, sorted_dsets_2
def test_shuffle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
seeds = {
"train": 1234,
"test": 1234,
}
dsets_shuffled = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"])
self.assertEqual(len(dsets_shuffled["train"]), 30)
self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028")
self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010")
self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")}))
self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")}))
# Reproducibility
indices_cache_file_names_2 = {
"train": os.path.join(tmp_dir, "train_2.arrow"),
"test": os.path.join(tmp_dir, "test_2.arrow"),
}
dsets_shuffled_2 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"])
seeds = {
"train": 1234,
"test": 1,
}
indices_cache_file_names_3 = {
"train": os.path.join(tmp_dir, "train_3.arrow"),
"test": os.path.join(tmp_dir, "test_3.arrow"),
}
dsets_shuffled_3 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False
)
self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"])
# other input types
dsets_shuffled_int = dsets.shuffle(42)
dsets_shuffled_alias = dsets.shuffle(seed=42)
dsets_shuffled_none = dsets.shuffle()
self.assertEqual(len(dsets_shuffled_int["train"]), 30)
self.assertEqual(len(dsets_shuffled_alias["train"]), 30)
self.assertEqual(len(dsets_shuffled_none["train"]), 30)
del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3
del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none
def test_flatten_indices(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
dsets_shuffled = dsets.shuffle(
seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertIsNotNone(dsets_shuffled["train"]._indices)
self.assertIsNotNone(dsets_shuffled["test"]._indices)
dsets_flat = dsets_shuffled.flatten_indices()
self.assertIsNone(dsets_flat["train"]._indices)
self.assertIsNone(dsets_flat["test"]._indices)
del dsets, dsets_shuffled, dsets_flat
def test_check_values_type(self):
dsets = self._create_dummy_dataset_dict()
dsets["bad_split"] = None
self.assertRaises(TypeError, dsets.map, lambda x: x)
self.assertRaises(TypeError, dsets.filter, lambda x: True)
self.assertRaises(TypeError, dsets.shuffle)
self.assertRaises(TypeError, dsets.sort, "filename")
del dsets
def test_serialization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
del reloaded_dsets
del dsets["test"]
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
del dsets, reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2})
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 3)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_proc=2)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 2)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
def test_load_from_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
del dsets
dsets = load_from_disk(tmp_dir)
self.assertListEqual(sorted(dsets), ["test", "train"])
self.assertEqual(len(dsets["train"]), 30)
self.assertListEqual(dsets["train"].column_names, ["filename"])
self.assertEqual(len(dsets["test"]), 30)
self.assertListEqual(dsets["test"].column_names, ["filename"])
del dsets
def test_align_labels_with_mapping(self):
train_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
}
)
test_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]),
}
)
train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]}
test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]}
label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1}
id2label = {v: k for k, v in label2id.items()}
train_expected_labels = [2, 2, 1, 1, 0, 0]
test_expected_labels = [2, 2, 0, 0, 1, 1]
train_expected_label_names = [id2label[idx] for idx in train_expected_labels]
test_expected_label_names = [id2label[idx] for idx in test_expected_labels]
dsets = DatasetDict(
{
"train": Dataset.from_dict(train_data, features=train_features),
"test": Dataset.from_dict(test_data, features=test_features),
}
)
dsets = dsets.align_labels_with_mapping(label2id, "input_labels")
self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"])
self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"])
train_aligned_label_names = [
dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"]
]
test_aligned_label_names = [
dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"]
]
self.assertListEqual(train_expected_label_names, train_aligned_label_names)
self.assertListEqual(test_expected_label_names, test_aligned_label_names)
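# Minimal sketch (illustration only): align_labels_with_mapping() also exists on a single
# Dataset; label ids are remapped to follow the given label2id, with names matched
# case-insensitively as in the uppercase mapping used above.
def _sketch_align_labels_single_dataset():
    features = Features({"label": ClassLabel(names=["neg", "pos"])})
    dset = Dataset.from_dict({"label": [0, 1]}, features=features)
    aligned = dset.align_labels_with_mapping({"pos": 0, "neg": 1}, "label")
    assert aligned["label"] == [1, 0]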
def test_dummy_datasetdict_serialize_fs(mockfs):
dataset_dict = DatasetDict(
{
"train": Dataset.from_dict({"a": range(30)}),
"test": Dataset.from_dict({"a": range(10)}),
}
)
dataset_path = "mock://my_dataset"
dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options)
assert mockfs.isdir(dataset_path)
assert mockfs.glob(dataset_path + "/*")
reloaded = dataset_dict.load_from_disk(dataset_path, storage_options=mockfs.storage_options)
assert list(reloaded) == list(dataset_dict)
for k in dataset_dict:
assert reloaded[k].features == dataset_dict[k].features
assert reloaded[k].to_dict() == dataset_dict[k].to_dict()
def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_csv_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
    # The CSV file loses the col_1 string dtype information: the inferred default is "int64" instead of "string"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_csv_split(split, csv_path, tmp_path):
if split:
path = {split: csv_path}
else:
split = "train"
path = {"train": csv_path, "test": csv_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_csv(path, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
if split:
path = {split: jsonl_path}
else:
split = "train"
path = {"train": jsonl_path, "test": jsonl_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_json(path, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path):
if split:
path = {split: parquet_path}
else:
split = "train"
path = {"train": parquet_path, "test": parquet_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
if split:
path = {split: text_path}
else:
split = "train"
path = {"train": text_path, "test": text_path}
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = DatasetDict.from_text(path, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
| datasets-main | tests/test_dataset_dict.py |
import unittest
import warnings
from datasets.utils import experimental
@experimental
def dummy_function():
return "success"
class TestExperimentalFlag(unittest.TestCase):
def test_experimental_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.assertEqual(dummy_function(), "success")
self.assertEqual(len(w), 1)
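# --- Illustrative sketch (editorial addition, not part of the original test module) ---
# A decorator in the spirit of `experimental` can be built by wrapping the callable and
# emitting a warning on each call; this is one possible implementation of the technique,
# not the actual code in `datasets.utils`.
import functools
def _experimental_sketch(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        warnings.warn(f"{fn.__name__} is experimental and may change without notice.", UserWarning)
        return fn(*args, **kwargs)
    return wrapper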
| datasets-main | tests/test_experimental.py |
# isort: skip_file
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: F401 - this is just for tests
import os as renamed_os # noqa: F401 - this is just for tests
from os import path # noqa: F401 - this is just for tests
from os import path as renamed_path # noqa: F401 - this is just for tests
from os.path import join # noqa: F401 - this is just for tests
from os.path import join as renamed_join # noqa: F401 - this is just for tests
open = open # noqa we just need to have a builtin inside this module to test it properly
| datasets-main | tests/_test_patching.py |
import re
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import yaml
from huggingface_hub import DatasetCard, DatasetCardData
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.utils.metadata import MetadataConfigs
def _dedent(string: str) -> str:
indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines())
return "\n".join([line[indent_level:] for line in string.splitlines() if indent_level < len(line)])
README_YAML = """\
---
language:
- zh
- en
task_ids:
- sentiment-classification
---
# Begin of markdown
Some cool dataset card
"""
README_EMPTY_YAML = """\
---
---
# Begin of markdown
Some cool dataset card
"""
README_NO_YAML = """\
# Begin of markdown
Some cool dataset card
"""
README_METADATA_CONFIG_INCORRECT_FORMAT = f"""\
---
{METADATA_CONFIGS_FIELD}:
data_dir: v1
drop_labels: true
---
"""
README_METADATA_SINGLE_CONFIG = f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: custom
data_dir: v1
drop_labels: true
---
"""
README_METADATA_TWO_CONFIGS_WITH_DEFAULT_FLAG = f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: v1
data_dir: v1
drop_labels: true
- config_name: v2
data_dir: v2
drop_labels: false
default: true
---
"""
README_METADATA_TWO_CONFIGS_WITH_DEFAULT_NAME = f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: custom
data_dir: custom
drop_labels: true
- config_name: default
data_dir: data
drop_labels: false
---
"""
EXPECTED_METADATA_SINGLE_CONFIG = {"custom": {"data_dir": "v1", "drop_labels": True}}
EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_FLAG = {
"v1": {"data_dir": "v1", "drop_labels": True},
"v2": {"data_dir": "v2", "drop_labels": False, "default": True},
}
EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_NAME = {
"custom": {"data_dir": "custom", "drop_labels": True},
"default": {"data_dir": "data", "drop_labels": False},
}
@pytest.fixture
def data_dir_with_two_subdirs(tmp_path):
data_dir = tmp_path / "data_dir_with_two_configs_in_metadata"
cats_data_dir = data_dir / "cats"
cats_data_dir.mkdir(parents=True)
dogs_data_dir = data_dir / "dogs"
dogs_data_dir.mkdir(parents=True)
with open(cats_data_dir / "cat.jpg", "wb") as f:
f.write(b"this_is_a_cat_image_bytes")
with open(dogs_data_dir / "dog.jpg", "wb") as f:
f.write(b"this_is_a_dog_image_bytes")
return str(data_dir)
class TestMetadataUtils(unittest.TestCase):
def test_metadata_dict_from_readme(self):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(README_YAML)
dataset_card_data = DatasetCard.load(path).data
self.assertDictEqual(
dataset_card_data.to_dict(), {"language": ["zh", "en"], "task_ids": ["sentiment-classification"]}
)
with open(path, "w+") as readme_file:
readme_file.write(README_EMPTY_YAML)
if (
sys.platform != "win32"
): # there is a bug on windows, see https://github.com/huggingface/huggingface_hub/issues/1546
dataset_card_data = DatasetCard.load(path).data
self.assertDictEqual(dataset_card_data.to_dict(), {})
with open(path, "w+") as readme_file:
readme_file.write(README_NO_YAML)
dataset_card_data = DatasetCard.load(path).data
self.assertEqual(dataset_card_data.to_dict(), {})
def test_from_yaml_string(self):
valid_yaml_string = _dedent(
"""\
annotations_creators:
- found
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
pretty_name: Test Dataset
size_categories:
- 10K<n<100K
source_datasets:
- extended|other-yahoo-webscope-l6
task_categories:
- question-answering
task_ids:
- open-domain-qa
"""
)
assert DatasetCardData(**yaml.safe_load(valid_yaml_string)).to_dict()
valid_yaml_with_optional_keys = _dedent(
"""\
annotations_creators:
- found
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
pretty_name: Test Dataset
size_categories:
- 10K<n<100K
source_datasets:
- extended|other-yahoo-webscope-l6
task_categories:
- text-classification
task_ids:
- multi-class-classification
paperswithcode_id:
- squad
configs:
- en
train-eval-index:
- config: en
task: text-classification
task_id: multi_class_classification
splits:
train_split: train
eval_split: test
col_mapping:
text: text
label: target
metrics:
- type: accuracy
name: Accuracy
extra_gated_prompt: |
By clicking on “Access repository” below, you also agree to ImageNet Terms of Access:
[RESEARCHER_FULLNAME] (the "Researcher") has requested permission to use the ImageNet database (the "Database") at Princeton University and Stanford University. In exchange for such permission, Researcher hereby agrees to the following terms and conditions:
1. Researcher shall use the Database only for non-commercial research and educational purposes.
extra_gated_fields:
Company: text
Country: text
I agree to use this model for non-commerical use ONLY: checkbox
"""
)
assert DatasetCardData(**yaml.safe_load(valid_yaml_with_optional_keys)).to_dict()
@pytest.mark.parametrize(
"readme_content, expected_metadata_configs_dict, expected_default_config_name",
[
(README_METADATA_SINGLE_CONFIG, EXPECTED_METADATA_SINGLE_CONFIG, None),
(README_METADATA_TWO_CONFIGS_WITH_DEFAULT_FLAG, EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_FLAG, "v2"),
(README_METADATA_TWO_CONFIGS_WITH_DEFAULT_NAME, EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_NAME, "default"),
],
)
def test_metadata_configs_dataset_card_data(
readme_content, expected_metadata_configs_dict, expected_default_config_name
):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(readme_content)
dataset_card_data = DatasetCard.load(path).data
metadata_configs_dict = MetadataConfigs.from_dataset_card_data(dataset_card_data)
assert metadata_configs_dict == expected_metadata_configs_dict
assert metadata_configs_dict.get_default_config_name() == expected_default_config_name
def test_metadata_configs_incorrect_yaml():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir) / "README.md"
with open(path, "w+") as readme_file:
readme_file.write(README_METADATA_CONFIG_INCORRECT_FORMAT)
dataset_card_data = DatasetCard.load(path).data
with pytest.raises(ValueError):
_ = MetadataConfigs.from_dataset_card_data(dataset_card_data)
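# --- Worked example (editorial note, mirrors the fixtures above) ---
# The YAML block in README_METADATA_SINGLE_CONFIG (a METADATA_CONFIGS_FIELD list with a single
# entry: config_name "custom", data_dir "v1", drop_labels true) is parsed by
# MetadataConfigs.from_dataset_card_data into a plain mapping from config name to loading
# parameters, i.e. EXPECTED_METADATA_SINGLE_CONFIG == {"custom": {"data_dir": "v1", "drop_labels": True}}.
# Because no entry is flagged `default: true` and none is named "default",
# get_default_config_name() returns None for that fixture.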
| datasets-main | tests/test_metadata_util.py |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
pass
def gen(shards: List[str]):
for shard in shards:
for i in range(NUM_ITEMS_PER_SHARD):
yield {"i": i, "shard": shard}
def main():
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
parser = ArgumentParser()
parser.add_argument("--streaming", type=bool)
parser.add_argument("--local_rank", type=int)
parser.add_argument("--num_workers", type=int, default=0)
args = parser.parse_args()
streaming = args.streaming
num_workers = args.num_workers
gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
if not streaming:
ds = Dataset.from_list(list(ds))
ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
expected_local_size = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
local_size = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
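# --- Worked example of the size arithmetic above (editorial note) ---
# With NUM_SHARDS = 4 and NUM_ITEMS_PER_SHARD = 3 the full dataset holds 12 examples.
# For world_size = 2: 12 // 2 = 6 and 12 % 2 = 0, so both ranks expect exactly 6 examples.
# For world_size = 5: 12 // 5 = 2 and 12 % 5 = 2, so ranks 0 and 1 (rank < 2) expect one
# extra example each, giving per-rank sizes of [3, 3, 2, 2, 2], which still sum to 12.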
| datasets-main | tests/distributed_scripts/run_torch_distributed.py |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
_check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
_check_parquet_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
if issubclass(path_type, str):
path = parquet_path
elif issubclass(path_type, list):
path = [parquet_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
_check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = ParquetDatasetReader(
{"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
).read()
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
if split:
path = {split: parquet_path}
else:
split = "train"
path = {"train": parquet_path, "test": parquet_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
_check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
assert writer.write() > 0
pf = pq.ParquetFile(tmp_path / "foo.parquet")
output_table = pf.read()
assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image()})
dataset = Dataset.from_dict(data, features=features)
writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
assert writer.write() > 0
reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
assert dataset.features == reloaded_dataset.features
reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected",
[
(Features({"foo": Value("int32")}), None),
(Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
],
)
def test_get_writer_batch_size(feature, expected):
assert get_writer_batch_size(feature) == expected
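# --- Editorial note on the parametrization above ---
# get_writer_batch_size inspects the feature tree: plain value columns keep the default
# Parquet row group size (None here), while the presence of Image or Audio features anywhere
# in the tree maps to the dedicated PARQUET_ROW_GROUP_SIZE_FOR_* constants, so row groups
# stay small enough when rows carry large binary payloads.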
| datasets-main | tests/io/test_parquet.py |
| datasets-main | tests/io/__init__.py |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = SqlDatasetReader(
"dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
).read()
_check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
_check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
with pytest.raises(ValueError):
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
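# --- Usage note (editorial, assuming the public convenience wrappers) ---
# SqlDatasetReader takes a table name (or a SQL query) plus a SQLAlchemy-style connection URI,
# and the same round trip can also be reached through Dataset.from_sql / Dataset.to_sql,
# which wrap the reader and writer classes exercised above.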
| datasets-main | tests/io/test_sql.py |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
_check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
_check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
if issubclass(path_type, str):
path = text_path
elif issubclass(path_type, list):
path = [text_path]
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
_check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
if split:
path = {split: text_path}
else:
split = "train"
path = {"train": text_path, "test": text_path}
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
_check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
| datasets-main | tests/io/test_text.py |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
_check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
expected_features = features.copy()
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
cache_dir = tmp_path / "cache"
dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
_check_json_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
if issubclass(path_type, str):
path = jsonl_path
elif issubclass(path_type, list):
path = [jsonl_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
_check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
if split:
path = {split: jsonl_path}
else:
split = "train"
path = {"train": jsonl_path, "test": jsonl_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
_check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
return json.load(buffer)
def load_json_lines(buffer):
return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
@pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, lines=lines).write()
buffer.seek(0)
exported_content = load_json_function(buffer)
assert isinstance(exported_content, list)
assert isinstance(exported_content[0], dict)
assert len(exported_content) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at",
[
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789"), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
],
)
def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
buffer.seek(0)
exported_content = load_json(buffer)
assert isinstance(exported_content, container)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(exported_content) == 10
@pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
buffer.seek(0)
exported_content = load_json_function(buffer)
assert isinstance(exported_content, list)
assert isinstance(exported_content[0], dict)
assert len(exported_content) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at",
[
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789"), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
],
)
def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
buffer.seek(0)
exported_content = load_json(buffer)
assert isinstance(exported_content, container)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(exported_content) == 10
def test_dataset_to_json_orient_invalidproc(self, dataset):
with pytest.raises(ValueError):
with io.BytesIO() as buffer:
JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
original_path = str(shared_datadir / f"test_file.json.{extension}")
JsonDatasetWriter(dataset, path, compression=compression).write()
with fsspec.open(path, "rb", compression="infer") as f:
exported_content = f.read()
with fsspec.open(original_path, "rb", compression="infer") as f:
original_content = f.read()
assert exported_content == original_content
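# --- Usage note (editorial, assuming the public convenience wrapper) ---
# Dataset.to_json is a thin wrapper around JsonDatasetWriter, forwarding keyword arguments
# such as lines, orient, num_proc and compression, so the behaviours exercised above can
# also be reached as e.g. dataset.to_json("data.jsonl.gz", compression="gzip").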
| datasets-main | tests/io/test_json.py |
import csv
import os
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.csv import CsvDatasetReader, CsvDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_csv_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_csv_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_csv_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
    # The CSV file does not preserve the "string" dtype of col_1: the inferred default is "int64"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = CsvDatasetReader(csv_path, features=features, cache_dir=cache_dir).read()
_check_csv_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_csv_split(split, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, split=split).read()
_check_csv_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path):
if issubclass(path_type, str):
path = csv_path
elif issubclass(path_type, list):
path = [csv_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = CsvDatasetReader(path, cache_dir=cache_dir).read()
_check_csv_dataset(dataset, expected_features)
def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_csv_datasetdict_reader_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_csv_datasetdict_reader_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
    # The CSV file does not preserve the "string" dtype of col_1: the inferred default is "int64"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = CsvDatasetReader({"train": csv_path}, features=features, cache_dir=cache_dir).read()
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_csv_datasetdict_reader_split(split, csv_path, tmp_path):
if split:
path = {split: csv_path}
else:
path = {"train": csv_path, "test": csv_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = CsvDatasetReader(path, cache_dir=cache_dir).read()
_check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def iter_csv_file(csv_path):
with open(csv_path, encoding="utf-8") as csvfile:
yield from csv.reader(csvfile)
def test_dataset_to_csv(csv_path, tmp_path):
cache_dir = tmp_path / "cache"
output_csv = os.path.join(cache_dir, "tmp.csv")
dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read()
CsvDatasetWriter(dataset["train"], output_csv, num_proc=1).write()
original_csv = iter_csv_file(csv_path)
expected_csv = iter_csv_file(output_csv)
for row1, row2 in zip(original_csv, expected_csv):
assert row1 == row2
def test_dataset_to_csv_multiproc(csv_path, tmp_path):
cache_dir = tmp_path / "cache"
output_csv = os.path.join(cache_dir, "tmp.csv")
dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read()
CsvDatasetWriter(dataset["train"], output_csv, num_proc=2).write()
original_csv = iter_csv_file(csv_path)
expected_csv = iter_csv_file(output_csv)
for row1, row2 in zip(original_csv, expected_csv):
assert row1 == row2
def test_dataset_to_csv_invalidproc(csv_path, tmp_path):
cache_dir = tmp_path / "cache"
output_csv = os.path.join(cache_dir, "tmp.csv")
dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read()
with pytest.raises(ValueError):
CsvDatasetWriter(dataset["train"], output_csv, num_proc=0)
| datasets-main | tests/io/test_csv.py |
import datetime
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from datasets import Array2D
from datasets.arrow_dataset import Dataset
from datasets.features import Audio, ClassLabel, Features, Image, Sequence, Value
from datasets.features.features import (
_arrow_to_datasets_dtype,
_cast_to_python_objects,
cast_to_python_objects,
encode_nested_example,
generate_from_dict,
string_to_arrow,
)
from datasets.features.translation import Translation, TranslationVariableLanguages
from datasets.info import DatasetInfo
from datasets.utils.py_utils import asdict
from ..utils import require_jax, require_tf, require_torch
class FeaturesTest(TestCase):
def test_from_arrow_schema_simple(self):
data = {"a": [{"b": {"c": "text"}}] * 10, "foo": [1] * 10}
original_features = Features({"a": {"b": {"c": Value("string")}}, "foo": Value("int64")})
dset = Dataset.from_dict(data, features=original_features)
new_features = dset.features
new_dset = Dataset.from_dict(data, features=new_features)
self.assertEqual(original_features.type, new_features.type)
self.assertDictEqual(dset[0], new_dset[0])
self.assertDictEqual(dset[:], new_dset[:])
def test_from_arrow_schema_with_sequence(self):
data = {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}
original_features = Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")})
dset = Dataset.from_dict(data, features=original_features)
new_features = dset.features
new_dset = Dataset.from_dict(data, features=new_features)
self.assertEqual(original_features.type, new_features.type)
self.assertDictEqual(dset[0], new_dset[0])
self.assertDictEqual(dset[:], new_dset[:])
def test_string_to_arrow_bijection_for_primitive_types(self):
supported_pyarrow_datatypes = [
pa.time32("s"),
pa.time64("us"),
pa.timestamp("s"),
pa.timestamp("ns", tz="America/New_York"),
pa.date32(),
pa.date64(),
pa.duration("s"),
pa.decimal128(10, 2),
pa.decimal256(40, -3),
pa.string(),
pa.int32(),
pa.float64(),
pa.array([datetime.time(1, 1, 1)]).type, # arrow type: DataType(time64[us])
]
for dt in supported_pyarrow_datatypes:
self.assertEqual(dt, string_to_arrow(_arrow_to_datasets_dtype(dt)))
unsupported_pyarrow_datatypes = [pa.list_(pa.float64())]
for dt in unsupported_pyarrow_datatypes:
with self.assertRaises(ValueError):
string_to_arrow(_arrow_to_datasets_dtype(dt))
supported_datasets_dtypes = [
"time32[s]",
"timestamp[ns]",
"timestamp[ns, tz=+07:30]",
"duration[us]",
"decimal128(30, -4)",
"int32",
"float64",
]
for sdt in supported_datasets_dtypes:
self.assertEqual(sdt, _arrow_to_datasets_dtype(string_to_arrow(sdt)))
unsupported_datasets_dtypes = [
"time32[ns]",
"timestamp[blob]",
"timestamp[[ns]]",
"timestamp[ns, tz=[ns]]",
"duration[[us]]",
"decimal20(30, -4)",
"int",
]
for sdt in unsupported_datasets_dtypes:
with self.assertRaises(ValueError):
string_to_arrow(sdt)
def test_feature_named_type(self):
"""reference: issue #1110"""
features = Features({"_type": Value("string")})
ds_info = DatasetInfo(features=features)
reloaded_features = Features.from_dict(asdict(ds_info)["features"])
assert features == reloaded_features
def test_feature_named_self_as_kwarg(self):
"""reference: issue #5641"""
features = Features(self=Value("string"))
ds_info = DatasetInfo(features=features)
reloaded_features = Features.from_dict(asdict(ds_info)["features"])
assert features == reloaded_features
def test_class_label_feature_with_no_labels(self):
"""reference: issue #4681"""
features = Features({"label": ClassLabel(names=[])})
ds_info = DatasetInfo(features=features)
reloaded_features = Features.from_dict(asdict(ds_info)["features"])
assert features == reloaded_features
def test_reorder_fields_as(self):
features = Features(
{
"id": Value("string"),
"document": {
"title": Value("string"),
"url": Value("string"),
"html": Value("string"),
"tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
},
"question": {
"text": Value("string"),
"tokens": Sequence(Value("string")),
},
"annotations": Sequence(
{
"id": Value("string"),
"long_answer": {
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
"end_byte": Value("int64"),
},
"short_answers": Sequence(
{
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
"end_byte": Value("int64"),
"text": Value("string"),
}
),
"yes_no_answer": ClassLabel(names=["NO", "YES"]),
}
),
}
)
other = Features( # same but with [] instead of sequences, and with a shuffled fields order
{
"id": Value("string"),
"document": {
"tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
"title": Value("string"),
"url": Value("string"),
"html": Value("string"),
},
"question": {
"text": Value("string"),
"tokens": [Value("string")],
},
"annotations": {
"yes_no_answer": [ClassLabel(names=["NO", "YES"])],
"id": [Value("string")],
"long_answer": [
{
"end_byte": Value("int64"),
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
}
],
"short_answers": [
Sequence(
{
"text": Value("string"),
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
"end_byte": Value("int64"),
}
)
],
},
}
)
expected = Features(
{
"id": Value("string"),
"document": {
"tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
"title": Value("string"),
"url": Value("string"),
"html": Value("string"),
},
"question": {
"text": Value("string"),
"tokens": Sequence(Value("string")),
},
"annotations": Sequence(
{
"yes_no_answer": ClassLabel(names=["NO", "YES"]),
"id": Value("string"),
"long_answer": {
"end_byte": Value("int64"),
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
},
"short_answers": Sequence(
{
"text": Value("string"),
"start_token": Value("int64"),
"end_token": Value("int64"),
"start_byte": Value("int64"),
"end_byte": Value("int64"),
}
),
}
),
}
)
reordered_features = features.reorder_fields_as(other)
self.assertDictEqual(reordered_features, expected)
self.assertEqual(reordered_features.type, other.type)
self.assertEqual(reordered_features.type, expected.type)
self.assertNotEqual(reordered_features.type, features.type)
def test_flatten(self):
features = Features({"foo": {"bar1": Value("int32"), "bar2": {"foobar": Value("string")}}})
_features = features.copy()
flattened_features = features.flatten()
assert flattened_features == {"foo.bar1": Value("int32"), "foo.bar2.foobar": Value("string")}
assert features == _features, "calling flatten shouldn't alter the current features"
def test_flatten_with_sequence(self):
features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})})
_features = features.copy()
flattened_features = features.flatten()
assert flattened_features == {"foo.bar": [{"my_value": Value("int32")}]}
assert features == _features, "calling flatten shouldn't alter the current features"
def test_features_dicts_are_synced(self):
def assert_features_dicts_are_synced(features: Features):
assert (
hasattr(features, "_column_requires_decoding")
and features.keys() == features._column_requires_decoding.keys()
)
features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})})
assert_features_dicts_are_synced(features)
features["barfoo"] = Image()
assert_features_dicts_are_synced(features)
del features["barfoo"]
assert_features_dicts_are_synced(features)
features.update({"foobar": Value("string")})
assert_features_dicts_are_synced(features)
features.pop("foobar")
assert_features_dicts_are_synced(features)
features.popitem()
assert_features_dicts_are_synced(features)
features.setdefault("xyz", Value("bool"))
assert_features_dicts_are_synced(features)
features.clear()
assert_features_dicts_are_synced(features)
def test_classlabel_init(tmp_path_factory):
names = ["negative", "positive"]
names_file = str(tmp_path_factory.mktemp("features") / "labels.txt")
with open(names_file, "w", encoding="utf-8") as f:
f.write("\n".join(names))
classlabel = ClassLabel(names=names)
assert classlabel.names == names and classlabel.num_classes == len(names)
classlabel = ClassLabel(names_file=names_file)
assert classlabel.names == names and classlabel.num_classes == len(names)
classlabel = ClassLabel(num_classes=len(names), names=names)
assert classlabel.names == names and classlabel.num_classes == len(names)
classlabel = ClassLabel(num_classes=len(names))
assert classlabel.names == [str(i) for i in range(len(names))] and classlabel.num_classes == len(names)
with pytest.raises(ValueError):
classlabel = ClassLabel(num_classes=len(names) + 1, names=names)
with pytest.raises(ValueError):
classlabel = ClassLabel(names=names, names_file=names_file)
with pytest.raises(ValueError):
classlabel = ClassLabel()
with pytest.raises(TypeError):
classlabel = ClassLabel(names=np.array(names))
def test_classlabel_str2int():
names = ["negative", "positive"]
classlabel = ClassLabel(names=names)
for label in names:
assert classlabel.str2int(label) == names.index(label)
with pytest.raises(ValueError):
classlabel.str2int("__bad_label_name__")
with pytest.raises(ValueError):
classlabel.str2int(1)
with pytest.raises(ValueError):
classlabel.str2int(None)
def test_classlabel_int2str():
names = ["negative", "positive"]
classlabel = ClassLabel(names=names)
for i in range(len(names)):
assert classlabel.int2str(i) == names[i]
with pytest.raises(ValueError):
classlabel.int2str(len(names))
with pytest.raises(ValueError):
classlabel.int2str(-1)
with pytest.raises(ValueError):
classlabel.int2str(None)
def test_classlabel_cast_storage():
names = ["negative", "positive"]
classlabel = ClassLabel(names=names)
# from integers
arr = pa.array([0, 1, -1, -100], type=pa.int64())
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == [0, 1, -1, -100]
arr = pa.array([0, 1, -1, -100], type=pa.int32())
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == [0, 1, -1, -100]
arr = pa.array([3])
with pytest.raises(ValueError):
classlabel.cast_storage(arr)
# from strings
arr = pa.array(["negative", "positive"])
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == [0, 1]
arr = pa.array(["__label_that_doesnt_exist__"])
with pytest.raises(ValueError):
classlabel.cast_storage(arr)
# from nulls
arr = pa.array([None])
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == [None]
# from empty
arr = pa.array([], pa.int64())
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == []
arr = pa.array([], pa.string())
result = classlabel.cast_storage(arr)
assert result.type == pa.int64()
assert result.to_pylist() == []
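# --- Editorial note on the cases above ---
# ClassLabel.cast_storage always returns int64 storage: in-range integer ids and the common
# negative placeholders (-1, -100) pass through unchanged, string labels are mapped through
# str2int, nulls stay null, and anything outside the known label set (an out-of-range id or
# an unknown label string) raises ValueError.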
@pytest.mark.parametrize("class_label_arg", ["names", "names_file"])
def test_class_label_to_and_from_dict(class_label_arg, tmp_path_factory):
names = ["negative", "positive"]
names_file = str(tmp_path_factory.mktemp("features") / "labels.txt")
with open(names_file, "w", encoding="utf-8") as f:
f.write("\n".join(names))
if class_label_arg == "names":
class_label = ClassLabel(names=names)
elif class_label_arg == "names_file":
class_label = ClassLabel(names_file=names_file)
generated_class_label = generate_from_dict(asdict(class_label))
assert generated_class_label == class_label
@pytest.mark.parametrize("inner_type", [Value("int32"), {"subcolumn": Value("int32")}])
def test_encode_nested_example_sequence_with_none(inner_type):
schema = Sequence(inner_type)
obj = None
result = encode_nested_example(schema, obj)
assert result is None
def test_encode_batch_with_example_with_empty_first_elem():
features = Features(
{
"x": Sequence(Sequence(ClassLabel(names=["a", "b"]))),
}
)
encoded_batch = features.encode_batch(
{
"x": [
[["a"], ["b"]],
[[], ["b"]],
]
}
)
assert encoded_batch == {"x": [[[0], [1]], [[], [1]]]}
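# Illustrative sketch (helper name is arbitrary): encode_example is the per-example
# counterpart of the encode_batch call above; ClassLabel names are encoded to integer ids.
def _example_encode_single_example():
    features = Features({"x": Sequence(ClassLabel(names=["a", "b"]))})
    assert features.encode_example({"x": ["a", "b", "a"]}) == {"x": [0, 1, 0]}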
@pytest.mark.parametrize(
"feature",
[
Value("int32"),
ClassLabel(num_classes=2),
Translation(languages=["en", "fr"]),
TranslationVariableLanguages(languages=["en", "fr"]),
],
)
def test_dataset_feature_with_none(feature):
data = {"col": [None]}
features = Features({"col": feature})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"col"}
assert item["col"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"col"}
assert isinstance(batch["col"], list) and all(item is None for item in batch["col"])
column = dset["col"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
# nested tests
data = {"col": [[None]]}
features = Features({"col": Sequence(feature)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"col"}
assert all(i is None for i in item["col"])
data = {"nested": [{"col": None}]}
features = Features({"nested": {"col": feature}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"col"}
assert item["nested"]["col"] is None
def iternumpy(key1, value1, value2):
    if value1.dtype != value2.dtype:  # compare dtypes only
        raise AssertionError(
            f"dtype mismatch for key '{key1}': casted object has dtype {value1.dtype}, expected {value2.dtype}"
        )
def dict_diff(d1: dict, d2: dict):  # recursively check that two dictionaries are equal, including numpy dtypes
    np.testing.assert_equal(d1, d2)  # sanity check that the values themselves are equal
    for (k1, v1), (k2, v2) in zip(d1.items(), d2.items()):  # check that corresponding values have the same dtype
        if isinstance(v1, dict):  # nested dictionary case
            dict_diff(v1, v2)
        elif isinstance(v1, np.ndarray):  # compare dtype (and value) of numpy arrays
            iternumpy(k1, v1, v2)
        elif isinstance(v1, list):
            for element1, element2 in zip(v1, v2):  # iterate over all elements of the list
                if isinstance(element1, dict):
                    dict_diff(element1, element2)
                elif isinstance(element1, np.ndarray):
                    iternumpy(k1, element1, element2)
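# Illustrative sketch (helper name is arbitrary): dict_diff returns silently when values
# and dtypes match and raises AssertionError on a numpy dtype mismatch, which is what the
# casting tests below rely on.
def _example_dict_diff_usage():
    dict_diff({"a": np.array([1, 2])}, {"a": np.array([1, 2])})  # same values and dtype: passes
    # dict_diff({"a": np.array([1, 2])}, {"a": np.array([1.0, 2.0])})  # would raise AssertionError (dtype mismatch)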
class CastToPythonObjectsTest(TestCase):
def test_cast_to_python_objects_list(self):
obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
casted_obj = cast_to_python_objects(obj)
self.assertDictEqual(casted_obj, expected_obj)
def test_cast_to_python_objects_tuple(self):
obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]}
expected_obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]}
casted_obj = cast_to_python_objects(obj)
self.assertDictEqual(casted_obj, expected_obj)
def test_cast_to_python_or_numpy(self):
obj = {"col_1": [{"vec": np.arange(1, 4), "txt": "foo"}] * 3, "col_2": np.arange(1, 7).reshape(3, 2)}
expected_obj = {
"col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
"col_2": np.array([[1, 2], [3, 4], [5, 6]]),
}
casted_obj = cast_to_python_objects(obj)
dict_diff(casted_obj, expected_obj)
def test_cast_to_python_objects_series(self):
obj = {
"col_1": pd.Series([{"vec": [1, 2, 3], "txt": "foo"}] * 3),
"col_2": pd.Series([[1, 2], [3, 4], [5, 6]]),
}
expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
casted_obj = cast_to_python_objects(obj)
self.assertDictEqual(casted_obj, expected_obj)
def test_cast_to_python_objects_dataframe(self):
obj = pd.DataFrame({"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]})
expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
casted_obj = cast_to_python_objects(obj)
self.assertDictEqual(casted_obj, expected_obj)
def test_cast_to_python_objects_pandas_timestamp(self):
obj = pd.Timestamp(2020, 1, 1)
expected_obj = obj.to_pydatetime()
casted_obj = cast_to_python_objects(obj)
self.assertEqual(casted_obj, expected_obj)
casted_obj = cast_to_python_objects(pd.Series([obj]))
self.assertListEqual(casted_obj, [expected_obj])
casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]}))
self.assertDictEqual(casted_obj, {"a": [expected_obj]})
def test_cast_to_python_objects_pandas_timedelta(self):
obj = pd.Timedelta(seconds=1)
expected_obj = obj.to_pytimedelta()
casted_obj = cast_to_python_objects(obj)
self.assertEqual(casted_obj, expected_obj)
casted_obj = cast_to_python_objects(pd.Series([obj]))
self.assertListEqual(casted_obj, [expected_obj])
casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]}))
self.assertDictEqual(casted_obj, {"a": [expected_obj]})
@require_torch
def test_cast_to_python_objects_torch(self):
import torch
obj = {
"col_1": [{"vec": torch.tensor(np.arange(1, 4)), "txt": "foo"}] * 3,
"col_2": torch.tensor(np.arange(1, 7).reshape(3, 2)),
}
expected_obj = {
"col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
"col_2": np.array([[1, 2], [3, 4], [5, 6]]),
}
casted_obj = cast_to_python_objects(obj)
dict_diff(casted_obj, expected_obj)
@require_tf
def test_cast_to_python_objects_tf(self):
import tensorflow as tf
obj = {
"col_1": [{"vec": tf.constant(np.arange(1, 4)), "txt": "foo"}] * 3,
"col_2": tf.constant(np.arange(1, 7).reshape(3, 2)),
}
expected_obj = {
"col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
"col_2": np.array([[1, 2], [3, 4], [5, 6]]),
}
casted_obj = cast_to_python_objects(obj)
dict_diff(casted_obj, expected_obj)
@require_jax
def test_cast_to_python_objects_jax(self):
import jax.numpy as jnp
obj = {
"col_1": [{"vec": jnp.array(np.arange(1, 4)), "txt": "foo"}] * 3,
"col_2": jnp.array(np.arange(1, 7).reshape(3, 2)),
}
assert obj["col_2"].dtype == jnp.int32
expected_obj = {
"col_1": [{"vec": np.array([1, 2, 3], dtype=np.int32), "txt": "foo"}] * 3,
"col_2": np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32),
}
casted_obj = cast_to_python_objects(obj)
dict_diff(casted_obj, expected_obj)
@patch("datasets.features.features._cast_to_python_objects", side_effect=_cast_to_python_objects)
def test_dont_iterate_over_each_element_in_a_list(self, mocked_cast):
obj = {"col_1": [[1, 2], [3, 4], [5, 6]]}
cast_to_python_objects(obj)
self.assertEqual(mocked_cast.call_count, 4) # 4 = depth of obj
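# Illustrative sketch (helper name is arbitrary): cast_to_python_objects converts pandas
# and framework objects to plain Python containers while leaving numpy arrays as numpy
# arrays, as the TestCase above verifies in more detail.
def _example_cast_to_python_objects_minimal():
    casted = cast_to_python_objects({"a": pd.Series([1, 2]), "b": np.array([1, 2])})
    assert casted["a"] == [1, 2]
    assert isinstance(casted["b"], np.ndarray)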
SIMPLE_FEATURES = [
Features(),
Features({"a": Value("int32")}),
Features({"a": Value("int32", id="my feature")}),
Features({"a": Value("int32"), "b": Value("float64"), "c": Value("string")}),
]
CUSTOM_FEATURES = [
Features({"label": ClassLabel(names=["negative", "positive"])}),
Features({"array": Array2D(dtype="float32", shape=(4, 4))}),
Features({"image": Image()}),
Features({"audio": Audio()}),
Features({"image": Image(decode=False)}),
Features({"audio": Audio(decode=False)}),
Features({"translation": Translation(["en", "fr"])}),
Features({"translation": TranslationVariableLanguages(["en", "fr"])}),
]
NESTED_FEATURES = [
Features({"foo": {}}),
Features({"foo": {"bar": Value("int32")}}),
Features({"foo": {"bar1": Value("int32"), "bar2": Value("float64")}}),
Features({"foo": Sequence(Value("int32"))}),
Features({"foo": Sequence({})}),
Features({"foo": Sequence({"bar": Value("int32")})}),
Features({"foo": [Value("int32")]}),
Features({"foo": [{"bar": Value("int32")}]}),
]
NESTED_CUSTOM_FEATURES = [
Features({"foo": {"bar": ClassLabel(names=["negative", "positive"])}}),
Features({"foo": Sequence(ClassLabel(names=["negative", "positive"]))}),
Features({"foo": Sequence({"bar": ClassLabel(names=["negative", "positive"])})}),
Features({"foo": [ClassLabel(names=["negative", "positive"])]}),
Features({"foo": [{"bar": ClassLabel(names=["negative", "positive"])}]}),
]
@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_dict(features: Features):
features_dict = features.to_dict()
assert isinstance(features_dict, dict)
reloaded = Features.from_dict(features_dict)
assert features == reloaded
@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_yaml_list(features: Features):
features_yaml_list = features._to_yaml_list()
assert isinstance(features_yaml_list, list)
reloaded = Features._from_yaml_list(features_yaml_list)
assert features == reloaded
@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_arrow_schema(features: Features):
arrow_schema = features.arrow_schema
assert isinstance(arrow_schema, pa.Schema)
reloaded = Features.from_arrow_schema(arrow_schema)
assert features == reloaded
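# Illustrative sketch (hypothetical schema): Features can also be built directly from a
# hand-constructed pyarrow schema, mirroring the round-trip test above.
def _example_features_from_manual_arrow_schema():
    schema = pa.schema({"id": pa.int64(), "text": pa.string()})
    assert Features.from_arrow_schema(schema) == Features({"id": Value("int64"), "text": Value("string")})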
| datasets-main | tests/features/test_features.py |
import os
import tarfile
import pyarrow as pa
import pytest
from datasets import Dataset, concatenate_datasets, load_dataset
from datasets.features import Audio, Features, Sequence, Value
from ..utils import (
require_sndfile,
)
@pytest.fixture()
def tar_wav_path(shared_datadir, tmp_path_factory):
audio_path = str(shared_datadir / "test_audio_44100.wav")
path = tmp_path_factory.mktemp("data") / "audio_data.wav.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
@pytest.fixture()
def tar_mp3_path(shared_datadir, tmp_path_factory):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
path = tmp_path_factory.mktemp("data") / "audio_data.mp3.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
def iter_archive(archive_path):
with tarfile.open(archive_path) as tar:
for tarinfo in tar:
file_path = tarinfo.name
file_obj = tar.extractfile(tarinfo)
yield file_path, file_obj
def test_audio_instantiation():
audio = Audio()
assert audio.sampling_rate is None
assert audio.mono is True
assert audio.id is None
assert audio.dtype == "dict"
assert audio.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
assert audio._type == "Audio"
def test_audio_feature_type_to_arrow():
features = Features({"audio": Audio()})
assert features.arrow_schema == pa.schema({"audio": Audio().pa_type})
features = Features({"struct_containing_an_audio": {"audio": Audio()}})
assert features.arrow_schema == pa.schema({"struct_containing_an_audio": pa.struct({"audio": Audio().pa_type})})
features = Features({"sequence_of_audios": Sequence(Audio())})
assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: audio_path,
lambda audio_path: open(audio_path, "rb").read(),
lambda audio_path: {"path": audio_path},
lambda audio_path: {"path": audio_path, "bytes": None},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"path": None, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"bytes": open(audio_path, "rb").read()},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
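# Illustrative sketch (requires the soundfile library, like the tests guarded by
# require_sndfile): encoding an in-memory array serializes it to audio bytes with no path.
def _example_encode_in_memory_audio():
    encoded = Audio().encode_example({"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000})
    assert encoded["path"] is None
    assert isinstance(encoded["bytes"], bytes)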
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: {"path": audio_path, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": None, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read(), "sampling_rate": 16_000},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example_pcm(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio = Audio(sampling_rate=16_000)
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
@require_sndfile
def test_audio_decode_example(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (202311,)
assert decoded_example["sampling_rate"] == 44100
with pytest.raises(RuntimeError):
Audio(decode=False).decode_example(audio_path)
@require_sndfile
def test_audio_resampling(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio(sampling_rate=16000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (73401,)
assert decoded_example["sampling_rate"] == 16000
@require_sndfile
def test_audio_decode_example_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (110592,)
assert decoded_example["sampling_rate"] == 44100
@require_sndfile
def test_audio_decode_example_opus(shared_datadir):
audio_path = str(shared_datadir / "test_audio_48000.opus")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (48000,)
assert decoded_example["sampling_rate"] == 48000
@pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio_input = {"path": audio_path, "sampling_rate": 16_000}
audio = Audio(sampling_rate=sampling_rate)
decoded_example = audio.decode_example(audio.encode_example(audio_input))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] is None
assert decoded_example["array"].shape == (16208 * sampling_rate // 16_000,)
assert decoded_example["sampling_rate"] == sampling_rate
@require_sndfile
def test_audio_resampling_mp3_different_sampling_rates(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio_path2 = str(shared_datadir / "test_audio_16000.mp3")
audio = Audio(sampling_rate=48000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (120373,)
assert decoded_example["sampling_rate"] == 48000
decoded_example = audio.decode_example(audio.encode_example(audio_path2))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path2
assert decoded_example["array"].shape == (122688,)
assert decoded_example["sampling_rate"] == 48000
@require_sndfile
def test_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_tar_wav(tar_wav_path):
audio_filename = "test_audio_44100.wav"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_wav_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_tar_mp3(tar_mp3_path):
audio_filename = "test_audio_44100.mp3"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_mp3_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (110592,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (110592,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (110592,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_with_none():
data = {"audio": [None]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"audio"}
assert isinstance(batch["audio"], list) and all(item is None for item in batch["audio"])
column = dset["audio"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
# nested tests
data = {"audio": [[None]]}
features = Features({"audio": Sequence(Audio())})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert all(i is None for i in item["audio"])
data = {"nested": [{"audio": None}]}
features = Features({"nested": {"audio": Audio()}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"audio"}
assert item["nested"]["audio"] is None
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@pytest.mark.parametrize(
"build_data",
[
lambda audio_path: {"audio": [audio_path]},
lambda audio_path: {"audio": [open(audio_path, "rb").read()]},
lambda audio_path: {"audio": [{"path": audio_path}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": None}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"path": None, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"bytes": open(audio_path, "rb").read()}]},
],
)
def test_dataset_cast_to_audio_features(shared_datadir, build_data):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = build_data(audio_path)
dset = Dataset.from_dict(data)
item = dset.cast(Features({"audio": Audio()}))[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
item = dset.cast_column("audio", Audio())[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
def test_dataset_concatenate_audio_features(shared_datadir):
    # we use different data structures for dset1 and dset2 to make sure they are compatible with each other
audio_path = str(shared_datadir / "test_audio_44100.wav")
data1 = {"audio": [audio_path]}
dset1 = Dataset.from_dict(data1, features=Features({"audio": Audio()}))
data2 = {"audio": [{"bytes": open(audio_path, "rb").read()}]}
dset2 = Dataset.from_dict(data2, features=Features({"audio": Audio()}))
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert concatenated_dataset[0]["audio"]["array"].shape == dset1[0]["audio"]["array"].shape
assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
def test_dataset_concatenate_nested_audio_features(shared_datadir):
    # we use different data structures for dset1 and dset2 to make sure they are compatible with each other
audio_path = str(shared_datadir / "test_audio_44100.wav")
features = Features({"list_of_structs_of_audios": [{"audio": Audio()}]})
data1 = {"list_of_structs_of_audios": [[{"audio": audio_path}]]}
dset1 = Dataset.from_dict(data1, features=features)
data2 = {"list_of_structs_of_audios": [[{"audio": {"bytes": open(audio_path, "rb").read()}}]]}
dset2 = Dataset.from_dict(data2, features=features)
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert (
concatenated_dataset[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset1[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
assert (
concatenated_dataset[1]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset2[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
@require_sndfile
def test_dataset_with_audio_feature_map_is_not_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
expected_audio = features.encode_batch(data)["audio"][0]
for item in dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello"}
def process_text(example):
example["text"] = example["text"] + " World!"
return example
processed_dset = dset.map(process_text)
for item in processed_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello World!"}
@require_sndfile
def test_dataset_with_audio_feature_map_is_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
def process_audio_sampling_rate_by_example(example):
example["double_sampling_rate"] = 2 * example["audio"]["sampling_rate"]
return example
decoded_dset = dset.map(process_audio_sampling_rate_by_example)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
def process_audio_sampling_rate_by_batch(batch):
double_sampling_rates = []
for audio in batch["audio"]:
double_sampling_rates.append(2 * audio["sampling_rate"])
batch["double_sampling_rate"] = double_sampling_rates
return batch
decoded_dset = dset.map(process_audio_sampling_rate_by_batch, batched=True)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
@require_sndfile
def test_formatted_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path, audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert item["audio"][0]["path"] == audio_path
assert item["audio"][0]["array"].shape == (202311,)
assert item["audio"][0]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@pytest.fixture
def jsonl_audio_dataset_path(shared_datadir, tmp_path_factory):
import json
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = [{"audio": audio_path, "text": "Hello world!"}]
path = str(tmp_path_factory.mktemp("data") / "audio_dataset.jsonl")
with open(path, "w") as f:
for item in data:
f.write(json.dumps(item) + "\n")
return path
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_with_audio_feature(streaming, jsonl_audio_dataset_path, shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data_files = jsonl_audio_dataset_path
features = Features({"audio": Audio(), "text": Value("string")})
dset = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming)
item = dset[0] if not streaming else next(iter(dset))
assert item.keys() == {"audio", "text"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
@require_sndfile
@pytest.mark.integration
def test_dataset_with_audio_feature_loaded_from_cache():
# load first time
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean")
# load from cache
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
assert isinstance(ds, Dataset)
def test_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
def assert_audio_example_undecoded(example):
assert example["audio"] == {"path": audio_path, "bytes": None}
dset.map(assert_audio_example_undecoded)
def assert_audio_batch_undecoded(batch):
for audio in batch["audio"]:
assert audio == {"path": audio_path, "bytes": None}
dset.map(assert_audio_batch_undecoded, batched=True)
def test_audio_embed_storage(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
example = {"bytes": None, "path": audio_path}
storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()}))
embedded_storage = Audio().embed_storage(storage)
embedded_example = embedded_storage.to_pylist()[0]
assert embedded_example == {"bytes": open(audio_path, "rb").read(), "path": "test_audio_44100.wav"}
| datasets-main | tests/features/test_audio.py |
import os
import tarfile
import warnings
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from datasets import Dataset, Features, Image, Sequence, Value, concatenate_datasets, load_dataset
from datasets.features.image import encode_np_array, image_to_bytes
from ..utils import require_pil
@pytest.fixture
def tar_jpg_path(shared_datadir, tmp_path_factory):
image_path = str(shared_datadir / "test_image_rgb.jpg")
path = tmp_path_factory.mktemp("data") / "image_data.jpg.tar"
with tarfile.TarFile(path, "w") as f:
f.add(image_path, arcname=os.path.basename(image_path))
return path
def iter_archive(archive_path):
with tarfile.open(archive_path) as tar:
for tarinfo in tar:
file_path = tarinfo.name
file_obj = tar.extractfile(tarinfo)
yield file_path, file_obj
def test_image_instantiation():
image = Image()
assert image.id is None
assert image.dtype == "PIL.Image.Image"
assert image.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
assert image._type == "Image"
def test_image_feature_type_to_arrow():
features = Features({"image": Image()})
assert features.arrow_schema == pa.schema({"image": Image().pa_type})
features = Features({"struct_containing_an_image": {"image": Image()}})
assert features.arrow_schema == pa.schema({"struct_containing_an_image": pa.struct({"image": Image().pa_type})})
features = Features({"sequence_of_images": Sequence(Image())})
assert features.arrow_schema == pa.schema({"sequence_of_images": pa.list_(Image().pa_type)})
@require_pil
@pytest.mark.parametrize(
"build_example",
[
lambda image_path: image_path,
lambda image_path: open(image_path, "rb").read(),
lambda image_path: {"path": image_path},
lambda image_path: {"path": image_path, "bytes": None},
lambda image_path: {"path": image_path, "bytes": open(image_path, "rb").read()},
lambda image_path: {"path": None, "bytes": open(image_path, "rb").read()},
lambda image_path: {"bytes": open(image_path, "rb").read()},
],
)
def test_image_feature_encode_example(shared_datadir, build_example):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
image = Image()
encoded_example = image.encode_example(build_example(image_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = image.decode_example(encoded_example)
assert isinstance(decoded_example, PIL.Image.Image)
@require_pil
def test_image_decode_example(shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
image = Image()
decoded_example = image.decode_example({"path": image_path, "bytes": None})
assert isinstance(decoded_example, PIL.Image.Image)
assert os.path.samefile(decoded_example.filename, image_path)
assert decoded_example.size == (640, 480)
assert decoded_example.mode == "RGB"
with pytest.raises(RuntimeError):
Image(decode=False).decode_example(image_path)
@require_pil
def test_dataset_with_image_feature(shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
assert os.path.samefile(item["image"].filename, image_path)
assert item["image"].format == "JPEG"
assert item["image"].size == (640, 480)
assert item["image"].mode == "RGB"
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"])
assert os.path.samefile(batch["image"][0].filename, image_path)
assert batch["image"][0].format == "JPEG"
assert batch["image"][0].size == (640, 480)
assert batch["image"][0].mode == "RGB"
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column)
assert os.path.samefile(column[0].filename, image_path)
assert column[0].format == "JPEG"
assert column[0].size == (640, 480)
assert column[0].mode == "RGB"
@require_pil
@pytest.mark.parametrize("infer_feature", [False, True])
def test_dataset_with_image_feature_from_pil_image(infer_feature, shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [PIL.Image.open(image_path)]}
features = Features({"image": Image()}) if not infer_feature else None
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
assert os.path.samefile(item["image"].filename, image_path)
assert item["image"].format == "JPEG"
assert item["image"].size == (640, 480)
assert item["image"].mode == "RGB"
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"])
assert os.path.samefile(batch["image"][0].filename, image_path)
assert batch["image"][0].format == "JPEG"
assert batch["image"][0].size == (640, 480)
assert batch["image"][0].mode == "RGB"
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column)
assert os.path.samefile(column[0].filename, image_path)
assert column[0].format == "JPEG"
assert column[0].size == (640, 480)
assert column[0].mode == "RGB"
@require_pil
def test_dataset_with_image_feature_from_np_array():
import PIL.Image
image_array = np.arange(640 * 480, dtype=np.int32).reshape(480, 640)
data = {"image": [image_array]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
np.testing.assert_array_equal(np.array(item["image"]), image_array)
assert item["image"].filename == ""
assert item["image"].format in ["PNG", "TIFF"]
assert item["image"].size == (640, 480)
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"])
np.testing.assert_array_equal(np.array(batch["image"][0]), image_array)
assert batch["image"][0].filename == ""
assert batch["image"][0].format in ["PNG", "TIFF"]
assert batch["image"][0].size == (640, 480)
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column)
np.testing.assert_array_equal(np.array(column[0]), image_array)
assert column[0].filename == ""
assert column[0].format in ["PNG", "TIFF"]
assert column[0].size == (640, 480)
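# Illustrative sketch (requires Pillow, like the tests guarded by require_pil):
# encode_np_array serializes a numpy array to a {"bytes", "path"} dict, which is how the
# numpy-array examples above end up stored.
def _example_encode_np_array():
    encoded = encode_np_array(np.zeros((4, 4), dtype=np.uint8))
    assert encoded.keys() == {"bytes", "path"}
    assert encoded["path"] is None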
@require_pil
def test_dataset_with_image_feature_tar_jpg(tar_jpg_path):
import PIL.Image
data = {"image": []}
for file_path, file_obj in iter_archive(tar_jpg_path):
data["image"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
assert item["image"].filename == ""
assert item["image"].format == "JPEG"
assert item["image"].size == (640, 480)
assert item["image"].mode == "RGB"
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"])
assert batch["image"][0].filename == ""
assert batch["image"][0].format == "JPEG"
assert batch["image"][0].size == (640, 480)
assert batch["image"][0].mode == "RGB"
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column)
assert column[0].filename == ""
assert column[0].format == "JPEG"
assert column[0].size == (640, 480)
assert column[0].mode == "RGB"
@require_pil
def test_dataset_with_image_feature_with_none():
data = {"image": [None]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert item["image"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"image"}
assert isinstance(batch["image"], list) and all(item is None for item in batch["image"])
column = dset["image"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
# nested tests
data = {"images": [[None]]}
features = Features({"images": Sequence(Image())})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"images"}
assert all(i is None for i in item["images"])
data = {"nested": [{"image": None}]}
features = Features({"nested": {"image": Image()}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"image"}
assert item["nested"]["image"] is None
@require_pil
@pytest.mark.parametrize(
"build_data",
[
lambda image_path: {"image": [image_path]},
lambda image_path: {"image": [open(image_path, "rb").read()]},
lambda image_path: {"image": [{"path": image_path}]},
lambda image_path: {"image": [{"path": image_path, "bytes": None}]},
lambda image_path: {"image": [{"path": image_path, "bytes": open(image_path, "rb").read()}]},
lambda image_path: {"image": [{"path": None, "bytes": open(image_path, "rb").read()}]},
lambda image_path: {"image": [{"bytes": open(image_path, "rb").read()}]},
],
)
def test_dataset_cast_to_image_features(shared_datadir, build_data):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = build_data(image_path)
dset = Dataset.from_dict(data)
item = dset.cast(Features({"image": Image()}))[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
item = dset.cast_column("image", Image())[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], PIL.Image.Image)
@require_pil
def test_dataset_concatenate_image_features(shared_datadir):
    # we use different data structures for dset1 and dset2 to make sure they are compatible with each other
image_path = str(shared_datadir / "test_image_rgb.jpg")
data1 = {"image": [image_path]}
dset1 = Dataset.from_dict(data1, features=Features({"image": Image()}))
data2 = {"image": [{"bytes": open(image_path, "rb").read()}]}
dset2 = Dataset.from_dict(data2, features=Features({"image": Image()}))
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert concatenated_dataset[0]["image"] == dset1[0]["image"]
assert concatenated_dataset[1]["image"] == dset2[0]["image"]
@require_pil
def test_dataset_concatenate_nested_image_features(shared_datadir):
    # we use different data structures for dset1 and dset2 to make sure they are compatible with each other
image_path = str(shared_datadir / "test_image_rgb.jpg")
features = Features({"list_of_structs_of_images": [{"image": Image()}]})
data1 = {"list_of_structs_of_images": [[{"image": image_path}]]}
dset1 = Dataset.from_dict(data1, features=features)
data2 = {"list_of_structs_of_images": [[{"image": {"bytes": open(image_path, "rb").read()}}]]}
dset2 = Dataset.from_dict(data2, features=features)
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert (
concatenated_dataset[0]["list_of_structs_of_images"][0]["image"]
== dset1[0]["list_of_structs_of_images"][0]["image"]
)
assert (
concatenated_dataset[1]["list_of_structs_of_images"][0]["image"]
== dset2[0]["list_of_structs_of_images"][0]["image"]
)
@require_pil
def test_dataset_with_image_feature_map(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path], "caption": ["cats sleeping"]}
features = Features({"image": Image(), "caption": Value("string")})
dset = Dataset.from_dict(data, features=features)
for item in dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption"}
assert item == {"image": {"path": image_path, "bytes": None}, "caption": "cats sleeping"}
# no decoding
def process_caption(example):
example["caption"] = "Two " + example["caption"]
return example
processed_dset = dset.map(process_caption)
for item in processed_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption"}
assert item == {"image": {"path": image_path, "bytes": None}, "caption": "Two cats sleeping"}
# decoding example
def process_image_by_example(example):
example["mode"] = example["image"].mode
return example
decoded_dset = dset.map(process_image_by_example)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption", "mode"}
assert os.path.samefile(item["image"]["path"], image_path)
assert item["caption"] == "cats sleeping"
assert item["mode"] == "RGB"
# decoding batch
def process_image_by_batch(batch):
batch["mode"] = [image.mode for image in batch["image"]]
return batch
decoded_dset = dset.map(process_image_by_batch, batched=True)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption", "mode"}
assert os.path.samefile(item["image"]["path"], image_path)
assert item["caption"] == "cats sleeping"
assert item["mode"] == "RGB"
@require_pil
def test_formatted_dataset_with_image_feature_map(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
pil_image = Image().decode_example({"path": image_path, "bytes": None})
data = {"image": [image_path], "caption": ["cats sleeping"]}
features = Features({"image": Image(), "caption": Value("string")})
dset = Dataset.from_dict(data, features=features)
for item in dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption"}
assert item == {"image": {"path": image_path, "bytes": None}, "caption": "cats sleeping"}
def process_image_by_example(example):
example["num_channels"] = example["image"].shape[-1]
return example
decoded_dset = dset.with_format("numpy").map(process_image_by_example)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption", "num_channels"}
assert item["image"] == encode_np_array(np.array(pil_image))
assert item["caption"] == "cats sleeping"
assert item["num_channels"] == 3
def process_image_by_batch(batch):
batch["num_channels"] = [image.shape[-1] for image in batch["image"]]
return batch
decoded_dset = dset.with_format("numpy").map(process_image_by_batch, batched=True)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image", "caption", "num_channels"}
assert item["image"] == encode_np_array(np.array(pil_image))
assert item["caption"] == "cats sleeping"
assert item["num_channels"] == 3
@require_pil
def test_dataset_with_image_feature_map_change_image(shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
pil_image = Image().decode_example({"path": image_path, "bytes": None})
data = {"image": [image_path]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
for item in dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {
"image": {
"bytes": None,
"path": image_path,
}
}
# return pil image
def process_image_resize_by_example(example):
example["image"] = example["image"].resize((100, 100))
return example
decoded_dset = dset.map(process_image_resize_by_example)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {"image": {"bytes": image_to_bytes(pil_image.resize((100, 100))), "path": None}}
def process_image_resize_by_batch(batch):
batch["image"] = [image.resize((100, 100)) for image in batch["image"]]
return batch
decoded_dset = dset.map(process_image_resize_by_batch, batched=True)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {"image": {"bytes": image_to_bytes(pil_image.resize((100, 100))), "path": None}}
# return np.ndarray (e.g. when using albumentations)
def process_image_resize_by_example_return_np_array(example):
example["image"] = np.array(example["image"].resize((100, 100)))
return example
decoded_dset = dset.map(process_image_resize_by_example_return_np_array)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {
"image": {
"bytes": image_to_bytes(PIL.Image.fromarray(np.array(pil_image.resize((100, 100))))),
"path": None,
}
}
def process_image_resize_by_batch_return_np_array(batch):
batch["image"] = [np.array(image.resize((100, 100))) for image in batch["image"]]
return batch
decoded_dset = dset.map(process_image_resize_by_batch_return_np_array, batched=True)
for item in decoded_dset.cast_column("image", Image(decode=False)):
assert item.keys() == {"image"}
assert item == {
"image": {
"bytes": image_to_bytes(PIL.Image.fromarray(np.array(pil_image.resize((100, 100))))),
"path": None,
}
}
@require_pil
def test_formatted_dataset_with_image_feature(shared_datadir):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path, image_path]}
features = Features({"image": Image()})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"image"}
assert isinstance(item["image"], np.ndarray)
assert item["image"].shape == (480, 640, 3)
batch = dset[:1]
assert batch.keys() == {"image"}
assert len(batch) == 1
assert isinstance(batch["image"], np.ndarray)
assert batch["image"].shape == (1, 480, 640, 3)
column = dset["image"]
assert len(column) == 2
assert isinstance(column, np.ndarray)
assert column.shape == (2, 480, 640, 3)
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["image"]
assert isinstance(item["image"][0], PIL.Image.Image)
assert os.path.samefile(item["image"][0].filename, image_path)
assert item["image"][0].format == "JPEG"
assert item["image"][0].size == (640, 480)
assert item["image"][0].mode == "RGB"
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["image"]
assert isinstance(batch["image"], pd.Series) and all(
isinstance(item, PIL.Image.Image) for item in batch["image"]
)
assert os.path.samefile(batch["image"][0].filename, image_path)
assert batch["image"][0].format == "JPEG"
assert batch["image"][0].size == (640, 480)
assert batch["image"][0].mode == "RGB"
column = dset["image"]
assert len(column) == 2
assert isinstance(column, pd.Series) and all(isinstance(item, PIL.Image.Image) for item in column)
assert os.path.samefile(column[0].filename, image_path)
assert column[0].format == "JPEG"
assert column[0].size == (640, 480)
assert column[0].mode == "RGB"
# Currently, the JSONL reader doesn't support complex feature types so we create a temporary dataset script
# to test streaming (without uploading the test dataset to the hub).
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset__"
DATASET_LOADING_SCRIPT_CODE = """
import os
import datasets
from datasets import DatasetInfo, Features, Image, Split, SplitGenerator, Value
class __DummyDataset__(datasets.GeneratorBasedBuilder):
def _info(self) -> DatasetInfo:
return DatasetInfo(features=Features({"image": Image(), "caption": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "train.txt")}),
]
def _generate_examples(self, filepath, **kwargs):
with open(filepath, encoding="utf-8") as f:
for i, line in enumerate(f):
image_path, caption = line.split(",")
yield i, {"image": image_path.strip(), "caption": caption.strip()}
"""
@pytest.fixture
def data_dir(shared_datadir, tmp_path):
data_dir = tmp_path / "dummy_dataset_data"
data_dir.mkdir()
image_path = str(shared_datadir / "test_image_rgb.jpg")
with open(data_dir / "train.txt", "w") as f:
f.write(f"{image_path},Two cats sleeping\n")
return str(data_dir)
@pytest.fixture
def dataset_loading_script_dir(tmp_path):
script_name = DATASET_LOADING_SCRIPT_NAME
script_dir = tmp_path / script_name
script_dir.mkdir()
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(DATASET_LOADING_SCRIPT_CODE)
return str(script_dir)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_with_image_feature(shared_datadir, data_dir, dataset_loading_script_dir, streaming):
import PIL.Image
image_path = str(shared_datadir / "test_image_rgb.jpg")
dset = load_dataset(dataset_loading_script_dir, split="train", data_dir=data_dir, streaming=streaming)
item = dset[0] if not streaming else next(iter(dset))
assert item.keys() == {"image", "caption"}
assert isinstance(item["image"], PIL.Image.Image)
assert os.path.samefile(item["image"].filename, image_path)
assert item["image"].format == "JPEG"
assert item["image"].size == (640, 480)
assert item["image"].mode == "RGB"
@require_pil
def test_dataset_with_image_feature_undecoded(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image(decode=False)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"image"}
assert item["image"] == {"path": image_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"image"}
assert len(batch["image"]) == 1
assert batch["image"][0] == {"path": image_path, "bytes": None}
column = dset["image"]
assert len(column) == 1
assert column[0] == {"path": image_path, "bytes": None}
@require_pil
def test_formatted_dataset_with_image_feature_undecoded(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image(decode=False)})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"image"}
assert item["image"] == {"path": image_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"image"}
assert len(batch["image"]) == 1
assert batch["image"][0] == {"path": image_path, "bytes": None}
column = dset["image"]
assert len(column) == 1
assert column[0] == {"path": image_path, "bytes": None}
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["image"]
assert item["image"][0] == {"path": image_path, "bytes": None}
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["image"]
assert batch["image"][0] == {"path": image_path, "bytes": None}
column = dset["image"]
assert len(column) == 1
assert column[0] == {"path": image_path, "bytes": None}
@require_pil
def test_dataset_with_image_feature_map_undecoded(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
data = {"image": [image_path]}
features = Features({"image": Image(decode=False)})
dset = Dataset.from_dict(data, features=features)
def assert_image_example_undecoded(example):
assert example["image"] == {"path": image_path, "bytes": None}
dset.map(assert_image_example_undecoded)
def assert_image_batch_undecoded(batch):
for image in batch["image"]:
assert image == {"path": image_path, "bytes": None}
dset.map(assert_image_batch_undecoded, batched=True)
@require_pil
def test_image_embed_storage(shared_datadir):
image_path = str(shared_datadir / "test_image_rgb.jpg")
example = {"bytes": None, "path": image_path}
storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()}))
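# embed_storage is expected to read the image bytes into the Arrow storage and keep only the file's basename as the path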
embedded_storage = Image().embed_storage(storage)
embedded_example = embedded_storage.to_pylist()[0]
assert embedded_example == {"bytes": open(image_path, "rb").read(), "path": "test_image_rgb.jpg"}
@require_pil
@pytest.mark.parametrize(
"array, dtype_cast, expected_image_format",
[
(np.arange(16).reshape(4, 4).astype(np.uint8), "exact_match", "PNG"),
(np.arange(16).reshape(4, 4).astype(np.uint16), "exact_match", "TIFF"),
(np.arange(16).reshape(4, 4).astype(np.int64), "downcast->|i4", "TIFF"),
(np.arange(16).reshape(2, 2, 4).astype(np.uint8), "exact_match", "PNG"),
(np.arange(16).reshape(2, 2, 4), "downcast->|u1", "PNG"),
(np.arange(16).reshape(2, 2, 4).astype(np.float64), "error", None),
],
)
def test_encode_np_array(array, dtype_cast, expected_image_format):
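# Per the cases above: exact dtype matches are encoded directly (uint8 -> PNG, uint16 -> TIFF),
# wider integer dtypes are downcast with a warning, and floating-point arrays raise a TypeError.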
if dtype_cast.startswith("downcast"):
_, dest_dtype = dtype_cast.split("->")
dest_dtype = np.dtype(dest_dtype)
with pytest.warns(UserWarning, match=f"Downcasting array dtype.+{dest_dtype}.+"):
encoded_image = Image().encode_example(array)
elif dtype_cast == "error":
with pytest.raises(TypeError):
Image().encode_example(array)
return
else: # exact_match (no warnings are raised)
with warnings.catch_warnings():
warnings.simplefilter("error")
encoded_image = Image().encode_example(array)
assert isinstance(encoded_image, dict)
assert encoded_image.keys() == {"path", "bytes"}
assert encoded_image["path"] is None
assert encoded_image["bytes"] is not None and isinstance(encoded_image["bytes"], bytes)
decoded_image = Image().decode_example(encoded_image)
assert decoded_image.format == expected_image_format
np.testing.assert_array_equal(np.array(decoded_image), array)
| datasets-main | tests/features/test_image.py |
datasets-main | tests/features/__init__.py |
|
import os
import random
import tempfile
import unittest
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from absl.testing import parameterized
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features import Array2D, Array3D, Array4D, Array5D, Value
from datasets.features.features import Array3DExtensionType, PandasArrayExtensionDtype, _ArrayXD
from datasets.formatting.formatting import NumpyArrowExtractor, SimpleArrowExtractor
SHAPE_TEST_1 = (30, 487)
SHAPE_TEST_2 = (36, 1024)
SHAPE_TEST_3 = (None, 100)
SPEED_TEST_SHAPE = (100, 100)
SPEED_TEST_N_EXAMPLES = 100
DEFAULT_FEATURES = datasets.Features(
{
"text": Array2D(SHAPE_TEST_1, dtype="float32"),
"image": Array2D(SHAPE_TEST_2, dtype="float32"),
"dynamic": Array2D(SHAPE_TEST_3, dtype="float32"),
}
)
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
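# Build (key, example) pairs with random arrays matching each feature's shape;
# the "dynamic" column gets a random first dimension between 1 and 3.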
dummy_data = []
seq_shapes = seq_shapes or {}
for i in range(num_examples):
example = {}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(v, _ArrayXD):
if k == "dynamic":
first_dim = random.randint(1, 3)
data = np.random.rand(first_dim, *v.shape[1:]).astype(v.dtype)
else:
data = np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(v, datasets.Value):
data = "foo"
elif isinstance(v, datasets.Sequence):
while isinstance(v, datasets.Sequence):
v = v.feature
shape = seq_shapes[k]
data = np.random.rand(*shape).astype(v.dtype)
example[k] = data
dummy_data.append((i, example))
return dummy_data
class ExtensionTypeCompatibilityTest(unittest.TestCase):
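# Checks that ArrayXD extension types written with ArrowWriter are read back correctly via Dataset.from_file.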
def test_array2d_nonspecific_shape(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(
features=my_features,
num_examples=1,
):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
row = dataset[0]
first_shape = row["image"].shape
second_shape = row["text"].shape
self.assertTrue(first_shape is not None and second_shape is not None, "need at least 2 different shapes")
self.assertEqual(len(first_shape), len(second_shape), "both shapes are supposed to be equal length")
self.assertNotEqual(first_shape, second_shape, "shapes must not be the same")
del dataset
def test_multiple_extensions_same_row(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
row = dataset[0]
first_len = len(row["image"].shape)
second_len = len(row["text"].shape)
third_len = len(row["dynamic"].shape)
self.assertEqual(first_len, 2, "use a sequence type if dim is < 2")
self.assertEqual(second_len, 2, "use a sequence type if dim is < 2")
self.assertEqual(third_len, 2, "use a sequence type if dim is < 2")
del dataset
def test_compatibility_with_string_values(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
my_features["image_id"] = datasets.Value("string")
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self.assertIsInstance(dataset[0]["image_id"], str, "image id must be of type string")
del dataset
def test_extension_indexing(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
my_features["explicit_ext"] = Array2D((3, 3), dtype="float32")
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
data = dataset[0]["explicit_ext"]
self.assertIsInstance(data, np.ndarray, "indexed extension must return numpy.ndarray")
del dataset
def get_array_feature_types():
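# Build named test cases pairing Array2D..Array5D with two shapes truncated to the matching number of dimensions.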
shape_1 = [3] * 5
shape_2 = [3, 4, 5, 6, 7]
return [
{
"testcase_name": f"{d}d",
"array_feature": array_feature,
"shape_1": tuple(shape_1[:d]),
"shape_2": tuple(shape_2[:d]),
}
for d, array_feature in zip(range(2, 6), [Array2D, Array3D, Array4D, Array5D])
]
@parameterized.named_parameters(get_array_feature_types())
class ArrayXDTest(unittest.TestCase):
def get_features(self, array_feature, shape_1, shape_2):
return datasets.Features(
{
"image": array_feature(shape_1, dtype="float32"),
"source": Value("string"),
"matrix": array_feature(shape_2, dtype="float32"),
}
)
def get_dict_example_0(self, shape_1, shape_2):
return {
"image": np.random.rand(*shape_1).astype("float32"),
"source": "foo",
"matrix": np.random.rand(*shape_2).astype("float32"),
}
def get_dict_example_1(self, shape_1, shape_2):
return {
"image": np.random.rand(*shape_1).astype("float32"),
"matrix": np.random.rand(*shape_2).astype("float32"),
"source": "bar",
}
def get_dict_examples(self, shape_1, shape_2):
return {
"image": np.random.rand(2, *shape_1).astype("float32").tolist(),
"source": ["foo", "bar"],
"matrix": np.random.rand(2, *shape_2).astype("float32").tolist(),
}
def _check_getitem_output_type(self, dataset, shape_1, shape_2, first_matrix):
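# Check column, row and slice access in the default (python), numpy and pandas formats.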
matrix_column = dataset["matrix"]
self.assertIsInstance(matrix_column, list)
self.assertIsInstance(matrix_column[0], list)
self.assertIsInstance(matrix_column[0][0], list)
self.assertTupleEqual(np.array(matrix_column).shape, (2, *shape_2))
matrix_field_of_first_example = dataset[0]["matrix"]
self.assertIsInstance(matrix_field_of_first_example, list)
self.assertIsInstance(matrix_field_of_first_example[0], list)
self.assertEqual(np.array(matrix_field_of_first_example).shape, shape_2)
np.testing.assert_array_equal(np.array(matrix_field_of_first_example), np.array(first_matrix))
matrix_field_of_first_two_examples = dataset[:2]["matrix"]
self.assertIsInstance(matrix_field_of_first_two_examples, list)
self.assertIsInstance(matrix_field_of_first_two_examples[0], list)
self.assertIsInstance(matrix_field_of_first_two_examples[0][0], list)
self.assertTupleEqual(np.array(matrix_field_of_first_two_examples).shape, (2, *shape_2))
with dataset.formatted_as("numpy"):
self.assertTupleEqual(dataset["matrix"].shape, (2, *shape_2))
self.assertEqual(dataset[0]["matrix"].shape, shape_2)
self.assertTupleEqual(dataset[:2]["matrix"].shape, (2, *shape_2))
with dataset.formatted_as("pandas"):
self.assertIsInstance(dataset["matrix"], pd.Series)
self.assertIsInstance(dataset[0]["matrix"], pd.Series)
self.assertIsInstance(dataset[:2]["matrix"], pd.Series)
self.assertTupleEqual(dataset["matrix"].to_numpy().shape, (2, *shape_2))
self.assertTupleEqual(dataset[0]["matrix"].to_numpy().shape, (1, *shape_2))
self.assertTupleEqual(dataset[:2]["matrix"].to_numpy().shape, (2, *shape_2))
def test_write(self, array_feature, shape_1, shape_2):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = self.get_features(array_feature, shape_1, shape_2)
my_examples = [
(0, self.get_dict_example_0(shape_1, shape_2)),
(1, self.get_dict_example_1(shape_1, shape_2)),
]
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in my_examples:
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self._check_getitem_output_type(dataset, shape_1, shape_2, my_examples[0][1]["matrix"])
del dataset
def test_write_batch(self, array_feature, shape_1, shape_2):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = self.get_features(array_feature, shape_1, shape_2)
dict_examples = self.get_dict_examples(shape_1, shape_2)
dict_examples = my_features.encode_batch(dict_examples)
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
writer.write_batch(dict_examples)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self._check_getitem_output_type(dataset, shape_1, shape_2, dict_examples["matrix"][0])
del dataset
def test_from_dict(self, array_feature, shape_1, shape_2):
dict_examples = self.get_dict_examples(shape_1, shape_2)
dataset = datasets.Dataset.from_dict(
dict_examples, features=self.get_features(array_feature, shape_1, shape_2)
)
self._check_getitem_output_type(dataset, shape_1, shape_2, dict_examples["matrix"][0])
del dataset
class ArrayXDDynamicTest(unittest.TestCase):
def get_one_col_dataset(self, first_dim_list, fixed_shape):
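# Build a dataset with a single Array3D column whose first dimension varies per example.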
features = datasets.Features({"image": Array3D(shape=(None, *fixed_shape), dtype="float32")})
dict_values = {"image": [np.random.rand(fdim, *fixed_shape).astype("float32") for fdim in first_dim_list]}
dataset = datasets.Dataset.from_dict(dict_values, features=features)
return dataset
def get_two_col_dataset(self, first_dim_list, fixed_shape):
features = datasets.Features(
{"image": Array3D(shape=(None, *fixed_shape), dtype="float32"), "text": Value("string")}
)
dict_values = {
"image": [np.random.rand(fdim, *fixed_shape).astype("float32") for fdim in first_dim_list],
"text": ["text" for _ in first_dim_list],
}
dataset = datasets.Dataset.from_dict(dict_values, features=features)
return dataset
def test_to_pylist(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
pylist = arr_xd.to_pylist()
for first_dim, single_arr in zip(first_dim_list, pylist):
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim, *fixed_shape))
def test_to_numpy(self):
fixed_shape = (2, 2)
# ragged
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
# replace with arr_xd = arr_xd.combine_chunks() once PyArrow 12.0.0 is the minimum required version
arr_xd = arr_xd.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in arr_xd.chunks]))
numpy_arr = arr_xd.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
# non-ragged
first_dim_list = [4, 4, 4]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
# replace with arr_xd = arr_xd.combine_chunks() once PyArrow 12.0.0 is the minimum required version
arr_xd = arr_xd.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in arr_xd.chunks]))
numpy_arr = arr_xd.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertNotEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
def test_iter_dataset(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
for first_dim, ds_row in zip(first_dim_list, dataset):
single_arr = ds_row["image"]
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim, *fixed_shape))
def test_to_pandas(self):
fixed_shape = (2, 2)
# ragged
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
df = dataset.to_pandas()
self.assertEqual(type(df.image.dtype), PandasArrayExtensionDtype)
numpy_arr = df.image.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
# non-ragged
first_dim_list = [4, 4, 4]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
df = dataset.to_pandas()
self.assertEqual(type(df.image.dtype), PandasArrayExtensionDtype)
numpy_arr = df.image.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertNotEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
def test_map_dataset(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
dataset = dataset.map(lambda a: {"image": np.concatenate([a] * 2)}, input_columns="image")
# also check that the function above doubled the first dimension
for first_dim, ds_row in zip(first_dim_list, dataset):
single_arr = ds_row["image"]
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim * 2, *fixed_shape))
@pytest.mark.parametrize("dtype, dummy_value", [("int32", 1), ("bool", True), ("float64", 1)])
def test_table_to_pandas(dtype, dummy_value):
features = datasets.Features({"foo": datasets.Array2D(dtype=dtype, shape=(2, 2))})
dataset = datasets.Dataset.from_dict({"foo": [[[dummy_value] * 2] * 2]}, features=features)
df = dataset._data.to_pandas()
assert type(df.foo.dtype) == PandasArrayExtensionDtype
arr = df.foo.to_numpy()
np.testing.assert_equal(arr, np.array([[[dummy_value] * 2] * 2], dtype=np.dtype(dtype)))
@pytest.mark.parametrize("dtype, dummy_value", [("int32", 1), ("bool", True), ("float64", 1)])
def test_array_xd_numpy_arrow_extractor(dtype, dummy_value):
features = datasets.Features({"foo": datasets.Array2D(dtype=dtype, shape=(2, 2))})
dataset = datasets.Dataset.from_dict({"foo": [[[dummy_value] * 2] * 2]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([[[dummy_value] * 2] * 2], dtype=np.dtype(dtype)))
def test_array_xd_with_none():
# Fixed shape
features = datasets.Features({"foo": datasets.Array2D(dtype="int32", shape=(2, 2))})
dummy_array = np.array([[1, 2], [3, 4]], dtype="int32")
dataset = datasets.Dataset.from_dict({"foo": [dummy_array, None, dummy_array, None]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray) and arr.dtype == np.float64 and arr.shape == (4, 2, 2)
assert np.allclose(arr[0], dummy_array) and np.allclose(arr[2], dummy_array)
assert np.all(np.isnan(arr[1])) and np.all(np.isnan(arr[3])) # broadcasted np.nan - use np.all
# Dynamic shape
features = datasets.Features({"foo": datasets.Array2D(dtype="int32", shape=(None, 2))})
dummy_array = np.array([[1, 2], [3, 4]], dtype="int32")
dataset = datasets.Dataset.from_dict({"foo": [dummy_array, None, dummy_array, None]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray) and arr.dtype == object and arr.shape == (4,)
np.testing.assert_equal(arr[0], dummy_array)
np.testing.assert_equal(arr[2], dummy_array)
assert np.isnan(arr[1]) and np.isnan(arr[3]) # a single np.nan value - np.all not needed
@pytest.mark.parametrize("seq_type", ["no_sequence", "sequence", "sequence_of_sequence"])
@pytest.mark.parametrize(
"dtype",
[
"bool",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
],
)
@pytest.mark.parametrize("shape, feature_class", [((2, 3), datasets.Array2D), ((2, 3, 4), datasets.Array3D)])
def test_array_xd_with_np(seq_type, dtype, shape, feature_class):
feature = feature_class(dtype=dtype, shape=shape)
data = np.zeros(shape, dtype=dtype)
expected = data.tolist()
if seq_type == "sequence":
feature = datasets.Sequence(feature)
data = [data]
expected = [expected]
elif seq_type == "sequence_of_sequence":
feature = datasets.Sequence(datasets.Sequence(feature))
data = [[data]]
expected = [[expected]]
ds = datasets.Dataset.from_dict({"col": [data]}, features=datasets.Features({"col": feature}))
assert ds[0]["col"] == expected
@pytest.mark.parametrize("with_none", [False, True])
def test_dataset_map(with_none):
ds = datasets.Dataset.from_dict({"path": ["path1", "path2"]})
def process_data(batch):
batch = {
"image": [
np.array(
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[10, 20, 30], [40, 50, 60], [70, 80, 90]],
[[100, 200, 300], [400, 500, 600], [700, 800, 900]],
]
)
for _ in batch["path"]
]
}
if with_none:
batch["image"][0] = None
return batch
features = datasets.Features({"image": Array3D(dtype="int32", shape=(3, 3, 3))})
processed_ds = ds.map(process_data, batched=True, remove_columns=ds.column_names, features=features)
assert processed_ds.shape == (2, 1)
with processed_ds.with_format("numpy") as pds:
for i, example in enumerate(pds):
assert "image" in example
assert isinstance(example["image"], np.ndarray)
assert example["image"].shape == (3, 3, 3)
if with_none and i == 0:
assert np.all(np.isnan(example["image"]))
| datasets-main | tests/features/test_array_xd.py |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
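# Collect the rows partition by partition; row ids follow the "{partition_id}_{row_index}" format
# used by the Spark examples iterable.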
expected_row_ids_and_row_dicts = []
for part_id in partition_order:
partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
for row_idx, row in enumerate(partition):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(100).repartition(1)
spark_builder = Spark(df)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16)
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(10).repartition(2)
partition_order = [1, 0]
generate_fn = _generate_iterable_examples(df, partition_order) # Reverse the partitions.
expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
for i, (row_id, row_dict) in enumerate(generate_fn()):
expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(10).repartition(1)
it = SparkExamplesIterable(df)
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(it):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(30).repartition(3)
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator") as generator_mock:
generator_mock.shuffle.side_effect = lambda x: x.reverse()
expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(shuffled_it):
expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(20).repartition(4)
# Partitions 0 and 2
shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
assert shard_it_1.n_shards == 2
expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
for i, (row_id, row_dict) in enumerate(shard_it_1):
expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
assert shard_it_2.n_shards == 2
expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
for i, (row_id, row_dict) in enumerate(shard_it_2):
expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(100).repartition(1)
spark_builder = Spark(df)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1)
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| datasets-main | tests/packaged_modules/test_spark.py |
datasets-main | tests/packaged_modules/__init__.py |
|
import shutil
import textwrap
import librosa
import numpy as np
import pytest
import soundfile as sf
from datasets import Audio, ClassLabel, Features, Value
from datasets.data_files import DataFilesDict, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder
from ..utils import require_sndfile
@pytest.fixture
def cache_dir(tmp_path):
return str(tmp_path / "audiofolder_cache_dir")
@pytest.fixture
def data_files_with_labels_no_metadata(tmp_path, audio_file):
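# Two class subdirectories ("fr" and "uk"), each with a single audio file and no metadata,
# so labels have to be inferred from the directory names.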
data_dir = tmp_path / "data_files_with_labels_no_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "fr"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "uk"
subdir_class_1.mkdir(parents=True, exist_ok=True)
audio_filename = subdir_class_0 / "audio_fr.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = subdir_class_1 / "audio_uk.wav"
shutil.copyfile(audio_file, audio_filename2)
data_files_with_labels_no_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_labels_no_metadata
@pytest.fixture
def audio_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, audio_file):
data_dir = tmp_path / "audio_files_with_labels_and_label_key_in_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "fr"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "uk"
subdir_class_1.mkdir(parents=True, exist_ok=True)
audio_filename = subdir_class_0 / "audio_fr.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = subdir_class_1 / "audio_uk.wav"
shutil.copyfile(audio_file, audio_filename2)
audio_metadata_filename = tmp_path / data_dir / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "fr/audio_fr.wav", "text": "Audio in French", "label": "Fr"}
{"file_name": "uk/audio_uk.wav", "text": "Audio in Ukrainian", "label": "Uk"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
return str(audio_filename), str(audio_filename2), str(audio_metadata_filename)
@pytest.fixture
def audio_file_with_metadata(tmp_path, audio_file):
audio_filename = tmp_path / "audio_file.wav"
shutil.copyfile(audio_file, audio_filename)
audio_metadata_filename = tmp_path / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
return str(audio_filename), str(audio_metadata_filename)
@pytest.fixture
def audio_files_with_metadata_that_misses_one_audio(tmp_path, audio_file):
audio_filename = tmp_path / "audio_file.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = tmp_path / "audio_file2.wav"
shutil.copyfile(audio_file, audio_filename2)
audio_metadata_filename = tmp_path / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
return str(audio_filename), str(audio_filename2), str(audio_metadata_filename)
@pytest.fixture
def data_files_with_one_split_and_metadata(tmp_path, audio_file):
data_dir = tmp_path / "audiofolder_data_dir_with_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir = data_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
audio_filename = data_dir / "audio_file.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = data_dir / "audio_file2.wav"
shutil.copyfile(audio_file, audio_filename2)
audio_filename3 = subdir / "audio_file3.wav" # in subdir
shutil.copyfile(audio_file, audio_filename3)
audio_metadata_filename = data_dir / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "First audio transcription"}
{"file_name": "audio_file2.wav", "text": "Second audio transcription"}
{"file_name": "subdir/audio_file3.wav", "text": "Third audio transcription (in subdir)"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
data_files_with_one_split_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_one_split_and_metadata) == 1
assert len(data_files_with_one_split_and_metadata["train"]) == 4
return data_files_with_one_split_and_metadata
@pytest.fixture(params=["jsonl", "csv"])
def data_files_with_two_splits_and_metadata(request, tmp_path, audio_file):
data_dir = tmp_path / "audiofolder_data_dir_with_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
train_dir = data_dir / "train"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir = data_dir / "test"
test_dir.mkdir(parents=True, exist_ok=True)
audio_filename = train_dir / "audio_file.wav" # train audio
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = train_dir / "audio_file2.wav" # train audio
shutil.copyfile(audio_file, audio_filename2)
audio_filename3 = test_dir / "audio_file3.wav" # test audio
shutil.copyfile(audio_file, audio_filename3)
train_audio_metadata_filename = train_dir / f"metadata.{request.param}"
audio_metadata = (
textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "First train audio transcription"}
{"file_name": "audio_file2.wav", "text": "Second train audio transcription"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,text
audio_file.wav,First train audio transcription
audio_file2.wav,Second train audio transcription
"""
)
)
with open(train_audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
test_audio_metadata_filename = test_dir / f"metadata.{request.param}"
audio_metadata = (
textwrap.dedent(
"""\
{"file_name": "audio_file3.wav", "text": "Test audio transcription"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,text
audio_file3.wav,Test audio transcription
"""
)
)
with open(test_audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_two_splits_and_metadata) == 2
assert len(data_files_with_two_splits_and_metadata["train"]) == 3
assert len(data_files_with_two_splits_and_metadata["test"]) == 2
return data_files_with_two_splits_and_metadata
@pytest.fixture
def data_files_with_zip_archives(tmp_path, audio_file):
data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
data_dir.mkdir(parents=True, exist_ok=True)
archive_dir = data_dir / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
subdir = archive_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
audio_filename = archive_dir / "audio_file.wav"
shutil.copyfile(audio_file, audio_filename)
audio_filename2 = subdir / "audio_file2.wav" # in subdir
# make sure they are two different audio files
# (we can't compare the audio filenames, since the archive is not extracted in streaming mode)
array, sampling_rate = librosa.load(str(audio_filename), sr=16000) # original sampling rate is 44100
sf.write(str(audio_filename2), array, samplerate=16000)
audio_metadata_filename = archive_dir / "metadata.jsonl"
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "First audio transcription"}
{"file_name": "subdir/audio_file2.wav", "text": "Second audio transcription (in subdir)"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
shutil.make_archive(str(archive_dir), "zip", archive_dir)
shutil.rmtree(str(archive_dir))
data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
assert len(data_files_with_zip_archives) == 1
assert len(data_files_with_zip_archives["train"]) == 1
return data_files_with_zip_archives
@require_sndfile
# check that labels are inferred correctly from dir names
def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir):
# there are no metadata.jsonl files in this test case
audiofolder = AudioFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False)
audiofolder.download_and_prepare()
assert audiofolder.info.features == Features({"audio": Audio(), "label": ClassLabel(names=["fr", "uk"])})
dataset = list(audiofolder.as_dataset()["train"])
label_feature = audiofolder.info.features["label"]
assert dataset[0]["label"] == label_feature._str2int["fr"]
assert dataset[1]["label"] == label_feature._str2int["uk"]
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_duplicated_label_key(
audio_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog
):
fr_audio_file, uk_audio_file, audio_metadata_file = audio_files_with_labels_and_duplicated_label_key_in_metadata
audiofolder = AudioFolder(
drop_metadata=drop_metadata,
drop_labels=drop_labels,
data_files=[fr_audio_file, uk_audio_file, audio_metadata_file],
cache_dir=cache_dir,
)
if drop_labels is False:
# infer labels from directories even if metadata files are found
audiofolder.download_and_prepare()
warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records)
assert warning_in_logs if drop_metadata is not True else not warning_in_logs
dataset = audiofolder.as_dataset()["train"]
assert audiofolder.info.features["label"] == ClassLabel(names=["fr", "uk"])
assert all(example["label"] in audiofolder.info.features["label"]._str2int.values() for example in dataset)
else:
audiofolder.download_and_prepare()
dataset = audiofolder.as_dataset()["train"]
if drop_metadata is not True:
# labels are from metadata
assert audiofolder.info.features["label"] == Value("string")
assert all(example["label"] in ["Fr", "Uk"] for example in dataset)
else:
# drop both labels and metadata
assert audiofolder.info.features == Features({"audio": Audio()})
assert all(example.keys() == {"audio"} for example in dataset)
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels):
audiofolder = AudioFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata
)
gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# removing the labels explicitly requires drop_labels=True
assert gen_kwargs["add_labels"] is not bool(drop_labels)
assert gen_kwargs["add_metadata"] is False # metadata files is not present in this case
generator = audiofolder._generate_examples(**gen_kwargs)
if not drop_labels:
assert all(
example.keys() == {"audio", "label"} and all(val is not None for val in example.values())
for _, example in generator
)
else:
assert all(
example.keys() == {"audio"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_metadata(audio_file_with_metadata, drop_metadata, drop_labels):
audio_file, audio_metadata_file = audio_file_with_metadata
audiofolder = AudioFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [audio_file, audio_metadata_file]}
)
gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True
assert gen_kwargs["add_metadata"] is not bool(drop_metadata)
# since the dataset has metadata, adding the labels explicitly requires drop_labels=False
assert gen_kwargs["add_labels"] is (drop_labels is False)
generator = audiofolder._generate_examples(**gen_kwargs)
expected_columns = {"audio"}
if gen_kwargs["add_metadata"]:
expected_columns.add("text")
if gen_kwargs["add_labels"]:
expected_columns.add("label")
result = [example for _, example in generator]
assert len(result) == 1
example = result[0]
assert example.keys() == expected_columns
for column in expected_columns:
assert example[column] is not None
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_in_wrong_location(audio_file, audio_file_with_metadata, drop_metadata):
_, audio_metadata_file = audio_file_with_metadata
audiofolder = AudioFolder(drop_metadata=drop_metadata, data_files={"train": [audio_file, audio_metadata_file]})
gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = audiofolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"audio"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_sndfile
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_that_misses_one_audio(
audio_files_with_metadata_that_misses_one_audio, drop_metadata
):
audio_file, audio_file2, audio_metadata_file = audio_files_with_metadata_that_misses_one_audio
if not drop_metadata:
features = Features({"audio": Audio(), "text": Value("string")})
else:
features = Features({"audio": Audio()})
audiofolder = AudioFolder(
drop_metadata=drop_metadata,
features=features,
data_files={"train": [audio_file, audio_file2, audio_metadata_file]},
)
gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = audiofolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
_ = list(generator)
else:
assert all(
example.keys() == {"audio"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata):
data_files = data_files_with_one_split_and_metadata
audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir)
audiofolder.download_and_prepare()
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_audios = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_audios
# make sure each sample has its own audio and metadata
assert len({example["audio"]["path"] for example in dataset}) == expected_num_of_audios
assert len({example["text"] for example in dataset}) == expected_num_of_audios
assert all(example["text"] is not None for example in dataset)
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):
data_files = data_files_with_two_splits_and_metadata
audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir)
audiofolder.download_and_prepare()
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_audios = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_audios
# make sure each sample has its own audio and metadata
assert len({example["audio"]["path"] for example in dataset}) == expected_num_of_audios
assert len({example["text"] for example in dataset}) == expected_num_of_audios
assert all(example["text"] is not None for example in dataset)
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives):
audiofolder = AudioFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir)
audiofolder.download_and_prepare()
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()
for split, data_files in data_files_with_zip_archives.items():
num_of_archives = len(data_files) # the metadata file is inside the archive
expected_num_of_audios = 2 * num_of_archives
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_audios
# make sure each sample has its own audio (all arrays are different) and metadata
assert (
sum(np.array_equal(dataset[0]["audio"]["array"], example["audio"]["array"]) for example in dataset[1:])
== 0
)
assert len({example["text"] for example in dataset}) == expected_num_of_audios
assert all(example["text"] is not None for example in dataset)
@require_sndfile
def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, audio_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(audio_file, data_dir / "audio_file.wav")
audio_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file
audio_metadata = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
audiofolder.download_and_prepare()
dataset = audiofolder.as_dataset(split="train")
# check that no metadata is loaded, since the metadata file doesn't have the expected name
assert "text" not in dataset.column_names
@require_sndfile
def test_data_files_with_wrong_audio_file_name_column_in_metadata_file(cache_dir, tmp_path, audio_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(audio_file, data_dir / "audio_file.wav")
audio_metadata_filename = data_dir / "metadata.jsonl"
audio_metadata = textwrap.dedent( # with bad column "bad_file_name_column" instead of "file_name"
"""\
{"bad_file_name_column": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename, "w", encoding="utf-8") as f:
f.write(audio_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
audiofolder.download_and_prepare()
assert "`file_name` must be present" in str(exc_info.value)
@require_sndfile
def test_data_files_with_metadata_in_different_formats(cache_dir, tmp_path, audio_file):
data_dir = tmp_path / "data_dir_with_metadata_in_different_format"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(audio_file, data_dir / "audio_file.wav")
audio_metadata_filename_jsonl = data_dir / "metadata.jsonl"
audio_metadata_jsonl = textwrap.dedent(
"""\
{"file_name": "audio_file.wav", "text": "Audio transcription"}
"""
)
with open(audio_metadata_filename_jsonl, "w", encoding="utf-8") as f:
f.write(audio_metadata_jsonl)
audio_metadata_filename_csv = data_dir / "metadata.csv"
audio_metadata_csv = textwrap.dedent(
"""\
file_name,text
audio_file.wav,Audio transcription
"""
)
with open(audio_metadata_filename_csv, "w", encoding="utf-8") as f:
f.write(audio_metadata_csv)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
audiofolder.download_and_prepare()
assert "metadata files with different extensions" in str(exc_info.value)
| datasets-main | tests/packaged_modules/test_audiofolder.py |
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Image
from datasets.packaged_modules.text.text import Text
from ..utils import require_pil
@pytest.fixture
def text_file(tmp_path):
filename = tmp_path / "text.txt"
data = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Second paragraph:
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
)
with open(filename, "w", encoding="utf-8") as f:
f.write(data)
return str(filename)
@pytest.fixture
def text_file_with_image(tmp_path, image_file):
filename = tmp_path / "text_with_image.txt"
with open(filename, "w", encoding="utf-8") as f:
f.write(image_file)
return str(filename)
@pytest.mark.parametrize("keep_linebreaks", [True, False])
def test_text_linebreaks(text_file, keep_linebreaks):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read().splitlines(keepends=keep_linebreaks)
text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8")
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
@require_pil
def test_text_cast_image(text_file_with_image):
with open(text_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[0]
text = Text(encoding="utf-8", features=Features({"image": Image()}))
generator = text._generate_tables([[text_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
@pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"])
def test_text_sample_by(sample_by, text_file):
with open(text_file, encoding="utf-8") as f:
expected_content = f.read()
if sample_by == "line":
expected_content = expected_content.splitlines()
elif sample_by == "paragraph":
expected_content = expected_content.split("\n\n")
elif sample_by == "document":
expected_content = [expected_content]
text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100)
generator = text._generate_tables([[text_file]])
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"]
assert generated_content == expected_content
| datasets-main | tests/packaged_modules/test_text.py |
import textwrap
import pyarrow as pa
import pytest
from datasets import Features, Value
from datasets.packaged_modules.json.json import Json
@pytest.fixture
def jsonl_file(tmp_path):
filename = tmp_path / "file.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def jsonl_file_utf16_encoded(tmp_path):
filename = tmp_path / "file_utf16_encoded.jsonl"
data = textwrap.dedent(
"""\
{"col_1": -1}
{"col_1": 1, "col_2": 2}
{"col_1": 10, "col_2": 20}
"""
)
with open(filename, "w", encoding="utf-16") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts(tmp_path):
filename = tmp_path / "file_with_list_of_dicts.json"
data = textwrap.dedent(
"""\
[
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
data = textwrap.dedent(
"""\
{
"field1": 1,
"field2": "aabb",
"field3": [
{"col_1": -1},
{"col_1": 1, "col_2": 2},
{"col_1": 10, "col_2": 20}
]
}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
("jsonl_file", {}),
("jsonl_file_utf16_encoded", {"encoding": "utf-16"}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
(
"jsonl_file",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts",
{"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})},
),
(
"json_file_with_list_of_dicts_field",
{
"field": "field3",
"features": Features(
{"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")}
),
},
),
],
)
def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]}
| datasets-main | tests/packaged_modules/test_json.py |
import importlib
import shutil
import textwrap
import pytest
from datasets import ClassLabel, DownloadManager, Features, Value
from datasets.data_files import DataFilesDict, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.packaged_modules.folder_based_builder.folder_based_builder import (
FolderBasedBuilder,
FolderBasedBuilderConfig,
)
from datasets.tasks import TextClassification
remote_files = [
"https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hallo.txt",
"https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hello.txt",
"https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour.txt",
"https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour2.txt",
]
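# Minimal FolderBasedBuilder subclass for testing: it treats .txt files as samples and exposes them
# under the "base" column.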
class DummyFolderBasedBuilder(FolderBasedBuilder):
BASE_FEATURE = dict
BASE_COLUMN_NAME = "base"
BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig
EXTENSIONS = [".txt"]
CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label")
@pytest.fixture
def cache_dir(tmp_path):
return str(tmp_path / "autofolder_cache_dir")
@pytest.fixture
def auto_text_file(text_file):
return str(text_file)
@pytest.fixture
def data_files_with_labels_no_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "data_files_with_labels_no_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "class0"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "class1"
subdir_class_1.mkdir(parents=True, exist_ok=True)
filename = subdir_class_0 / "file0.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = subdir_class_1 / "file1.txt"
shutil.copyfile(auto_text_file, filename2)
data_files_with_labels_no_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_labels_no_metadata
@pytest.fixture
def data_files_with_different_levels_no_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "data_files_with_different_levels"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "class0"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "subdir" / "class1"
subdir_class_1.mkdir(parents=True, exist_ok=True)
filename = subdir_class_0 / "file0.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = subdir_class_1 / "file1.txt"
shutil.copyfile(auto_text_file, filename2)
data_files_with_different_levels = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_different_levels
@pytest.fixture
def data_files_with_one_label_no_metadata(tmp_path, auto_text_file):
# only one label is found when all the files are in a single dir or in the root dir
data_dir = tmp_path / "data_files_with_one_label"
data_dir.mkdir(parents=True, exist_ok=True)
filename = data_dir / "file0.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = data_dir / "file1.txt"
shutil.copyfile(auto_text_file, filename2)
data_files_with_one_label = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
return data_files_with_one_label
@pytest.fixture
def files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "files_with_labels_and_label_key_in_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "class0"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "class1"
subdir_class_1.mkdir(parents=True, exist_ok=True)
filename = subdir_class_0 / "file_class0.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = subdir_class_1 / "file_class1.txt"
shutil.copyfile(auto_text_file, filename2)
metadata_filename = tmp_path / data_dir / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "class0/file_class0.txt", "additional_feature": "First dummy file", "label": "CLASS_0"}
{"file_name": "class1/file_class1.txt", "additional_feature": "Second dummy file", "label": "CLASS_1"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
return str(filename), str(filename2), str(metadata_filename)
@pytest.fixture
def file_with_metadata(tmp_path, text_file):
filename = tmp_path / "file.txt"
shutil.copyfile(text_file, filename)
metadata_filename = tmp_path / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
return str(filename), str(metadata_filename)
@pytest.fixture()
def files_with_metadata_that_misses_one_sample(tmp_path, auto_text_file):
filename = tmp_path / "file.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = tmp_path / "file2.txt"
shutil.copyfile(auto_text_file, filename2)
metadata_filename = tmp_path / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
return str(filename), str(filename2), str(metadata_filename)
@pytest.fixture
def data_files_with_one_split_and_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "autofolder_data_dir_with_metadata_one_split"
data_dir.mkdir(parents=True, exist_ok=True)
subdir = data_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
filename = data_dir / "file.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = data_dir / "file2.txt"
shutil.copyfile(auto_text_file, filename2)
filename3 = subdir / "file3.txt" # in subdir
shutil.copyfile(auto_text_file, filename3)
metadata_filename = data_dir / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
{"file_name": "file2.txt", "additional_feature": "Second dummy file"}
{"file_name": "./subdir/file3.txt", "additional_feature": "Third dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
data_files_with_one_split_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_one_split_and_metadata) == 1
assert len(data_files_with_one_split_and_metadata["train"]) == 4
return data_files_with_one_split_and_metadata
@pytest.fixture
def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file):
data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits"
data_dir.mkdir(parents=True, exist_ok=True)
train_dir = data_dir / "train"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir = data_dir / "test"
test_dir.mkdir(parents=True, exist_ok=True)
filename = train_dir / "file.txt" # train
shutil.copyfile(auto_text_file, filename)
filename2 = train_dir / "file2.txt" # train
shutil.copyfile(auto_text_file, filename2)
filename3 = test_dir / "file3.txt" # test
shutil.copyfile(auto_text_file, filename3)
train_metadata_filename = train_dir / "metadata.jsonl"
train_metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Train dummy file"}
{"file_name": "file2.txt", "additional_feature": "Second train dummy file"}
"""
)
with open(train_metadata_filename, "w", encoding="utf-8") as f:
f.write(train_metadata)
test_metadata_filename = test_dir / "metadata.jsonl"
test_metadata = textwrap.dedent(
"""\
{"file_name": "file3.txt", "additional_feature": "Test dummy file"}
"""
)
with open(test_metadata_filename, "w", encoding="utf-8") as f:
f.write(test_metadata)
data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_two_splits_and_metadata) == 2
assert len(data_files_with_two_splits_and_metadata["train"]) == 3
assert len(data_files_with_two_splits_and_metadata["test"]) == 2
return data_files_with_two_splits_and_metadata
@pytest.fixture
def data_files_with_zip_archives(tmp_path, auto_text_file):
data_dir = tmp_path / "autofolder_data_dir_with_zip_archives"
data_dir.mkdir(parents=True, exist_ok=True)
archive_dir = data_dir / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
subdir = archive_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
filename = archive_dir / "file.txt"
shutil.copyfile(auto_text_file, filename)
filename2 = subdir / "file2.txt" # in subdir
shutil.copyfile(auto_text_file, filename2)
metadata_filename = archive_dir / "metadata.jsonl"
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
{"file_name": "subdir/file2.txt", "additional_feature": "Second dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
shutil.make_archive(archive_dir, "zip", archive_dir)
shutil.rmtree(str(archive_dir))
data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
assert len(data_files_with_zip_archives) == 1
assert len(data_files_with_zip_archives["train"]) == 1
return data_files_with_zip_archives
def test_inferring_labels_from_data_dirs(data_files_with_labels_no_metadata, cache_dir):
autofolder = DummyFolderBasedBuilder(
data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
assert autofolder.info.features == Features({"base": {}, "label": ClassLabel(names=["class0", "class1"])})
generator = autofolder._generate_examples(**gen_kwargs)
assert all(example["label"] in {"class0", "class1"} for _, example in generator)
def test_default_folder_builder_not_usable(data_files_with_labels_no_metadata, cache_dir):
    # the builder would try to access non-existent attributes of the default `BuilderConfig` class
    # because a custom one is not provided
with pytest.raises(AttributeError):
_ = FolderBasedBuilder(
data_files=data_files_with_labels_no_metadata,
cache_dir=cache_dir,
)
# test that AutoFolder is extended for streaming when its child class is instantiated:
# see line 115 in src/datasets/streaming.py
def test_streaming_patched():
_ = DummyFolderBasedBuilder()
module = importlib.import_module(FolderBasedBuilder.__module__)
assert hasattr(module, "_patched_for_streaming")
assert module._patched_for_streaming
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_duplicated_label_key(
files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog
):
class0_file, class1_file, metadata_file = files_with_labels_and_duplicated_label_key_in_metadata
autofolder = DummyFolderBasedBuilder(
data_files=[class0_file, class1_file, metadata_file],
cache_dir=cache_dir,
drop_metadata=drop_metadata,
drop_labels=drop_labels,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
if drop_labels is False:
# infer labels from directories even if metadata files are found
warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records)
assert warning_in_logs if drop_metadata is not True else not warning_in_logs
assert autofolder.info.features["label"] == ClassLabel(names=["class0", "class1"])
assert all(example["label"] in ["class0", "class1"] for _, example in generator)
else:
if drop_metadata is not True:
# labels are from metadata
assert autofolder.info.features["label"] == Value("string")
assert all(example["label"] in ["CLASS_0", "CLASS_1"] for _, example in generator)
else:
# drop both labels and metadata
assert autofolder.info.features == Features({"base": {}})
assert all(example.keys() == {"base"} for _, example in generator)
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_labels(
data_files_with_labels_no_metadata, auto_text_file, drop_metadata, drop_labels, cache_dir
):
autofolder = DummyFolderBasedBuilder(
data_files=data_files_with_labels_no_metadata,
drop_metadata=drop_metadata,
drop_labels=drop_labels,
cache_dir=cache_dir,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# removing labels explicitly requires drop_labels=True
assert gen_kwargs["add_labels"] is not bool(drop_labels)
assert gen_kwargs["add_metadata"] is False
generator = autofolder._generate_examples(**gen_kwargs)
if not drop_labels:
assert all(
example.keys() == {"base", "label"} and all(val is not None for val in example.values())
for _, example in generator
)
else:
assert all(
example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator
)
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_metadata(file_with_metadata, drop_metadata, drop_labels, cache_dir):
file, metadata_file = file_with_metadata
autofolder = DummyFolderBasedBuilder(
data_files=[file, metadata_file],
drop_metadata=drop_metadata,
drop_labels=drop_labels,
cache_dir=cache_dir,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True
assert gen_kwargs["add_metadata"] is not bool(drop_metadata)
# since the dataset has metadata, adding the labels explicitly requires drop_labels=False
assert gen_kwargs["add_labels"] is (drop_labels is False)
generator = autofolder._generate_examples(**gen_kwargs)
expected_columns = {"base"}
if gen_kwargs["add_metadata"]:
expected_columns.add("additional_feature")
if gen_kwargs["add_labels"]:
expected_columns.add("label")
result = [example for _, example in generator]
assert len(result) == 1
example = result[0]
assert example.keys() == expected_columns
for column in expected_columns:
assert example[column] is not None
@pytest.mark.parametrize("remote", [True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_data_files_with_different_levels_no_metadata(
data_files_with_different_levels_no_metadata, drop_labels, remote, cache_dir
):
data_files = remote_files if remote else data_files_with_different_levels_no_metadata
autofolder = DummyFolderBasedBuilder(
data_files=data_files,
cache_dir=cache_dir,
drop_labels=drop_labels,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
if drop_labels is not False:
        # with None (default) we should drop labels if files are at different levels of the directory structure
assert "label" not in autofolder.info.features
assert all(example.keys() == {"base"} for _, example in generator)
else:
assert "label" in autofolder.info.features
assert isinstance(autofolder.info.features["label"], ClassLabel)
assert all(example.keys() == {"base", "label"} for _, example in generator)
@pytest.mark.parametrize("remote", [False, True])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir):
data_files = remote_files[:2] if remote else data_files_with_one_label_no_metadata
autofolder = DummyFolderBasedBuilder(
data_files=data_files,
cache_dir=cache_dir,
drop_labels=drop_labels,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
if drop_labels is not False:
        # with None (default) we should drop labels if only one label is found (i.e. all files are in a single dir)
assert "label" not in autofolder.info.features
assert all(example.keys() == {"base"} for _, example in generator)
else:
assert "label" in autofolder.info.features
assert isinstance(autofolder.info.features["label"], ClassLabel)
assert all(example.keys() == {"base", "label"} for _, example in generator)
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_data_files_with_metadata_that_misses_one_sample(
files_with_metadata_that_misses_one_sample, drop_metadata, cache_dir
):
file, file2, metadata_file = files_with_metadata_that_misses_one_sample
if not drop_metadata:
features = Features({"base": None, "additional_feature": Value("string")})
else:
features = Features({"base": None})
autofolder = DummyFolderBasedBuilder(
data_files=[file, file2, metadata_file],
drop_metadata=drop_metadata,
features=features,
cache_dir=cache_dir,
)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator
)
@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("n_splits", [1, 2])
def test_data_files_with_metadata_and_splits(
streaming, cache_dir, n_splits, data_files_with_one_split_and_metadata, data_files_with_two_splits_and_metadata
):
data_files = data_files_with_one_split_and_metadata if n_splits == 1 else data_files_with_two_splits_and_metadata
autofolder = DummyFolderBasedBuilder(
data_files=data_files,
cache_dir=cache_dir,
)
download_manager = StreamingDownloadManager() if streaming else DownloadManager()
generated_splits = autofolder._split_generators(download_manager)
for (split, files), generated_split in zip(data_files.items(), generated_splits):
assert split == generated_split.name
        expected_num_of_examples = len(files) - 1  # don't count the metadata file
generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs))
assert len(generated_examples) == expected_num_of_examples
assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples
assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples
assert all(example["additional_feature"] is not None for _, example in generated_examples)
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives):
autofolder = DummyFolderBasedBuilder(data_files=data_files_with_zip_archives, cache_dir=cache_dir)
download_manager = StreamingDownloadManager() if streaming else DownloadManager()
generated_splits = autofolder._split_generators(download_manager)
for (split, files), generated_split in zip(data_files_with_zip_archives.items(), generated_splits):
assert split == generated_split.name
num_of_archives = len(files)
expected_num_of_examples = 2 * num_of_archives
generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs))
assert len(generated_examples) == expected_num_of_examples
assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples
assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples
assert all(example["additional_feature"] is not None for _, example in generated_examples)
def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, auto_text_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(auto_text_file, data_dir / "file.txt")
metadata_filename = data_dir / "bad_metadata.jsonl" # bad file
metadata = textwrap.dedent(
"""\
{"file_name": "file.txt", "additional_feature": "Dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = autofolder._generate_examples(**gen_kwargs)
assert all("additional_feature" not in example for _, example in generator)
def test_data_files_with_wrong_file_name_column_in_metadata_file(cache_dir, tmp_path, auto_text_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(auto_text_file, data_dir / "file.txt")
metadata_filename = data_dir / "metadata.jsonl"
metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name"
"""\
{"bad_file_name": "file.txt", "additional_feature": "Dummy file"}
"""
)
with open(metadata_filename, "w", encoding="utf-8") as f:
f.write(metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
_ = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
assert "`file_name` must be present" in str(exc_info.value)
| datasets-main | tests/packaged_modules/test_folder_based_builder.py |
import shutil
import textwrap
import numpy as np
import pytest
from datasets import ClassLabel, Features, Image, Value
from datasets.data_files import DataFilesDict, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder
from ..utils import require_pil
@pytest.fixture
def cache_dir(tmp_path):
return str(tmp_path / "imagefolder_cache_dir")
@pytest.fixture
def data_files_with_labels_no_metadata(tmp_path, image_file):
data_dir = tmp_path / "data_files_with_labels_no_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "cat"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "dog"
subdir_class_1.mkdir(parents=True, exist_ok=True)
image_filename = subdir_class_0 / "image_cat.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir_class_1 / "image_dog.jpg"
shutil.copyfile(image_file, image_filename2)
data_files_with_labels_no_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_labels_no_metadata
@pytest.fixture
def image_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, image_file):
data_dir = tmp_path / "image_files_with_labels_and_label_key_in_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "cat"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "dog"
subdir_class_1.mkdir(parents=True, exist_ok=True)
image_filename = subdir_class_0 / "image_cat.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir_class_1 / "image_dog.jpg"
shutil.copyfile(image_file, image_filename2)
image_metadata_filename = tmp_path / data_dir / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "cat/image_cat.jpg", "caption": "Nice image of a cat", "label": "Cat"}
{"file_name": "dog/image_dog.jpg", "caption": "Nice image of a dog", "label": "Dog"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_filename2), str(image_metadata_filename)
@pytest.fixture
def image_file_with_metadata(tmp_path, image_file):
image_filename = tmp_path / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_metadata_filename = tmp_path / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_metadata_filename)
@pytest.fixture
def image_files_with_metadata_that_misses_one_image(tmp_path, image_file):
image_filename = tmp_path / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = tmp_path / "image_rgb2.jpg"
shutil.copyfile(image_file, image_filename2)
image_metadata_filename = tmp_path / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_filename2), str(image_metadata_filename)
@pytest.fixture(params=["jsonl", "csv"])
def data_files_with_one_split_and_metadata(request, tmp_path, image_file):
data_dir = tmp_path / "imagefolder_data_dir_with_metadata_one_split"
data_dir.mkdir(parents=True, exist_ok=True)
subdir = data_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
image_filename = data_dir / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = data_dir / "image_rgb2.jpg"
shutil.copyfile(image_file, image_filename2)
image_filename3 = subdir / "image_rgb3.jpg" # in subdir
shutil.copyfile(image_file, image_filename3)
image_metadata_filename = data_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
{"file_name": "image_rgb2.jpg", "caption": "Nice second image"}
{"file_name": "subdir/image_rgb3.jpg", "caption": "Nice third image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice image
image_rgb2.jpg,Nice second image
subdir/image_rgb3.jpg,Nice third image
"""
)
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_one_split_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_one_split_and_metadata) == 1
assert len(data_files_with_one_split_and_metadata["train"]) == 4
return data_files_with_one_split_and_metadata
@pytest.fixture(params=["jsonl", "csv"])
def data_files_with_two_splits_and_metadata(request, tmp_path, image_file):
data_dir = tmp_path / "imagefolder_data_dir_with_metadata_two_splits"
data_dir.mkdir(parents=True, exist_ok=True)
train_dir = data_dir / "train"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir = data_dir / "test"
test_dir.mkdir(parents=True, exist_ok=True)
image_filename = train_dir / "image_rgb.jpg" # train image
shutil.copyfile(image_file, image_filename)
image_filename2 = train_dir / "image_rgb2.jpg" # train image
shutil.copyfile(image_file, image_filename2)
image_filename3 = test_dir / "image_rgb3.jpg" # test image
shutil.copyfile(image_file, image_filename3)
train_image_metadata_filename = train_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice train image"}
{"file_name": "image_rgb2.jpg", "caption": "Nice second train image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice train image
image_rgb2.jpg,Nice second train image
"""
)
)
with open(train_image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
test_image_metadata_filename = test_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb3.jpg", "caption": "Nice test image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb3.jpg,Nice test image
"""
)
)
with open(test_image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_two_splits_and_metadata) == 2
assert len(data_files_with_two_splits_and_metadata["train"]) == 3
assert len(data_files_with_two_splits_and_metadata["test"]) == 2
return data_files_with_two_splits_and_metadata
@pytest.fixture
def data_files_with_zip_archives(tmp_path, image_file):
from PIL import Image, ImageOps
data_dir = tmp_path / "imagefolder_data_dir_with_zip_archives"
data_dir.mkdir(parents=True, exist_ok=True)
archive_dir = data_dir / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
subdir = archive_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
image_filename = archive_dir / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir / "image_rgb2.jpg" # in subdir
# make sure they're two different images
# Indeed we won't be able to compare the image.filename, since the archive is not extracted in streaming mode
ImageOps.flip(Image.open(image_file)).save(image_filename2)
image_metadata_filename = archive_dir / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
{"file_name": "subdir/image_rgb2.jpg", "caption": "Nice second image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
shutil.make_archive(archive_dir, "zip", archive_dir)
shutil.rmtree(str(archive_dir))
data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
assert len(data_files_with_zip_archives) == 1
assert len(data_files_with_zip_archives["train"]) == 1
return data_files_with_zip_archives
@require_pil
# check that labels are inferred correctly from dir names
def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir):
# there are no metadata.jsonl files in this test case
imagefolder = ImageFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False)
imagefolder.download_and_prepare()
assert imagefolder.info.features == Features({"image": Image(), "label": ClassLabel(names=["cat", "dog"])})
dataset = list(imagefolder.as_dataset()["train"])
label_feature = imagefolder.info.features["label"]
assert dataset[0]["label"] == label_feature._str2int["cat"]
assert dataset[1]["label"] == label_feature._str2int["dog"]
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_duplicated_label_key(
image_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog
):
cat_image_file, dog_image_file, image_metadata_file = image_files_with_labels_and_duplicated_label_key_in_metadata
imagefolder = ImageFolder(
drop_metadata=drop_metadata,
drop_labels=drop_labels,
data_files=[cat_image_file, dog_image_file, image_metadata_file],
cache_dir=cache_dir,
)
if drop_labels is False:
# infer labels from directories even if metadata files are found
imagefolder.download_and_prepare()
warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records)
assert warning_in_logs if drop_metadata is not True else not warning_in_logs
dataset = imagefolder.as_dataset()["train"]
assert imagefolder.info.features["label"] == ClassLabel(names=["cat", "dog"])
assert all(example["label"] in imagefolder.info.features["label"]._str2int.values() for example in dataset)
else:
imagefolder.download_and_prepare()
dataset = imagefolder.as_dataset()["train"]
if drop_metadata is not True:
# labels are from metadata
assert imagefolder.info.features["label"] == Value("string")
assert all(example["label"] in ["Cat", "Dog"] for example in dataset)
else:
# drop both labels and metadata
assert imagefolder.info.features == Features({"image": Image()})
assert all(example.keys() == {"image"} for example in dataset)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels):
imagefolder = ImageFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# removing the labels explicitly requires drop_labels=True
assert gen_kwargs["add_labels"] is not bool(drop_labels)
assert gen_kwargs["add_metadata"] is False
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_labels:
assert all(
example.keys() == {"image", "label"} and all(val is not None for val in example.values())
for _, example in generator
)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_metadata(image_file_with_metadata, drop_metadata, drop_labels):
image_file, image_metadata_file = image_file_with_metadata
imagefolder = ImageFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [image_file, image_metadata_file]}
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True
assert gen_kwargs["add_metadata"] is not bool(drop_metadata)
# since the dataset has metadata, adding the labels explicitly requires drop_labels=False
assert gen_kwargs["add_labels"] is (drop_labels is False)
generator = imagefolder._generate_examples(**gen_kwargs)
expected_columns = {"image"}
if gen_kwargs["add_metadata"]:
expected_columns.add("caption")
if gen_kwargs["add_labels"]:
expected_columns.add("label")
result = [example for _, example in generator]
assert len(result) == 1
example = result[0]
assert example.keys() == expected_columns
for column in expected_columns:
assert example[column] is not None
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_in_wrong_location(image_file, image_file_with_metadata, drop_metadata):
_, image_metadata_file = image_file_with_metadata
imagefolder = ImageFolder(drop_metadata=drop_metadata, data_files={"train": [image_file, image_metadata_file]})
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_that_misses_one_image(
image_files_with_metadata_that_misses_one_image, drop_metadata
):
image_file, image_file2, image_metadata_file = image_files_with_metadata_that_misses_one_image
if not drop_metadata:
features = Features({"image": Image(), "caption": Value("string")})
else:
features = Features({"image": Image()})
imagefolder = ImageFolder(
drop_metadata=drop_metadata,
features=features,
data_files={"train": [image_file, image_file2, image_metadata_file]},
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata):
data_files = data_files_with_one_split_and_metadata
imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_images = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({example["image"].filename for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):
data_files = data_files_with_two_splits_and_metadata
imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_images = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({example["image"].filename for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives):
imagefolder = ImageFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files_with_zip_archives.items():
num_of_archives = len(data_files) # the metadata file is inside the archive
expected_num_of_images = 2 * num_of_archives
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({np.array(example["image"])[0, 0, 0] for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
imagefolder.download_and_prepare()
dataset = imagefolder.as_dataset(split="train")
    # check that no metadata was loaded, since the metadata file doesn't have the expected name
assert "caption" not in dataset.column_names
@require_pil
def test_data_files_with_wrong_image_file_name_column_in_metadata_file(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename = data_dir / "metadata.jsonl"
image_metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name"
"""\
{"bad_file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
imagefolder.download_and_prepare()
assert "`file_name` must be present" in str(exc_info.value)
@require_pil
def test_data_files_with_metadata_in_different_formats(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_metadata_in_different_format"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename_jsonl = data_dir / "metadata.jsonl"
image_metadata_jsonl = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename_jsonl, "w", encoding="utf-8") as f:
f.write(image_metadata_jsonl)
image_metadata_filename_csv = data_dir / "metadata.csv"
image_metadata_csv = textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice image
"""
)
with open(image_metadata_filename_csv, "w", encoding="utf-8") as f:
f.write(image_metadata_csv)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
imagefolder.download_and_prepare()
assert "metadata files with different extensions" in str(exc_info.value)
| datasets-main | tests/packaged_modules/test_imagefolder.py |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
filename = tmp_path / "file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
filename = tmp_path / "malformed_file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
filename = tmp_path / "csv_with_image.csv"
data = textwrap.dedent(
f"""\
image
{image_file}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
filename = tmp_path / "csv_with_label.csv"
data = textwrap.dedent(
"""\
label
good
bad
good
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
filename = tmp_path / "csv_with_int_list.csv"
data = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
csv = Csv()
generator = csv._generate_tables([[csv_file, malformed_csv_file]])
with pytest.raises(ValueError, match="Error tokenizing data"):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(malformed_csv_file) in record.message
for record in caplog.records
)
@require_pil
def test_csv_cast_image(csv_file_with_image):
with open(csv_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[1]
csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
generator = csv._generate_tables([[csv_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
with open(csv_file_with_label, encoding="utf-8") as f:
labels = f.read().splitlines()[1:]
csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
generator = csv._generate_tables([[csv_file_with_label]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
generated_content = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
generator = csv._generate_tables([[csv_file_with_int_list]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa.types.is_list(pa_table.schema.field("int_list").type)
generated_content = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
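# A hedged, illustrative sketch (not part of the original suite): the happy path of
# `Csv._generate_tables` on the plain `csv_file` fixture is expected to yield a pyarrow
# table matching the CSV contents, mirroring how the tests above consume the generator.
def test_csv_generate_tables_basic_sketch(csv_file):
    csv = Csv()
    generator = csv._generate_tables([[csv_file]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.num_rows == 2
    assert pa_table.to_pydict() == {"header1": [1, 10], "header2": [2, 20]}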
| datasets-main | tests/packaged_modules/test_csv.py |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
# dataset + arrow_file
@pytest.fixture(scope="session")
def dataset():
n = 10
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
"answers": datasets.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
"id": datasets.Value("int64"),
}
)
dataset = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(n)),
},
features=features,
)
return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
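    # calling `map` with no function but an explicit `cache_file_name` materializes the
    # dataset as an Arrow file at that path (a convenient way to produce `.arrow` test data)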
dataset.map(cache_file_name=filename)
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.txt"
data = FILE_CONTENT
with open(filename, "w") as f:
f.write(data)
return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
import bz2
path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
import gzip
path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
data = bytes(FILE_CONTENT, "utf-8")
with gzip.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
data = bytes(FILE_CONTENT, "utf-8")
with lz4.frame.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
path = tmp_path_factory.mktemp("data") / "file.txt.7z"
with py7zr.SevenZipFile(path, "w") as archive:
archive.write(text_file, arcname=os.path.basename(text_file))
return path
@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
import tarfile
path = tmp_path_factory.mktemp("data") / "file.txt.tar"
with tarfile.TarFile(path, "w") as f:
f.add(text_file, arcname=os.path.basename(text_file))
return path
@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
import lzma
path = tmp_path_factory.mktemp("data") / "file.txt.xz"
data = bytes(FILE_CONTENT, "utf-8")
with lzma.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
import zipfile
path = tmp_path_factory.mktemp("data") / "file.txt.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_file, arcname=os.path.basename(text_file))
return path
@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
path = tmp_path_factory.mktemp("data") / "file.txt.zst"
data = bytes(FILE_CONTENT, "utf-8")
with zstd.open(path, "wb") as f:
f.write(data)
return path
# xml_file
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.xml"
data = textwrap.dedent(
"""\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>"""
)
with open(filename, "w") as f:
f.write(data)
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
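# The constants above all describe the same three-column schema (col_1: str, col_2: int,
# col_3: float) in different layouts; most of the file-format fixtures below serialize them
# to Arrow, SQLite, CSV, Parquet and JSON/JSON Lines files, plus zipped/compressed variants.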
@pytest.fixture(scope="session")
def dataset_dict():
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
dataset.map(cache_file_name=path)
return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
with contextlib.closing(sqlite3.connect(path)) as con:
cur = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
con.commit()
return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
import bz2
path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
with open(csv_path, "rb") as f:
data = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("zip_csv_path") / "csv-dataset.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.basename(csv_path))
f.write(csv2_path, arcname=os.path.basename(csv2_path))
return path
@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
return path
@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
schema = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.int64(),
"col_3": pa.float64(),
}
)
with open(path, "wb") as f:
writer = pq.ParquetWriter(f, schema=schema)
pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
writer.write_table(pa_table)
writer.close()
return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.json")
data = {"data": DATA}
with open(path, "w") as f:
json.dump(data, f)
return path
@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.json")
data = {"data": DATA_DICT_OF_LISTS}
with open(path, "w") as f:
json.dump(data, f)
return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
with open(path, "w") as f:
for item in DATA:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
with open(path, "w") as f:
for item in DATA:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
with open(path, "w") as f:
for item in DATA_312:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
with open(path, "w") as f:
for item in DATA_STR:
f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
import gzip
path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
with open(text_path, "rb") as orig_file:
with gzip.open(path, "wb") as zipped_file:
zipped_file.writelines(orig_file)
return path
@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
import gzip
path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
with open(jsonl_path, "rb") as orig_file:
with gzip.open(path, "wb") as zipped_file:
zipped_file.writelines(orig_file)
return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
return path
@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
return path
@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
return path
@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
with tarfile.TarFile(path, "w") as f:
f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
return path
@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
with tarfile.TarFile(path, "w") as f:
f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
data = ["0", "1", "2", "3"]
path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
with open(path, "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
data = ["0", "1", "2", "3"]
path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
with open(path, "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def text_dir_with_unsupported_extension(tmp_path_factory):
data = ["0", "1", "2", "3"]
path = tmp_path_factory.mktemp("data") / "dataset.abc"
with open(path, "w") as f:
for item in data:
f.write(item + "\n")
return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_path, arcname=os.path.basename(text_path))
f.write(text2_path, arcname=os.path.basename(text2_path))
return path
@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
return path
@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(text_path, arcname=os.path.basename("unsupported.ext"))
f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
with open(path, "w", encoding="utf-8") as f:
f.write(text)
return path
@pytest.fixture(scope="session")
def image_file():
return os.path.join("tests", "features", "data", "test_image_rgb.jpg")
@pytest.fixture(scope="session")
def audio_file():
return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(image_file, arcname=os.path.basename(image_file))
f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
return path
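# The fixture below creates both regular and hidden (dot-prefixed) files and directories;
# it is presumably used to check that data file resolution skips the hidden entries.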
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
data_dir = tmp_path_factory.mktemp("data_dir")
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / "subdir" / "test.txt", "w") as f:
f.write("bar\n" * 10)
# hidden file
with open(data_dir / "subdir" / ".test.txt", "w") as f:
f.write("bar\n" * 10)
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt", "w") as f:
f.write("foo\n" * 10)
with open(data_dir / ".subdir" / "test.txt", "w") as f:
f.write("bar\n" * 10)
return data_dir
| datasets-main | tests/fixtures/files.py |
| datasets-main | tests/fixtures/__init__.py |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id):
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo
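# Minimal usage sketch for the fixture above (hypothetical test function; the repo name is
# illustrative): the repo created inside the context is deleted in the `finally` block of
# `_temporary_repo`, even if the body raises.
def _example_temporary_repo_usage(temporary_repo, hf_api):
    with temporary_repo(f"{CI_HUB_USER}/tmp_example_repo") as repo_id:
        hf_api.create_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
        # ... exercise the code under test against `repo_id` here ...
    # on exit, `cleanup_repo(repo_id)` has removed the repo from the CI Hub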
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
| datasets-main | tests/fixtures/hub.py |
import posixpath
from pathlib import Path
from unittest.mock import patch
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
from fsspec.registry import _registry as _fsspec_registry
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
else:
return [name[len(self.local_root_dir) :] for name in out]
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :]
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
class TmpDirFileSystem(MockFileSystem):
protocol = "tmp"
tmp_dir = None
def __init__(self, *args, **kwargs):
assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("tmp://"):
path = path[6:]
return path
@pytest.fixture
def mock_fsspec():
_fsspec_registry["mock"] = MockFileSystem
_fsspec_registry["tmp"] = TmpDirFileSystem
yield
del _fsspec_registry["mock"]
del _fsspec_registry["tmp"]
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
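# Minimal usage sketch for the fixture above (hypothetical test function): paths on the
# "mock://" protocol are resolved against `local_root_dir`, so reads and writes land on
# local disk under the temporary directory created by `tmp_path_factory`.
def _example_mockfs_roundtrip(mockfs):
    with mockfs.open("mock://data/hello.txt", "w") as f:
        f.write("hello")
    assert any(name.endswith("hello.txt") for name in mockfs.ls("mock://data", detail=False))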
@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
yield TmpDirFileSystem()
TmpDirFileSystem.clear_instance_cache()
| datasets-main | tests/fixtures/fsspec.py |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/hf-internal-testing/raw_jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
script_name = dataset_loading_script_name
script_dir = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=True)
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(dataset_loading_script_code)
return str(script_dir)
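# Minimal usage sketch (hypothetical helper): the directory returned above can be passed
# directly to `datasets.load_dataset`, which picks up the `__dummy_dataset1__.py` script
# and downloads the two JSONL files referenced in its URLS dict.
def _example_load_dummy_dataset(dataset_loading_script_dir):
    import datasets
    return datasets.load_dataset(dataset_loading_script_dir, split="train")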
| datasets-main | tests/commands/conftest.py |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
test_command = TestCommand(*args)
test_command.run()
dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
assert os.path.exists(dataset_readme_path)
dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}
),
splits=[
{
"name": "train",
"num_bytes": 2351563,
"num_examples": 10000,
},
{
"name": "validation",
"num_bytes": 238418,
"num_examples": 1000,
},
],
download_size=3940680,
dataset_size=2589981,
)
}
)
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
if key == "num_bytes":
assert is_1percent_close(result, expected)
elif key == "splits":
assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
else:
            assert result == expected
| datasets-main | tests/commands/test_test.py |
datasets-main | tests/commands/__init__.py |
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import packaging.version
REPLACE_PATTERNS = {
"init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
"setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
}
REPLACE_FILES = {
"init": "src/datasets/__init__.py",
"setup": "setup.py",
}
def update_version_in_file(fname, version, pattern):
"""Update the version in one file using a specific pattern."""
with open(fname, "r", encoding="utf-8", newline="\n") as f:
code = f.read()
re_pattern, replace = REPLACE_PATTERNS[pattern]
replace = replace.replace("VERSION", version)
code = re_pattern.sub(replace, code)
with open(fname, "w", encoding="utf-8", newline="\n") as f:
f.write(code)
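# Illustrative sketch of what the "init" pattern above does (hypothetical version strings):
# the captured dev version is replaced wholesale by the release version.
def _example_version_rewrite():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    code = '__version__ = "2.14.0.dev0"\n'
    new_code = re_pattern.sub(replace.replace("VERSION", "2.14.0"), code)
    assert '"2.14.0"' in new_code and "dev0" not in new_code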
def global_version_update(version):
"""Update the version in all needed files."""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(fname, version, pattern)
def get_version():
"""Reads the current version in the __init__."""
with open(REPLACE_FILES["init"], "r") as f:
code = f.read()
default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
return packaging.version.parse(default_version)
def pre_release_work(patch=False):
"""Do all the necessary pre-release steps."""
# First let's get the default version: base version if we are in dev, bump minor otherwise.
default_version = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
if default_version.is_devrelease:
default_version = default_version.base_version
elif patch:
default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
default_version = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
version = input(f"Which version are you releasing? [{default_version}]")
if len(version) == 0:
version = default_version
print(f"Updating version to {version}.")
global_version_update(version)
def post_release_work():
"""Do all the necesarry post-release steps."""
# First let's get the current version
current_version = get_version()
dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
current_version = current_version.base_version
# Check with the user we got that right.
version = input(f"Which version are we developing now? [{dev_version}]")
if len(version) == 0:
version = dev_version
print(f"Updating version to {version}.")
global_version_update(version)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether or not this is post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| datasets-main | utils/release.py |
# docstyle-ignore
INSTALL_CONTENT = """
# Datasets installation
! pip install datasets transformers
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/datasets.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
default_branch_name = "main"
version_prefix = ""
| datasets-main | docs/source/_config.py |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def select(dataset: datasets.Dataset):
_ = dataset.select(range(0, len(dataset), 2))
@get_duration
def sort(dataset: datasets.Dataset):
_ = dataset.sort("numbers")
@get_duration
def shuffle(dataset: datasets.Dataset):
_ = dataset.shuffle()
@get_duration
def train_test_split(dataset: datasets.Dataset):
_ = dataset.train_test_split(0.1)
@get_duration
def shard(dataset: datasets.Dataset, num_shards=10):
for shard_id in range(num_shards):
_ = dataset.shard(num_shards, shard_id)
def benchmark_indices_mapping():
times = {"num examples": SPEED_TEST_N_EXAMPLES}
functions = (select, sort, shuffle, train_test_split, shard)
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset")
features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
dataset = generate_example_dataset(
os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
)
print("Functions")
for func in functions:
print(func.__name__)
times[func.__name__] = func(dataset)
with open(RESULTS_FILE_PATH, "wb") as f:
f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_indices_mapping()
| datasets-main | benchmarks/benchmark_indices_mapping.py |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
for i in range(length):
_ = dataset[i]
@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
for i in range(0, len(dataset), batch_size):
_ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
with dataset.formatted_as(type=type):
for i in range(length):
_ = dataset[i]
@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
with dataset.formatted_as(type=type):
for i in range(0, length, batch_size):
_ = dataset[i : i + batch_size]
def benchmark_iterating():
times = {"num examples": SPEED_TEST_N_EXAMPLES}
functions = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
functions_shuffled = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset")
features = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
)
dataset = generate_example_dataset(
os.path.join(tmp_dir, "dataset.arrow"),
features,
num_examples=SPEED_TEST_N_EXAMPLES,
seq_shapes={"list": (100,)},
)
print("first set of iterations")
for func, kwargs in functions:
print(func.__name__, str(kwargs))
times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
print("shuffling dataset")
dataset = dataset.shuffle()
print("Second set of iterations (after shuffling")
for func, kwargs in functions_shuffled:
print("shuffled ", func.__name__, str(kwargs))
times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
dataset, **kwargs
)
with open(RESULTS_FILE_PATH, "wb") as f:
f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| datasets-main | benchmarks/benchmark_iterating.py |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
with open(input_json_file, encoding="utf-8") as f:
results = json.load(f)
output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(results):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split("/")[-1]
output_md.append(f"### Benchmark: {benchmark_file_name}")
title = "| metric |"
lines = "|--------|"
value = "| new / old (diff) |"
for metric_name in sorted(benchmark_res):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals["new"]
old_val = metric_vals.get("old", None)
dif_val = metric_vals.get("diff", None)
val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
if old_val is not None:
val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
if dif_val is not None:
val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>")
with open(output_md_file, "w", encoding="utf-8") as f:
f.writelines("\n".join(output_md))
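# Minimal usage sketch (hypothetical file names and values) of the JSON layout this script
# consumes: one entry per benchmark result file, each metric carrying a "new" value and
# optional "old"/"diff" values.
def _example_format_json_to_md(tmp_json="example_results.json", tmp_md="example_results.md"):
    results = {"benchmarks/results/benchmark_iterating": {"read 5000": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
    with open(tmp_json, "w", encoding="utf-8") as f:
        json.dump(results, f)
    format_json_to_md(tmp_json, tmp_md)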
if __name__ == "__main__":
input_json_file = sys.argv[1]
output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| datasets-main | benchmarks/format.py |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
_ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
_ = dataset.filter(**kwargs)
def benchmark_map_filter():
times = {"num examples": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
dataset = generate_example_dataset(
os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
)
tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)
def tokenize(examples):
return tokenizer(examples["text"])
times["map identity"] = map(dataset)
times["map identity batched"] = map(dataset, batched=True)
times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
with dataset.formatted_as(type="numpy"):
times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
with dataset.formatted_as(type="pandas"):
times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
with dataset.formatted_as(type="torch", columns="numbers"):
times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
with dataset.formatted_as(type="tensorflow", columns="numbers"):
times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
times["filter"] = filter(dataset)
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(RESULTS_FILE_PATH, "wb") as f:
f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| datasets-main | benchmarks/benchmark_map_filter.py |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
def wrapper(*args, **kwargs):
starttime = timeit.default_timer()
_ = func(*args, **kwargs)
delta = timeit.default_timer() - starttime
return delta
wrapper.__name__ = func.__name__
return wrapper
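# Minimal usage sketch for the decorator above (hypothetical function): the wrapped
# callable returns the elapsed wall-clock time in seconds instead of its own result.
@get_duration
def _example_timed_work(n=100_000):
    _ = sum(range(n))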
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
dummy_data = []
seq_shapes = seq_shapes or {}
for i in range(num_examples):
example = {}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(v, _ArrayXD):
data = np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(v, datasets.Value):
if v.dtype == "string":
data = "The small grey turtle was surprisingly fast when challenged."
else:
data = np.random.randint(10, size=1).astype(v.dtype).item()
elif isinstance(v, datasets.Sequence):
while isinstance(v, datasets.Sequence):
v = v.feature
shape = seq_shapes[k]
data = np.random.rand(*shape).astype(v.dtype)
example[k] = data
dummy_data.append((i, example))
return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
with ArrowWriter(features=features, path=dataset_path) as writer:
for key, record in dummy_data:
example = features.encode_example(record)
writer.write(example)
num_final_examples, num_bytes = writer.finalize()
    if num_final_examples != num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
)
dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
return dataset
| datasets-main | benchmarks/utils.py |
import json
import os
from dataclasses import dataclass
import numpy as np
import pyarrow as pa
import datasets
from utils import get_duration
SPEED_TEST_N_EXAMPLES = 100_000_000_000
SPEED_TEST_CHUNK_SIZE = 10_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
def generate_100B_dataset(num_examples: int, chunk_size: int) -> datasets.Dataset:
table = pa.Table.from_pydict({"col": [0] * chunk_size})
table = pa.concat_tables([table] * (num_examples // chunk_size))
return datasets.Dataset(table, fingerprint="table_100B")
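# Why the 100B-row table above is cheap to build: `pa.concat_tables` only stacks references
# to the same underlying chunk, so memory stays proportional to one chunk rather than to
# `num_examples`. A smaller illustrative sketch (hypothetical sizes):
def _example_zero_copy_concat(chunk_size=1_000, num_chunks=5):
    chunk = pa.Table.from_pydict({"col": [0] * chunk_size})
    table = pa.concat_tables([chunk] * num_chunks)
    return table.num_rows  # chunk_size * num_chunks, without copying the chunk's buffers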
@dataclass
class RandIter:
low: int
high: int
size: int
seed: int
def __post_init__(self):
rng = np.random.default_rng(self.seed)
self._sampled_values = rng.integers(low=self.low, high=self.high, size=self.size).tolist()
def __iter__(self):
return iter(self._sampled_values)
def __len__(self):
return self.size
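# Minimal sketch of how `RandIter` is consumed below (hypothetical helper): `dataset[...]`
# accepts a list-like of integer row indices, so the iterator above can be passed directly.
def _example_random_batch(dataset: datasets.Dataset, batch_size: int = 16):
    return dataset[RandIter(0, len(dataset), batch_size, seed=0)]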
@get_duration
def get_first_row(dataset: datasets.Dataset):
_ = dataset[0]
@get_duration
def get_last_row(dataset: datasets.Dataset):
_ = dataset[-1]
@get_duration
def get_batch_of_1024_rows(dataset: datasets.Dataset):
_ = dataset[range(len(dataset) // 2, len(dataset) // 2 + 1024)]
@get_duration
def get_batch_of_1024_random_rows(dataset: datasets.Dataset):
_ = dataset[RandIter(0, len(dataset), 1024, seed=42)]
def benchmark_table_100B():
times = {"num examples": SPEED_TEST_N_EXAMPLES}
functions = (get_first_row, get_last_row, get_batch_of_1024_rows, get_batch_of_1024_random_rows)
print("generating dataset")
dataset = generate_100B_dataset(num_examples=SPEED_TEST_N_EXAMPLES, chunk_size=SPEED_TEST_CHUNK_SIZE)
print("Functions")
for func in functions:
print(func.__name__)
times[func.__name__] = func(dataset)
with open(RESULTS_FILE_PATH, "wb") as f:
f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_table_100B()
| datasets-main | benchmarks/benchmark_getitem_100B.py |
import json
import os
import tempfile
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features import Array2D
from utils import generate_examples, get_duration
SHAPE_TEST_1 = (30, 487)
SHAPE_TEST_2 = (36, 1024)
SPEED_TEST_SHAPE = (100, 100)
SPEED_TEST_N_EXAMPLES = 100
DEFAULT_FEATURES = datasets.Features(
{"text": Array2D(SHAPE_TEST_1, dtype="float32"), "image": Array2D(SHAPE_TEST_2, dtype="float32")}
)
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def write(my_features, dummy_data, tmp_dir):
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in dummy_data:
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
@get_duration
def read_unformated(feats, tmp_dir):
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
for _ in dataset:
pass
@get_duration
def read_formatted_as_numpy(feats, tmp_dir):
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
dataset.set_format("numpy")
for _ in dataset:
pass
@get_duration
def read_batch_unformated(feats, tmp_dir):
batch_size = 10
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
for i in range(0, len(dataset), batch_size):
_ = dataset[i : i + batch_size]
@get_duration
def read_batch_formatted_as_numpy(feats, tmp_dir):
batch_size = 10
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
dataset.set_format("numpy")
for i in range(0, len(dataset), batch_size):
_ = dataset[i : i + batch_size]
@get_duration
def read_col_unformated(feats, tmp_dir):
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
for col in feats:
_ = dataset[col]
@get_duration
def read_col_formatted_as_numpy(feats, tmp_dir):
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
dataset.set_format("numpy")
for col in feats:
_ = dataset[col]
def benchmark_array_xd():
times = {}
read_functions = (
read_unformated,
read_formatted_as_numpy,
read_batch_unformated,
read_batch_formatted_as_numpy,
read_col_unformated,
read_col_formatted_as_numpy,
)
with tempfile.TemporaryDirectory() as tmp_dir:
feats = datasets.Features({"image": Array2D(SPEED_TEST_SHAPE, dtype="float32")})
data = generate_examples(features=feats, num_examples=SPEED_TEST_N_EXAMPLES)
times["write_array2d"] = write(feats, data, tmp_dir)
for read_func in read_functions:
times[read_func.__name__ + " after write_array2d"] = read_func(feats, tmp_dir)
with tempfile.TemporaryDirectory() as tmp_dir:
# don't use fixed length for fair comparison
# feats = datasets.Features(
# {"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[1]), SPEED_TEST_SHAPE[0])}
# )
feats = datasets.Features({"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))})
data = generate_examples(
features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": SPEED_TEST_SHAPE}
)
times["write_nested_sequence"] = write(feats, data, tmp_dir)
for read_func in read_functions:
times[read_func.__name__ + " after write_nested_sequence"] = read_func(feats, tmp_dir)
with tempfile.TemporaryDirectory() as tmp_dir:
# don't use fixed length for fair comparison
# feats = datasets.Features(
# {"image": datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1])}
# )
feats = datasets.Features({"image": datasets.Sequence(datasets.Value("float32"))})
data = generate_examples(
features=feats,
num_examples=SPEED_TEST_N_EXAMPLES,
seq_shapes={"image": [SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1]]},
)
times["write_flattened_sequence"] = write(feats, data, tmp_dir)
for read_func in read_functions:
times[read_func.__name__ + " after write_flattened_sequence"] = read_func(feats, tmp_dir)
with open(RESULTS_FILE_PATH, "wb") as f:
f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_array_xd()
| datasets-main | benchmarks/benchmark_array_xd.py |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""
import csv
import json
import os
import datasets
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
"second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
}
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class NewDataset(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.1.0")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
# If you need to make complex sub-parts in the datasets with configurable options
# You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
# BUILDER_CONFIG_CLASS = MyBuilderConfig
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('my_dataset', 'first_domain')
# data = datasets.load_dataset('my_dataset', 'second_domain')
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
]
    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
if self.config.name == "first_domain": # This is the name of the configuration selected in BUILDER_CONFIGS above
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"option1": datasets.Value("string"),
"answer": datasets.Value("string")
# These are the features of your dataset like images, labels ...
}
)
else: # This is an example to show how to have different features for "first_domain" and "second_domain"
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"option2": datasets.Value("string"),
"second_domain_answer": datasets.Value("string")
# These are the features of your dataset like images, labels ...
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
urls = _URLS[self.config.name]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "train.jsonl"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "dev.jsonl"),
"split": "dev",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "test.jsonl"),
"split": "test"
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
with open(filepath, encoding="utf-8") as f:
for key, row in enumerate(f):
data = json.loads(row)
if self.config.name == "first_domain":
# Yields examples as (key, example) tuples
yield key, {
"sentence": data["sentence"],
"option1": data["option1"],
"answer": "" if split == "test" else data["answer"],
}
else:
yield key, {
"sentence": data["sentence"],
"option2": data["option2"],
"second_domain_answer": "" if split == "test" else data["second_domain_answer"],
}
| datasets-main | templates/new_dataset_script.py |
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" Simple Dataset wrapping an Arrow Table."""
import contextlib
import copy
import itertools
import json
import os
import posixpath
import re
import shutil
import sys
import tempfile
import time
import warnings
import weakref
from collections import Counter
from collections.abc import Mapping
from copy import deepcopy
from fnmatch import fnmatch
from functools import partial, wraps
from io import BytesIO
from math import ceil, floor
from pathlib import Path
from random import sample
from typing import (
TYPE_CHECKING,
Any,
BinaryIO,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
Union,
overload,
)
from typing import Sequence as Sequence_
import fsspec
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
from huggingface_hub import DatasetCard, DatasetCardData, HfApi, HfFolder
from multiprocess import Pool
from requests import HTTPError
from . import config
from .arrow_reader import ArrowReader
from .arrow_writer import ArrowWriter, OptimizedTypedSequence
from .data_files import sanitize_patterns
from .download.download_config import DownloadConfig
from .download.streaming_download_manager import xgetsize
from .features import Audio, ClassLabel, Features, Image, Sequence, Value
from .features.features import (
FeatureType,
_align_features,
_check_if_features_can_be_aligned,
generate_from_arrow_type,
pandas_types_mapper,
require_decoding,
)
from .filesystems import extract_path_from_uri, is_remote_filesystem
from .fingerprint import (
fingerprint_transform,
format_kwargs_for_fingerprint,
format_transform_for_fingerprint,
generate_fingerprint,
generate_random_fingerprint,
get_temporary_cache_files_directory,
is_caching_enabled,
maybe_register_dataset_for_temp_dir_deletion,
update_fingerprint,
validate_fingerprint,
)
from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table
from .formatting.formatting import LazyDict, _is_range_contiguous
from .info import DatasetInfo, DatasetInfosDict
from .naming import _split_re
from .search import IndexableMixin
from .splits import NamedSplit, Split, SplitDict, SplitInfo
from .table import (
InMemoryTable,
MemoryMappedTable,
Table,
_memory_mapped_record_batch_reader_from_file,
cast_array_to_feature,
concat_tables,
embed_table_storage,
list_table_cache_files,
table_cast,
table_iter,
table_visitor,
)
from .tasks import TaskTemplate
from .utils import logging
from .utils.deprecation_utils import deprecated
from .utils.file_utils import _retry, cached_path, estimate_dataset_size
from .utils.hub import hf_hub_url
from .utils.info_utils import is_small_dataset
from .utils.metadata import MetadataConfigs
from .utils.py_utils import (
Literal,
asdict,
convert_file_size_to_int,
glob_pattern_to_regex,
iflatmap_unordered,
string_to_dict,
unique_values,
)
from .utils.stratify import stratified_shuffle_split_generate_indices
from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf
from .utils.typing import ListLike, PathLike
if TYPE_CHECKING:
import sqlite3
import pyspark
import sqlalchemy
from .dataset_dict import DatasetDict
from .iterable_dataset import IterableDataset
logger = logging.get_logger(__name__)
PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED = (
"data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.parquet"
)
class DatasetInfoMixin:
"""This base class exposes some attributes of DatasetInfo
at the base level of the Dataset for easy access.
"""
def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]):
self._info = info
self._split = split
@property
def info(self):
"""[`~datasets.DatasetInfo`] object containing all the metadata in the dataset."""
return self._info
@property
def split(self):
"""[`~datasets.NamedSplit`] object corresponding to a named dataset split."""
return self._split
@property
def builder_name(self) -> str:
return self._info.builder_name
@property
def citation(self) -> str:
return self._info.citation
@property
def config_name(self) -> str:
return self._info.config_name
@property
def dataset_size(self) -> Optional[int]:
return self._info.dataset_size
@property
def description(self) -> str:
return self._info.description
@property
def download_checksums(self) -> Optional[dict]:
return self._info.download_checksums
@property
def download_size(self) -> Optional[int]:
return self._info.download_size
@property
def features(self) -> Optional[Features]:
return self._info.features.copy() if self._info.features is not None else None
@property
def homepage(self) -> Optional[str]:
return self._info.homepage
@property
def license(self) -> Optional[str]:
return self._info.license
@property
def size_in_bytes(self) -> Optional[int]:
return self._info.size_in_bytes
@property
def supervised_keys(self):
return self._info.supervised_keys
@property
def task_templates(self):
return self._info.task_templates
@property
def version(self):
return self._info.version
class TensorflowDatasetMixin:
_TF_DATASET_REFS = set()
@staticmethod
def _get_output_signature(
dataset: "Dataset",
collate_fn: Callable,
collate_fn_args: dict,
cols_to_retain: Optional[List[str]] = None,
batch_size: Optional[int] = None,
num_test_batches: int = 20,
):
"""Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset
after being passed through the collate_fn. Tensorflow needs an exact signature for tf.numpy_function, so
the only way to do this is to run test batches - the collator may add or rename columns, so we can't figure
it out just by inspecting the dataset.
Args:
dataset (`Dataset`): Dataset to load samples from.
            collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
                lists of samples into a batch.
            collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
                `collate_fn`.
            cols_to_retain (`List[str]`, *optional*): Only keep these columns (plus the label columns
                `label_ids`, `label` and `labels`) in each sample before it is passed to the `collate_fn`.
batch_size (`int`, optional): The size of batches loaded from the dataset. Used for shape inference.
Can be None, which indicates that batch sizes can be variable.
num_test_batches (`int`): The number of batches to load from the dataset for shape inference.
Returns:
            `dict`: Dict mapping column names to tf.TensorSpec objects
`dict`: Dict mapping column names to np.dtype objects
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
if len(dataset) == 0:
raise ValueError("Unable to get the output signature because the dataset is empty.")
if batch_size is not None:
batch_size = min(len(dataset), batch_size)
test_batch_size = 1
if cols_to_retain is not None:
cols_to_retain = list(set(cols_to_retain + ["label_ids", "label", "labels"]))
test_batches = []
for _ in range(num_test_batches):
indices = sample(range(len(dataset)), test_batch_size)
test_batch = dataset[indices]
if cols_to_retain is not None:
test_batch = {key: value for key, value in test_batch.items() if key in cols_to_retain}
test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)]
test_batch = collate_fn(test_batch, **collate_fn_args)
test_batches.append(test_batch)
tf_columns_to_signatures = {}
np_columns_to_dtypes = {}
for column in test_batches[0].keys():
raw_arrays = [batch[column] for batch in test_batches]
# In case the collate_fn returns something strange
np_arrays = []
for array in raw_arrays:
if isinstance(array, np.ndarray):
np_arrays.append(array)
elif isinstance(array, tf.Tensor):
np_arrays.append(array.numpy())
else:
np_arrays.append(np.array(array))
if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool:
tf_dtype = tf.int64
np_dtype = np.int64
elif np.issubdtype(np_arrays[0].dtype, np.number):
tf_dtype = tf.float32
np_dtype = np.float32
elif np_arrays[0].dtype.kind == "U": # Unicode strings
np_dtype = np.unicode_
tf_dtype = tf.string
else:
raise RuntimeError(
f"Unrecognized array dtype {np_arrays[0].dtype}. \n"
"Nested types and image/audio types are not supported yet."
)
shapes = [array.shape for array in np_arrays]
static_shape = []
for dim in range(len(shapes[0])):
sizes = {shape[dim] for shape in shapes}
if dim == 0:
static_shape.append(batch_size)
continue
if len(sizes) == 1: # This dimension looks constant
static_shape.append(sizes.pop())
else: # Use None for variable dimensions
static_shape.append(None)
tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype)
np_columns_to_dtypes[column] = np_dtype
return tf_columns_to_signatures, np_columns_to_dtypes
def to_tf_dataset(
self,
batch_size: Optional[int] = None,
columns: Optional[Union[str, List[str]]] = None,
shuffle: bool = False,
collate_fn: Optional[Callable] = None,
drop_remainder: bool = False,
collate_fn_args: Optional[Dict[str, Any]] = None,
label_cols: Optional[Union[str, List[str]]] = None,
prefetch: bool = True,
num_workers: int = 0,
num_test_batches: int = 20,
):
"""Create a `tf.data.Dataset` from the underlying Dataset. This `tf.data.Dataset` will load and collate batches from
the Dataset, and is suitable for passing to methods like `model.fit()` or `model.predict()`. The dataset will yield
`dicts` for both inputs and labels unless the `dict` would contain only a single key, in which case a raw
`tf.Tensor` is yielded instead.
Args:
batch_size (`int`, *optional*):
Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be
batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
columns (`List[str]` or `str`, *optional*):
Dataset column(s) to load in the `tf.data.Dataset`.
Column names that are created by the `collate_fn` and that do not exist in the original dataset can be used.
shuffle(`bool`, defaults to `False`):
Shuffle the dataset order when loading. Recommended `True` for training, `False` for
validation/evaluation.
drop_remainder(`bool`, defaults to `False`):
Drop the last incomplete batch when loading. Ensures
that all batches yielded by the dataset will have the same length on the batch dimension.
collate_fn(`Callable`, *optional*):
A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`, *optional*):
An optional `dict` of keyword arguments to be passed to the
`collate_fn`.
label_cols (`List[str]` or `str`, defaults to `None`):
Dataset column(s) to load as labels.
Note that many models compute loss internally rather than letting Keras do it, in which case
passing the labels here is optional, as long as they're in the input `columns`.
prefetch (`bool`, defaults to `True`):
Whether to run the dataloader in a separate thread and maintain
a small buffer of batches for training. Improves performance by allowing data to be loaded in the
background while the model is training.
num_workers (`int`, defaults to `0`):
Number of workers to use for loading the dataset. Only supported on Python versions >= 3.8.
num_test_batches (`int`, defaults to `20`):
Number of batches to use to infer the output signature of the dataset.
The higher this number, the more accurate the signature will be, but the longer it will take to
create the dataset.
Returns:
`tf.data.Dataset`
Example:
```py
>>> ds_train = ds["train"].to_tf_dataset(
... columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'],
... shuffle=True,
... batch_size=16,
... collate_fn=data_collator,
... )
```
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
if (isinstance(columns, list) and len(columns) == 1) or (
isinstance(label_cols, list) and len(label_cols) == 1
):
warnings.warn(
"The output of `to_tf_dataset` will change when a passing single element list for `labels` or "
"`columns` in the next datasets version. To return a tuple structure rather than dict, pass a "
"single string.\n"
"Old behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n"
" : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \n"
"New behaviour: columns=['a'],labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n"
" : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ",
FutureWarning,
)
if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
logger.warning(
"Note that to_tf_dataset() loads the data with a generator rather than a full tf.data "
"pipeline and is not compatible with remote TPU connections. If you encounter errors, please "
"try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of "
"Tensors instead of streaming with to_tf_dataset()."
)
if collate_fn is None:
# Set a very simple default collator that just stacks things together
collate_fn = minimal_tf_collate_fn
if collate_fn_args is None:
collate_fn_args = {}
if label_cols and not columns:
raise ValueError("Cannot specify label_cols without specifying columns!")
if label_cols is None:
label_cols = []
elif isinstance(label_cols, str):
label_cols = [label_cols]
if len(set(label_cols)) < len(label_cols):
raise ValueError("List of label_cols contains duplicates.")
if columns:
if isinstance(columns, str):
columns = [columns]
if len(set(columns)) < len(columns):
raise ValueError("List of columns contains duplicates.")
cols_to_retain = list(set(columns + label_cols))
else:
cols_to_retain = None # Indicates keeping all valid columns
columns = []
if self.format["type"] not in ["custom", "numpy"]:
dataset = self.with_format("numpy")
else:
dataset = self
# TODO(Matt, QL): deprecate the retention of label_ids and label
output_signature, columns_to_np_types = dataset._get_output_signature(
dataset,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
cols_to_retain=cols_to_retain,
batch_size=batch_size if drop_remainder and batch_size is not None else None,
num_test_batches=num_test_batches,
)
if "labels" in output_signature:
if ("label_ids" in columns or "label" in columns) and "labels" not in columns:
columns = [col for col in columns if col not in ["label_ids", "label"]] + ["labels"]
if ("label_ids" in label_cols or "label" in label_cols) and "labels" not in label_cols:
label_cols = [col for col in label_cols if col not in ["label_ids", "label"]] + ["labels"]
for col in columns:
if col not in output_signature:
raise ValueError(f"Column {col} not found in dataset!")
for col in label_cols:
if col not in output_signature:
raise ValueError(f"Label column {col} not found in dataset!")
if num_workers == 0:
tf_dataset = dataset_to_tf(
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
output_signature=output_signature,
shuffle=shuffle,
batch_size=batch_size,
drop_remainder=drop_remainder,
)
elif num_workers > 0:
if batch_size is None:
raise NotImplementedError(
"`batch_size` must be specified when using multiple workers, as unbatched multiprocessing "
"is not supported yet. Please provide a `batch_size` if `num_workers` is greater than 0."
)
tf_dataset = multiprocess_dataset_to_tf(
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
output_signature=output_signature,
shuffle=shuffle,
batch_size=batch_size,
drop_remainder=drop_remainder,
num_workers=num_workers,
)
else:
raise ValueError("num_workers must be >= 0")
def split_features_and_labels(input_batch):
# TODO(Matt, QL): deprecate returning the dict content when there's only one key
features = {key: tensor for key, tensor in input_batch.items() if key in columns}
labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols}
if len(features) == 1:
features = list(features.values())[0]
if len(labels) == 1:
labels = list(labels.values())[0]
if isinstance(labels, dict) and len(labels) == 0:
return features
else:
return features, labels
if cols_to_retain is not None:
tf_dataset = tf_dataset.map(split_features_and_labels)
if prefetch:
tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
# Remove a reference to the open Arrow file on delete
def cleanup_callback(ref):
dataset.__del__()
self._TF_DATASET_REFS.remove(ref)
self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback))
return tf_dataset
class DatasetTransformationNotAllowedError(Exception):
pass
def transmit_format(func):
"""Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset"""
@wraps(func)
def wrapper(*args, **kwargs):
if args:
self: "Dataset" = args[0]
args = args[1:]
else:
self: "Dataset" = kwargs.pop("self")
        # don't use self.format since it returns a list of columns for 'columns' even if self._format_columns is None
unformatted_columns = set(self.column_names) - set(self._format_columns or [])
self_format = {
"type": self._format_type,
"format_kwargs": self._format_kwargs,
"columns": self._format_columns,
"output_all_columns": self._output_all_columns,
}
# apply actual function
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
# re-apply format to the output
for dataset in datasets:
new_format = self_format.copy()
if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns)
# sort the columns to have a deterministic list of columns that we can compare with `out_format`
new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns)
out_format = {
"type": dataset._format_type,
"format_kwargs": dataset._format_kwargs,
"columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None,
"output_all_columns": dataset._output_all_columns,
}
if out_format != new_format:
fingerprint = dataset._fingerprint
dataset.set_format(**new_format)
dataset._fingerprint = fingerprint
return out
wrapper._decorator_name_ = "transmit_format"
return wrapper
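# Minimal sketch of the guarantee provided by `transmit_format` (hypothetical data): methods
# wrapped with it, such as `Dataset.select`, return a dataset that keeps the caller's format
# instead of falling back to the default python formatting.
def _example_transmit_format():
    ds = Dataset.from_dict({"a": [1, 2, 3]}).with_format("numpy")
    return ds.select([0, 1]).format["type"]  # "numpy"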
def transmit_tasks(func):
"""Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset"""
@wraps(func)
def wrapper(*args, **kwargs):
if args:
self: "Dataset" = args[0]
args = args[1:]
else:
self: "Dataset" = kwargs.pop("self")
# apply actual function
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
for dataset in datasets:
# Remove task templates if a column mapping of the template is no longer valid
if self.info.task_templates is not None:
dataset.info.task_templates = [
template
for template in self.info.task_templates
if all(
dataset._info.features.get(k) == self._info.features.get(k)
for k in template.column_mapping.keys()
)
]
return out
wrapper._decorator_name_ = "transmit_tasks"
return wrapper
def update_metadata_with_features(table: Table, features: Features):
"""To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema."""
features = Features({col_name: features[col_name] for col_name in table.column_names})
if table.schema.metadata is None or b"huggingface" not in table.schema.metadata:
pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features))
else:
metadata = json.loads(table.schema.metadata[b"huggingface"].decode())
if "info" not in metadata:
metadata["info"] = asdict(DatasetInfo(features=features))
else:
metadata["info"]["features"] = asdict(DatasetInfo(features=features))["features"]
pa_metadata = {"huggingface": json.dumps(metadata)}
table = table.replace_schema_metadata(pa_metadata)
return table
def _check_table(table) -> Table:
"""We check the table type to make sure it's an instance of :class:`datasets.table.Table`"""
if isinstance(table, pa.Table):
# for a pyarrow table, we can just consider it as a in-memory table
# this is here for backward compatibility
return InMemoryTable(table)
elif isinstance(table, Table):
return table
else:
raise TypeError(f"Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.")
def _check_column_names(column_names: List[str]):
"""Check the column names to make sure they don't contain duplicates."""
counter = Counter(column_names)
if not all(count == 1 for count in counter.values()):
duplicated_columns = [col for col in counter if counter[col] > 1]
raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.")
def _check_valid_indices_value(index, size):
if (index < 0 and index + size < 0) or (index >= size):
raise IndexError(f"Index {index} out of range for dataset of size {size}.")
class NonExistentDatasetError(Exception):
"""Used when we expect the existence of a dataset"""
pass
class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin):
"""A Dataset backed by an Arrow table."""
def __init__(
self,
arrow_table: Table,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
indices_table: Optional[Table] = None,
fingerprint: Optional[str] = None,
):
info = info.copy() if info is not None else DatasetInfo()
DatasetInfoMixin.__init__(self, info=info, split=split)
IndexableMixin.__init__(self)
self._data: Table = _check_table(arrow_table)
self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None
maybe_register_dataset_for_temp_dir_deletion(self)
self._format_type: Optional[str] = None
self._format_kwargs: dict = {}
self._format_columns: Optional[list] = None
self._output_all_columns: bool = False
self._fingerprint: str = fingerprint
# Read metadata
if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata:
metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode())
if (
"fingerprint" in metadata and self._fingerprint is None
): # try to load fingerprint from the arrow file metadata
self._fingerprint = metadata["fingerprint"]
# Infer features if None
inferred_features = Features.from_arrow_schema(arrow_table.schema)
if self.info.features is None:
self.info.features = inferred_features
else: # make sure the nested columns are in the right order
try:
self.info.features = self.info.features.reorder_fields_as(inferred_features)
except ValueError as e:
raise ValueError(
f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file."
)
# Infer fingerprint if None
if self._fingerprint is None:
self._fingerprint = generate_fingerprint(self)
# Sanity checks
if self._info.features is None:
raise ValueError("Features can't be None in a Dataset object")
if self._fingerprint is None:
raise ValueError("Fingerprint can't be None in a Dataset object")
if self.info.features.type != inferred_features.type:
raise ValueError(
f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}"
)
if self._indices is not None:
if not pa.types.is_unsigned_integer(self._indices.column(0).type):
raise ValueError(
f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}"
)
_check_column_names(self._data.column_names)
self._data = update_metadata_with_features(self._data, self._info.features)
@property
def features(self) -> Features:
features = super().features
if features is None: # this is already checked in __init__
raise ValueError("Features can't be None in a Dataset object")
return features
@classmethod
def from_file(
cls,
filename: str,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
indices_filename: Optional[str] = None,
in_memory: bool = False,
) -> "Dataset":
"""Instantiate a Dataset backed by an Arrow table at filename.
Args:
filename (`str`):
File name of the dataset.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
indices_filename (`str`, *optional*):
File names of the indices.
in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
Returns:
[`Dataset`]
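Example (a minimal sketch; the path below is a placeholder for a local Arrow file):
```py
>>> ds = Dataset.from_file("path/to/dataset.arrow")
```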
"""
table = ArrowReader.read_table(filename, in_memory=in_memory)
if indices_filename is not None:
indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory)
else:
indices_pa_table = None
return cls(
arrow_table=table,
info=info,
split=split,
indices_table=indices_pa_table,
)
@classmethod
def from_buffer(
cls,
buffer: pa.Buffer,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
indices_buffer: Optional[pa.Buffer] = None,
) -> "Dataset":
"""Instantiate a Dataset backed by an Arrow buffer.
Args:
buffer (`pyarrow.Buffer`):
Arrow buffer.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
indices_buffer (`pyarrow.Buffer`, *optional*):
Indices Arrow buffer.
Returns:
[`Dataset`]
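Example (a minimal sketch, assuming the buffer holds an Arrow IPC stream):
```py
>>> import pyarrow as pa
>>> table = pa.table({"text": ["good", "bad"], "label": [1, 0]})
>>> sink = pa.BufferOutputStream()
>>> with pa.ipc.new_stream(sink, table.schema) as writer:
...     writer.write_table(table)
>>> ds = Dataset.from_buffer(sink.getvalue())
```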
"""
table = InMemoryTable.from_buffer(buffer)
if indices_buffer is not None:
indices_table = InMemoryTable.from_buffer(indices_buffer)
else:
indices_table = None
return cls(table, info=info, split=split, indices_table=indices_table)
@classmethod
def from_pandas(
cls,
df: pd.DataFrame,
features: Optional[Features] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
preserve_index: Optional[bool] = None,
) -> "Dataset":
"""
Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`].
The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the
DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the
case of `object`, we need to guess the datatype by looking at the Python objects in this Series.
Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow
type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only
contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit
features and passing it to this function.
Args:
df (`pandas.DataFrame`):
Dataframe that contains the dataset.
features ([`Features`], *optional*):
Dataset features.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
preserve_index (`bool`, *optional*):
Whether to store the index as an additional column in the resulting Dataset.
The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only.
Use `preserve_index=True` to force it to be stored as a column.
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_pandas(df)
```
"""
if info is not None and features is not None and info.features != features:
raise ValueError(
f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
)
features = features if features is not None else info.features if info is not None else None
if info is None:
info = DatasetInfo()
info.features = features
table = InMemoryTable.from_pandas(
df=df,
preserve_index=preserve_index,
)
if features is not None:
# more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema)
# needed to support the str to Audio conversion for instance
table = table.cast(features.arrow_schema)
return cls(table, info=info, split=split)
@classmethod
def from_dict(
cls,
mapping: dict,
features: Optional[Features] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
) -> "Dataset":
"""
Convert `dict` to a `pyarrow.Table` to create a [`Dataset`].
Args:
mapping (`Mapping`):
Mapping of strings to Arrays or Python lists.
features ([`Features`], *optional*):
Dataset features.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
Returns:
[`Dataset`]
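Example:
```py
>>> # minimal sketch with illustrative in-memory data
>>> ds = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]})
```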
"""
if info is not None and features is not None and info.features != features:
raise ValueError(
f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
)
features = features if features is not None else info.features if info is not None else None
arrow_typed_mapping = {}
for col, data in mapping.items():
if isinstance(data, (pa.Array, pa.ChunkedArray)):
data = cast_array_to_feature(data, features[col]) if features is not None else data
else:
data = OptimizedTypedSequence(
features.encode_column(data, col) if features is not None else data,
type=features[col] if features is not None else None,
col=col,
)
arrow_typed_mapping[col] = data
mapping = arrow_typed_mapping
pa_table = InMemoryTable.from_pydict(mapping=mapping)
if info is None:
info = DatasetInfo()
info.features = features
if info.features is None:
info.features = Features(
{
col: generate_from_arrow_type(data.type)
if isinstance(data, (pa.Array, pa.ChunkedArray))
else data.get_inferred_type()
for col, data in mapping.items()
}
)
return cls(pa_table, info=info, split=split)
@classmethod
def from_list(
cls,
mapping: List[dict],
features: Optional[Features] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
) -> "Dataset":
"""
Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`].
Note that the keys of the first entry will be used to determine the dataset columns,
regardless of what is passed to features.
Args:
mapping (`List[dict]`): A list of mappings of strings to row values.
features (`Features`, optional): Dataset features.
info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
split (`NamedSplit`, optional): Name of the dataset split.
Returns:
[`Dataset`]
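Example:
```py
>>> # minimal sketch with illustrative in-memory data
>>> ds = Dataset.from_list([{"text": "good", "label": 1}, {"text": "bad", "label": 0}])
```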
"""
# for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here
mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {}
return cls.from_dict(mapping, features, info, split)
@staticmethod
def from_csv(
path_or_paths: Union[PathLike, List[PathLike]],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create Dataset from CSV file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the CSV file(s).
split ([`NamedSplit`], *optional*):
Split name to be assigned to the dataset.
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.8.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`pandas.read_csv`].
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_csv('path/to/dataset.csv')
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetReader
return CsvDatasetReader(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_generator(
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create a Dataset from a generator.
Args:
generator (`Callable`):
A generator function that `yields` examples.
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
gen_kwargs (`dict`, *optional*):
Keyword arguments to be passed to the `generator` callable.
You can define a sharded dataset by passing the list of shards in `gen_kwargs`.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.7.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`GeneratorConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> def gen():
... yield {"text": "Good", "label": 0}
... yield {"text": "Bad", "label": 1}
...
>>> ds = Dataset.from_generator(gen)
```
```py
>>> def gen(shards):
... for shard in shards:
... with open(shard) as f:
... for line in f:
... yield {"line": line}
...
>>> shards = [f"data{i}.txt" for i in range(32)]
>>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards})
```
"""
from .io.generator import GeneratorDatasetInputStream
return GeneratorDatasetInputStream(
generator=generator,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
gen_kwargs=gen_kwargs,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_json(
path_or_paths: Union[PathLike, List[PathLike]],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
field: Optional[str] = None,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create Dataset from JSON or JSON Lines file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the JSON or JSON Lines file(s).
split ([`NamedSplit`], *optional*):
Split name to be assigned to the dataset.
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
field (`str`, *optional*):
Field name of the JSON file where the dataset is contained in.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.8.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`JsonConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_json('path/to/dataset.json')
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetReader
return JsonDatasetReader(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
field=field,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_parquet(
path_or_paths: Union[PathLike, List[PathLike]],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
columns: Optional[List[str]] = None,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create Dataset from Parquet file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the Parquet file(s).
split (`NamedSplit`, *optional*):
Split name to be assigned to the dataset.
features (`Features`, *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
columns (`List[str]`, *optional*):
If not `None`, only these columns will be read from the file.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.8.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`ParquetConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_parquet('path/to/dataset.parquet')
```
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetReader
return ParquetDatasetReader(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
columns=columns,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_text(
path_or_paths: Union[PathLike, List[PathLike]],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create Dataset from text file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the text file(s).
split (`NamedSplit`, *optional*):
Split name to be assigned to the dataset.
features (`Features`, *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.8.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`TextConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_text('path/to/dataset.txt')
```
"""
# Dynamic import to avoid circular dependency
from .io.text import TextDatasetReader
return TextDatasetReader(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_spark(
df: "pyspark.sql.DataFrame",
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
keep_in_memory: bool = False,
cache_dir: str = None,
working_dir: str = None,
load_from_cache_file: bool = True,
**kwargs,
):
"""Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers.
Args:
df (`pyspark.sql.DataFrame`):
The DataFrame containing the desired data.
split (`NamedSplit`, *optional*):
Split name to be assigned to the dataset.
features (`Features`, *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both
workers and the driver.
keep_in_memory (`bool`):
Whether to copy the data in-memory.
working_dir (`str`, *optional*):
Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting
a non-NFS intermediate directory may improve performance.
load_from_cache_file (`bool`):
Whether to load the dataset from the cache if possible.
Returns:
[`Dataset`]
Example:
```py
>>> df = spark.createDataFrame(
>>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
>>> columns=["id", "name"],
>>> )
>>> ds = Dataset.from_spark(df)
```
"""
# Dynamic import to avoid circular dependency
from .io.spark import SparkDatasetReader
if sys.platform == "win32":
raise EnvironmentError("Dataset.from_spark is not currently supported on Windows")
return SparkDatasetReader(
df,
split=split,
features=features,
streaming=False,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
working_dir=working_dir,
load_from_cache_file=load_from_cache_file,
**kwargs,
).read()
@staticmethod
def from_sql(
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
"""Create Dataset from SQL query or database table.
Args:
sql (`str` or `sqlalchemy.sql.Selectable`):
SQL query to be executed or a table name.
con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object.
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`SqlConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> # Fetch a database table
>>> ds = Dataset.from_sql("test_data", "postgres:///db_name")
>>> # Execute a SQL query on the table
>>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name")
>>> # Use a Selectable object to specify the query
>>> from sqlalchemy import select, text
>>> stmt = select([text("sentence")]).select_from(text("test_data"))
>>> ds = Dataset.from_sql(stmt, "postgres:///db_name")
```
<Tip>
The returned dataset can only be cached if `con` is specified as a URI string.
</Tip>
"""
from .io.sql import SqlDatasetReader
return SqlDatasetReader(
sql,
con,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
**kwargs,
).read()
def __del__(self):
if hasattr(self, "_data"):
del self._data
if hasattr(self, "_indices"):
del self._indices
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
self.__del__()
def save_to_disk(
self,
dataset_path: PathLike,
fs="deprecated",
max_shard_size: Optional[Union[str, int]] = None,
num_shards: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
):
"""
Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
For [`Image`] and [`Audio`] data:
All the Image() and Audio() data are stored in the arrow files.
If you want to store paths or urls, please use the Value("string") type.
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
of the dataset directory where the dataset will be saved to.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem where the dataset will be saved to.
<Deprecated version="2.8.0">
`fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
</Deprecated>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be written to disk. If expressed as a string, needs to be digits followed by a unit
(like `"50MB"`).
num_shards (`int`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
<Added version="2.8.0"/>
num_proc (`int`, *optional*):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.8.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Example:
```py
>>> ds.save_to_disk("path/to/dataset/directory")
>>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
>>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024)
```
"""
if max_shard_size is not None and num_shards is not None:
raise ValueError(
"Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
)
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
if num_shards is None:
dataset_nbytes = self._estimate_nbytes()
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, num_proc or 1)
num_proc = num_proc if num_proc is not None else 1
num_shards = num_shards if num_shards is not None else num_proc
fs_token_paths = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
fs: fsspec.AbstractFileSystem = fs_token_paths[0]
is_local = not is_remote_filesystem(fs)
path_join = os.path.join if is_local else posixpath.join
if self.list_indexes():
raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset")
if is_local:
Path(dataset_path).resolve().mkdir(parents=True, exist_ok=True)
parent_cache_files_paths = {
Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files
}
# Check that the dataset doesn't overwrite itself. It can cause a permission error on Windows and a segfault on Linux.
if Path(dataset_path).resolve() in parent_cache_files_paths:
raise PermissionError(
f"Tried to overwrite {Path(dataset_path).resolve()} but a dataset can't overwrite itself."
)
else:
fs.makedirs(dataset_path, exist_ok=True)
# Get json serializable state
state = {
key: self.__dict__[key]
for key in [
"_fingerprint",
"_format_columns",
"_format_kwargs",
"_format_type",
"_output_all_columns",
]
}
state["_split"] = str(self.split) if self.split is not None else self.split
state["_data_files"] = [
{"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards)
]
for k in state["_format_kwargs"].keys():
try:
json.dumps(state["_format_kwargs"][k])
except TypeError as e:
raise TypeError(
str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't."
) from None
# Get json serializable dataset info
dataset_info = asdict(self._info)
shards_done = 0
pbar = logging.tqdm(
disable=not logging.is_progress_bar_enabled(),
unit=" examples",
total=len(self),
desc=f"Saving the dataset ({shards_done}/{num_shards} shards)",
)
kwargs_per_job = (
{
"job_id": shard_idx,
"shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True),
"fpath": path_join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"),
"storage_options": storage_options,
}
for shard_idx in range(num_shards)
)
shard_lengths = [None] * num_shards
shard_sizes = [None] * num_shards
if num_proc > 1:
with Pool(num_proc) as pool:
with pbar:
for job_id, done, content in iflatmap_unordered(
pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job
):
if done:
shards_done += 1
pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
shard_lengths[job_id], shard_sizes[job_id] = content
else:
pbar.update(content)
else:
with pbar:
for kwargs in kwargs_per_job:
for job_id, done, content in Dataset._save_to_disk_single(**kwargs):
if done:
shards_done += 1
pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
shard_lengths[job_id], shard_sizes[job_id] = content
else:
pbar.update(content)
with fs.open(path_join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8") as state_file:
json.dump(state, state_file, indent=2, sort_keys=True)
with fs.open(
path_join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8"
) as dataset_info_file:
# Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True
sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)}
json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2)
@staticmethod
def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]):
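# Yields progress tuples (job_id, done, content): while writing, `done` is False and `content`
# is the number of examples written since the last update; once the shard is finalized,
# `done` is True and `content` is the (num_examples, num_bytes) pair returned by the writer.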
batch_size = config.DEFAULT_MAX_BATCH_SIZE
num_examples_progress_update = 0
writer = ArrowWriter(
features=shard.features,
path=fpath,
storage_options=storage_options,
embed_local_files=True,
)
try:
_time = time.time()
for pa_table in shard.with_format("arrow").iter(batch_size):
writer.write_table(pa_table)
num_examples_progress_update += len(pa_table)
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield job_id, False, num_examples_progress_update
num_examples_progress_update = 0
finally:
yield job_id, False, num_examples_progress_update
num_examples, num_bytes = writer.finalize()
writer.close()
yield job_id, True, (num_examples, num_bytes)
@staticmethod
def _build_local_temp_path(uri_or_path: str) -> Path:
"""
Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative
path extracted from the uri) passed.
Args:
uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g.
`"s3://my-bucket/dataset/train"`) to concatenate.
Returns:
`Path`: the concatenated path (temp dir + path)
"""
src_dataset_path = Path(uri_or_path)
tmp_dir = get_temporary_cache_files_directory()
return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))
@staticmethod
def load_from_disk(
dataset_path: str,
fs="deprecated",
keep_in_memory: Optional[bool] = None,
storage_options: Optional[dict] = None,
) -> "Dataset":
"""
Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a
filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
Args:
dataset_path (`str`):
Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`)
of the dataset directory where the dataset will be loaded from.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem where the dataset will be saved to.
<Deprecated version="2.8.0">
`fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
</Deprecated>
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the
dataset will not be copied in-memory unless explicitly enabled by setting
`datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
[improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Returns:
[`Dataset`] or [`DatasetDict`]:
- If `dataset_path` is a path of a dataset directory, the dataset requested.
- If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split.
Example:
```py
>>> ds = load_from_disk("path/to/dataset/directory")
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs_token_paths = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
fs: fsspec.AbstractFileSystem = fs_token_paths[0]
if is_remote_filesystem(fs):
dest_dataset_path = extract_path_from_uri(dataset_path)
path_join = posixpath.join
else:
fs = fsspec.filesystem("file")
dest_dataset_path = dataset_path
path_join = os.path.join
dataset_dict_json_path = path_join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME)
dataset_state_json_path = path_join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = path_join(dest_dataset_path, config.DATASET_INFO_FILENAME)
dataset_dict_is_file = fs.isfile(dataset_dict_json_path)
dataset_info_is_file = fs.isfile(dataset_info_path)
dataset_state_is_file = fs.isfile(dataset_state_json_path)
if not dataset_info_is_file and not dataset_state_is_file:
if dataset_dict_is_file:
raise FileNotFoundError(
f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`."
)
if not dataset_info_is_file:
if dataset_dict_is_file:
raise FileNotFoundError(
f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`."
)
if not dataset_state_is_file:
if dataset_dict_is_file:
raise FileNotFoundError(
f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`."
)
# If the filesystem is remote, copy the files to the local filesystem and point the dataset path to a temp directory containing the local copies
if is_remote_filesystem(fs):
src_dataset_path = dest_dataset_path
dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path)
fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True)
dataset_state_json_path = path_join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = path_join(dest_dataset_path, config.DATASET_INFO_FILENAME)
with open(dataset_state_json_path, encoding="utf-8") as state_file:
state = json.load(state_file)
with open(dataset_info_path, encoding="utf-8") as dataset_info_file:
dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file))
dataset_size = estimate_dataset_size(
Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]
)
keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size)
table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable
arrow_table = concat_tables(
table_cls.from_file(path_join(dest_dataset_path, data_file["filename"]))
for data_file in state["_data_files"]
)
split = state["_split"]
split = Split(split) if split is not None else split
dataset = Dataset(
arrow_table=arrow_table,
info=dataset_info,
split=split,
fingerprint=state["_fingerprint"],
)
format = {
"type": state["_format_type"],
"format_kwargs": state["_format_kwargs"],
"columns": state["_format_columns"],
"output_all_columns": state["_output_all_columns"],
}
dataset = dataset.with_format(**format)
return dataset
@property
def data(self) -> Table:
"""The Apache Arrow table backing the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.data
MemoryMappedTable
text: string
label: int64
----
text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]]
label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]]
```
"""
return self._data
@property
def cache_files(self) -> List[dict]:
"""The cache files containing the Apache Arrow table backing the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.cache_files
[{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]
```
"""
cache_files = list_table_cache_files(self._data)
if self._indices is not None:
cache_files += list_table_cache_files(self._indices)
return [{"filename": cache_filename} for cache_filename in cache_files]
@property
def num_columns(self) -> int:
"""Number of columns in the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.num_columns
2
```
"""
return self._data.num_columns
@property
def num_rows(self) -> int:
"""Number of rows in the dataset (same as [`Dataset.__len__`]).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.num_rows
1066
```
"""
if self._indices is not None:
return self._indices.num_rows
return self._data.num_rows
@property
def column_names(self) -> List[str]:
"""Names of the columns in the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.column_names
['text', 'label']
```
"""
return self._data.column_names
@property
def shape(self) -> Tuple[int, int]:
"""Shape of the dataset (number of columns, number of rows).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.shape
(1066, 2)
```
"""
if self._indices is not None:
return (self._indices.num_rows, self._data.num_columns)
return self._data.shape
def unique(self, column: str) -> List:
"""Return a list of the unique elements in a column.
This is implemented in the low-level backend and is therefore very fast.
Args:
column (`str`):
Column name (list all the column names with [`~datasets.Dataset.column_names`]).
Returns:
`list`: List of unique elements in the given column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.unique('label')
[1, 0]
```
"""
if column not in self._data.column_names:
raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
if self._indices is not None and self._indices.num_rows != self._data.num_rows:
dataset = self.flatten_indices()
else:
dataset = self
return dataset._data.column(column).unique().to_pylist()
def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset":
"""Casts the given column as [`~datasets.features.ClassLabel`] and updates the table.
Args:
column (`str`):
The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`])
include_nulls (`bool`, defaults to `False`):
Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
<Added version="1.14.2"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("boolq", split="validation")
>>> ds.features
{'answer': Value(dtype='bool', id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
>>> ds = ds.class_encode_column('answer')
>>> ds.features
{'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
```
"""
# Sanity checks
if column not in self._data.column_names:
raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
src_feat = self._info.features[column]
if not isinstance(src_feat, Value):
raise ValueError(
f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}."
)
if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)):
def stringify_column(batch):
batch[column] = [
str(sample) if include_nulls or sample is not None else None for sample in batch[column]
]
return batch
dset = self.map(
stringify_column,
batched=True,
desc="Stringifying the column",
)
else:
dset = self
# Create the new feature
class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)
dst_feat = ClassLabel(names=class_names)
def cast_to_class_labels(batch):
batch[column] = [
dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None
for sample in batch[column]
]
return batch
new_features = dset.features.copy()
new_features[column] = dst_feat
dset = dset.map(
cast_to_class_labels,
batched=True,
features=new_features,
desc="Casting to class labels",
)
return dset
@fingerprint_transform(inplace=False)
def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset":
"""Flatten the table.
Each column with a struct type is flattened into one column per struct field.
Other columns are left unchanged.
Args:
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset with flattened columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad", split="train")
>>> ds.features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
>>> ds.flatten()
Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
```
"""
dataset = copy.deepcopy(self)
for depth in range(1, max_depth):
if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema):
dataset._data = dataset._data.flatten()
else:
break
dataset.info.features = self._info.features.flatten(max_depth=max_depth)
dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names})
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.')
dataset._fingerprint = new_fingerprint
return dataset
def cast(
self,
features: Features,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
num_proc: Optional[int] = None,
) -> "Dataset":
"""
Cast the dataset to a new set of features.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset.
batch_size (`int`, defaults to `1000`):
Number of examples per batch provided to cast.
If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
load_from_cache_file (`bool`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running [`~datasets.Dataset.map`].
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
Returns:
[`Dataset`]: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset, ClassLabel, Value
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds.features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds.features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
if sorted(features) != sorted(self._data.column_names):
raise ValueError(
f"The columns in features ({list(features)}) must be identical "
f"as the columns in the dataset: {self._data.column_names}"
)
schema = features.arrow_schema
format = self.format
dataset = self.with_format("arrow")
# capture the PyArrow version here to make the lambda serializable on Windows
dataset = dataset.map(
partial(table_cast, schema=schema),
batched=True,
batch_size=batch_size,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
num_proc=num_proc,
features=features,
desc="Casting the dataset",
)
dataset = dataset.with_format(**format)
return dataset
@fingerprint_transform(inplace=False)
def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature (`FeatureType`):
Target feature.
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds.features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
if hasattr(feature, "decode_example"):
dataset = copy.deepcopy(self)
dataset._info.features[column] = feature
dataset._fingerprint = new_fingerprint
dataset._data = dataset._data.cast(dataset.features.arrow_schema)
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
return dataset
else:
features = self.features
features[column] = feature
return self.cast(features)
@transmit_tasks
@transmit_format
@fingerprint_transform(inplace=False)
def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
"""
Remove one or several column(s) in the dataset and the features associated to them.
You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method
is in-place (doesn't copy the data to a new dataset) and is thus faster.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.remove_columns('label')
Dataset({
features: ['text'],
num_rows: 1066
})
>>> ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0
Dataset({
features: [],
num_rows: 0
})
```
"""
dataset = copy.deepcopy(self)
if isinstance(column_names, str):
column_names = [column_names]
for column_name in column_names:
if column_name not in dataset._data.column_names:
raise ValueError(
f"Column name {column_name} not in the dataset. "
f"Current columns in the dataset: {dataset._data.column_names}"
)
for column_name in column_names:
del dataset._info.features[column_name]
dataset._data = dataset._data.drop(column_names)
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
dataset._fingerprint = new_fingerprint
return dataset
@transmit_tasks
@fingerprint_transform(inplace=False)
def rename_column(
self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None
) -> "Dataset":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.rename_column('label', 'label_new')
Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
```
"""
dataset = copy.deepcopy(self)
if original_column_name not in dataset._data.column_names:
raise ValueError(
f"Original column name {original_column_name} not in the dataset. "
f"Current columns in the dataset: {dataset._data.column_names}"
)
if new_column_name in dataset._data.column_names:
raise ValueError(
f"New column name {new_column_name} already in the dataset. "
f"Please choose a column name which is not already in the dataset. "
f"Current columns in the dataset: {dataset._data.column_names}"
)
if not new_column_name:
raise ValueError("New column name is empty.")
def rename(columns):
return [new_column_name if col == original_column_name else col for col in columns]
new_column_names = rename(self._data.column_names)
if self._format_columns is not None:
dataset._format_columns = rename(self._format_columns)
dataset._info.features = Features(
{
new_column_name if col == original_column_name else col: feature
for col, feature in self._info.features.items()
}
)
dataset._data = dataset._data.rename_columns(new_column_names)
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
dataset._fingerprint = new_fingerprint
return dataset
@transmit_tasks
@fingerprint_transform(inplace=False)
def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset with renamed columns
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
```
"""
dataset = copy.deepcopy(self)
extra_columns = set(column_mapping.keys()) - set(dataset.column_names)
if extra_columns:
raise ValueError(
f"Original column names {extra_columns} not in the dataset. "
f"Current columns in the dataset: {dataset._data.column_names}"
)
number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values()))
if number_of_duplicates_in_new_columns != 0:
raise ValueError(
"New column names must all be different, but this column mapping "
f"has {number_of_duplicates_in_new_columns} duplicates"
)
empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col]
if empty_new_columns:
raise ValueError(f"New column names {empty_new_columns} are empty.")
def rename(columns):
return [column_mapping[col] if col in column_mapping else col for col in columns]
new_column_names = rename(self._data.column_names)
if self._format_columns is not None:
dataset._format_columns = rename(self._format_columns)
dataset._info.features = Features(
{
column_mapping[col] if col in column_mapping else col: feature
for col, feature in (self._info.features or {}).items()
}
)
dataset._data = dataset._data.rename_columns(new_column_names)
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
dataset._fingerprint = new_fingerprint
return dataset
@transmit_tasks
@transmit_format
@fingerprint_transform(inplace=False)
def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
"""Select one or several column(s) in the dataset and the features
associated to them.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform. If `None`,
the new fingerprint is computed using a hash of the previous
fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset object which only consists of
selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.select_columns(['text'])
Dataset({
features: ['text'],
num_rows: 1066
})
```
"""
if isinstance(column_names, str):
column_names = [column_names]
for column_name in column_names:
if column_name not in self._data.column_names:
raise ValueError(
f"Column name {column_name} not in the "
"dataset. Current columns in the dataset: "
f"{self._data.column_names}."
)
dataset = copy.deepcopy(self)
dataset._data = dataset._data.select(column_names)
dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names})
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
dataset._fingerprint = new_fingerprint
return dataset
def __len__(self):
"""Number of rows in the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.__len__
<bound method Dataset.__len__ of Dataset({
features: ['text', 'label'],
num_rows: 1066
})>
```
"""
return self.num_rows
def __iter__(self):
"""Iterate through the examples.
If a formatting is set with [`~datasets.Dataset.set_format`], rows will be returned with the
selected format.
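Example (a minimal sketch, assuming the same "rotten_tomatoes" validation split used in the other examples):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> next(iter(ds)).keys()
dict_keys(['text', 'label'])
```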
"""
if self._indices is None:
# Fast iteration
# Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER
for pa_subtable in table_iter(self.data, batch_size=batch_size):
for i in range(pa_subtable.num_rows):
pa_subtable_ex = pa_subtable.slice(i, 1)
formatted_output = format_table(
pa_subtable_ex,
0,
formatter=formatter,
format_columns=self._format_columns,
output_all_columns=self._output_all_columns,
)
yield formatted_output
else:
for i in range(self.num_rows):
yield self._getitem(
i,
)
def iter(self, batch_size: int, drop_last_batch: bool = False):
"""Iterate through the batches of size `batch_size`.
If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the
selected format.
Args:
batch_size (`int`): Size of each batch to yield.
drop_last_batch (`bool`, defaults to `False`): Whether the last batch, if smaller than `batch_size`, should be
dropped.
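Example (a minimal sketch, assuming the same "rotten_tomatoes" validation split used in the other examples):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> for batch in ds.iter(batch_size=32):
...     print(len(batch["text"]))
...     break
32
```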
"""
if self._indices is None:
# Fast iteration
# Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch):
formatted_batch = format_table(
pa_subtable,
range(pa_subtable.num_rows),
formatter=formatter,
format_columns=self._format_columns,
output_all_columns=self._output_all_columns,
)
yield formatted_batch
else:
num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size
for i in range(0, num_rows, batch_size):
yield self._getitem(
slice(i, i + batch_size),
)
def __repr__(self):
return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})"
@property
def format(self):
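"""Return the format currently set with [`~datasets.Dataset.set_format`], as a `dict` with the keys `type`, `format_kwargs`, `columns` and `output_all_columns`."""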
return {
"type": self._format_type,
"format_kwargs": self._format_kwargs,
"columns": self.column_names if self._format_columns is None else self._format_columns,
"output_all_columns": self._output_all_columns,
}
@contextlib.contextmanager
def formatted_as(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""To be used in a `with` statement. Set `__getitem__` return format (type and columns).
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
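Example (a minimal sketch, assuming the same "rotten_tomatoes" validation split used in the other examples):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> with ds.formatted_as(type="numpy", columns=["label"]):
...     print(type(ds[:3]["label"]))
<class 'numpy.ndarray'>
>>> print(type(ds[:3]["label"]))  # the previous format is restored on exit
<class 'list'>
```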
"""
old_format_type = self._format_type
old_format_kwargs = self._format_kwargs
old_format_columns = self._format_columns
old_output_all_columns = self._output_all_columns
try:
self.set_format(type, columns, output_all_columns, **format_kwargs)
yield
finally:
self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)
@fingerprint_transform(inplace=True)
def set_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`].
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, the list of formatted columns
gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted according to:
```
new formatted columns = (all columns - previously unformatted columns)
```
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds.set_format(type='numpy', columns=['text', 'label'])
>>> ds.format
{'type': 'numpy',
'format_kwargs': {},
'columns': ['text', 'label'],
'output_all_columns': False}
```
"""
format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format)
# Check that the format_type and format_kwargs are valid and make it possible to have a Formatter
type = get_format_type_from_alias(type)
get_formatter(type, features=self._info.features, **format_kwargs)
# Check that the columns to format exist in the dataset
if isinstance(columns, str):
columns = [columns]
if isinstance(columns, tuple):
columns = list(columns)
if columns is not None and any(col not in self._data.column_names for col in columns):
raise ValueError(
f"Columns {list(filter(lambda col: col not in self._data.column_names, columns))} not in the dataset. Current columns in the dataset: {self._data.column_names}"
)
if columns is not None:
columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs
self._format_type = type
self._format_kwargs = format_kwargs
self._format_columns = columns
self._output_all_columns = output_all_columns
logger.debug(
"Set __getitem__(key) output type to %s for %s columns "
" (when key is int or slice) and %s output other (un-formatted) columns.",
"python objects" if type is None else type,
"no" if columns is None else str(columns),
"do" if output_all_columns else "don't",
)
def reset_format(self):
"""Reset `__getitem__` return format to python objects and all columns.
Same as `self.set_format()`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds.format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
>>> ds.reset_format()
>>> ds.format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
```
"""
self.set_format()
def set_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
):
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Args:
transform (`Callable`, *optional*):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`List[str]`, *optional*):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
If set to `True`, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
>>> def encode(batch):
... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt')
>>> ds.set_transform(encode)
>>> ds[0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1]),
'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895,
20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211,
5637, 1998, 11690, 2336, 1012, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0])}
```
"""
self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
def with_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object.
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds.format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
>>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds.format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'tensorflow'}
```
"""
dataset = copy.deepcopy(self)
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
return dataset
def with_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
):
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object.
Args:
transform (`Callable`, *optional*):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`List[str]`, *optional*):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
If set to `True`, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> def encode(example):
... return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt')
>>> ds = ds.with_transform(encode)
>>> ds[0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1]),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
return dataset
@deprecated()
def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset":
"""
Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates).
Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting.
Args:
task (`Union[str, TaskTemplate]`):
The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include:
- `"text-classification"`
- `"question-answering"`
If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates).
id (`int`, defaults to `0`):
The id required to unambiguously identify the task template when multiple task templates of the same type are supported.
"""
# TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD
if isinstance(task, str):
tasks = [template.task for template in (self.info.task_templates or [])]
compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task]
if not compatible_templates:
raise ValueError(
f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}"
)
if not 0 <= id < len(compatible_templates):
templates_list_str = "\n".join(
f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates)
)
raise ValueError(
f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}"
)
template = compatible_templates[id]
elif isinstance(task, TaskTemplate):
template = task
else:
raise ValueError(
f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}."
)
template = template.align_with_features(self.info.features)
column_mapping = template.column_mapping
columns_to_drop = [column for column in self.column_names if column not in column_mapping]
dataset = self.remove_columns(columns_to_drop)
dataset = dataset.rename_columns(column_mapping)
# We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__`
dataset.info.task_templates = None
dataset = dataset.cast(features=template.features)
return dataset
def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]:
"""
Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices)
"""
if isinstance(key, bool):
raise TypeError("dataset index must be int, str, slice or collection of int, not bool")
format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type
format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns
output_all_columns = (
kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns
)
format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs
format_kwargs = format_kwargs if format_kwargs is not None else {}
formatter = get_formatter(format_type, features=self._info.features, **format_kwargs)
pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None)
formatted_output = format_table(
pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
)
return formatted_output
@overload
def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811
...
@overload
def __getitem__(self, key: str) -> List: # noqa: F811
...
def __getitem__(self, key): # noqa: F811
"""Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
return self._getitem(key)
def __getitems__(self, keys: List) -> List:
"""Can be used to get a batch using a list of integers indices."""
batch = self.__getitem__(keys)
n_examples = len(batch[next(iter(batch))])
return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)]
def cleanup_cache_files(self) -> int:
"""Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is
one.
Be careful when running this command that no other process is currently using other cache files.
Returns:
`int`: Number of removed files.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.cleanup_cache_files()
10
```
"""
current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files]
if not current_cache_files:
return 0
cache_directory = os.path.dirname(current_cache_files[0])
logger.info(f"Listing files in {cache_directory}")
files: List[str] = os.listdir(cache_directory)
files_to_remove = []
for f_name in files:
full_name = os.path.abspath(os.path.join(cache_directory, f_name))
if f_name.startswith("cache-") and f_name.endswith(".arrow"):
if full_name in current_cache_files:
logger.info(f"Keeping currently used cache file at {full_name}")
continue
files_to_remove.append(full_name)
for file_path in files_to_remove:
logger.info(f"Removing {file_path}")
os.remove(file_path)
return len(files_to_remove)
def _get_cache_file_path(self, fingerprint):
if is_caching_enabled() and self.cache_files:
cache_file_name = "cache-" + fingerprint + ".arrow"
cache_directory = os.path.dirname(self.cache_files[0]["filename"])
else:
cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow"
cache_directory = get_temporary_cache_files_directory()
cache_file_path = os.path.join(cache_directory, cache_file_name)
return cache_file_path
@transmit_tasks
@transmit_format
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
new_fingerprint: Optional[str] = None,
desc: Optional[str] = None,
) -> "Dataset":
"""
Apply a function to all the examples in the table (individually or in batches) and update the table.
If your function returns a column that already exists, then it overwrites it.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have less than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
Args:
function (`Callable`): Function with one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the
signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific Features to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`.
num_proc (`int`, *optional*, defaults to `None`):
Max number of processes when generating cache. Already cached shards are loaded sequentially.
suffix_template (`str`):
If `cache_file_name` is specified, then this suffix
will be added at the end of the base name of each shard's cache file. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is `"processed.arrow"`, then for
`rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside the progress bar while mapping examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> ds[0:3]["text"]
['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'Review: the soundtrack alone is worth the price of admission .',
'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .']
# process a batch of examples
>>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
# set number of processors
>>> ds = ds.map(add_prefix, num_proc=4)
```
"""
if keep_in_memory and cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.")
if num_proc is not None and num_proc <= 0:
raise ValueError("num_proc must be an integer > 0.")
# If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway)
if len(self) == 0:
if self._indices is not None: # empty indices mapping
self = Dataset(
self.data.slice(0, 0),
info=self.info.copy(),
split=self.split,
fingerprint=new_fingerprint,
)
if remove_columns:
return self.remove_columns(remove_columns)
else:
return self
if function is None:
function = lambda x: x # noqa: E731
if isinstance(input_columns, str):
input_columns = [input_columns]
if input_columns is not None:
for input_column in input_columns:
if input_column not in self._data.column_names:
raise ValueError(
f"Input column {input_column} not in the dataset. Current columns in the dataset: {self._data.column_names}"
)
if isinstance(remove_columns, str):
remove_columns = [remove_columns]
if remove_columns is not None and any(col not in self._data.column_names for col in remove_columns):
raise ValueError(
f"Column to remove {list(filter(lambda col: col not in self._data.column_names, remove_columns))} not in the dataset. Current columns in the dataset: {self._data.column_names}"
)
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
if fn_kwargs is None:
fn_kwargs = {}
if num_proc is not None and num_proc > len(self):
num_proc = len(self)
logger.warning(
f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}."
)
dataset_kwargs = {
"shard": self,
"function": function,
"with_indices": with_indices,
"with_rank": with_rank,
"input_columns": input_columns,
"batched": batched,
"batch_size": batch_size,
"drop_last_batch": drop_last_batch,
"remove_columns": remove_columns,
"keep_in_memory": keep_in_memory,
"writer_batch_size": writer_batch_size,
"features": features,
"disable_nullable": disable_nullable,
"fn_kwargs": fn_kwargs,
}
if new_fingerprint is None:
# we create a unique hash from the function,
# current dataset file and the mapping args
transform = format_transform_for_fingerprint(Dataset._map_single)
kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs)
kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint"
new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)
else:
validate_fingerprint(new_fingerprint)
dataset_kwargs["new_fingerprint"] = new_fingerprint
if self.cache_files:
if cache_file_name is None:
cache_file_name = self._get_cache_file_path(new_fingerprint)
dataset_kwargs["cache_file_name"] = cache_file_name
def load_processed_shard_from_cache(shard_kwargs):
"""Load a processed shard from cache if it exists, otherwise throw an error."""
shard = shard_kwargs["shard"]
# Check if we've already cached this computation (indexed by a hash)
if shard_kwargs["cache_file_name"] is not None:
if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file:
info = shard.info.copy()
info.features = features
info.task_templates = None
return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split)
raise NonExistentDatasetError
num_shards = num_proc if num_proc is not None else 1
if batched and drop_last_batch:
pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size
else:
pbar_total = len(self)
shards_done = 0
if num_proc is None or num_proc == 1:
transformed_dataset = None
try:
transformed_dataset = load_processed_shard_from_cache(dataset_kwargs)
logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}")
except NonExistentDatasetError:
pass
if transformed_dataset is None:
with logging.tqdm(
disable=not logging.is_progress_bar_enabled(),
unit=" examples",
total=pbar_total,
desc=desc or "Map",
) as pbar:
for rank, done, content in Dataset._map_single(**dataset_kwargs):
if done:
shards_done += 1
logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
transformed_dataset = content
else:
pbar.update(content)
assert transformed_dataset is not None, "Failed to retrieve the result from map"
# update fingerprint if the dataset changed
if transformed_dataset._fingerprint != self._fingerprint:
transformed_dataset._fingerprint = new_fingerprint
return transformed_dataset
else:
def format_cache_file_name(
cache_file_name: Optional[str], rank: Union[int, Literal["*"]] # noqa: F722
) -> Optional[str]:
if not cache_file_name:
return cache_file_name
sep = cache_file_name.rindex(".")
base_name, extension = cache_file_name[:sep], cache_file_name[sep:]
if isinstance(rank, int):
cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension
logger.info(f"Process #{rank} will write at {cache_file_name}")
else:
cache_file_name = (
base_name
+ suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc)
+ extension
)
return cache_file_name
def format_new_fingerprint(new_fingerprint: str, rank: int) -> str:
new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc)
validate_fingerprint(new_fingerprint)
return new_fingerprint
prev_env = deepcopy(os.environ)
# check if parallelism is off
# from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22
if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in (
"",
"off",
"false",
"f",
"no",
"n",
"0",
):
logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
shards = [
self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory)
for rank in range(num_proc)
]
kwargs_per_job = [
{
**dataset_kwargs,
"shard": shards[rank],
"cache_file_name": format_cache_file_name(cache_file_name, rank),
"rank": rank,
"offset": sum(len(s) for s in shards[:rank]),
"new_fingerprint": format_new_fingerprint(new_fingerprint, rank),
}
for rank in range(num_shards)
]
transformed_shards = [None] * num_shards
for rank in range(num_shards):
try:
transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank])
kwargs_per_job[rank] = None
except NonExistentDatasetError:
pass
kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None]
# We try to create a pool with as many workers as there are shards not yet cached.
if kwargs_per_job:
if len(kwargs_per_job) < num_shards:
logger.info(
f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache."
)
with Pool(len(kwargs_per_job)) as pool:
os.environ = prev_env
logger.info(f"Spawning {num_proc} processes")
with logging.tqdm(
disable=not logging.is_progress_bar_enabled(),
unit=" examples",
total=pbar_total,
desc=(desc or "Map") + f" (num_proc={num_proc})",
) as pbar:
for rank, done, content in iflatmap_unordered(
pool, Dataset._map_single, kwargs_iterable=kwargs_per_job
):
if done:
shards_done += 1
logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
transformed_shards[rank] = content
else:
pbar.update(content)
# Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805)
for kwargs in kwargs_per_job:
del kwargs["shard"]
else:
logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}")
assert (
None not in transformed_shards
), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results"
logger.info(f"Concatenating {num_proc} shards")
result = _concatenate_map_style_datasets(transformed_shards)
# update fingerprint if the dataset changed
if any(
transformed_shard._fingerprint != shard._fingerprint
for transformed_shard, shard in zip(transformed_shards, shards)
):
result._fingerprint = new_fingerprint
else:
result._fingerprint = self._fingerprint
return result
@staticmethod
def _map_single(
shard: "Dataset",
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[List[str]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[List[str]] = None,
keep_in_memory: bool = False,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
new_fingerprint: Optional[str] = None,
rank: Optional[int] = None,
offset: int = 0,
) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]:
"""Apply a function to all the elements in the table (individually or in batches)
and update the table (if function does update examples).
Args:
shard (`datasets.Dataset`): Dataset to map the transform on.
function (`Callable`): with one of the following signature:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: lambda x: x
with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`): Provide batch of examples to `function`
batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`
`batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`
drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.
cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`): Disallow null values in the table.
fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function`
new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
rank (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing.
offset (`int`, defaults to `0`): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`.
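A minimal sketch of how `map` consumes this generator in the single-process code path (internal API; `ds` is assumed to be an existing `Dataset` and the identity function is only illustrative):
```py
>>> # each yielded tuple is (rank, done, content): `content` is a progress-update count while
>>> # `done` is False, and the transformed shard once `done` is True
>>> for rank, done, content in Dataset._map_single(shard=ds, function=lambda x: x):
...     if done:
...         transformed_shard = content
```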
"""
if fn_kwargs is None:
fn_kwargs = {}
# If we do batch computation but no batch size is provided, default to the full dataset
if batched and (batch_size is None or batch_size <= 0):
batch_size = shard.num_rows
# We set this variable to True after processing the first example/batch in
# `apply_function_on_filtered_inputs` if the map function returns a dict.
# If set to False, no new arrow table will be created
update_data = None
format_kwargs = shard._format_kwargs.copy()
# Lazy formatting is only available for the default format (None/python)
if not input_columns and shard._format_type is None:
format_kwargs["lazy"] = True
input_formatter = get_formatter(
shard._format_type,
features=shard.features,
**format_kwargs,
)
class NumExamplesMismatchError(Exception):
pass
def validate_function_output(processed_inputs, indices):
"""Validate output of the map function."""
if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)):
raise TypeError(
f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects."
)
elif isinstance(indices, list) and isinstance(processed_inputs, Mapping):
allowed_batch_return_types = (list, np.ndarray, pd.Series)
if config.TF_AVAILABLE and "tensorflow" in sys.modules:
import tensorflow as tf
allowed_batch_return_types += (tf.Tensor,)
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
allowed_batch_return_types += (torch.Tensor,)
if config.JAX_AVAILABLE and "jax" in sys.modules:
import jax.numpy as jnp
allowed_batch_return_types += (jnp.ndarray,)
all_dict_values_are_lists = all(
isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()
)
if all_dict_values_are_lists is False:
raise TypeError(
f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`."
)
def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0):
"""Utility to apply the function on a selection of columns."""
nonlocal update_data
inputs = format_table(
pa_inputs,
0 if not batched else range(pa_inputs.num_rows),
format_columns=input_columns,
formatter=input_formatter,
)
fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]
if offset == 0:
effective_indices = indices
else:
effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset
additional_args = ()
if with_indices:
additional_args += (effective_indices,)
if with_rank:
additional_args += (rank,)
processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
if isinstance(processed_inputs, LazyDict):
processed_inputs = {
k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format
}
returned_lazy_dict = True
else:
returned_lazy_dict = False
if update_data is None:
# Check if the function returns updated examples
update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame))
validate_function_output(processed_inputs, indices)
if not update_data:
return None # Nothing to update, let's move on
if shard._format_type or input_columns:
# TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release)
inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns()))
elif isinstance(inputs, LazyDict):
inputs_to_merge = {
k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items()
}
else:
inputs_to_merge = inputs
if remove_columns is not None:
for column in remove_columns:
# `function` can modify the input in-place, causing the column to already have been removed.
if column in inputs_to_merge:
inputs_to_merge.pop(column)
if returned_lazy_dict and column in processed_inputs:
processed_inputs.pop(column)
if check_same_num_examples:
input_num_examples = len(pa_inputs)
processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])
if input_num_examples != processed_inputs_num_examples:
raise NumExamplesMismatchError()
if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping):
# The .map() transform *updates* the dataset:
# the output dictionary contains both the input data and the output data.
# The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently.
return {**inputs_to_merge, **processed_inputs}
else:
return processed_inputs
def init_buffer_and_writer():
# Prepare output buffer and batched writer in memory or on file if we update the table
writer_features = features
if writer_features is None:
writer_features = shard.features
update_features = True
else:
update_features = False
if keep_in_memory or cache_file_name is None:
buf_writer = pa.BufferOutputStream()
tmp_file = None
writer = ArrowWriter(
features=writer_features,
stream=buf_writer,
writer_batch_size=writer_batch_size,
update_features=update_features,
fingerprint=new_fingerprint,
disable_nullable=disable_nullable,
)
else:
buf_writer = None
logger.info(f"Caching processed dataset at {cache_file_name}")
tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False)
writer = ArrowWriter(
features=writer_features,
path=tmp_file.name,
writer_batch_size=writer_batch_size,
update_features=update_features,
fingerprint=new_fingerprint,
disable_nullable=disable_nullable,
)
return buf_writer, writer, tmp_file
num_examples_progress_update = 0
# If `update_data` is True after processing the first example/batch, initialize these resources with `init_buffer_and_writer`
buf_writer, writer, tmp_file = None, None, None
# Optionally initialize the writer as a context manager
with contextlib.ExitStack() as stack:
try:
arrow_formatted_shard = shard.with_format("arrow")
# Loop over single examples or batches and write to buffer/file if examples are to be updated
if not batched:
shard_iterable = enumerate(arrow_formatted_shard)
else:
num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size
shard_iterable = zip(
range(0, num_rows, batch_size),
arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch),
)
if not batched:
_time = time.time()
for i, example in shard_iterable:
example = apply_function_on_filtered_inputs(example, i, offset=offset)
if update_data:
if i == 0:
buf_writer, writer, tmp_file = init_buffer_and_writer()
stack.enter_context(writer)
if isinstance(example, pa.Table):
writer.write_row(example)
elif isinstance(example, pd.DataFrame):
writer.write_row(pa.Table.from_pandas(example))
else:
writer.write(example)
num_examples_progress_update += 1
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield rank, False, num_examples_progress_update
num_examples_progress_update = 0
else:
_time = time.time()
for i, batch in shard_iterable:
num_examples_in_batch = len(batch)
indices = list(
range(*(slice(i, i + batch_size).indices(shard.num_rows)))
) # Something simpler?
try:
batch = apply_function_on_filtered_inputs(
batch,
indices,
check_same_num_examples=len(shard.list_indexes()) > 0,
offset=offset,
)
except NumExamplesMismatchError:
raise DatasetTransformationNotAllowedError(
"Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it."
) from None
if update_data:
if i == 0:
buf_writer, writer, tmp_file = init_buffer_and_writer()
stack.enter_context(writer)
if isinstance(batch, pa.Table):
writer.write_table(batch)
elif isinstance(batch, pd.DataFrame):
writer.write_table(pa.Table.from_pandas(batch))
else:
writer.write_batch(batch)
num_examples_progress_update += num_examples_in_batch
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield rank, False, num_examples_progress_update
num_examples_progress_update = 0
if update_data and writer is not None:
writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
except (Exception, KeyboardInterrupt):
yield rank, False, num_examples_progress_update
if update_data:
if writer is not None:
writer.finalize()
if tmp_file is not None:
tmp_file.close()
if os.path.exists(tmp_file.name):
os.remove(tmp_file.name)
raise
yield rank, False, num_examples_progress_update
if update_data and tmp_file is not None:
tmp_file.close()
shutil.move(tmp_file.name, cache_file_name)
umask = os.umask(0o666)
os.umask(umask)
os.chmod(cache_file_name, 0o666 & ~umask)
if update_data:
# Create new Dataset from buffer or file
info = shard.info.copy()
info.features = writer._features
info.task_templates = None
if buf_writer is None:
yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)
else:
yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)
else:
yield rank, True, shard
@transmit_format
@fingerprint_transform(
inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1"
)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
new_fingerprint: Optional[str] = None,
desc: Optional[str] = None,
) -> "Dataset":
"""Apply a filter function to all the elements in the table in batches
and update the table so that the dataset only includes examples according to the filter function.
Args:
function (`Callable`): Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if
`batched = True`. If `batched = False`, one example per batch is passed to `function`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
fn_kwargs (`dict`, *optional*):
Keyword arguments to be passed to `function`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
suffix_template (`str`):
If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each.
For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`,
the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default
`_{rank:05d}_of_{num_proc:05d}`).
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside the progress bar while filtering examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.filter(lambda x: x["label"] == 1)
Dataset({
features: ['text', 'label'],
num_rows: 533
})
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.`"
)
if function is None:
function = lambda x: True # noqa: E731
if len(self) == 0:
return self
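# Build an indices mapping by running `function` as a batched `map` call that returns, for each batch,
# the indices of the examples to keep as a single "indices" column (the original columns are removed).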
indices = self.map(
function=partial(
get_indices_from_mask_function, function, batched, with_indices, input_columns, self._indices
),
with_indices=True,
features=Features({"indices": Value("uint64")}),
batched=True,
batch_size=batch_size,
remove_columns=self.column_names,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
suffix_template=suffix_template,
new_fingerprint=new_fingerprint,
input_columns=input_columns,
desc=desc or "Filter",
)
new_dataset = copy.deepcopy(self)
new_dataset._indices = indices.data
new_dataset._fingerprint = new_fingerprint
return new_dataset
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
def flatten_indices(
self,
keep_in_memory: bool = False,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create and cache a new Dataset by flattening the indices mapping.
Args:
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
cache_file_name (`str`, *optional*, default `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
num_proc (`int`, *optional*, defaults to `None`):
Max number of processes when generating the cache. Already cached shards are loaded sequentially.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
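Example (a minimal sketch, assuming the same "rotten_tomatoes" validation split used in the other examples):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds = ds.select([0, 2, 4])  # a non-contiguous selection creates an indices mapping
>>> ds = ds.flatten_indices()  # writes the selected rows to a new Arrow table and drops the mapping
>>> ds.num_rows
3
```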
"""
return self.map(
batched=True, # for speed
keep_in_memory=keep_in_memory,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
new_fingerprint=new_fingerprint,
desc="Flattening the indices",
num_proc=num_proc,
)
def _new_dataset_with_indices(
self,
indices_cache_file_name: Optional[str] = None,
indices_buffer: Optional[pa.Buffer] = None,
fingerprint: Optional[str] = None,
) -> "Dataset":
"""Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
current Dataset.
"""
if indices_cache_file_name is None and indices_buffer is None:
raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.")
if fingerprint is None:
raise ValueError("please specify a fingerprint for the dataset with indices")
if indices_cache_file_name is not None:
indices_table = MemoryMappedTable.from_file(indices_cache_file_name)
else:
indices_table = InMemoryTable.from_buffer(indices_buffer)
# Return new Dataset object
# don't forget to copy the objects
return Dataset(
self._data,
info=self.info.copy(),
split=self.split,
indices_table=indices_table,
fingerprint=fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
def select(
self,
indices: Iterable,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows selected following the list/array of indices.
Args:
indices (`range`, `list`, `iterable`, `ndarray` or `Series`):
Range, list or 1D-array of integer indices for indexing.
If the indices correspond to a contiguous range, the Arrow table is simply sliced.
However, passing a list of indices that are not contiguous creates an indices mapping, which is much less efficient,
but still faster than recreating an Arrow table made of the requested rows.
keep_in_memory (`bool`, defaults to `False`):
Keep the indices mapping in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.select(range(4))
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# If indices is a PyArrow array, we convert to NumPy
if isinstance(indices, (pa.Array, pa.ChunkedArray)):
indices = indices.to_numpy().astype(np.int64)
# Convert generator objects to lists
if isinstance(indices, Iterator):
indices = list(indices)
# If the indices are contiguous, simply slice the arrow table
if isinstance(indices, range):
if _is_range_contiguous(indices) and indices.start >= 0:
start, length = indices.start, indices.stop - indices.start
return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
else:
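# For a non-range iterable (e.g. a list or array), detect a contiguous ascending run by comparing each index against an incrementing counter starting at the first value; if it is contiguous we can slice instead of building an indices mapping.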
try:
start = next(iter(indices))
except StopIteration:
# if `indices` is an empty iterable, we return an empty dataset
return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
if start >= 0:
counter_from_start = itertools.count(start=start)
if all(i == j for i, j in zip(indices, counter_from_start)):
length = next(counter_from_start) - start
return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
# If not contiguous, we need to create a new indices mapping
return self._select_with_indices_mapping(
indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False)
def _select_contiguous(
self,
start: int,
length: int,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows from a contiguous slice of data.
The slice is defined by that start index and its length.
Args:
start (`int`): start index.
length (`int`): length of the slice to select.
new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds._select_contiguous(0, 4)
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
_check_valid_indices_value(start, len(self))
_check_valid_indices_value(start + length - 1, len(self))
if self._indices is None or length == 0:
return Dataset(
self.data.slice(start, length),
info=self.info.copy(),
split=self.split,
fingerprint=new_fingerprint,
)
else:
return Dataset(
self.data,
info=self.info.copy(),
split=self.split,
indices_table=self._indices.slice(start, length),
fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
def _select_with_indices_mapping(
self,
indices: Iterable,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows selected following the list/array of indices.
The new dataset is made by creating a new indices mapping on top of the main arrow table.
Args:
indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing.
keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds._select_with_indices_mapping(range(4))
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# Prepare the writer for our indices arrow table
if keep_in_memory or indices_cache_file_name is None:
buf_writer = pa.BufferOutputStream()
tmp_file = None
writer = ArrowWriter(
stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
)
else:
buf_writer = None
logger.info(f"Caching indices mapping at {indices_cache_file_name}")
tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False)
writer = ArrowWriter(
path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
)
indices = indices if isinstance(indices, list) else list(indices)
size = len(self)
if indices:
_check_valid_indices_value(int(max(indices)), size=size)
_check_valid_indices_value(int(min(indices)), size=size)
else:
return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
indices_array = pa.array(indices, type=pa.uint64())
# Check if we need to convert indices
if self._indices is not None:
indices_array = self._indices.column(0).take(indices_array)
indices_table = pa.Table.from_arrays([indices_array], names=["indices"])
with writer:
try:
writer.write_table(indices_table)
writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file
except (Exception, KeyboardInterrupt):
if tmp_file is not None:
tmp_file.close()
if os.path.exists(tmp_file.name):
os.remove(tmp_file.name)
raise
if tmp_file is not None:
tmp_file.close()
shutil.move(tmp_file.name, indices_cache_file_name)
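# os.umask() returns the previous mask while setting a new one, so it is called twice to read the current umask and restore it; the cache file then gets 0o666 permissions minus that mask.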
umask = os.umask(0o666)
os.umask(umask)
os.chmod(indices_cache_file_name, 0o666 & ~umask)
# Return new Dataset object
if buf_writer is None:
return self._new_dataset_with_indices(
indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint
)
else:
return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"])
def sort(
self,
column_names: Union[str, Sequence_[str]],
reverse: Union[bool, Sequence_[bool]] = False,
kind="deprecated",
null_placement: str = "at_end",
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset sorted according to a single or multiple columns.
Args:
column_names (`Union[str, Sequence[str]]`):
Column name(s) to sort by.
reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
If `True`, sort by descending order rather than ascending. If a single bool is provided,
the value is applied to the sorting of all column names. Otherwise a list of bools with the
same length and order as column_names must be provided.
kind (`str`, *optional*):
Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`,
The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general,
the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
<Deprecated version="2.8.0">
`kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
</Deprecated>
null_placement (`str`, defaults to `at_end`):
Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last`
<Added version="1.14.2"/>
keep_in_memory (`bool`, defaults to `False`):
Keep the sorted indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the sorted indices
can be identified, use it instead of recomputing.
indices_cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
sorted indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files, a lower value consumes less temporary memory.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes', split='validation')
>>> ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> sorted_ds = ds.sort('label')
>>> sorted_ds['label'][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
>>> another_sorted_ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# Deprecation warning
if kind != "deprecated":
warnings.warn(
"'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.",
category=FutureWarning,
)
# Check proper format of and for duplicates in column_names
if isinstance(column_names, str):
column_names = [column_names]
# Check proper format and length of reverse
if not isinstance(reverse, bool):
if len(reverse) != len(column_names):
raise ValueError(
"Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'."
)
else:
reverse = [reverse] * len(column_names)
# Check whether column name(s) exist in dataset
for column in column_names:
if not isinstance(column, str) or column not in self._data.column_names:
raise ValueError(
f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}"
)
# Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatibility
if null_placement not in ["at_start", "at_end"]:
if null_placement == "first":
null_placement = "at_start"
elif null_placement == "last":
null_placement = "at_end"
else:
raise ValueError(
f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'."
)
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
if os.path.exists(indices_cache_file_name) and load_from_cache_file:
logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}")
return self._new_dataset_with_indices(
fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
)
sort_table = query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices if self._indices is not None else None,
)
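# Build (column, order) pairs for pyarrow.compute.sort_indices and compute the sorted row positions, which are then passed to `select`.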
sort_keys = [
(col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse)
]
indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement)
return self.select(
indices=indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(
inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]
)
def shuffle(
self,
seed: Optional[int] = None,
generator: Optional[np.random.Generator] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new Dataset where the rows are shuffled.
Currently shuffling uses numpy random generators.
You can either supply a NumPy `Generator` to use, or a seed to initiate NumPy's default random generator (PCG64).
Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
This may take a lot of time depending on the size of your dataset though:
```python
my_dataset[0] # fast
my_dataset = my_dataset.shuffle(seed=42)
my_dataset[0] # up to 10x slower
my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
my_dataset[0] # fast again
```
In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128)
for example in my_iterable_dataset: # fast
pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in shuffled_iterable_dataset: # as fast as before
pass
```
Args:
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
keep_in_memory (`bool`, default `False`):
Keep the shuffled indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the shuffled indices
can be identified, use it instead of recomputing.
indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
shuffled indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# set a seed
>>> shuffled_ds = ds.shuffle(seed=42)
>>> shuffled_ds['label'][:10]
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0]
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
if seed is not None and generator is not None:
raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
if generator is not None and not isinstance(generator, np.random.Generator):
raise ValueError("The provided generator must be an instance of numpy.random.Generator")
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
if generator is None:
if seed is None:
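# Derive a seed from the global NumPy RNG state so that a seed set via np.random.seed() still influences the shuffle, then advance the global RNG so consecutive calls produce different permutations.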
_, seed, pos, *_ = np.random.get_state()
seed = seed[pos] if pos < 624 else seed[0]
_ = np.random.random() # do 1 step of rng
generator = np.random.default_rng(seed)
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
if os.path.exists(indices_cache_file_name) and load_from_cache_file:
logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
return self._new_dataset_with_indices(
fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
)
permutation = generator.permutation(len(self))
return self.select(
indices=permutation,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(
inplace=False,
randomized_function=True,
fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
)
def train_test_split(
self,
test_size: Union[float, int, None] = None,
train_size: Union[float, int, None] = None,
shuffle: bool = True,
stratify_by_column: Optional[str] = None,
seed: Optional[int] = None,
generator: Optional[np.random.Generator] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
train_indices_cache_file_name: Optional[str] = None,
test_indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
train_new_fingerprint: Optional[str] = None,
test_new_fingerprint: Optional[str] = None,
) -> "DatasetDict":
"""Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
This method is similar to scikit-learn `train_test_split`.
Args:
test_size (`int` or `float`, *optional*):
Size of the test split.
If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
If `int`, represents the absolute number of test samples.
If `None`, the value is set to the complement of the train size.
If `train_size` is also `None`, it will be set to `0.25`.
train_size (`int` or `float`, *optional*):
Size of the train split.
If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
If `int`, represents the absolute number of train samples.
If `None`, the value is automatically set to the complement of the test size.
shuffle (`bool`, *optional*, defaults to `True`):
Whether or not to shuffle the data before splitting.
stratify_by_column (`str`, *optional*, defaults to `None`):
The column name of labels to be used to perform stratified split of data.
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
keep_in_memory (`bool`, defaults to `False`):
Keep the splits indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the splits indices
can be identified, use it instead of recomputing.
train_indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
train split indices instead of the automatically generated cache file name.
test_indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
test split indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
train_new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the train set after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
test_new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the test set after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds = ds.train_test_split(test_size=0.2, shuffle=True)
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 852
})
test: Dataset({
features: ['text', 'label'],
num_rows: 214
})
})
# set a seed
>>> ds = ds.train_test_split(test_size=0.2, seed=42)
# stratified split
>>> ds = load_dataset("imdb",split="train")
Dataset({
features: ['text', 'label'],
num_rows: 25000
})
>>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label")
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 20000
})
test: Dataset({
features: ['text', 'label'],
num_rows: 5000
})
})
```
"""
from .dataset_dict import DatasetDict # import here because of circular dependency
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return DatasetDict({"train": self, "test": self})
if test_size is None and train_size is None:
test_size = 0.25
# Safety checks similar to scikit-learn's ones.
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)
n_samples = len(self)
if (
isinstance(test_size, int)
and (test_size >= n_samples or test_size <= 0)
or isinstance(test_size, float)
and (test_size <= 0 or test_size >= 1)
):
raise ValueError(
f"test_size={test_size} should be either positive and smaller "
f"than the number of samples {n_samples} or a float in the (0, 1) range"
)
if (
isinstance(train_size, int)
and (train_size >= n_samples or train_size <= 0)
or isinstance(train_size, float)
and (train_size <= 0 or train_size >= 1)
):
raise ValueError(
f"train_size={train_size} should be either positive and smaller "
f"than the number of samples {n_samples} or a float in the (0, 1) range"
)
if train_size is not None and not isinstance(train_size, (int, float)):
raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}")
if test_size is not None and not isinstance(test_size, (int, float)):
raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}")
if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:
raise ValueError(
f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)"
" range. Reduce test_size and/or train_size."
)
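# Convert fractional sizes to absolute counts (ceil for test, floor for train), then fill in whichever of the two was left as None.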
if isinstance(test_size, float):
n_test = ceil(test_size * n_samples)
elif isinstance(test_size, int):
n_test = float(test_size)
if isinstance(train_size, float):
n_train = floor(train_size * n_samples)
elif isinstance(train_size, int):
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError(
f"The sum of train_size and test_size = {n_train + n_test}, "
"should be smaller than the number of "
f"samples {n_samples}. Reduce test_size and/or "
"train_size."
)
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the "
"resulting train set will be empty. Adjust any of the "
"aforementioned parameters."
)
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
if generator is None and shuffle is True:
if seed is None:
_, seed, pos, *_ = np.random.get_state()
seed = seed[pos] if pos < 624 else seed[0]
_ = np.random.random() # do 1 step of rng
generator = np.random.default_rng(seed)
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if train_indices_cache_file_name is None or test_indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
if train_indices_cache_file_name is None:
train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)
if test_indices_cache_file_name is None:
test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)
if (
os.path.exists(train_indices_cache_file_name)
and os.path.exists(test_indices_cache_file_name)
and load_from_cache_file
):
logger.info(
f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}"
)
return DatasetDict(
{
"train": self._new_dataset_with_indices(
fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name
),
"test": self._new_dataset_with_indices(
fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name
),
}
)
if not shuffle:
if stratify_by_column is not None:
raise ValueError("Stratified train/test split is not implemented for `shuffle=False`")
train_indices = np.arange(n_train)
test_indices = np.arange(n_train, n_train + n_test)
else:
# stratified partition
if stratify_by_column is not None:
if stratify_by_column not in self._info.features.keys():
raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}")
if not isinstance(self._info.features[stratify_by_column], ClassLabel):
raise ValueError(
f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}."
)
try:
train_indices, test_indices = next(
stratified_shuffle_split_generate_indices(
self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator
)
)
except Exception as error:
if str(error) == "Minimum class count error":
raise ValueError(
f"The least populated class in {stratify_by_column} column has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2."
)
else:
raise error
# random partition
else:
permutation = generator.permutation(len(self))
test_indices = permutation[:n_test]
train_indices = permutation[n_test : (n_test + n_train)]
train_split = self.select(
indices=train_indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=train_indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=train_new_fingerprint,
)
test_split = self.select(
indices=test_indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=test_indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=test_new_fingerprint,
)
return DatasetDict({"train": train_split, "test": test_split})
def shard(
self,
num_shards: int,
index: int,
contiguous: bool = False,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
) -> "Dataset":
"""Return the `index`-nth shard from dataset split into `num_shards` pieces.
This shards deterministically. `dset.shard(n, i)` will contain all elements of dset whose
index mod `n = i`.
`dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks,
so it can be easily concatenated back together after processing. If `len(dset) % n == l`, then the
first `l` shards will have length `(len(dset) // n) + 1`, and the remaining shards will have length `(len(dset) // n)`.
`datasets.concatenate([dset.shard(n, i, contiguous=True) for i in range(n)])` will return
a dataset with the same order as the original.
Be sure to shard before using any randomizing operator (such as `shuffle`).
It is best if the shard operator is used early in the dataset pipeline.
Args:
num_shards (`int`):
How many shards to split the dataset into.
index (`int`):
Which shard to select and return.
contiguous: (`bool`, defaults to `False`):
Whether to select contiguous blocks of indices for shards.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
indices of each shard instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds
Dataset({
features: ['text', 'label'],
num_rows: 1066
})
>>> ds.shard(num_shards=2, index=0)
Dataset({
features: ['text', 'label'],
num_rows: 533
})
```
"""
if not 0 <= index < num_shards:
raise ValueError("index should be in [0, num_shards-1]")
if contiguous:
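# Contiguous sharding: every shard gets len(self) // num_shards rows, and the first len(self) % num_shards shards get one extra row each.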
div = len(self) // num_shards
mod = len(self) % num_shards
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
indices = range(start, end)
else:
indices = np.arange(index, len(self), num_shards)
return self.select(
indices=indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
)
@deprecated()
def export(
self,
filename: str,
format: str = "tfrecord",
):
"""Writes the Arrow dataset to a TFRecord file.
The dataset must already be in tensorflow format. The records will be written with
keys from `dataset._format_columns`.
Args:
filename (`str`): The filename, including the `.tfrecord` extension, to write to.
format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as
TFRecords are the only option. This enables a more flexible function signature later.
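Example:
A minimal sketch (assumes TensorFlow is installed and the dataset columns are NumPy-convertible):
```py
>>> ds.set_format("numpy")
>>> ds.export("my_dataset.tfrecord")
```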
"""
try:
import tensorflow as tf # noqa: F401
except ImportError:
logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
# From https://www.tensorflow.org/tutorials/load_data/tfrecord
def _bytes_feature(values):
"""Returns a bytes_list from a list of string / byte."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _float_feature(values):
"""Returns a float_list from a list of float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _int64_feature(values):
"""Returns an int64_list from a list of bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature":
"""Typechecks `values` and returns the corresponding tf.train.Feature."""
if isinstance(values, list):
if values and isinstance(values[0], str):
return _bytes_feature([v.encode() for v in values])
else:
raise ValueError(f"values={values} is empty or contains items that cannot be serialized")
elif isinstance(values, np.ndarray):
if values.dtype == np.dtype(float):
return _float_feature(values)
elif values.dtype == np.int64:
return _int64_feature(values)
elif values.dtype == np.dtype(str) or (
values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str)
):
return _bytes_feature([v.encode() for v in values])
else:
raise ValueError(
f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized"
)
elif hasattr(values, "dtype"):
if np.issubdtype(values.dtype, np.floating):
return _float_feature([values.item()])
elif np.issubdtype(values.dtype, np.integer):
return _int64_feature([values.item()])
elif np.issubdtype(values.dtype, str):
return _bytes_feature([values.item().encode()])
else:
raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized")
else:
raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized")
def serialize_example(ex):
feature = {key: _feature(value) for key, value in ex.items()}
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
def tf_serialize_example(ex):
tf_string = tf.py_function(serialize_example, (ex,), tf.string)
return tf.reshape(tf_string, ())
def generator():
for ex in self:
yield serialize_example(ex)
if self._format_type != "numpy":
raise ValueError("Dataset format must be numpy before exporting")
if not filename.endswith(".tfrecord"):
raise ValueError("filename {filename} must end with .tfrecord")
tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())
writer = tf.data.experimental.TFRecordWriter(filename)
logger.info(f"Writing TFRecord to {filename}")
writer.write(tf_dataset)
logger.info(f"Finished writing TFRecord to {filename}")
self = None # delete the dataset reference used by tf_dataset
def to_csv(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_csv_kwargs,
) -> int:
"""Exports the dataset to csv
Args:
path_or_buf (`PathLike` or `FileOrBuffer`):
Either a path to a file or a BinaryIO.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing. `batch_size` in this case defaults to
`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
value if you have sufficient compute power.
**to_csv_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html).
<Changed version="2.10.0">
Now, `index` defaults to `False` if not specified.
If you would like to write the index, pass `index=True` and also set a name for the index column by
passing `index_label`.
</Changed>
Returns:
`int`: The number of characters or bytes written.
Example:
```py
>>> ds.to_csv("path/to/dataset/directory")
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetWriter
return CsvDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_csv_kwargs).write()
def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]:
"""Returns the dataset as a Python dict. Can also return a generator for large datasets.
Args:
batched (`bool`):
Set to `True` to return a generator that yields the dataset as batches
of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
<Deprecated version="2.11.0">
Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.
</Deprecated>
batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
Returns:
`dict` or `Iterator[dict]`
Example:
```py
>>> ds.to_dict()
```
"""
if batched != "deprecated":
warnings.warn(
"'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.",
FutureWarning,
)
else:
batched = False
if not batched:
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices if self._indices is not None else None,
).to_pydict()
else:
batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
return (
query_table(
table=self._data,
key=slice(offset, offset + batch_size),
indices=self._indices if self._indices is not None else None,
).to_pydict()
for offset in range(0, len(self), batch_size)
)
def to_list(self) -> list:
"""Returns the dataset as a Python list.
Returns:
`list`
Example:
```py
>>> ds.to_list()
```
"""
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices if self._indices is not None else None,
).to_pylist()
def to_json(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_json_kwargs,
) -> int:
"""Export the dataset to JSON Lines or JSON.
Args:
path_or_buf (`PathLike` or `FileOrBuffer`):
Either a path to a file or a BinaryIO.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing. `batch_size` in this case defaults to
`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
value if you have sufficient compute power.
**to_json_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html).
<Changed version="2.11.0">
Now, `index` defaults to `False` if `orient` is `"split"` or `"table"`.
If you would like to write the index, pass `index=True`.
</Changed>
Returns:
`int`: The number of characters or bytes written.
Example:
```py
>>> ds.to_json("path/to/dataset/directory")
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetWriter
return JsonDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_json_kwargs).write()
def to_pandas(
self, batch_size: Optional[int] = None, batched: bool = False
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets.
Args:
batched (`bool`):
Set to `True` to return a generator that yields the dataset as batches
of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
batch_size (`int`, *optional*):
The size (number of rows) of the batches if `batched` is `True`.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
Returns:
`pandas.DataFrame` or `Iterator[pandas.DataFrame]`
Example:
```py
>>> ds.to_pandas()
```
"""
if not batched:
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices if self._indices is not None else None,
).to_pandas(types_mapper=pandas_types_mapper)
else:
batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
return (
query_table(
table=self._data,
key=slice(offset, offset + batch_size),
indices=self._indices if self._indices is not None else None,
).to_pandas(types_mapper=pandas_types_mapper)
for offset in range(0, len(self), batch_size)
)
def to_parquet(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**parquet_writer_kwargs,
) -> int:
"""Exports the dataset to parquet
Args:
path_or_buf (`PathLike` or `FileOrBuffer`):
Either a path to a file or a BinaryIO.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
**parquet_writer_kwargs (additional keyword arguments):
Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`.
Returns:
`int`: The number of characters or bytes written.
Example:
```py
>>> ds.to_parquet("path/to/dataset/directory")
```
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetWriter
return ParquetDatasetWriter(self, path_or_buf, batch_size=batch_size, **parquet_writer_kwargs).write()
def to_sql(
self,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
**sql_writer_kwargs,
) -> int:
"""Exports the dataset to a SQL database.
Args:
name (`str`):
Name of SQL table.
con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
**sql_writer_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html).
<Changed version="2.11.0">
Now, `index` defaults to `False` if not specified.
If you would like to write the index, pass `index=True` and also set a name for the index column by
passing `index_label`.
</Changed>
Returns:
`int`: The number of records written.
Example:
```py
>>> # con provided as a connection URI string
>>> ds.to_sql("data", "sqlite:///my_own_db.sql")
>>> # con provided as a sqlite3 connection object
>>> import sqlite3
>>> con = sqlite3.connect("my_own_db.sql")
>>> with con:
... ds.to_sql("data", con)
```
"""
# Dynamic import to avoid circular dependency
from .io.sql import SqlDatasetWriter
return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write()
def _estimate_nbytes(self) -> int:
dataset_nbytes = self.data.nbytes
# Find decodable columns, because if there are any, we need to
# adjust the dataset size computation (needed for sharding) to account for possible external files
decodable_columns = [
k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)
]
if decodable_columns:
# Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples
extra_nbytes = 0
def extra_nbytes_visitor(array, feature):
nonlocal extra_nbytes
if isinstance(feature, (Audio, Image)):
for x in array.to_pylist():
if x is not None and x["bytes"] is None and x["path"] is not None:
size = xgetsize(x["path"])
extra_nbytes += size
extra_nbytes -= array.field("path").nbytes
table = self.with_format("arrow")[:1000]
table_visitor(table, extra_nbytes_visitor)
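# Extrapolate the estimate from the first 1000 examples to the full table.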
extra_nbytes = extra_nbytes * len(self.data) / len(table)
dataset_nbytes = dataset_nbytes + extra_nbytes
if self._indices is not None:
dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data)
return dataset_nbytes
@staticmethod
def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int):
for shard_idx, shard in enumerate(shards):
for pa_table in shard.with_format("arrow").iter(batch_size):
yield shard_idx, pa_table
@staticmethod
def _generate_tables_from_cache_file(filename: str):
for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)):
yield batch_idx, pa.Table.from_batches([batch])
def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset":
"""Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`].
This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files.
Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop).
Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets.
All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset.
Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`].
This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough.
To get the best speed performance, make sure your dataset doesn't have an indices mapping.
If this is the case, the data are not read contiguously, which can be slow sometimes.
You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset.
Args:
num_shards (`int`, defaults to `1`):
Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly,
and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example.
Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk.
Returns:
[`datasets.IterableDataset`]
Example:
Basic usage:
```python
>>> ids = ds.to_iterable_dataset()
>>> for example in ids:
... pass
```
With lazy filtering and processing:
```python
>>> ids = ds.to_iterable_dataset()
>>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset
>>> for example in ids:
... pass
```
With sharding to enable efficient shuffling:
```python
>>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over
>>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating
>>> for example in ids:
... pass
```
With a PyTorch DataLoader:
```python
>>> import torch
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.filter(filter_fn).map(process_fn)
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating
>>> for example in ids:
... pass
```
With a PyTorch DataLoader and shuffling:
```python
>>> import torch
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
>>> for example in ids:
... pass
```
In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling
```python
>>> from datasets.distributed import split_dataset_by_node
>>> ids = ds.to_iterable_dataset(num_shards=512)
>>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
>>> ids = split_dataset_by_node(ds, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating
>>> for example in ids:
... pass
```
With shuffling and multiple epochs:
```python
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
>>> for epoch in range(n_epochs):
... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating
... for example in ids:
... pass
```
Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups.
"""
from .iterable_dataset import ArrowExamplesIterable, IterableDataset
if self._format_type is not None:
raise NotImplementedError(
"Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset"
)
if num_shards > len(self):
raise ValueError(
f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)."
)
if self._indices is not None:
logger.info(
"Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. "
"You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed."
)
shards = (
[copy.deepcopy(self)]
if num_shards == 1
else [
self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards)
]
)
ex_iterable = ArrowExamplesIterable(
Dataset._generate_tables_from_shards,
kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE},
)
return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features))
def _push_parquet_shards_to_hub(
self,
repo_id: str,
data_dir: str = "data",
split: Optional[str] = None,
private: Optional[bool] = False,
token: Optional[str] = None,
branch: Optional[str] = None,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[int] = None,
embed_external_files: bool = True,
) -> Tuple[str, str, int, int, List[str], int]:
"""Pushes the dataset to the hub.
The dataset is pushed using HTTP requests and does not require git or git-lfs to be installed.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
data_dir (`str`):
The name of directory to store parquet files. Defaults to "data".
split (Optional, `str`):
The name of the split that will be given to that dataset. Defaults to `self.split`.
private (Optional `bool`, defaults to `False`):
Whether the dataset repository should be set to private or not. Only affects repository creation:
a repository that already exists will not be affected by that parameter.
token (Optional `str`):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with ``huggingface-cli login``. Will raise an error
if no token is passed and the user is not logged-in.
branch (Optional `str`):
The git branch on which to push the dataset. This defaults to the default branch as specified
in your repository, which defaults to `"main"`.
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by
a unit (like `"5MB"`).
num_shards (`int`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size`.
<Added version="2.8.0"/>
embed_external_files (`bool`, default ``True``):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- :class:`Audio` and :class:`Image`: remove local path information and embed file content in the Parquet files.
Returns:
repo_id (`str`): ID of the repository in `<user>/<dataset_name>` or `<org>/<dataset_name>` format
split (`str`): name of the uploaded split
uploaded_size (`int`): number of uploaded bytes to the repository
dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset after uncompression
repo_files (`List[str]`): list of files in the repository
deleted_size (`int`): number of deleted bytes in the repository
Example:
```python
>>> dataset.push_to_hub("<organization>/<dataset_id>", split="evaluation")
```
"""
if max_shard_size is not None and num_shards is not None:
raise ValueError(
"Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
)
api = HfApi(endpoint=config.HF_ENDPOINT)
token = token if token is not None else HfFolder.get_token()
if token is None:
raise EnvironmentError(
"You need to provide a `token` or be logged in to Hugging Face with `huggingface-cli login`."
)
if split is None:
split = str(self.split) if self.split is not None else "train"
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
identifier = repo_id.split("/")
if len(identifier) > 2:
raise ValueError(
f"The identifier should be in the format <repo_id> or <namespace>/<repo_id>. It is {identifier}, "
"which doesn't conform to either format."
)
elif len(identifier) == 1:
dataset_name = identifier[0]
organization_or_username = api.whoami(token)["name"]
repo_id = f"{organization_or_username}/{dataset_name}"
api.create_repo(
repo_id,
token=token,
repo_type="dataset",
private=private,
exist_ok=True,
)
# Find decodable columns, because if there are any, we need to:
# embed the bytes from the files in the shards
decodable_columns = (
[k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)]
if embed_external_files
else []
)
dataset_nbytes = self._estimate_nbytes()
if num_shards is None:
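# Derive the number of shards from the target max shard size and the estimated (decoded) dataset size.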
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, 1)
shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
if decodable_columns:
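            # Re-encode each shard so that external files (e.g. images or audio referenced by a local path)
            # are embedded as bytes, making the uploaded Parquet files self-contained.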
def shards_with_embedded_external_files(shards):
for shard in shards:
format = shard.format
shard = shard.with_format("arrow")
shard = shard.map(
embed_table_storage,
batched=True,
batch_size=1000,
keep_in_memory=True,
)
shard = shard.with_format(**format)
yield shard
shards = shards_with_embedded_external_files(shards)
files = api.list_repo_files(repo_id, repo_type="dataset", revision=branch, token=token)
data_files = [file for file in files if file.startswith(f"{data_dir}/")]
def path_in_repo(_index, shard):
return f"{data_dir}/{split}-{_index:05d}-of-{num_shards:05d}-{shard._fingerprint}.parquet"
shards_iter = iter(shards)
first_shard = next(shards_iter)
first_shard_path_in_repo = path_in_repo(0, first_shard)
if first_shard_path_in_repo in data_files and num_shards < len(data_files):
logger.info("Resuming upload of the dataset shards.")
uploaded_size = 0
shards_path_in_repo = []
for index, shard in logging.tqdm(
enumerate(itertools.chain([first_shard], shards_iter)),
desc="Pushing dataset shards to the dataset hub",
total=num_shards,
disable=not logging.is_progress_bar_enabled(),
):
shard_path_in_repo = path_in_repo(index, shard)
# Upload a shard only if it doesn't already exist in the repository
if shard_path_in_repo not in data_files:
buffer = BytesIO()
shard.to_parquet(buffer)
uploaded_size += buffer.tell()
_retry(
api.upload_file,
func_kwargs={
"path_or_fileobj": buffer.getvalue(),
"path_in_repo": shard_path_in_repo,
"repo_id": repo_id,
"token": token,
"repo_type": "dataset",
"revision": branch,
},
exceptions=HTTPError,
status_codes=[504],
base_wait_time=2.0,
max_retries=5,
max_wait_time=20.0,
)
shards_path_in_repo.append(shard_path_in_repo)
# Cleanup to remove unused files
data_files_to_delete = [
data_file
for data_file in data_files
if data_file.startswith(f"{data_dir}/{split}-") and data_file not in shards_path_in_repo
]
download_config = DownloadConfig(token=token)
deleted_size = sum(
xgetsize(hf_hub_url(repo_id, data_file, revision=branch), download_config=download_config)
for data_file in data_files_to_delete
)
def delete_file(file):
api.delete_file(file, repo_id=repo_id, token=token, repo_type="dataset", revision=branch)
if len(data_files_to_delete):
for data_file in logging.tqdm(
data_files_to_delete,
desc="Deleting unused files from dataset repository",
total=len(data_files_to_delete),
disable=not logging.is_progress_bar_enabled(),
):
delete_file(data_file)
repo_files = list(set(files) - set(data_files_to_delete))
return repo_id, split, uploaded_size, dataset_nbytes, repo_files, deleted_size
def push_to_hub(
self,
repo_id: str,
config_name: str = "default",
split: Optional[str] = None,
private: Optional[bool] = False,
token: Optional[str] = None,
branch: Optional[str] = None,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[int] = None,
embed_external_files: bool = True,
):
"""Pushes the dataset to the hub as a Parquet dataset.
        The dataset is pushed using HTTP requests and does not require git or git-lfs to be installed.
The resulting Parquet files are self-contained by default. If your dataset contains [`Image`] or [`Audio`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to `False`.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
            config_name (`str`, defaults to `"default"`):
                The configuration name (or subset) of a dataset. Defaults to `"default"`.
split (`str`, *optional*):
The name of the split that will be given to that dataset. Defaults to `self.split`.
private (`bool`, *optional*, defaults to `False`):
Whether the dataset repository should be set to private or not. Only affects repository creation:
a repository that already exists will not be affected by that parameter.
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
branch (`str`, *optional*):
The git branch on which to push the dataset. This defaults to the default branch as specified
in your repository, which defaults to `"main"`.
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by
a unit (like `"5MB"`).
num_shards (`int`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size`.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
Example:
```python
>>> dataset.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
>>> dataset.push_to_hub("<organization>/<dataset_id>", num_shards=1024)
```
If your dataset has multiple splits (e.g. train/validation/test):
```python
>>> train_dataset.push_to_hub("<organization>/<dataset_id>", split="train")
>>> val_dataset.push_to_hub("<organization>/<dataset_id>", split="validation")
>>> # later
>>> dataset = load_dataset("<organization>/<dataset_id>")
>>> train_dataset = dataset["train"]
>>> val_dataset = dataset["validation"]
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
"""
if config_name == "data":
raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.")
if max_shard_size is not None and num_shards is not None:
raise ValueError(
"Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
)
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
repo_id, split, uploaded_size, dataset_nbytes, repo_files, deleted_size = self._push_parquet_shards_to_hub(
repo_id=repo_id,
data_dir=data_dir,
split=split,
private=private,
token=token,
branch=branch,
max_shard_size=max_shard_size,
num_shards=num_shards,
embed_external_files=embed_external_files,
)
organization, dataset_name = repo_id.split("/")
info_to_dump = self.info.copy()
info_to_dump.download_checksums = None
info_to_dump.download_size = uploaded_size
info_to_dump.dataset_size = dataset_nbytes
info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict(
{split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)}
)
# get the info from the README to update them
if "README.md" in repo_files:
download_config = DownloadConfig()
download_config.download_desc = "Downloading metadata"
download_config.token = token
dataset_readme_path = cached_path(
hf_hub_url(repo_id, "README.md", revision=branch),
download_config=download_config,
)
dataset_card = DatasetCard.load(Path(dataset_readme_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
if dataset_infos and config_name in dataset_infos:
repo_info = dataset_infos[config_name]
else:
repo_info = None
# get the deprecated dataset_infos.json to update them
elif config.DATASETDICT_INFOS_FILENAME in repo_files:
dataset_card = None
dataset_card_data = DatasetCardData()
download_config = DownloadConfig()
metadata_configs = MetadataConfigs()
download_config.download_desc = "Downloading metadata"
download_config.token = token
dataset_infos_path = cached_path(
hf_hub_url(repo_id, config.DATASETDICT_INFOS_FILENAME, revision=branch),
download_config=download_config,
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None
repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
repo_info = None
# update the total info to dump from existing info
if repo_info is not None:
logger.info("Updating downloaded metadata with the new split.")
if repo_info.splits and list(repo_info.splits) != [split]:
if self._info.features != repo_info.features:
raise ValueError(
f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}"
)
if split in repo_info.splits:
repo_info.download_size -= deleted_size
repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0
repo_info.download_checksums = None
repo_info.download_size = (repo_info.download_size or 0) + uploaded_size
repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes
repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size
repo_info.splits[split] = SplitInfo(
split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name
)
info_to_dump = repo_info
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs:
_matched_paths = [
p
for p in repo_files
if fnmatch(p, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*"))
]
if len(_matched_paths) > 0:
# it was uploaded with push_to_hub before metadata configs existed
_resolved_splits = {
string_to_dict(
p, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED)
)["split"]
for p in _matched_paths
}
default_metadata_configs_to_dump = {
"data_files": [
{"split": _resolved_split, "path": f"data/{_resolved_split}-*"}
for _resolved_split in _resolved_splits
]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
# update the metadata configs
if config_name in metadata_configs:
metadata_config = metadata_configs[config_name]
if "data_files" in metadata_config:
data_files_to_dump = sanitize_patterns(metadata_config["data_files"])
else:
data_files_to_dump = {}
# add the new split
data_files_to_dump[split] = [f"{data_dir}/{split}-*"]
metadata_config_to_dump = {
"data_files": [
{
"split": _split,
"path": _pattern[0] if len(_pattern) == 1 else _pattern,
}
for _split, _pattern in data_files_to_dump.items()
]
}
else:
metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]}
# push to the deprecated dataset_infos.json
if config.DATASETDICT_INFOS_FILENAME in repo_files:
download_config = DownloadConfig()
download_config.download_desc = "Downloading deprecated dataset_infos.json"
            download_config.token = token
dataset_infos_path = cached_path(
hf_hub_url(repo_id, config.DATASETDICT_INFOS_FILENAME, revision=branch),
download_config=download_config,
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
buffer = BytesIO()
buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
HfApi(endpoint=config.HF_ENDPOINT).upload_file(
path_or_fileobj=buffer.getvalue(),
path_in_repo=config.DATASETDICT_INFOS_FILENAME,
repo_id=repo_id,
token=token,
repo_type="dataset",
revision=branch,
)
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
dataset_card = (
DatasetCard(
"---\n"
+ str(dataset_card_data)
+ "\n---\n"
+ f'# Dataset Card for "{repo_id.split("/")[-1]}"\n\n[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)'
)
if dataset_card is None
else dataset_card
)
HfApi(endpoint=config.HF_ENDPOINT).upload_file(
path_or_fileobj=str(dataset_card).encode(),
path_in_repo="README.md",
repo_id=repo_id,
token=token,
repo_type="dataset",
revision=branch,
)
@transmit_format
@fingerprint_transform(inplace=False)
def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str):
"""Add column to Dataset.
<Added version="1.7"/>
Args:
name (`str`):
Column name.
column (`list` or `np.array`):
Column data to be added.
Returns:
[`Dataset`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> more_text = ds["text"]
>>> ds.add_column(name="text_2", column=more_text)
Dataset({
features: ['text', 'label', 'text_2'],
num_rows: 1066
})
```
"""
column_table = InMemoryTable.from_pydict({name: column})
_check_column_names(self._data.column_names + column_table.column_names)
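        # Horizontal concatenation requires the underlying rows to be in dataset order,
        # so any indices mapping (e.g. from `shuffle` or `select`) is materialized first.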
dataset = self.flatten_indices() if self._indices is not None else self
# Concatenate tables horizontally
table = concat_tables([dataset._data, column_table], axis=1)
# Update features
info = dataset.info.copy()
info.features.update(Features.from_arrow_schema(column_table.schema))
table = update_metadata_with_features(table, info.features)
return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint)
def add_faiss_index(
self,
column: str,
index_name: Optional[str] = None,
device: Optional[int] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None, # noqa: F821
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
dtype=np.float32,
):
"""Add a dense index using Faiss for fast retrieval.
By default the index is done over the vectors of the specified column.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
Args:
column (`str`):
The column of the vectors to add to the index.
index_name (`str`, *optional*):
The `index_name`/identifier of the index.
This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
By default it corresponds to `column`.
device (`Union[int, List[int]]`, *optional*):
If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (`str`, *optional*):
This is passed to the index factory of Faiss to create the index.
Default index class is `IndexFlat`.
metric_type (`int`, *optional*):
Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (`faiss.Index`, *optional*):
Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (`int`):
Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
<Added version="2.4.0"/>
train_size (`int`, *optional*):
If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to `False`):
Enable the verbosity of the Faiss index.
dtype (`data-type`):
The dtype of the numpy arrays that are indexed.
Default is `np.float32`.
Example:
```python
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
        >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
>>> ds_with_embeddings.add_faiss_index(column='embeddings')
>>> # query
>>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
>>> # save index
>>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
>>> # load index
>>> ds.load_faiss_index('embeddings', 'my_index.faiss')
>>> # query
>>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
```
"""
with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
super().add_faiss_index(
column=column,
index_name=index_name,
device=device,
string_factory=string_factory,
metric_type=metric_type,
custom_index=custom_index,
batch_size=batch_size,
train_size=train_size,
faiss_verbose=faiss_verbose,
)
return self
def add_faiss_index_from_external_arrays(
self,
external_arrays: np.array,
index_name: str,
device: Optional[int] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None, # noqa: F821
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
dtype=np.float32,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of `external_arrays`.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
Args:
external_arrays (`np.array`):
If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
index_name (`str`):
The `index_name`/identifier of the index.
This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
            device (`Union[int, List[int]]`, *optional*):
If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (`str`, *optional*):
This is passed to the index factory of Faiss to create the index.
Default index class is `IndexFlat`.
metric_type (`int`, *optional*):
                Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
            custom_index (`faiss.Index`, *optional*):
                Custom Faiss index that you already have instantiated and configured for your needs.
            batch_size (`int`, *optional*):
                Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
                <Added version="2.4.0"/>
            train_size (`int`, *optional*):
                If the index needs a training step, specifies how many vectors will be used to train the index.
            faiss_verbose (`bool`, defaults to `False`):
                Enable the verbosity of the Faiss index.
            dtype (`numpy.dtype`):
                The dtype of the numpy arrays that are indexed. Default is `np.float32`.
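        Example (an illustrative sketch: the dataset, the random array values and the dimension `128` are
        arbitrary placeholders, not part of the library):

        ```python
        >>> import numpy as np
        >>> ds = datasets.load_dataset('crime_and_punish', split='train')
        >>> external_arrays = np.random.rand(len(ds), 128).astype(np.float32)
        >>> ds.add_faiss_index_from_external_arrays(external_arrays=external_arrays, index_name='embeddings')
        >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', np.random.rand(128).astype(np.float32), k=10)
        ```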
"""
super().add_faiss_index_from_external_arrays(
external_arrays=external_arrays.astype(dtype),
index_name=index_name,
device=device,
string_factory=string_factory,
metric_type=metric_type,
custom_index=custom_index,
batch_size=batch_size,
train_size=train_size,
faiss_verbose=faiss_verbose,
)
def add_elasticsearch_index(
self,
column: str,
index_name: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
"""Add a text index using ElasticSearch for fast retrieval. This is done in-place.
Args:
column (`str`):
The column of the documents to add to the index.
index_name (`str`, *optional*):
The `index_name`/identifier of the index.
                This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`~Dataset.search`].
                By default it corresponds to `column`.
            host (`str`, *optional*, defaults to `localhost`):
                Host of where ElasticSearch is running.
            port (`int`, *optional*, defaults to `9200`):
Port of where ElasticSearch is running.
es_client (`elasticsearch.Elasticsearch`, *optional*):
The elasticsearch client used to create the index if host and port are `None`.
es_index_name (`str`, *optional*):
The elasticsearch index name used to create the index.
es_index_config (`dict`, *optional*):
The configuration of the elasticsearch index.
Default config is:
```
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
```
Example:
```python
>>> es_client = elasticsearch.Elasticsearch()
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
>>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index")
>>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)
```
"""
with self.formatted_as(type=None, columns=[column]):
super().add_elasticsearch_index(
column=column,
index_name=index_name,
host=host,
port=port,
es_client=es_client,
es_index_name=es_index_name,
es_index_config=es_index_config,
)
return self
@transmit_format
@fingerprint_transform(inplace=False)
def add_item(self, item: dict, new_fingerprint: str):
"""Add item to Dataset.
<Added version="1.7"/>
Args:
item (`dict`):
Item data to be added.
Returns:
[`Dataset`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
>>> ds = ds.add_item(new_review)
>>> ds[-1]
{'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
```
"""
item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()})
# We don't call _check_if_features_can_be_aligned here so this cast is "unsafe"
dset_features, item_features = _align_features(
[self._info.features, Features.from_arrow_schema(item_table.schema)]
)
# Cast to align the schemas of the tables and concatenate the tables
table = concat_tables(
[
self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data,
item_table.cast(item_features.arrow_schema),
]
)
if self._indices is None:
indices_table = None
else:
item_indices_array = pa.array([len(self._data)], type=pa.uint64())
item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"])
indices_table = concat_tables([self._indices, item_indices_table])
info = self.info.copy()
info.features.update(item_features)
table = update_metadata_with_features(table, info.features)
return Dataset(
table,
info=info,
split=self.split,
indices_table=indices_table,
fingerprint=new_fingerprint,
)
def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset":
"""Align the dataset's label ID and label name mapping to match an input `label2id` mapping.
This is useful when you want to ensure that a model's predicted labels are aligned with the dataset.
        The alignment is done using the lowercase label names.
Args:
label2id (`dict`):
The label name to ID mapping to align the dataset with.
label_column (`str`):
The column name of labels to align on.
Example:
```python
>>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2}
>>> ds = load_dataset("glue", "mnli", split="train")
>>> # mapping to align with
>>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
>>> ds_aligned = ds.align_labels_with_mapping(label2id, "label")
```
"""
# Sanity checks
if label_column not in self._data.column_names:
raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).")
label_feature = self._info.features[label_column]
if not (
isinstance(label_feature, ClassLabel)
or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel))
):
raise ValueError(
f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}."
)
# Sort input mapping by ID value to ensure the label names are aligned
label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
label_names = list(label2id.keys())
# Some label mappings use uppercase label names so we lowercase them during alignment
label2id = {k.lower(): v for k, v in label2id.items()}
int2str_function = (
label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str
)
if isinstance(label_feature, ClassLabel):
def process_label_ids(batch):
dset_label_names = [
int2str_function(label_id).lower() if label_id is not None else None
for label_id in batch[label_column]
]
batch[label_column] = [
label2id[label_name] if label_name is not None else None for label_name in dset_label_names
]
return batch
else:
def process_label_ids(batch):
dset_label_names = [
[int2str_function(label_id).lower() if label_id is not None else None for label_id in seq]
for seq in batch[label_column]
]
batch[label_column] = [
[label2id[label_name] if label_name is not None else None for label_name in seq]
for seq in dset_label_names
]
return batch
features = self.features
features[label_column] = (
ClassLabel(num_classes=len(label_names), names=label_names)
if isinstance(label_feature, ClassLabel)
else Sequence(ClassLabel(num_classes=len(label_names), names=label_names))
)
return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels")
def _concatenate_map_style_datasets(
dsets: List[Dataset],
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
axis: int = 0,
):
"""
Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`.
When you concatenate on axis 0, missing data are filled with None values.
Args:
dsets (`List[datasets.Dataset]`): List of Datasets to concatenate.
info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
split (:class:`NamedSplit`, optional): Name of the dataset split.
axis (``{0, 1}``, default ``0``, meaning over rows):
Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
(horizontally).
*New in version 1.6.0*
Example:
```py
>>> ds3 = _concatenate_map_style_datasets([ds1, ds2])
```
"""
# Ignore datasets with no rows
if any(dset.num_rows > 0 for dset in dsets):
dsets = [dset for dset in dsets if dset.num_rows > 0]
else:
# Return first dataset if all datasets are empty
return dsets[0]
    # Perform checks (and a potential cast if axis=0)
if axis == 0:
_check_if_features_can_be_aligned([dset.features for dset in dsets])
else:
if not all(dset.num_rows == dsets[0].num_rows for dset in dsets):
raise ValueError("Number of rows must match for all datasets")
_check_column_names([col_name for dset in dsets for col_name in dset._data.column_names])
# Find common format or reset format
format = dsets[0].format
if any(dset.format != format for dset in dsets):
format = {}
logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.")
def apply_offset_to_indices_table(table, offset):
if offset == 0:
return table
else:
array = table["indices"]
new_array = pc.add(array, pa.scalar(offset, type=pa.uint64()))
return InMemoryTable.from_arrays([new_array], names=["indices"])
# Concatenate indices if they exist
if any(dset._indices is not None for dset in dsets):
if axis == 0:
# Datasets with no indices tables are replaced with a dataset with an indices table in memory.
# Applying an offset to an indices table also brings the table in memory.
indices_tables = []
for i in range(len(dsets)):
if dsets[i]._indices is None:
dsets[i] = dsets[i]._select_with_indices_mapping(range(len(dsets[i])))
indices_tables.append(dsets[i]._indices)
# An offset needs to be applied to the indices before concatenating
offset = 0
for i in range(len(dsets)):
indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset)
offset += len(dsets[i]._data)
# Concatenate indices
indices_tables = [t for t in indices_tables if len(t) > 0]
if indices_tables:
indices_table = concat_tables(indices_tables)
else:
indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()}))
else:
if len(dsets) == 1:
indices_table = dsets[0]._indices
else:
for i in range(len(dsets)):
dsets[i] = dsets[i].flatten_indices()
indices_table = None
else:
indices_table = None
table = concat_tables([dset._data for dset in dsets], axis=axis)
if axis == 0:
features_list = _align_features([dset.features for dset in dsets])
else:
features_list = [dset.features for dset in dsets]
table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()})
# Concatenate infos
if info is None:
info = DatasetInfo.from_merge([dset.info for dset in dsets])
fingerprint = update_fingerprint(
"".join(dset._fingerprint for dset in dsets), _concatenate_map_style_datasets, {"info": info, "split": split}
)
# Make final concatenated dataset
concatenated_dataset = Dataset(
table,
info=info,
split=split,
indices_table=indices_table,
fingerprint=fingerprint,
)
concatenated_dataset.set_format(**format)
return concatenated_dataset
def _interleave_map_style_datasets(
datasets: List["Dataset"],
probabilities: Optional[List[float]] = None,
seed: Optional[int] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
**kwargs,
) -> "Dataset":
"""
Interleave several map-style datasets (sources) into a single map-style dataset.
The new dataset is constructed by alternating between the sources to get the examples.
    If `probabilities` is `None` (default), the new dataset is constructed by cycling between each source to get the examples.
    If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
Args:
datasets (`List[Dataset]`): list of datasets to interleave
probabilities (`List[float]`, optional, default None): If specified, the new dataset is constructed by sampling
examples from one source at a time according to these probabilities.
seed (`int`, optional, default None): The random seed used to choose a source for each example.
info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
split (:class:`NamedSplit`, optional): Name of the dataset split.
stopping_strategy (`str`, defaults to `first_exhausted`):
Two strategies are proposed right now.
            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
            - with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
            - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
        **kwargs (additional keyword arguments): Keyword arguments to be passed to :meth:`datasets.Dataset.select` when selecting the indices used to interleave the datasets.
Output:
:class:`datasets.Dataset`
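    Example (an illustrative sketch; `d1`, `d2` and `d3` stand for any map-style datasets sharing the same features):
        >>> dset = _interleave_map_style_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)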
"""
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(
f"{stopping_strategy} stopping strategy in `interleave_datasets` is not implemented yet with a list of {type(datasets[0])}"
)
# To interleave the datasets, we concatenate them and then we re-order the indices
concatenated_datasets = _concatenate_map_style_datasets(datasets, info=info, split=split)
# Let's now build the indices to pass to .select()
lengths = [len(dset) for dset in datasets]
offsets = np.cumsum([0] + lengths[:-1])
# if stopping_strategy is "first_exhausted", it is an undersampling situation whereas it is an oversampling situation if it is "all_exhausted"
oversampling = stopping_strategy == "all_exhausted"
if probabilities is None and not oversampling:
        # Undersampling situation with cycling between each source
        # Example: if the lengths of the datasets are [3, 4, 5]
        # then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9]
        # Note that we only have 3 examples per dataset since the first dataset ran out of examples
# Reasoning behind the following operation: keeping the min_length first indices of each dataset
# while offsetting in order to correspond to the right indices of the concatenated dataset
# and flattening to effectively interleave the datasets
indices = (offsets.reshape(1, -1) + np.arange(min(lengths)).reshape(-1, 1)).flatten().tolist()
elif probabilities is None:
        # Oversampling situation with cycling between each source
# Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9, 0, 6, 10, 1, 3, 11]
# Note that we have 5 examples per dataset with a rolling window since the longest dataset has 5 samples
# Reasoning behind the following operation: for each dataset indices (i.e column) repeat the indices to have max_length indices per dataset
# For example, if the max_length is 5 and the i-th dataset has 3 samples, the i-th column will be [0,1,2,0,1]
indices = np.mod(np.arange(max(lengths)).reshape(-1, 1), np.array(lengths).reshape(1, -1))
# We have to keep the indices to their respective dataset offsets and to flatten to effectively interleave the datasets
indices = (indices + offsets).flatten().tolist()
else:
# boolean array indicating if at index i if the dataset_i has been fully exhausted
is_exhausted = np.full(len(lengths), False)
# if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
# if oversampling ("all_exhausted"), we stop as soons as every dataset is exhausted, i.e as soon as every samples of every dataset has been visited at least once
bool_strategy_func = np.all if oversampling else np.any
def iter_random_indices():
"""Get an infinite iterator that randomly samples the index of the source to pick examples from."""
rng = np.random.default_rng(seed)
while True:
yield from (int(i) for i in rng.choice(len(datasets), size=1000, p=probabilities))
current_index = [0] * len(datasets)
indices = []
for source_idx in iter_random_indices():
            # If no oversampling, we stop as soon as a dataset has run out of examples (np.any)
            # Otherwise, we stop as soon as every dataset has run out of examples (np.all)
if bool_strategy_func(is_exhausted):
# the stopping condition was reached, let's stop
break
# let's add the example at the current index of the `source_idx`-th dataset
indices.append(current_index[source_idx] + offsets[source_idx])
current_index[source_idx] += 1
            # we've run out of examples for the current dataset, let's update our boolean array and bring the current_index back to 0
if current_index[source_idx] >= lengths[source_idx]:
is_exhausted[source_idx] = True
current_index[source_idx] = 0
return concatenated_datasets.select(indices, **kwargs)
def _split_by_node_map_style_dataset(dataset: Dataset, rank: int, world_size: int) -> Dataset:
"""
Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
Args:
dataset ([`Dataset`]):
The dataset to split by node.
rank (`int`):
Rank of the current node.
world_size (`int`):
Total number of nodes.
Returns:
[`Dataset`]: The dataset to be used on the node at rank `rank`.
"""
return dataset.shard(num_shards=world_size, index=rank, contiguous=True)
# This is outside Dataset.filter as it needs to be picklable for multiprocessing
def get_indices_from_mask_function(
function: Callable,
batched: bool,
with_indices: bool,
input_columns: Optional[Union[str, List[str]]],
indices_mapping: Optional[Table] = None,
*args,
**fn_kwargs,
):
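    # `args` always ends with the indices of the examples in the batch; the preceding positional
    # arguments are either the batch dict itself (when `input_columns` is None) or one list per input column.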
if batched:
# we extract indices from args
*inputs, indices = args
if with_indices:
mask = function(*inputs, indices, **fn_kwargs)
else:
mask = function(*inputs, **fn_kwargs)
else:
        # we get batched data (to do fewer lookups) but `function` only accepts one example
# therefore we need to call `function` on each example of the batch to get the mask
*inputs, indices = args
mask = []
if input_columns is None:
# inputs only contains a batch of examples
batch: dict = inputs[0]
num_examples = len(batch[next(iter(batch.keys()))])
for i in range(num_examples):
example = {key: batch[key][i] for key in batch}
mask.append(
function(example, indices[i], **fn_kwargs) if with_indices else function(example, **fn_kwargs)
)
else:
# inputs is a list of columns
columns: List[List] = inputs
num_examples = len(columns[0])
for i in range(num_examples):
input = [column[i] for column in columns]
mask.append(
function(*input, indices[i], **fn_kwargs) if with_indices else function(*input, **fn_kwargs)
)
indices_array = [i for i, to_keep in zip(indices, mask) if to_keep]
if indices_mapping is not None:
indices_array = pa.array(indices_array, type=pa.uint64())
indices_array = indices_mapping.column(0).take(indices_array)
indices_array = indices_array.to_pylist()
return {"indices": indices_array}
| datasets-main | src/datasets/arrow_dataset.py |
import os
import re
from functools import partial
from glob import has_magic
from pathlib import Path, PurePath
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
import huggingface_hub
from fsspec import get_fs_token_paths
from fsspec.implementations.http import HTTPFileSystem
from huggingface_hub import HfFileSystem
from tqdm.contrib.concurrent import thread_map
from . import config
from .download import DownloadConfig
from .download.streaming_download_manager import _prepare_path_and_storage_options, xbasename, xjoin
from .splits import Split
from .utils import logging
from .utils.file_utils import is_local_path, is_relative_path
from .utils.py_utils import glob_pattern_to_regex, string_to_dict
SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN)
logger = logging.get_logger(__name__)
class Url(str):
pass
class EmptyDatasetError(FileNotFoundError):
pass
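# Sharded split files as written by `push_to_hub`, e.g. "data/train-00000-of-00003-<fingerprint>.parquet"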
SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"
SPLIT_KEYWORDS = {
Split.TRAIN: ["train", "training"],
Split.VALIDATION: ["validation", "valid", "dev", "val"],
Split.TEST: ["test", "testing", "eval", "evaluation"],
}
NON_WORDS_CHARS = "-._ 0-9"
KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"]
KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**[{sep}/]{keyword}[{sep}/]**"]
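# With the separators above, a split keyword like "train" matches e.g. "train.csv" or "my_train_data.csv"
# (keyword in the file name) as well as "data/train/shard_0.csv" (keyword as a directory name).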
DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST]
DEFAULT_PATTERNS_SPLIT_IN_FILENAME = {
split: [
pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
for keyword in SPLIT_KEYWORDS[split]
for pattern in KEYWORDS_IN_FILENAME_BASE_PATTERNS
]
for split in DEFAULT_SPLITS
}
DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME = {
split: [
pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
for keyword in SPLIT_KEYWORDS[split]
for pattern in KEYWORDS_IN_DIR_NAME_BASE_PATTERNS
]
for split in DEFAULT_SPLITS
}
DEFAULT_PATTERNS_ALL = {
Split.TRAIN: ["**"],
}
ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED]
ALL_DEFAULT_PATTERNS = [
DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME,
DEFAULT_PATTERNS_SPLIT_IN_FILENAME,
DEFAULT_PATTERNS_ALL,
]
METADATA_PATTERNS = [
"metadata.csv",
"**/metadata.csv",
"metadata.jsonl",
"**/metadata.jsonl",
] # metadata file for ImageFolder and AudioFolder
WILDCARD_CHARACTERS = "*[]"
FILES_TO_IGNORE = [
"README.md",
"config.json",
"dataset_info.json",
"dataset_infos.json",
"dummy_data.zip",
"dataset_dict.json",
]
def contains_wildcards(pattern: str) -> bool:
    return any(wildcard_character in pattern for wildcard_character in WILDCARD_CHARACTERS)
def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]:
"""
Take the data_files patterns from the user, and format them into a dictionary.
Each key is the name of the split, and each value is a list of data files patterns (paths or urls).
The default split is "train".
Returns:
patterns: dictionary of split_name -> list of patterns
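    Example (illustrative; the outputs follow from the branches implemented below):
        >>> sanitize_patterns("train.csv")
        {'train': ['train.csv']}
        >>> sanitize_patterns(["data/*.csv", "extra.csv"])
        {'train': ['data/*.csv', 'extra.csv']}
        >>> sanitize_patterns({"train": "train.csv", "test": ["test_*.csv"]})
        {'train': ['train.csv'], 'test': ['test_*.csv']}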
"""
if isinstance(patterns, dict):
return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()}
elif isinstance(patterns, str):
return {SANITIZED_DEFAULT_SPLIT: [patterns]}
elif isinstance(patterns, list):
if any(isinstance(pattern, dict) for pattern in patterns):
for pattern in patterns:
if not (
isinstance(pattern, dict)
and len(pattern) == 2
and "split" in pattern
and isinstance(pattern.get("path"), (str, list))
):
raise ValueError(
f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}"
)
splits = [pattern["split"] for pattern in patterns]
if len(set(splits)) != len(splits):
raise ValueError(f"Some splits are duplicated in data_files: {splits}")
return {
str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]]
for pattern in patterns
}
else:
return {SANITIZED_DEFAULT_SPLIT: patterns}
else:
return sanitize_patterns(list(patterns))
def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
"""
    When a path matches a pattern, we additionally check if it's inside a special directory
we ignore by default (if it starts with a double underscore).
Users can still explicitly request a filepath inside such a directory if "__pycache__" is
mentioned explicitly in the requested pattern.
Some examples:
base directory:
./
└── __pycache__
└── b.txt
>>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
True
>>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
True
>>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
False
>>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
False
"""
    # We just need to check that every special directory in the path is explicitly present in the pattern.
    # Since we assume that the path matches the pattern, this is equivalent to checking that
    # the parent path and the parent pattern contain the same number of special directories.
data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")]
data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")]
return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern)
def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool:
"""
    When a path matches a pattern, we additionally check if it's a hidden file or if it's inside
a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot.
Users can still explicitly request a filepath that is hidden or is inside a hidden directory
if the hidden part is mentioned explicitly in the requested pattern.
Some examples:
base directory:
./
└── .hidden_file.txt
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**")
True
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*")
False
base directory:
./
└── .hidden_dir
└── a.txt
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**")
True
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*")
False
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*")
False
base directory:
./
└── .hidden_dir
└── .hidden_file.txt
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**")
True
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*")
True
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*")
False
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*")
True
>>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*")
False
"""
    # We just need to check that every hidden part of the path is explicitly present in the pattern.
    # Since we assume that the path matches the pattern, this is equivalent to checking that
    # the path and the pattern contain the same number of hidden parts.
hidden_directories_in_path = [
part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."}
]
hidden_directories_in_pattern = [
part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."}
]
return len(hidden_directories_in_path) != len(hidden_directories_in_pattern)
def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]:
"""
Get the default pattern from a directory or repository by testing all the supported patterns.
    The first pattern to return a non-empty list of data files is returned.
In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
"""
# first check the split patterns like data/{split}-00000-of-00001.parquet
for split_pattern in ALL_SPLIT_PATTERNS:
pattern = split_pattern.replace("{split}", "*")
try:
data_files = pattern_resolver(pattern)
except FileNotFoundError:
continue
if len(data_files) > 0:
splits: Set[str] = {string_to_dict(p, glob_pattern_to_regex(split_pattern))["split"] for p in data_files}
sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(
splits - set(DEFAULT_SPLITS)
)
return {split: [split_pattern.format(split=split)] for split in sorted_splits}
# then check the default patterns based on train/valid/test splits
for patterns_dict in ALL_DEFAULT_PATTERNS:
non_empty_splits = []
for split, patterns in patterns_dict.items():
for pattern in patterns:
try:
data_files = pattern_resolver(pattern)
except FileNotFoundError:
continue
if len(data_files) > 0:
non_empty_splits.append(split)
break
if non_empty_splits:
return {split: patterns_dict[split] for split in non_empty_splits}
raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]:
"""
Get the supported metadata patterns from a directory or repository.
"""
non_empty_patterns = []
for pattern in METADATA_PATTERNS:
try:
metadata_files = pattern_resolver(pattern)
if len(metadata_files) > 0:
non_empty_patterns.append(pattern)
except FileNotFoundError:
pass
if non_empty_patterns:
return non_empty_patterns
raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
def resolve_pattern(
pattern: str,
base_path: str,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> List[str]:
"""
Resolve the paths and URLs of the data files from the pattern passed by the user.
You can use patterns to resolve multiple local files. Here are a few examples:
- *.csv to match all the CSV files at the first level
- **.csv to match all the CSV files at any level
- data/* to match all the files inside "data"
- data/** to match all the files inside "data" and its subdirectories
The patterns are resolved using the fsspec glob.
Here are some behaviors specific to fsspec glob that are different from glob.glob, Path.glob, Path.match or fnmatch:
- '*' matches only first level items
- '**' matches all items
- '**/*' matches all at least second level items
More generally:
- '*' matches any character except a forward-slash (to match just the file or directory name)
- '**' matches any character including a forward-slash /
Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested.
The same applies to special directories that start with a double underscore like "__pycache__".
    You can still include one if the pattern explicitly mentions it:
- to include a hidden file: "*/.hidden.txt" or "*/.*"
- to include a hidden directory: ".hidden/*" or ".*/*"
- to include a special directory: "__special__/*" or "__*/*"
Example::
>>> from datasets.data_files import resolve_pattern
>>> base_path = "."
>>> resolve_pattern("docs/**/*.py", base_path)
        ['/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py']
Args:
pattern (str): Unix pattern or paths or URLs of the data files to resolve.
The paths can be absolute or relative to base_path.
Remote filesystems using fsspec are supported, e.g. with the hf:// protocol.
base_path (str): Base path to use when resolving relative paths.
allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions).
For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"]
Returns:
List[str]: List of paths or URLs to the local or remote files that match the patterns.
"""
if is_relative_path(pattern):
pattern = xjoin(base_path, pattern)
elif is_local_path(pattern):
base_path = os.path.splitdrive(pattern)[0] + os.sep
else:
base_path = ""
pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config)
fs, _, _ = get_fs_token_paths(pattern, storage_options=storage_options)
fs_base_path = base_path.split("::")[0].split("://")[-1] or fs.root_marker
fs_pattern = pattern.split("::")[0].split("://")[-1]
files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)}
protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
protocol_prefix = protocol + "://" if protocol != "file" else ""
matched_paths = [
filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath
for filepath, info in fs.glob(pattern, detail=True).items()
if info["type"] == "file"
and (xbasename(filepath) not in files_to_ignore)
and not _is_inside_unrequested_special_dir(
os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
)
and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(
os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
)
] # ignore .ipynb and __pycache__, but keep /../
if allowed_extensions is not None:
out = [
filepath
for filepath in matched_paths
if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:])
]
if len(out) < len(matched_paths):
invalid_matched_files = list(set(matched_paths) - set(out))
logger.info(
f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}"
)
else:
out = matched_paths
if not out:
error_msg = f"Unable to find '{pattern}'"
if allowed_extensions is not None:
error_msg += f" with any supported extension {list(allowed_extensions)}"
raise FileNotFoundError(error_msg)
return out
def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]:
"""
Get the default pattern from a directory testing all the supported patterns.
    The first pattern to return a non-empty list of data files is returned.
Some examples of supported patterns:
Input:
my_dataset_repository/
├── README.md
└── dataset.csv
Output:
{"train": ["**"]}
Input:
my_dataset_repository/
├── README.md
├── train.csv
└── test.csv
my_dataset_repository/
├── README.md
└── data/
├── train.csv
└── test.csv
my_dataset_repository/
├── README.md
├── train_0.csv
├── train_1.csv
├── train_2.csv
├── train_3.csv
├── test_0.csv
└── test_1.csv
Output:
{"train": ["**train*"], "test": ["**test*"]}
Input:
my_dataset_repository/
├── README.md
└── data/
├── train/
│ ├── shard_0.csv
│ ├── shard_1.csv
│ ├── shard_2.csv
│ └── shard_3.csv
└── test/
├── shard_0.csv
└── shard_1.csv
Output:
{"train": ["**train*/**"], "test": ["**test*/**"]}
Input:
my_dataset_repository/
├── README.md
└── data/
├── train-00000-of-00003.csv
├── train-00001-of-00003.csv
├── train-00002-of-00003.csv
├── test-00000-of-00001.csv
├── random-00000-of-00003.csv
├── random-00001-of-00003.csv
└── random-00002-of-00003.csv
Output:
{
"train": ["data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9].*"],
"test": ["data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9].*"],
"random": ["data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9].*"],
}
In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
"""
resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
try:
return _get_data_files_patterns(resolver)
except FileNotFoundError:
raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None
def get_metadata_patterns(
base_path: str,
download_config: Optional[DownloadConfig] = None,
) -> List[str]:
"""
Get the supported metadata patterns from a local directory.
"""
resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
try:
return _get_metadata_files_patterns(resolver)
except FileNotFoundError:
raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None
def _get_single_origin_metadata(
data_file: str,
download_config: Optional[DownloadConfig] = None,
) -> Tuple[str]:
data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config)
fs, _, _ = get_fs_token_paths(data_file, storage_options=storage_options)
if isinstance(fs, HfFileSystem):
resolved_path = fs.resolve_path(data_file)
return (resolved_path.repo_id, resolved_path.revision)
elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT):
hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
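        # Rewrite the Hub HTTP URL (which contains "/resolve/<revision>/") as the equivalent "hf://" path
        # (which uses "@<revision>") so that HfFileSystem can resolve the repo_id and revision.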
data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
resolved_path = hffs.resolve_path(data_file)
return (resolved_path.repo_id, resolved_path.revision)
info = fs.info(data_file)
# s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime
for key in ["ETag", "etag", "mtime"]:
if key in info:
return (str(info[key]),)
return ()
def _get_origin_metadata(
data_files: List[str],
max_workers=64,
download_config: Optional[DownloadConfig] = None,
) -> Tuple[str]:
return thread_map(
partial(_get_single_origin_metadata, download_config=download_config),
data_files,
max_workers=max_workers,
tqdm_class=logging.tqdm,
desc="Resolving data files",
disable=len(data_files) <= 16 or not logging.is_progress_bar_enabled(),
)
class DataFilesList(List[str]):
"""
List of data files (absolute local paths or URLs).
    It has two construction methods given the user's data files patterns:
- ``from_hf_repo``: resolve patterns inside a dataset repository
- ``from_local_or_remote``: resolve patterns from a local path
Moreover DataFilesList has an additional attribute ``origin_metadata``.
It can store:
- the last modified time of local files
- ETag of remote files
- commit sha of a dataset repository
Thanks to this additional attribute, it is possible to hash the list
and get a different hash if and only if at least one file changed.
This is useful for caching Dataset objects that are obtained from a list of data files.
"""
def __init__(self, data_files: List[str], origin_metadata: List[Tuple[str]]):
super().__init__(data_files)
self.origin_metadata = origin_metadata
def __add__(self, other):
return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata)
@classmethod
def from_hf_repo(
cls,
patterns: List[str],
dataset_info: huggingface_hub.hf_api.DatasetInfo,
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/")
return cls.from_patterns(
patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
)
@classmethod
def from_local_or_remote(
cls,
patterns: List[str],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = base_path if base_path is not None else Path().resolve().as_posix()
return cls.from_patterns(
patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
)
@classmethod
def from_patterns(
cls,
patterns: List[str],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = base_path if base_path is not None else Path().resolve().as_posix()
data_files = []
for pattern in patterns:
try:
data_files.extend(
resolve_pattern(
pattern,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
except FileNotFoundError:
if not has_magic(pattern):
raise
origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
return cls(data_files, origin_metadata)
def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
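        # Keep only the files whose name ends with one of the given extensions, optionally followed
        # by a single extra suffix, e.g. [".csv"] keeps "data.csv" as well as "data.csv.gz".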
pattern = "|".join("\\" + ext for ext in extensions)
pattern = re.compile(f".*({pattern})(\\..+)?$")
return DataFilesList(
[data_file for data_file in self if pattern.match(data_file)],
origin_metadata=self.origin_metadata,
)
class DataFilesDict(Dict[str, DataFilesList]):
"""
Dict of split_name -> list of data files (absolute local paths or URLs).
    It has two construction methods given the user's data files patterns:
- ``from_hf_repo``: resolve patterns inside a dataset repository
- ``from_local_or_remote``: resolve patterns from a local path
    Moreover, each list is a ``DataFilesList``. It is possible to hash the dictionary
and get a different hash if and only if at least one file changed.
For more info, see ``DataFilesList``.
This is useful for caching Dataset objects that are obtained from a list of data files.
Changing the order of the keys of this dictionary also doesn't change its hash.
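    Example (a minimal sketch; split names and patterns are hypothetical):

    ```py
    >>> data_files = DataFilesDict.from_patterns({"train": ["train/*.csv"], "test": ["test/*.csv"]})
    >>> data_files["train"]  # a DataFilesList with its own origin_metadata
    ```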
"""
@classmethod
def from_local_or_remote(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
DataFilesList.from_local_or_remote(
patterns_for_key,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
if not isinstance(patterns_for_key, DataFilesList)
else patterns_for_key
)
return out
@classmethod
def from_hf_repo(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
dataset_info: huggingface_hub.hf_api.DatasetInfo,
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
DataFilesList.from_hf_repo(
patterns_for_key,
dataset_info=dataset_info,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
if not isinstance(patterns_for_key, DataFilesList)
else patterns_for_key
)
return out
@classmethod
def from_patterns(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
DataFilesList.from_patterns(
patterns_for_key,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
if not isinstance(patterns_for_key, DataFilesList)
else patterns_for_key
)
return out
def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
out = type(self)()
for key, data_files_list in self.items():
out[key] = data_files_list.filter_extensions(extensions)
return out
| datasets-main | src/datasets/data_files.py |
import copy
import itertools
import sys
import warnings
from collections import Counter
from copy import deepcopy
from dataclasses import dataclass
from functools import partial
from itertools import cycle, islice
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
from . import config
from .arrow_dataset import Dataset, DatasetInfoMixin
from .features import Features
from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects
from .filesystems import _reset_fsspec_lock
from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter
from .info import DatasetInfo
from .splits import NamedSplit
from .table import cast_table_to_features, read_schema_from_file, table_cast
from .utils.logging import get_logger
from .utils.py_utils import Literal
from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs
logger = get_logger(__name__)
Key = Union[int, str]
def identity_func(x):
return x
def _rename_columns_fn(example: Dict, column_mapping: Dict[str, str]):
if any(col not in example for col in column_mapping):
raise ValueError(
f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset."
)
if any(col in example for col in column_mapping.values()):
raise ValueError(
f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(example) - set(column_mapping.values())} are already in the dataset."
)
return {
new_column_name: example[original_column_name]
for original_column_name, new_column_name in column_mapping.items()
}
def add_column_fn(example: Dict, idx: int, name: str, column: List[Dict]):
if name in example:
raise ValueError(f"Error when adding {name}: column {name} is already in the dataset.")
return {name: column[idx]}
def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features] = None) -> Features:
pa_table = pa.Table.from_pydict(batch)
if try_features is not None:
try:
pa_table = table_cast(pa_table, pa.schema(try_features.type))
except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
pass
return Features.from_arrow_schema(pa_table.schema)
def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]:
# we order the columns by order of appearance
# to do so, we use a dict as an ordered set
cols = {col: None for example in examples for col in example}
# when an example is missing a column, we set the value to None with .get()
arrays = [[example.get(col) for example in examples] for col in cols]
return dict(zip(cols, arrays))
def _batch_to_examples(batch: Dict[str, list]) -> Iterator[Dict[str, Any]]:
    """Convert a batch (a dict of lists) into an iterator over examples (dicts)"""
n_examples = len(batch[next(iter(batch))])
for i in range(n_examples):
yield {col: array[i] for col, array in batch.items()}
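# Illustrative round-trip between the two helpers above:
#   _examples_to_batch([{"a": 1}, {"a": 2}]) == {"a": [1, 2]}
#   list(_batch_to_examples({"a": [1, 2]})) == [{"a": 1}, {"a": 2}]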
class _HasNextIterator(Iterator):
"""Iterator with an hasnext() function. Taken from https://stackoverflow.com/questions/1966591/has-next-in-python-iterators."""
def __init__(self, it):
self.it = iter(it)
self._hasnext = None
def __iter__(self):
return self
def __next__(self):
if self._hasnext:
result = self._thenext
else:
result = next(self.it)
self._hasnext = None
return result
def hasnext(self):
if self._hasnext is None:
try:
self._thenext = next(self.it)
except StopIteration:
self._hasnext = False
else:
self._hasnext = True
return self._hasnext
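# Illustrative usage:
#   it = _HasNextIterator([1, 2])
#   next(it)      # -> 1
#   it.hasnext()  # -> True (and buffers the next element)
#   next(it)      # -> 2
#   it.hasnext()  # -> False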
def _convert_to_arrow(
iterable: Iterable[Tuple[Key, dict]],
    batch_size: Optional[int],
drop_last_batch: bool = False,
) -> Iterator[Tuple[Key, pa.Table]]:
"""Convert and group examples in Arrow tables of size `batch_size`.
Args:
iterable (`Iterable[Tuple[Key, dict]]`):
An examples iterable containing tuples (example_key, example) of type (int/str, dict)
batch_size (`Optional[int]`):
Size of each sub-table to yield. If None or <= 0, yields the full table.
drop_last_batch (`bool`, defaults to `False`):
Drop the last batch if it is smaller than `batch_size`.
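    Example (an illustrative sketch):

        >>> examples = [(0, {"x": 0}), (1, {"x": 1}), (2, {"x": 2})]
        >>> [(key, len(table)) for key, table in _convert_to_arrow(examples, batch_size=2)]
        [('0_1', 2), ('2', 1)]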
"""
if batch_size is None or batch_size <= 0:
yield "all", pa.Table.from_pylist(
cast_to_python_objects([example for _, example in iterable], only_1d_for_numpy=True)
)
return
iterator = iter(iterable)
for key, example in iterator:
iterator_batch = islice(iterator, batch_size - 1)
key_examples_list = [(key, example)] + [(key, example) for key, example in iterator_batch]
if len(key_examples_list) < batch_size and drop_last_batch:
return
keys, examples = zip(*key_examples_list)
new_key = "_".join(str(key) for key in keys)
yield new_key, pa.Table.from_pylist(cast_to_python_objects(examples, only_1d_for_numpy=True))
def _batch_arrow_tables(
iterable: Iterable[Tuple[Key, pa.Table]],
batch_size: Optional[int],
drop_last_batch: bool = False,
) -> Iterator[Tuple[Key, pa.Table]]:
"""Iterate over sub-tables of size `batch_size`.
Args:
iterable (`Iterable[Tuple[Key, pa.Table]]`):
A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table)
batch_size (`Optional[int]`):
Size of each sub-table to yield. If None or <= 0, yields the full table.
drop_last_batch (`bool`, defaults to `False`):
Drop the last batch if it is smaller than `batch_size`.
"""
if batch_size is None or batch_size <= 0:
yield "all", pa.concat_tables([pa_table for _, pa_table in iterable])
return
keys_buffer = []
chunks_buffer = []
chunks_buffer_size = 0
for key, pa_table in iterable:
for chunk in pa_table.to_reader(max_chunksize=batch_size):
if len(chunk) == 0:
continue
elif chunks_buffer_size + len(chunk) < batch_size:
keys_buffer.append(key)
chunks_buffer.append(chunk)
chunks_buffer_size += len(chunk)
continue
elif chunks_buffer_size + len(chunk) == batch_size:
keys_buffer.append(key)
chunks_buffer.append(chunk)
new_key = "_".join(str(_key) for _key in keys_buffer)
yield new_key, pa.Table.from_batches(chunks_buffer)
keys_buffer = []
chunks_buffer = []
chunks_buffer_size = 0
else:
cropped_chunk_length = batch_size - chunks_buffer_size
keys_buffer.append(f"{key}[:{cropped_chunk_length}]")
chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
new_key = "_".join(str(_key) for _key in keys_buffer)
yield new_key, pa.Table.from_batches(chunks_buffer)
keys_buffer = [f"{key}[{cropped_chunk_length}:]"]
chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
chunks_buffer_size = len(chunk) - cropped_chunk_length
if not drop_last_batch and chunks_buffer:
new_key = "_".join(str(_key) for _key in keys_buffer)
yield new_key, pa.Table.from_batches(chunks_buffer)
class _BaseExamplesIterable:
"""Base class for the examples iterable used by an IterableDataset"""
def __init__(self) -> None:
self.iter_arrow: Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]] = None
def __iter__(self) -> Iterator[Tuple[Key, dict]]:
"""An examples iterable should yield tuples (example_key, example) of type (int/str, dict)"""
raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet")
def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable":
"""
Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable.
If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self.
"""
raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet")
def shard_data_sources(self, worker_id: int, num_workers: int) -> "_BaseExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet")
def split_shard_indices_by_worker(self, worker_id: int, num_workers: int) -> List[int]:
return list(range(worker_id, self.n_shards, num_workers))
@property
def n_shards(self) -> int:
raise NotImplementedError(f"{type(self)} doesn't implement n_shards yet")
class ExamplesIterable(_BaseExamplesIterable):
def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict):
super().__init__()
self.generate_examples_fn = generate_examples_fn
self.kwargs = kwargs
def __iter__(self):
yield from self.generate_examples_fn(**self.kwargs)
def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable":
return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
"""Keep only the requested shard."""
gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs)
@property
def n_shards(self) -> int:
return _number_of_shards_in_gen_kwargs(self.kwargs)
class ShuffledDataSourcesExamplesIterable(ExamplesIterable):
def __init__(
self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator
):
super().__init__(generate_examples_fn, kwargs)
self.generator = deepcopy(generator)
def __iter__(self):
"""Shuffle the kwargs order to shuffle shards"""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
yield from self.generate_examples_fn(**kwargs_with_shuffled_shards)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
"""Keep only the requested shard."""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources(
worker_id, num_workers
)
class ArrowExamplesIterable(_BaseExamplesIterable):
def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict):
super().__init__()
self.generate_tables_fn = generate_tables_fn
self.kwargs = kwargs
self.iter_arrow = self._iter_arrow
def __iter__(self):
formatter = PythonFormatter()
for key, pa_table in self.generate_tables_fn(**self.kwargs):
for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
formatted_batch = formatter.format_batch(pa_subtable)
for example in _batch_to_examples(formatted_batch):
yield key, example
def _iter_arrow(self):
yield from self.generate_tables_fn(**self.kwargs)
def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable":
return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
"""Keep only the requested shard."""
gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs)
@property
def n_shards(self) -> int:
return _number_of_shards_in_gen_kwargs(self.kwargs)
class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable):
def __init__(
self,
generate_tables_fn: Callable[..., Tuple[Key, pa.Table]],
kwargs: dict,
generator: np.random.Generator,
):
super().__init__(generate_tables_fn, kwargs)
self.generator = deepcopy(generator)
def __iter__(self):
"""Shuffle the kwargs order to shuffle shards"""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
formatter = PythonFormatter()
for key, pa_table in self.generate_tables_fn(**kwargs_with_shuffled_shards):
for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
formatted_batch = formatter.format_batch(pa_subtable)
for example in _batch_to_examples(formatted_batch):
yield key, example
def _iter_arrow(self):
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
yield from self.generate_tables_fn(**kwargs_with_shuffled_shards)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
"""Keep only the requested shard."""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources(
worker_id, num_workers
)
class SelectColumnsIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]):
super().__init__()
self.ex_iterable = ex_iterable
self.column_names = column_names
if self.ex_iterable.iter_arrow:
self.iter_arrow = self._iter_arrow
def __iter__(self):
for idx, row in self.ex_iterable:
yield idx, {c: row[c] for c in self.column_names}
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
for idx, pa_table in self.ex_iterable.iter_arrow():
yield idx, pa_table.select(self.column_names)
def shuffle_data_sources(self, generator: np.random.Generator) -> "SelectColumnsIterable":
return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "SelectColumnsIterable":
return SelectColumnsIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), self.column_names)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class StepExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int):
super().__init__()
self.ex_iterable = ex_iterable
self.step = step
self.offset = offset
# TODO(QL): implement iter_arrow
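        # Keep one example out of every `step`, starting at position `offset`: e.g. step=3, offset=1
        # keeps the examples at positions 1, 4, 7, ... This is how the stream is split across nodes
        # when the number of shards is not a multiple of world_size.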
def __iter__(self):
ex_iterator = iter(self.ex_iterable)
while True:
batch = list(islice(ex_iterator, self.step))
if len(batch) > self.offset:
yield batch[self.offset]
else:
break
def shuffle_data_sources(self, generator: np.random.Generator) -> "StepExamplesIterable":
return StepExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset
)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "StepExamplesIterable":
return StepExamplesIterable(
self.ex_iterable.shard_data_sources(worker_id, num_workers), step=self.step, offset=self.offset
)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
ex_iterables: List[_BaseExamplesIterable],
stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
):
super().__init__()
self.ex_iterables = ex_iterables
self.stopping_strategy = stopping_strategy
# if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
        # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
self.bool_strategy_func = np.all if (stopping_strategy == "all_exhausted") else np.any
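        # Illustrative sketch: with sources A = [a1, a2] and B = [b1, b2, b3] visited in turn,
        # "first_exhausted" yields a1, b1, a2 and then stops (A is exhausted), while "all_exhausted"
        # keeps cycling and restarts exhausted sources until every example has been seen at least once.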
# TODO(QL): implement iter_arrow
def _get_indices_iterator(self):
# this is an infinite iterator to keep track of which iterator we want to pick examples from
return cycle(range(len(self.ex_iterables)))
def __iter__(self):
iterators = [_HasNextIterator(ex_iterable) for ex_iterable in self.ex_iterables]
indices_iterator = self._get_indices_iterator()
is_exhausted = np.full(len(self.ex_iterables), False)
for i in indices_iterator:
try: # let's pick one example from the iterator at index i
yield next(iterators[i])
# it will resume from the yield at the next call so that we can directly test if the iterable is exhausted and if we need to break out of the loop
if not iterators[i].hasnext():
is_exhausted[i] = True
if self.bool_strategy_func(is_exhausted):
# if the stopping criteria is met, break the main for loop
break
# otherwise reinitialise the iterator and yield the first example
iterators[i] = _HasNextIterator(self.ex_iterables[i])
except StopIteration:
                # here it means that the i-th iterable dataset is empty, i.e. we never get the chance to yield an element of the i-th dataset.
# we still check if the stopping criteria is met and if we break out of the loop in case of an oversampling strategy
is_exhausted[i] = True
if self.bool_strategy_func(is_exhausted):
# if the stopping criteria is met, break the main for loop
break
def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
"""Shuffle each underlying examples iterable."""
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy)
@property
def n_shards(self) -> int:
return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "CyclingMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
return CyclingMultiSourcesExamplesIterable(
[iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
stopping_strategy=self.stopping_strategy,
)
class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
"""
VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.
It doesn't require the examples iterables to always yield the same columns.
Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
    For reference, `IterableDataset` merges the features of all the datasets to concatenate into one.
We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
This is done with `_apply_feature_types_on_example`.
"""
def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
super().__init__()
self.ex_iterables = ex_iterables
if all(ex_iterable.iter_arrow is not None for ex_iterable in ex_iterables):
self.iter_arrow = self._iter_arrow
def __iter__(self):
for ex_iterable in self.ex_iterables:
yield from ex_iterable
def _iter_arrow(self):
for ex_iterable in self.ex_iterables:
yield from ex_iterable.iter_arrow()
def shuffle_data_sources(
self, generator: np.random.Generator
) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
"""Shuffle the list of examples iterable, as well as each underlying examples iterable."""
rng = deepcopy(generator)
ex_iterables = list(self.ex_iterables)
rng.shuffle(ex_iterables)
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables]
return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
@property
def n_shards(self) -> int:
return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
def shard_data_sources(
self, worker_id: int, num_workers: int
) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
return VerticallyConcatenatedMultiSourcesExamplesIterable(
[iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
)
def _check_column_names(column_names: List[str]):
"""Check the column names to make sure they don't contain duplicates."""
counter = Counter(column_names)
if not all(count == 1 for count in counter.values()):
duplicated_columns = [col for col in counter if counter[col] > 1]
raise ValueError(
f"The examples iterables can't have duplicated columns but columns {duplicated_columns} are duplicated."
)
class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
"""
HorizontallyConcatenatedMultiSourcesExamplesIterable merges examples together for the input list of iterables.
It also checks that there are no duplicate columns (otherwise we don't know which one to keep).
This check is done once when yielding the first example.
However it doesn't fill missing columns with None.
Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
    For reference, `IterableDataset` merges the features of all the datasets to concatenate into one.
We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
This is done with `_apply_feature_types_on_example`.
"""
def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
super().__init__()
self.ex_iterables = ex_iterables
# TODO(QL): implement iter_arrow
def __iter__(self):
ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
for i in itertools.count():
keys = []
examples = []
for ex_iterator in list(ex_iterators):
try:
key, example = next(ex_iterator)
keys.append(key)
examples.append(example)
except StopIteration:
ex_iterators.remove(ex_iterator)
if ex_iterators:
if i == 0:
_check_column_names([column_name for example in examples for column_name in example])
new_example = {}
for example in examples:
new_example.update(example)
new_key = "_".join(str(key) for key in keys)
yield new_key, new_example
else:
break
def shuffle_data_sources(
self, generator: np.random.Generator
) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
"""Doesn't shuffle the wrapped examples iterable since it would break the alignment between them."""
return self
@property
def n_shards(self) -> int:
return 1
def shard_data_sources(
self, worker_id: int, num_workers: int
) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
return HorizontallyConcatenatedMultiSourcesExamplesIterable(
[iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
)
class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable):
def __init__(
self,
ex_iterables: List[_BaseExamplesIterable],
generator: np.random.Generator,
probabilities: Optional[List[float]] = None,
stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
):
super().__init__(ex_iterables, stopping_strategy)
self.generator = deepcopy(generator)
self.probabilities = probabilities
# TODO(QL): implement iter_arrow
@staticmethod
def _iter_random_indices(
rng: np.random.Generator,
num_sources: int,
random_batch_size=1000,
p: Optional[List[float]] = None,
) -> Iterator[int]:
"""Get an infinite iterator that randomly samples the index of the source to pick examples from."""
if p is None:
while True:
yield from (int(i) for i in rng.integers(0, num_sources, size=random_batch_size))
else:
while True:
yield from (int(i) for i in rng.choice(num_sources, size=random_batch_size, p=p))
def _get_indices_iterator(self):
rng = deepcopy(self.generator)
# this is an infinite iterator that randomly samples the index of the source to pick examples from
return self._iter_random_indices(rng, len(self.ex_iterables), p=self.probabilities)
def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable":
"""Shuffle the data sources of each wrapped examples iterable."""
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
return RandomlyCyclingMultiSourcesExamplesIterable(
ex_iterables,
generator=generator,
probabilities=self.probabilities,
stopping_strategy=self.stopping_strategy,
)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "RandomlyCyclingMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
return RandomlyCyclingMultiSourcesExamplesIterable(
[iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
self.generator,
self.probabilities,
self.stopping_strategy,
)
class MappedExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
function: Callable,
with_indices: bool = False,
input_columns: Optional[List[str]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[List[str]] = None,
fn_kwargs: Optional[dict] = None,
formatting: Optional["FormattingConfig"] = None,
format_type="deprecated",
):
if format_type != "deprecated":
warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
formatting = FormattingConfig(format_type=format_type)
super().__init__()
self.ex_iterable = ex_iterable
self.function = function
self.batched = batched
self.batch_size = batch_size
self.drop_last_batch = drop_last_batch
self.remove_columns = remove_columns
self.with_indices = with_indices
self.input_columns = input_columns
self.fn_kwargs = fn_kwargs or {}
self.formatting = formatting
if self.formatting and self.formatting.format_type == "arrow":
self.iter_arrow = self._iter_arrow
def __iter__(self):
if self.formatting and self.formatting.format_type == "arrow":
yield from ArrowExamplesIterable(self._iter_arrow, {})
else:
yield from self._iter()
def _iter(self):
iterator = iter(self.ex_iterable)
current_idx = 0
if self.formatting:
formatter = get_formatter(self.formatting.format_type)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
if self.batched:
for key, example in iterator:
# If `batched`, first build the batch, if `batch_size` is None or <=0, then the batch is the whole dataset
iterator_batch = (
iterator
if self.batch_size is None or self.batch_size <= 0
else islice(iterator, self.batch_size - 1)
)
key_examples_list = [(key, example)] + [(key, example) for key, example in iterator_batch]
keys, examples = zip(*key_examples_list)
if (
self.drop_last_batch
and self.batch_size is not None
and self.batch_size > 0
and len(examples) < self.batch_size
): # ignore last batch
return
batch = _examples_to_batch(examples)
batch = format_dict(batch) if format_dict else batch
# then apply the transform
inputs = batch
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append([current_idx + i for i in range(len(key_examples_list))])
transformed_batch = dict(batch) # this will be updated with the function output
transformed_batch.update(self.function(*function_args, **self.fn_kwargs))
# then remove the unwanted columns
if self.remove_columns:
for c in self.remove_columns:
del transformed_batch[c]
if transformed_batch:
first_col = next(iter(transformed_batch))
bad_cols = [
col
for col in transformed_batch
if len(transformed_batch[col]) != len(transformed_batch[first_col])
]
if bad_cols:
raise ValueError(
f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} while {first_col} has length {len(transformed_batch[first_col])}."
)
# the new key is the concatenation of the examples keys from the batch
new_key = "_".join(str(key) for key in keys)
# yield one example at a time from the transformed batch
for batch_idx, example in enumerate(_batch_to_examples(transformed_batch)):
yield new_key, example
current_idx += batch_idx + 1
else:
for key, example in iterator:
# If not batched, we can apply the transform and yield the example directly
# first copy the example, since we might drop some keys
example = dict(example)
example = format_dict(example) if format_dict else example
# then apply the transform
inputs = example
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append(current_idx)
transformed_example = dict(example) # this will be updated with the function output
transformed_example.update(self.function(*function_args, **self.fn_kwargs))
# then we remove the unwanted columns
if self.remove_columns:
for c in self.remove_columns:
del transformed_example[c]
yield key, transformed_example
current_idx += 1
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
if self.ex_iterable.iter_arrow:
iterator = _batch_arrow_tables(
self.ex_iterable.iter_arrow(),
batch_size=self.batch_size if self.batched else 1,
drop_last_batch=self.drop_last_batch,
)
else:
iterator = _convert_to_arrow(
self.ex_iterable,
batch_size=self.batch_size if self.batched else 1,
drop_last_batch=self.drop_last_batch,
)
current_idx = 0
for key, pa_table in iterator:
# first build the batch
function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
if self.with_indices:
if self.batched:
function_args.append([current_idx + i for i in range(len(pa_table))])
else:
function_args.append(current_idx)
# then apply the transform
output_table = self.function(*function_args, **self.fn_kwargs)
if not isinstance(output_table, pa.Table):
raise TypeError(
f"Provided `function` which is applied to pyarrow tables returns a variable of type {type(output_table)}. Make sure provided `function` returns a a pyarrow table to update the dataset."
)
            # we don't need to merge results for consistency with Dataset.map which merges iff both input and output are dicts
# then remove the unwanted columns
if self.remove_columns:
for column in self.remove_columns:
if column in output_table.column_names:
output_table = output_table.remove_column(output_table.column_names.index(column))
# return output
yield key, output_table
current_idx += len(pa_table)
def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable":
"""Shuffle the wrapped examples iterable."""
        return MappedExamplesIterable(
            self.ex_iterable.shuffle_data_sources(generator),
            function=self.function,
            with_indices=self.with_indices,
            input_columns=self.input_columns,
            batched=self.batched,
            batch_size=self.batch_size,
            drop_last_batch=self.drop_last_batch,
            remove_columns=self.remove_columns,
            fn_kwargs=self.fn_kwargs,
            formatting=self.formatting,
        )
def shard_data_sources(self, worker_id: int, num_workers: int) -> "MappedExamplesIterable":
"""Keep only the requested shard."""
        return MappedExamplesIterable(
            self.ex_iterable.shard_data_sources(worker_id, num_workers),
            function=self.function,
            with_indices=self.with_indices,
            input_columns=self.input_columns,
            batched=self.batched,
            batch_size=self.batch_size,
            drop_last_batch=self.drop_last_batch,
            remove_columns=self.remove_columns,
            fn_kwargs=self.fn_kwargs,
            formatting=self.formatting,
        )
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class FilteredExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
function: Callable,
with_indices: bool = False,
input_columns: Optional[List[str]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
formatting: Optional["FormattingConfig"] = None,
format_type="deprecated",
):
if format_type != "deprecated":
warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
formatting = FormattingConfig(format_type=format_type)
super().__init__()
self.ex_iterable = ex_iterable
self.function = function
self.batched = batched
self.batch_size = batch_size
self.with_indices = with_indices
self.input_columns = input_columns
self.fn_kwargs = fn_kwargs or {}
self.formatting = formatting
if self.formatting and self.formatting.format_type == "arrow":
self.iter_arrow = self._iter_arrow
def __iter__(self):
if self.formatting and self.formatting.format_type == "arrow":
yield from ArrowExamplesIterable(self._iter_arrow, {})
else:
yield from self._iter()
def _iter(self):
if self.formatting:
formatter = get_formatter(self.formatting.format_type)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
iterator = iter(self.ex_iterable)
current_idx = 0
if self.batched:
for key, example in iterator:
# If `batched`, first build the batch, if `batch_size` is None or <=0, then the batch is the whole dataset
iterator_batch = (
iterator
if self.batch_size is None or self.batch_size <= 0
else islice(iterator, self.batch_size - 1)
)
key_examples_list = [(key, example)] + [(key, example) for key, example in iterator_batch]
keys, examples = zip(*key_examples_list)
batch = _examples_to_batch(examples)
batch = format_dict(batch) if format_dict else batch
# then compute the mask for the batch
inputs = batch
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append([current_idx + i for i in range(len(key_examples_list))])
mask = self.function(*function_args, **self.fn_kwargs)
# yield one example at a time from the batch
for batch_idx, (key_example, to_keep) in enumerate(zip(key_examples_list, mask)):
if to_keep:
yield key_example
current_idx += batch_idx + 1
else:
for key, example in iterator:
                # If not batched, we can apply the filtering function directly
example = dict(example)
inputs = format_dict(example) if format_dict else example
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append(current_idx)
to_keep = self.function(*function_args, **self.fn_kwargs)
if to_keep:
yield key, example
current_idx += 1
def _iter_arrow(self):
if self.ex_iterable.iter_arrow:
iterator = _batch_arrow_tables(
self.ex_iterable.iter_arrow(), batch_size=self.batch_size if self.batched else 1
)
else:
iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1)
current_idx = 0
for key, pa_table in iterator:
# first build the batch
function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
if self.with_indices:
if self.batched:
function_args.append([current_idx + i for i in range(len(pa_table))])
else:
function_args.append(current_idx)
# then apply the transform
mask = self.function(*function_args, **self.fn_kwargs)
# yield the filtered table
if self.batched:
yield key, pa_table.filter(mask)
elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask:
yield key, pa_table
current_idx += len(pa_table)
    def shuffle_data_sources(self, generator: np.random.Generator) -> "FilteredExamplesIterable":
        """Shuffle the wrapped examples iterable."""
        return FilteredExamplesIterable(
            self.ex_iterable.shuffle_data_sources(generator),
            function=self.function,
            with_indices=self.with_indices,
            input_columns=self.input_columns,
            batched=self.batched,
            batch_size=self.batch_size,
            fn_kwargs=self.fn_kwargs,
            formatting=self.formatting,
        )
def shard_data_sources(self, worker_id: int, num_workers: int) -> "FilteredExamplesIterable":
"""Keep only the requested shard."""
        return FilteredExamplesIterable(
            self.ex_iterable.shard_data_sources(worker_id, num_workers),
            function=self.function,
            with_indices=self.with_indices,
            input_columns=self.input_columns,
            batched=self.batched,
            batch_size=self.batch_size,
            fn_kwargs=self.fn_kwargs,
            formatting=self.formatting,
        )
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class BufferShuffledExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
super().__init__()
self.ex_iterable = ex_iterable
self.buffer_size = buffer_size
self.generator = generator
# TODO(QL): implement iter_arrow
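        # Shuffling sketch: first fill a buffer of `buffer_size` examples; then, for each new incoming
        # example, yield a random element of the buffer and replace it with the new one. The examples
        # left in the buffer at the end are shuffled and yielded last.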
@staticmethod
def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
while True:
yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
def __iter__(self):
buffer_size = self.buffer_size
rng = deepcopy(self.generator)
indices_iterator = self._iter_random_indices(rng, buffer_size)
# this is the shuffle buffer that we keep in memory
mem_buffer = []
for x in self.ex_iterable:
            if len(mem_buffer) == buffer_size:  # if the buffer is full, pick an example from it
i = next(indices_iterator)
yield mem_buffer[i]
mem_buffer[i] = x # replace the picked example by a new one
else: # otherwise, keep filling the buffer
mem_buffer.append(x)
# when we run out of examples, we shuffle the remaining examples in the buffer and yield them
rng.shuffle(mem_buffer)
yield from mem_buffer
def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable":
"""Shuffle the wrapped examples iterable as well as the shuffling buffer."""
return BufferShuffledExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator
)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "BufferShuffledExamplesIterable":
"""Keep only the requested shard."""
return BufferShuffledExamplesIterable(
self.ex_iterable.shard_data_sources(worker_id, num_workers),
buffer_size=self.buffer_size,
generator=self.generator,
)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class SkipExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
super().__init__()
self.ex_iterable = ex_iterable
self.n = n
# TODO(QL): implement iter_arrow
def __iter__(self):
yield from islice(self.ex_iterable, self.n, None)
def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
"""Doesn't shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
return self
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class TakeExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
super().__init__()
self.ex_iterable = ex_iterable
self.n = n
# TODO(QL): implement iter_arrow
def __iter__(self):
yield from islice(self.ex_iterable, self.n)
def shuffle_data_sources(self, generator: np.random.Generator) -> "TakeExamplesIterable":
"""Doesn't shuffle the wrapped examples iterable since it would take examples from other shards instead."""
return self
@staticmethod
def split_number(num, n):
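        # Split `num` into `n` near-equal parts, largest first, e.g. split_number(10, 3) == [4, 3, 3].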
quotient = num // n
remainder = num % n
result = [quotient] * n
for i in range(remainder):
result[i] += 1
return result
def shard_data_sources(self, worker_id: int, num_workers: int) -> "TakeExamplesIterable":
"""Keep only the requested shard."""
return TakeExamplesIterable(
self.ex_iterable.shard_data_sources(worker_id, num_workers),
n=self.split_number(self.n, num_workers)[worker_id],
)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
def _apply_feature_types_on_example(
example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
) -> dict:
example = dict(example)
# add missing columns
for column_name in features:
if column_name not in example:
example[column_name] = None
# we encode the example for ClassLabel feature types for example
encoded_example = features.encode_example(example)
# Decode example for Audio feature, e.g.
decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id)
return decoded_example
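# Illustrative sketch (hypothetical features): with
#   features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
# the example {"text": "ok", "label": "pos"} is encoded to {"text": "ok", "label": 1}, and a column
# missing from the example would first be filled with None.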
def _apply_feature_types_on_batch(
batch: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
) -> dict:
batch = dict(batch)
# add missing columns
n_examples = len(batch[next(iter(batch))])
for column_name in features:
if column_name not in batch:
batch[column_name] = [None] * n_examples
# we encode the batch for ClassLabel feature types for example
encoded_batch = features.encode_batch(batch)
# Decode batch for Audio feature, e.g.
decoded_batch = features.decode_batch(encoded_batch, token_per_repo_id=token_per_repo_id)
return decoded_batch
class TypedExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
features: Features,
token_per_repo_id: Dict[str, Union[str, bool, None]],
):
super().__init__()
self.ex_iterable = ex_iterable
self.features = features
self.token_per_repo_id = token_per_repo_id
if self.ex_iterable.iter_arrow is not None:
self.iter_arrow = self._iter_arrow
def __iter__(self):
# Then for each example, `TypedExamplesIterable` automatically fills missing columns with None.
# This is done with `_apply_feature_types_on_example`.
for key, example in self.ex_iterable:
yield key, _apply_feature_types_on_example(
example, self.features, token_per_repo_id=self.token_per_repo_id
)
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
schema = self.features.arrow_schema
for key, pa_table in self.ex_iterable.iter_arrow():
columns = set(pa_table.column_names)
# add missing columns
for column_name in self.features:
if column_name not in columns:
col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None])
pa_table = pa_table.append_column(column_name, col)
if pa_table.schema != schema:
pa_table = cast_table_to_features(pa_table, self.features)
yield key, pa_table
def shuffle_data_sources(self, generator: np.random.Generator) -> "TypedExamplesIterable":
"""Shuffle the wrapped examples iterable."""
return TypedExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator),
features=self.features,
token_per_repo_id=self.token_per_repo_id,
)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "TypedExamplesIterable":
"""Keep only the requested shard."""
return TypedExamplesIterable(
self.ex_iterable.shard_data_sources(worker_id, num_workers),
features=self.features,
token_per_repo_id=self.token_per_repo_id,
)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
@dataclass
class FormattingConfig:
format_type: Optional[str]
def __post_init__(self):
if self.format_type == "pandas":
raise NotImplementedError(
"The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead."
)
@dataclass
class ShufflingConfig:
generator: np.random.Generator
_original_seed: Optional[int] = None
@dataclass
class DistributedConfig:
rank: int
world_size: int
def _maybe_add_torch_iterable_dataset_parent_class(cls):
"""Add torch.utils.data.IterableDataset as a parent class if 'torch' is available"""
if config.TORCH_AVAILABLE:
import torch.utils.data
if torch.utils.data.IterableDataset not in cls.__bases__:
cls.__bases__ += (torch.utils.data.IterableDataset,)
class IterableDataset(DatasetInfoMixin):
"""A Dataset backed by an iterable."""
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
formatting: Optional[FormattingConfig] = None,
shuffling: Optional[ShufflingConfig] = None,
distributed: Optional[DistributedConfig] = None,
token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
format_type="deprecated",
):
if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None:
raise RuntimeError(
"The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. "
"Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. "
)
if format_type != "deprecated":
warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
formatting = FormattingConfig(format_type=format_type)
info = info.copy() if info is not None else DatasetInfo()
DatasetInfoMixin.__init__(self, info=info, split=split)
self._ex_iterable = ex_iterable
self._formatting = formatting
self._shuffling = shuffling
self._distributed = distributed
self._epoch = 0
self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
_maybe_add_torch_iterable_dataset_parent_class(self.__class__)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
# Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling
_maybe_add_torch_iterable_dataset_parent_class(self.__class__)
def _head(self, n=5):
return _examples_to_batch(list(self.take(n)))
def _effective_generator(self):
if self._shuffling and self._epoch == 0:
return self._shuffling.generator
elif self._shuffling:
            # Create effective seed using self._epoch (we subtract in order to avoid overflow in long_scalars)
effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch
effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
return np.random.default_rng(effective_seed)
else:
raise ValueError("This dataset is not shuffled")
@property
def n_shards(self) -> int:
if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0:
return self._ex_iterable.n_shards // self._distributed.world_size
return self._ex_iterable.n_shards
def _iter_pytorch(self):
ex_iterable = self._prepare_ex_iterable_for_iteration()
# fix for fsspec when using multiprocess
_reset_fsspec_lock()
# check if there aren't too many workers
import torch.utils.data
worker_info = torch.utils.data.get_worker_info()
if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers:
logger.warning(
f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). "
f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers."
)
logger.info(
f"To parallelize data loading, we give each process some shards (or data sources) to process. "
f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. "
f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}."
)
# split workload
_log_prefix = f"node#{self._distributed.rank} " if self._distributed else ""
shards_indices = self._ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers)
if shards_indices:
logger.debug(
f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards."
)
ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers)
if self._formatting:
formatter = get_formatter(self._formatting.format_type, features=self.features)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
            if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
if ex_iterable.iter_arrow:
iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
else:
iterator = _convert_to_arrow(ex_iterable, batch_size=1)
for key, pa_table in iterator:
yield formatter.format_row(pa_table)
return
else:
for key, example in ex_iterable:
if self.features:
# `IterableDataset` automatically fills missing columns with None.
# This is done with `_apply_feature_types_on_example`.
example = _apply_feature_types_on_example(
example, self.features, token_per_repo_id=self._token_per_repo_id
)
yield format_dict(example) if format_dict else example
logger.debug(
f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards."
)
else:
logger.debug(
f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})."
)
def _is_main_process(self):
if self._distributed and self._distributed.rank > 0:
return False
if "torch" in sys.modules:
import torch.utils.data
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None and worker_info.id > 0:
return False
return True
def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable:
if self._shuffling:
ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator())
else:
ex_iterable = self._ex_iterable
if self._distributed:
rank = self._distributed.rank
world_size = self._distributed.world_size
if ex_iterable.n_shards % world_size == 0:
if self._is_main_process():
n_shards_per_node = ex_iterable.n_shards // world_size
plural = "s" if n_shards_per_node > 1 else ""
logger.info(
f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node."
)
ex_iterable = ex_iterable.shard_data_sources(rank, world_size)
else:
if self._is_main_process():
logger.info(
f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration."
)
logger.info(
f"It is more optimized to distribute the dataset shards (or data sources) across nodes. "
f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. "
f"The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}"
)
ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank)
return ex_iterable
def __iter__(self):
if "torch" in sys.modules:
import torch.utils.data
worker_info = torch.utils.data.get_worker_info()
if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None:
# We're a torch.utils.data.IterableDataset in a PyTorch worker process
yield from self._iter_pytorch()
return
ex_iterable = self._prepare_ex_iterable_for_iteration()
if self._formatting:
formatter = get_formatter(self._formatting.format_type, features=self.features)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
if ex_iterable.iter_arrow:
iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
else:
iterator = _convert_to_arrow(ex_iterable, batch_size=1)
for key, pa_table in iterator:
yield formatter.format_row(pa_table)
return
for key, example in ex_iterable:
if self.features:
# `IterableDataset` automatically fills missing columns with None.
# This is done with `_apply_feature_types_on_example`.
example = _apply_feature_types_on_example(
example, self.features, token_per_repo_id=self._token_per_repo_id
)
yield format_dict(example) if format_dict else example
def iter(self, batch_size: int, drop_last_batch: bool = False):
"""Iterate through the batches of size `batch_size`.
Args:
batch_size (:obj:`int`): size of each batch to yield.
drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be
dropped
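        Example (a minimal sketch):

        ```py
        >>> def gen():
        ...     yield {"x": 0}
        ...     yield {"x": 1}
        ...     yield {"x": 2}
        ...
        >>> ds = IterableDataset.from_generator(gen)
        >>> list(ds.iter(batch_size=2))
        [{'x': [0, 1]}, {'x': [2]}]
        ```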
"""
if self._formatting:
formatter = get_formatter(self._formatting.format_type, features=self.features)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
ex_iterable = self._prepare_ex_iterable_for_iteration()
        if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
if ex_iterable.iter_arrow:
iterator = _batch_arrow_tables(
ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch
)
else:
iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch)
for key, pa_table in iterator:
yield formatter.format_batch(pa_table)
return
iterator = iter(ex_iterable)
for key, example in iterator:
# If batched, first build the batch
examples = [example] + [example for key, example in islice(iterator, batch_size - 1)]
if drop_last_batch and len(examples) < batch_size: # ignore last batch
return
batch = _examples_to_batch(examples)
if self.features:
# `IterableDataset` automatically fills missing columns with None.
# This is done with `_apply_feature_types_on_batch`.
batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id)
yield format_dict(batch) if format_dict else batch
@staticmethod
def from_generator(
generator: Callable,
features: Optional[Features] = None,
gen_kwargs: Optional[dict] = None,
) -> "IterableDataset":
"""Create an Iterable Dataset from a generator.
Args:
generator (`Callable`):
A generator function that `yields` examples.
features (`Features`, *optional*):
Dataset features.
gen_kwargs(`dict`, *optional*):
Keyword arguments to be passed to the `generator` callable.
You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`.
This can be used to improve shuffling and when iterating over the dataset with multiple workers.
Returns:
`IterableDataset`
Example:
```py
>>> def gen():
... yield {"text": "Good", "label": 0}
... yield {"text": "Bad", "label": 1}
...
>>> ds = IterableDataset.from_generator(gen)
```
```py
>>> def gen(shards):
... for shard in shards:
... with open(shard) as f:
... for line in f:
... yield {"line": line}
...
>>> shards = [f"data{i}.txt" for i in range(32)]
>>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
>>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
>>> from torch.utils.data import DataLoader
>>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
```
"""
from .io.generator import GeneratorDatasetInputStream
return GeneratorDatasetInputStream(
generator=generator,
features=features,
gen_kwargs=gen_kwargs,
streaming=True,
).read()
@staticmethod
def from_spark(
df: "pyspark.sql.DataFrame",
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
**kwargs,
) -> "IterableDataset":
"""Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches.
Args:
df (`pyspark.sql.DataFrame`):
The DataFrame containing the desired data.
split (`NamedSplit`, *optional*):
Split name to be assigned to the dataset.
features (`Features`, *optional*):
Dataset features.
Returns:
[`IterableDataset`]
Example:
```py
>>> df = spark.createDataFrame(
>>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
>>> columns=["id", "name"],
>>> )
>>> ds = IterableDataset.from_spark(df)
```
"""
from .io.spark import SparkDatasetReader
if sys.platform == "win32":
raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows")
return SparkDatasetReader(
df,
split=split,
features=features,
streaming=True,
**kwargs,
).read()
@staticmethod
def from_file(filename: str) -> "IterableDataset":
"""Instantiate a IterableDataset from Arrow table at filename.
Args:
filename (`str`):
File name of the dataset.
Returns:
[`IterableDataset`]
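Example (an illustrative sketch; the filename is a placeholder for a cached Arrow file):
```py
>>> from datasets import IterableDataset
>>> ds = IterableDataset.from_file("path/to/dataset.arrow")
```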
"""
pa_table_schema = read_schema_from_file(filename)
inferred_features = Features.from_arrow_schema(pa_table_schema)
ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename})
return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features))
def with_format(
self,
type: Optional[str] = None,
) -> "IterableDataset":
"""
Return a dataset with the specified format.
Supported formats: "arrow", or None for regular python objects.
The other formats are currently not implemented.
Args:
type (`str`, *optional*, defaults to `None`): if set to "arrow", the examples are yielded
as `pyarrow.Table` objects; if `None` (default), the examples are yielded as regular python objects.
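Example (an illustrative sketch; `ds` is assumed to be a streaming dataset):
```py
>>> ds = ds.with_format("arrow")
>>> next(iter(ds))  # a pyarrow.Table with a single row instead of a dict
```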
"""
type = get_format_type_from_alias(type)
# TODO(QL): add format_kwargs
# TODO(QL): add format_columns and return_all_columns
# TODO(QL): add pandas format
return IterableDataset(
ex_iterable=self._ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=FormattingConfig(format_type=type),
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
features: Optional[Features] = None,
fn_kwargs: Optional[dict] = None,
) -> "IterableDataset":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
The function is applied on-the-fly on the examples when iterating over the dataset.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have less than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
Args:
function (`Callable`, *optional*, defaults to `None`):
Function applied on-the-fly on the examples when you iterate on the dataset.
It must have one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`[List[str]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
features (`[Features]`, *optional*, defaults to `None`):
Feature types of the resulting dataset.
fn_kwargs (`Dict`, *optional*, default `None`):
Keyword arguments to be passed to `function`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> list(ds.take(3))
[{'label': 1,
'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
```
"""
if isinstance(input_columns, str):
input_columns = [input_columns]
if isinstance(remove_columns, str):
remove_columns = [remove_columns]
if function is None:
function = identity_func
if fn_kwargs is None:
fn_kwargs = {}
ex_iterable = MappedExamplesIterable(
TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
if self._info.features is not None
else self._ex_iterable,
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
fn_kwargs=fn_kwargs,
formatting=self._formatting,
)
info = self.info.copy()
info.features = features
return IterableDataset(
ex_iterable=ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
) -> "IterableDataset":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
Args:
function (`Callable`):
Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
If no function is provided, defaults to an always True function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, default `1000`):
Number of examples per batch provided to `function` if `batched=True`.
fn_kwargs (`Dict`, *optional*, default `None`):
Keyword arguments to be passed to `function`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> ds = ds.filter(lambda x: x["label"] == 0)
>>> list(ds.take(3))
[{'label': 0, 'movie_review': 'simplistic , silly and tedious .'},
{'label': 0,
'movie_review': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
{'label': 0,
'movie_review': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
```
"""
if isinstance(input_columns, str):
input_columns = [input_columns]
# TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example)
info = copy.deepcopy(self._info)
info.features = None
# We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
ex_iterable = FilteredExamplesIterable(
TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
if self._info.features is not None
else self._ex_iterable,
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
formatting=self._formatting,
)
return IterableDataset(
ex_iterable=ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def shuffle(
self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
) -> "IterableDataset":
"""
Randomly shuffles the elements of this dataset.
This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
equal to the full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
initially select a random element from only the first 1000 elements in the buffer. Once an element is
selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1000 element buffer.
If the dataset is made of several shards, it also shuffles the order of the shards.
However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
then the order of the shards is kept unchanged.
Args:
seed (`int`, *optional*, defaults to `None`):
Random seed that will be used to shuffle the dataset.
It is used to sample from the shuffle buffer and also to shuffle the data shards.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
buffer_size (`int`, defaults to `1000`):
Size of the buffer.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> shuffled_ds = ds.shuffle(seed=42)
>>> list(shuffled_ds.take(3))
[{'label': 1,
'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
{'label': 1,
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
{'label': 1,
'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
```
"""
if generator is None:
generator = np.random.default_rng(seed)
else:
generator = deepcopy(generator)
shuffling = ShufflingConfig(generator=generator, _original_seed=seed)
return IterableDataset(
ex_iterable=BufferShuffledExamplesIterable(
self._ex_iterable, buffer_size=buffer_size, generator=generator
).shuffle_data_sources(generator),
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=shuffling,
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def set_epoch(self, epoch: int):
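"""Set the current epoch. If the dataset is shuffled with a seed, the epoch is combined with that seed so that the shard order and the shuffle buffer differ from one epoch to the next."""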
self._epoch = epoch
def skip(self, n) -> "IterableDataset":
"""
Create a new [`IterableDataset`] that skips the first `n` elements.
Args:
n (`int`):
Number of elements to skip.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.skip(1)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'},
{'label': 1,
'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
```
"""
ex_iterable = SkipExamplesIterable(self._ex_iterable, n)
return IterableDataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def take(self, n) -> "IterableDataset":
"""
Create a new [`IterableDataset`] with only the first `n` elements.
Args:
n (`int`):
Number of elements to take.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> small_ds = ds.take(2)
>>> list(small_ds)
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
```
"""
ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
return IterableDataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
@property
def column_names(self) -> Optional[List[str]]:
"""Names of the columns in the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
>>> ds.column_names
['text', 'label']
```
"""
return list(self._info.features.keys()) if self._info.features is not None else None
def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset":
"""Add column to Dataset.
Args:
name (`str`): Column name.
column (`list` or `np.array`): Column data to be added.
Returns:
`IterableDataset`
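Example (an illustrative sketch; `ds` is assumed to be a streaming dataset with three examples):
```py
>>> ds = ds.add_column("new_column", ["a", "b", "c"])
```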
"""
return self.map(partial(add_column_fn, name=name, column=column), with_indices=True)
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Returns:
`IterableDataset`: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> next(iter(ds))
{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
>>> ds = ds.rename_column("text", "movie_review")
>>> next(iter(ds))
{'label': 1,
'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return self.rename_columns({original_column_name: new_column_name})
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
Args:
column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names
Returns:
`IterableDataset`: A copy of the dataset with renamed columns
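Example (an illustrative sketch; `ds` is assumed to be a streaming dataset with "text" and "label" columns):
```py
>>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
```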
"""
original_features = self._info.features.copy() if self._info.features else None
ds_iterable = self.map(
partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping)
)
if original_features is not None:
ds_iterable._info.features = Features(
{
column_mapping[col] if col in column_mapping.keys() else col: feature
for col, feature in original_features.items()
}
)
# check that it's still valid, especially with regard to task templates
try:
ds_iterable._info.copy()
except ValueError:
ds_iterable._info.task_templates = None
return ds_iterable
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
"""
Remove one or several column(s) in the dataset and the features associated to them.
The removal is done on-the-fly on the examples when iterating over the dataset.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
`IterableDataset`: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> next(iter(ds))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
>>> ds = ds.remove_columns("label")
>>> next(iter(ds))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
original_features = self._info.features.copy() if self._info.features else None
ds_iterable = self.map(remove_columns=column_names)
if original_features is not None:
ds_iterable._info.features = original_features.copy()
for col, _ in original_features.items():
if col in column_names:
del ds_iterable._info.features[col]
# check that it's still valid, especially with regard to task templates
try:
ds_iterable._info.copy()
except ValueError:
ds_iterable._info.task_templates = None
return ds_iterable
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
"""Select one or several column(s) in the dataset and the features
associated to them. The selection is done on-the-fly on the examples
when iterating over the dataset.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to select.
Returns:
`IterableDataset`: A copy of the dataset object with selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> next(iter(ds))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
>>> ds = ds.select_columns("text")
>>> next(iter(ds))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
if isinstance(column_names, str):
column_names = [column_names]
if self._info:
info = copy.deepcopy(self._info)
if self._info.features is not None:
for column_name in column_names:
if column_name not in self._info.features:
raise ValueError(
f"Column name {column_name} not in the "
"dataset. Columns in the dataset: "
f"{list(self._info.features.keys())}."
)
info.features = Features({c: info.features[c] for c in column_names})
# check that it's still valid, especially with regard to task templates
try:
info.copy()
except ValueError:
info.task_templates = None
ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names)
return IterableDataset(
ex_iterable=ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=self._shuffling,
distributed=self._distributed,
token_per_repo_id=self._token_per_repo_id,
)
def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature (`Feature`):
Target feature.
Returns:
`IterableDataset`
Example:
```py
>>> from datasets import load_dataset, Audio
>>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True)
>>> ds.features
{'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None),
'english_transcription': Value(dtype='string', id=None),
'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
'path': Value(dtype='string', id=None),
'transcription': Value(dtype='string', id=None)}
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
>>> ds.features
{'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None),
'english_transcription': Value(dtype='string', id=None),
'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
'path': Value(dtype='string', id=None),
'transcription': Value(dtype='string', id=None)}
```
"""
info = self._info.copy()
info.features[column] = feature
# check that it's still valid, especially with regard to task templates
try:
info.copy()
except ValueError:
info.task_templates = None
return IterableDataset(
ex_iterable=self._ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def cast(
self,
features: Features,
) -> "IterableDataset":
"""
Cast the dataset to a new set of features.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
Returns:
`IterableDataset`: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> ds.features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds.features.copy()
>>> new_features["label"] = ClassLabel(names=["bad", "good"])
>>> new_features["text"] = Value("large_string")
>>> ds = ds.cast(new_features)
>>> ds.features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
info = self._info.copy()
info.features = features
# check that it's still valid, especially with regard to task templates
try:
info.copy()
except ValueError:
info.task_templates = None
return IterableDataset(
ex_iterable=self._ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def _step(self, step: int, offset: int) -> "IterableDataset":
ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset)
return IterableDataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def _resolve_features(self):
if self.features is not None:
return self
elif isinstance(self._ex_iterable, TypedExamplesIterable):
features = self._ex_iterable.features
else:
features = _infer_features_from_batch(self.with_format(None)._head())
info = self.info.copy()
info.features = features
return IterableDataset(
ex_iterable=self._ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def _concatenate_iterable_datasets(
dsets: List[IterableDataset],
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
axis: int = 0,
) -> IterableDataset:
"""
Converts a list of `IterableDataset` with the same schema into a single `IterableDataset`.
Missing data are filled with None values.
<Added version="2.4.0"/>
Args:
dsets (`List[datasets.IterableDataset]`): List of Datasets to concatenate.
info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
split (`NamedSplit`, optional): Name of the dataset split.
axis (``{0, 1}``, default ``0``, meaning over rows):
Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
(horizontally).
*New in version 1.6.0*
Example:
```py
>>> ds3 = _concatenate_iterable_datasets([ds1, ds2])
```
"""
dsets = [d._resolve_features() for d in dsets]
# Perform checks (and a potential cast if axis=0)
if axis == 0:
_check_if_features_can_be_aligned([dset.features for dset in dsets])
else:
_check_column_names([col_name for dset in dsets for col_name in dset.features])
# TODO: improve this to account for a mix of ClassLabel and Value for example
# right now it would keep the type of the first dataset in the list
features = Features(
{k: v for features in _align_features([dset.features for dset in dsets]) for k, v in features.items()}
)
ex_iterables = [d._ex_iterable for d in dsets]
if axis == 0:
ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
else:
ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
# Set new info - we update the features
# setting the features also ensures to fill missing columns with None
if info is None:
info = DatasetInfo.from_merge([d.info for d in dsets])
else:
info = info.copy()
info.features = features
# Get all the auth tokens per repository - in case the datasets come from different private repositories
token_per_repo_id = {repo_id: token for dataset in dsets for repo_id, token in dataset._token_per_repo_id.items()}
# Return the new dataset
return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
def _interleave_iterable_datasets(
datasets: List[IterableDataset],
probabilities: Optional[List[float]] = None,
seed: Optional[int] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> IterableDataset:
"""
Interleave several iterable datasets (sources) into a single iterable dataset.
The new iterable dataset alternates between the sources to yield examples.
If `probabilities` is `None` (default), the iterable dataset cycles through the sources in order for each next example in the iteration.
If `probabilities` is not `None`, the iterable dataset samples a random source according to the provided probabilities for each next example in the iteration.
<Added version="2.4.0"/>
Args:
datasets (`List[IterableDataset]`): list of datasets to interleave
probabilities (`List[float]`, optional, default None): If specified, the new iterable dataset samples
examples from one source at a time according to these probabilities.
seed (`int`, optional, default None): The random seed used to choose a source for each example.
stopping_strategy (`str`, defaults to `first_exhausted`):
Two strategies are proposed right now.
By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
- with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
- with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
Output:
`datasets.IterableDataset`
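Example (an illustrative sketch; `d1` and `d2` are assumed to be `IterableDataset` objects, and this private helper is normally reached through `datasets.interleave_datasets`):
```py
>>> ds = _interleave_iterable_datasets([d1, d2], probabilities=[0.7, 0.3], seed=42)
```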
"""
datasets = [d._resolve_features() for d in datasets]
# Perform checks
_check_if_features_can_be_aligned([dset.features for dset in datasets])
# TODO: improve this to account for a mix of ClassLabel and Value for example
# right now it would keep the type of the first dataset in the list
features = Features(
{k: v for features in _align_features([dset.features for dset in datasets]) for k, v in features.items()}
)
ex_iterables = [d._ex_iterable for d in datasets]
# Use cycling or random cycling of sources
if probabilities is None:
ex_iterable = CyclingMultiSourcesExamplesIterable(ex_iterables, stopping_strategy=stopping_strategy)
else:
generator = np.random.default_rng(seed)
ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
ex_iterables, generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy
)
# Set new info - we update the features
# setting the features also ensures to fill missing columns with None
if info is None:
info = DatasetInfo.from_merge([d.info for d in datasets])
else:
info = info.copy()
info.features = features
# Get all the auth tokens per repository - in case the datasets come from different private repositories
token_per_repo_id = {
repo_id: token for dataset in datasets for repo_id, token in dataset._token_per_repo_id.items()
}
# Return the new dataset
return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
def _split_by_node_iterable_dataset(dataset: IterableDataset, rank: int, world_size: int) -> IterableDataset:
"""
Split an iterable dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
If the number of shards of the dataset is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
then the shards are evenly assigned across the nodes, which is the most optimized.
Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
Args:
dataset ([`IterableDataset`]):
The iterable dataset to split by node.
rank (`int`):
Rank of the current node.
world_size (`int`):
Total number of nodes.
Returns:
[`IterableDataset`]: The iterable dataset to be used on the node at rank `rank`.
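Example (an illustrative sketch; this private helper is normally reached through `datasets.distributed.split_dataset_by_node`):
```py
>>> ds_rank0 = _split_by_node_iterable_dataset(ds, rank=0, world_size=2)
```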
"""
if dataset._distributed:
# Combine with a previous split: the global rank must be computed with the incoming `world_size`
# before it is scaled by the previous world size.
rank = world_size * dataset._distributed.rank + rank
world_size = world_size * dataset._distributed.world_size
distributed = DistributedConfig(rank=rank, world_size=world_size)
return IterableDataset(
ex_iterable=dataset._ex_iterable,
info=dataset._info.copy(),
split=dataset._split,
formatting=dataset._formatting,
shuffling=copy.deepcopy(dataset._shuffling),
distributed=distributed,
token_per_repo_id=dataset._token_per_repo_id,
)
| datasets-main | src/datasets/iterable_dataset.py |
import importlib
import importlib.metadata
import os
import platform
from pathlib import Path
from packaging import version
from .utils.logging import get_logger
logger = get_logger(__name__)
# Datasets
S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets"
CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets"
REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}"
# Metrics
S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}"
# Hub
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}"
HUB_DEFAULT_VERSION = "main"
PY_VERSION = version.parse(platform.python_version())
# General environment variables accepted values for booleans
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
# Imports
DILL_VERSION = version.parse(importlib.metadata.version("dill"))
PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
TORCH_VERSION = "N/A"
TORCH_AVAILABLE = False
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
if TORCH_AVAILABLE:
try:
TORCH_VERSION = version.parse(importlib.metadata.version("torch"))
logger.info(f"PyTorch version {TORCH_VERSION} available.")
except importlib.metadata.PackageNotFoundError:
pass
else:
logger.info("Disabling PyTorch because USE_TF is set")
TF_VERSION = "N/A"
TF_AVAILABLE = False
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
if TF_AVAILABLE:
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
for package in [
"tensorflow",
"tensorflow-cpu",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-cpu",
"tf-nightly-gpu",
"intel-tensorflow",
"tensorflow-rocm",
"tensorflow-macos",
]:
try:
TF_VERSION = version.parse(importlib.metadata.version(package))
except importlib.metadata.PackageNotFoundError:
continue
else:
break
else:
TF_AVAILABLE = False
if TF_AVAILABLE:
if TF_VERSION.major < 2:
logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
TF_AVAILABLE = False
else:
logger.info(f"TensorFlow version {TF_VERSION} available.")
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
JAX_VERSION = "N/A"
JAX_AVAILABLE = False
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None
if JAX_AVAILABLE:
try:
JAX_VERSION = version.parse(importlib.metadata.version("jax"))
logger.info(f"JAX version {JAX_VERSION} available.")
except importlib.metadata.PackageNotFoundError:
pass
else:
logger.info("Disabling JAX because USE_JAX is set to False")
USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper()
BEAM_VERSION = "N/A"
BEAM_AVAILABLE = False
if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES:
try:
BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam"))
BEAM_AVAILABLE = True
logger.info(f"Apache Beam version {BEAM_VERSION} available.")
except importlib.metadata.PackageNotFoundError:
pass
else:
logger.info("Disabling Apache Beam because USE_BEAM is set to False")
# Optional tools for data loading
SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None
# Optional tools for feature decoding
PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None
IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
importlib.import_module("soundfile").__libsndfile_version__
) >= version.parse("1.0.31")
IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
importlib.import_module("soundfile").__libsndfile_version__
) >= version.parse("1.1.0")
# Optional compression tools
RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None
ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None
LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None
PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None
# Cache location
DEFAULT_XDG_CACHE_HOME = "~/.cache"
XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))
DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
DOWNLOADED_DATASETS_DIR = "downloads"
DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR)
DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH))
EXTRACTED_DATASETS_DIR = "extracted"
DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR)
EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH))
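# Example (illustrative): running `HF_DATASETS_CACHE=/mnt/data/hf_datasets python script.py` relocates the
# Arrow cache; the downloads/extracted paths above are derived from it unless overridden by their own variables.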
# Download count for the website
HF_UPDATE_DOWNLOAD_COUNTS = (
os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
)
# Batch size constants. For more info, see:
# https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations
DEFAULT_MAX_BATCH_SIZE = 1000
# Size of the preloaded record batch in `Dataset.__iter__`
ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10
# Pickling tables works only for small tables (<4GiB)
# For big tables, we write them on disk instead
MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
# Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare)
MAX_SHARD_SIZE = "500MB"
# Parquet configuration
PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100
PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100
PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100
# Offline mode
HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
# In-memory
DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled
IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE))
# File names
DATASET_ARROW_FILENAME = "dataset.arrow"
DATASET_INDICES_FILENAME = "indices.arrow"
DATASET_STATE_JSON_FILENAME = "state.json"
DATASET_INFO_FILENAME = "dataset_info.json"
DATASETDICT_INFOS_FILENAME = "dataset_infos.json"
LICENSE_FILENAME = "LICENSE"
METRIC_INFO_FILENAME = "metric_info.json"
DATASETDICT_JSON_FILENAME = "dataset_dict.json"
METADATA_CONFIGS_FIELD = "configs"
MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules"
MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255
# Streaming
STREAMING_READ_MAX_RETRIES = 20
STREAMING_READ_RETRY_INTERVAL = 5
# Datasets without script
DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10
ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
# Progress bars
PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec
| datasets-main | src/datasets/config.py |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
datasets: List[DatasetType],
probabilities: Optional[List[float]] = None,
seed: Optional[int] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
"""
Interleave several datasets (sources) into a single dataset.
The new dataset is constructed by alternating between the sources to get the examples.
You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.
- If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
- If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
The resulting dataset ends when one of the source datasets runs out of examples, except when `stopping_strategy` is `"all_exhausted"`,
in which case the resulting dataset ends when all datasets have run out of examples at least one time.
Note for iterable datasets:
In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
Therefore the "first_exhausted" strategy on an sharded iterable dataset can generate less samples in total (up to 1 missing sample per subdataset per worker).
Args:
datasets (`List[Dataset]` or `List[IterableDataset]`):
List of datasets to interleave.
probabilities (`List[float]`, *optional*, defaults to `None`):
If specified, the new dataset is constructed by sampling
examples from one source at a time according to these probabilities.
seed (`int`, *optional*, defaults to `None`):
The random seed used to choose a source for each example.
info ([`DatasetInfo`], *optional*):
Dataset information, like description, citation, etc.
<Added version="2.4.0"/>
split ([`NamedSplit`], *optional*):
Name of the dataset split.
<Added version="2.4.0"/>
stopping_strategy (`str`, defaults to `first_exhausted`):
Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
- with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
- with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
Returns:
[`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
`IterableDataset`.
Example:
For regular datasets (map-style):
```python
>>> from datasets import Dataset, interleave_datasets
>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
>>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
>>> dataset["a"]
[10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
>>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
>>> dataset["a"]
[10, 0, 11, 1, 2]
>>> dataset = interleave_datasets([d1, d2, d3])
>>> dataset["a"]
[0, 10, 20, 1, 11, 21, 2, 12, 22]
>>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
>>> dataset["a"]
[0, 10, 20, 1, 11, 21, 2, 12, 22]
>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
>>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
>>> dataset = interleave_datasets([d1, d2, d3])
>>> dataset["a"]
[0, 10, 20, 1, 11, 21, 2, 12, 22]
>>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
>>> dataset["a"]
[0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
>>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
>>> dataset["a"]
[10, 0, 11, 1, 2]
>>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
>>> dataset["a"]
[10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]
For datasets in streaming mode (iterable):
>>> from datasets import load_dataset, interleave_datasets
>>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
>>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
>>> dataset = interleave_datasets([d1, d2])
>>> iterator = iter(dataset)
>>> next(iterator)
{'text': 'Mtendere Village was inspired by the vision...}
>>> next(iterator)
{'text': "Média de débat d'idées, de culture...}
```
"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets.")
for i, dataset in enumerate(datasets):
if not isinstance(dataset, (Dataset, IterableDataset)):
if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
"is an empty dataset dictionary."
)
raise ValueError(
f"Dataset at position {i} has at least one split: {list(dataset)}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
)
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
)
if i == 0:
dataset_type, other_type = (
(Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
)
elif not isinstance(dataset, dataset_type):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
)
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
if dataset_type is Dataset:
return _interleave_map_style_datasets(
datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
)
else:
return _interleave_iterable_datasets(
datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
)
def concatenate_datasets(
dsets: List[DatasetType],
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
axis: int = 0,
) -> DatasetType:
"""
Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].
Args:
dsets (`List[datasets.Dataset]`):
List of Datasets to concatenate.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
axis (`{0, 1}`, defaults to `0`):
Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
(horizontally).
<Added version="1.6.0"/>
Example:
```py
>>> ds3 = concatenate_datasets([ds1, ds2])
```
"""
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets.")
for i, dataset in enumerate(dsets):
if not isinstance(dataset, (Dataset, IterableDataset)):
if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
"is an empty dataset dictionary."
)
raise ValueError(
f"Dataset at position {i} has at least one split: {list(dataset)}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
)
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
)
if i == 0:
dataset_type, other_type = (
(Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
)
elif not isinstance(dataset, dataset_type):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
)
if dataset_type is Dataset:
return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
else:
return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| datasets-main | src/datasets/combine.py |
import contextlib
import copy
import json
import os
import posixpath
import re
import warnings
from fnmatch import fnmatch
from io import BytesIO
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import fsspec
import numpy as np
from huggingface_hub import DatasetCard, DatasetCardData, HfApi
from . import config
from .arrow_dataset import PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED, Dataset
from .download import DownloadConfig
from .features import Features
from .features.features import FeatureType
from .filesystems import extract_path_from_uri, is_remote_filesystem
from .info import DatasetInfo, DatasetInfosDict
from .naming import _split_re
from .splits import NamedSplit, Split, SplitDict, SplitInfo
from .table import Table
from .tasks import TaskTemplate
from .utils import logging
from .utils.deprecation_utils import deprecated
from .utils.doc_utils import is_documented_by
from .utils.file_utils import cached_path
from .utils.hub import hf_hub_url
from .utils.metadata import MetadataConfigs
from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict
from .utils.typing import PathLike
logger = logging.get_logger(__name__)
class DatasetDict(dict):
"""A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)"""
def _check_values_type(self):
for dataset in self.values():
if not isinstance(dataset, Dataset):
raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'")
def _check_values_features(self):
items = list(self.items())
for item_a, item_b in zip(items[:-1], items[1:]):
if item_a[1].features != item_b[1].features:
raise ValueError(
f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
)
def __getitem__(self, k) -> Dataset:
if isinstance(k, (str, NamedSplit)) or len(self) == 0:
return super().__getitem__(k)
else:
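# A non-split key (e.g. an integer index) is almost certainly a mistake: raise an error suggesting to select a split first.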
available_suggested_splits = [
split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
]
suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
raise KeyError(
f"Invalid key: {k}. Please first select a split. For example: "
f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
f"Available splits: {sorted(self)}"
)
@property
def data(self) -> Dict[str, Table]:
"""The Apache Arrow tables backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.data
```
"""
self._check_values_type()
return {k: dataset.data for k, dataset in self.items()}
@property
def cache_files(self) -> Dict[str, Dict]:
"""The cache files containing the Apache Arrow table backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.cache_files
{'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
```
"""
self._check_values_type()
return {k: dataset.cache_files for k, dataset in self.items()}
@property
def num_columns(self) -> Dict[str, int]:
"""Number of columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_columns
{'test': 2, 'train': 2, 'validation': 2}
```
"""
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
@property
def num_rows(self) -> Dict[str, int]:
"""Number of rows in each split of the dataset (same as :func:`datasets.Dataset.__len__`).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_rows
{'test': 1066, 'train': 8530, 'validation': 1066}
```
"""
self._check_values_type()
return {k: dataset.num_rows for k, dataset in self.items()}
@property
def column_names(self) -> Dict[str, List[str]]:
"""Names of the columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.column_names
{'test': ['text', 'label'],
'train': ['text', 'label'],
'validation': ['text', 'label']}
```
"""
self._check_values_type()
return {k: dataset.column_names for k, dataset in self.items()}
@property
def shape(self) -> Dict[str, Tuple[int]]:
"""Shape of each split of the dataset (number of columns, number of rows).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.shape
{'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
```
"""
self._check_values_type()
return {k: dataset.shape for k, dataset in self.items()}
def flatten(self, max_depth=16) -> "DatasetDict":
"""Flatten the Apache Arrow Table of each split (nested features are flatten).
Each column with a struct type is flattened into one column per struct field.
Other columns are left unchanged.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad")
>>> ds["train"].features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
>>> ds.flatten()
DatasetDict({
train: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
validation: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 10570
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
def unique(self, column: str) -> Dict[str, List]:
"""Return a list of the unique elements in a column for each split.
This is implemented in the low-level backend and, as such, is very fast.
Args:
column (`str`):
column name (list all the column names with [`~datasets.Dataset.column_names`])
Returns:
Dict[`str`, `list`]: Dictionary of unique elements in the given column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.unique("label")
{'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
```
"""
self._check_values_type()
return {k: dataset.unique(column) for k, dataset in self.items()}
def cleanup_cache_files(self) -> Dict[str, int]:
"""Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.
Be careful when running this command that no other process is currently using other cache files.
Return:
`Dict` with the number of removed files for each split
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.cleanup_cache_files()
{'test': 0, 'train': 0, 'validation': 0}
```
"""
self._check_values_type()
return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
repr = re.sub(r"^", " " * 4, repr, 0, re.M)
return f"DatasetDict({{\n{repr}\n}})"
def cast(self, features: Features) -> "DatasetDict":
"""
Cast the dataset to a new set of features.
The transformation is applied to all the datasets of the dataset dictionary.
You can also change the feature types using [`Dataset.map`] with the `features` argument, but `cast`
casts the underlying Arrow tables directly, without running a user function over the examples, and is thus faster.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name and order of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
def cast_column(self, column: str, feature) -> "DatasetDict":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
"""
Remove one or several column(s) from each split in the dataset
and the features associated to the column(s).
The transformation is applied to all the splits of the dataset dictionary.
You can also remove a column using [`Dataset.map`] with `remove_columns` but the present method
is in-place (doesn't copy the data to a new dataset) and is thus faster.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.remove_columns("label")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
"""
Rename a column in the dataset and move the features associated to the original column under the new column name.
The transformation is applied to all the datasets of the dataset dictionary.
You can also rename a column by copying it with [`~Dataset.map`] and dropping the original with `remove_columns`, but the present method:
- takes care of moving the original features under the new column name.
- doesn't copy the data to a new dataset and is thus much faster.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.rename_column("label", "label_new")
DatasetDict({
train: Dataset({
features: ['text', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`DatasetDict`]: A copy of the dataset with renamed columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
DatasetDict({
train: Dataset({
features: ['text_new', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
"""Select one or several column(s) from each split in the dataset and
the features associated to the column(s).
The transformation is applied to all the splits of the dataset
dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.select_columns("text")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict":
"""Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.
Args:
column (`str`):
The name of the column to cast.
include_nulls (`bool`, defaults to `False`):
Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
<Added version="1.14.2"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("boolq")
>>> ds["train"].features
{'answer': Value(dtype='bool', id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
>>> ds = ds.class_encode_column("answer")
>>> ds["train"].features
{'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict(
{k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
)
@contextlib.contextmanager
def formatted_as(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""To be used in a `with` statement. Set `__getitem__` return format (type and columns).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
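Example (an illustrative sketch; the "rotten_tomatoes" dataset is only used for demonstration):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> with ds.formatted_as(type="numpy", columns=["label"]):
...     # inside the `with` block every split returns NumPy arrays for the selected columns
...     print(type(ds["train"][:3]["label"]))
<class 'numpy.ndarray'>
>>> # the previous format of every split is restored on exit
```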
"""
self._check_values_type()
old_format_type = {k: dataset._format_type for k, dataset in self.items()}
old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
try:
self.set_format(type, columns, output_all_columns, **format_kwargs)
yield
finally:
for k, dataset in self.items():
dataset.set_format(
old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]
)
def set_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns).
The format is set for every dataset in the dataset dictionary.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
It is possible to call `map` after calling `set_format`. Since `map` may add new columns, the list of formatted columns
gets updated; any column added by `map` will then be formatted as well, i.e.
`new formatted columns = (all columns - previously unformatted columns)`.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
def reset_format(self):
"""Reset `__getitem__` return format to python objects and all columns.
The transformation is applied to all the datasets of the dataset dictionary.
Same as `self.set_format()`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
>>> ds.reset_format()
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format()
def set_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
):
"""Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
The transform is set for every dataset in the dataset dictionary.
As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`.
Args:
transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in ``__getitem__``.
columns (`List[str]`, optional): columns to format in the output
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
If set to True, then the other un-formatted columns are kept with the output of the transform.
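Example (a minimal sketch mirroring the `with_transform` example below; the tokenizer checkpoint is only illustrative):
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> def encode(batch):
...     return tokenizer(batch["text"], truncation=True, padding=True, return_tensors="pt")
>>> ds.set_transform(encode)  # applied on-the-fly to every split when rows are accessed
>>> sorted(ds["train"][0])
['attention_mask', 'input_ids', 'token_type_ids']
```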
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
def with_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
) -> "DatasetDict":
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
The format is set for every dataset in the dataset dictionary.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
>>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'tensorflow'}
```
"""
dataset = copy.deepcopy(self)
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
return dataset
def with_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
) -> "DatasetDict":
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
The transform is set for every dataset in the dataset dictionary.
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
transform (`Callable`, *optional*):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`List[str]`, *optional*):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
If set to `True`, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> def encode(example):
... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
>>> ds = ds.with_transform(encode)
>>> ds["train"][0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432,
112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119,
112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190,
170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117,
179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137,
188, 1566, 7912, 14516, 6997, 119, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
return dataset
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a function to all the elements in the table (individually or in batches)
and update the table (if the function does update examples).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`callable`): with one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, default `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`[datasets.Features]`, *optional*, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while mapping examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> ds["train"][0:3]["text"]
['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'Review: effective but too-tepid biopic']
# process a batch of examples
>>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
# set number of processors
>>> ds = ds.map(add_prefix, num_proc=4)
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.map(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def filter(
self,
function,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a filter function to all the elements in the table in batches
and update the table so that the dataset only includes examples according to the filter function.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`callable`):
With one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while filtering examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.filter(lambda x: x["label"] == 1)
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 4265
})
validation: Dataset({
features: ['text', 'label'],
num_rows: 533
})
test: Dataset({
features: ['text', 'label'],
num_rows: 533
})
})
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def flatten_indices(
self,
keep_in_memory: bool = False,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "DatasetDict":
"""Create and cache a new Dataset by flattening the indices mapping.
Args:
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
cache_file_names (`Dict[str, str]`, *optional*, default `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
num_proc (`int`, optional, default `None`):
Max number of processes when generating cache. Already cached shards are loaded sequentially
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
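Example (a minimal sketch; flattening is only useful after an operation that creates an indices mapping, such as `shuffle` or `filter`):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> shuffled = ds.shuffle(seed=42)       # each split now carries an indices mapping
>>> flat = shuffled.flatten_indices()    # rewrite the data so the mapping is no longer needed
```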
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.flatten_indices(
keep_in_memory=keep_in_memory,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
num_proc=num_proc,
new_fingerprint=new_fingerprint,
)
for k, dataset in self.items()
}
)
def sort(
self,
column_names: Union[str, Sequence[str]],
reverse: Union[bool, Sequence[bool]] = False,
kind="deprecated",
null_placement: str = "at_end",
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new dataset sorted according to a single or multiple columns.
Args:
column_names (`Union[str, Sequence[str]]`):
Column name(s) to sort by.
reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
If `True`, sort by descending order rather than ascending. If a single bool is provided,
the value is applied to the sorting of all column names. Otherwise a list of bools with the
same length and order as column_names must be provided.
kind (`str`, *optional*):
Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`,
The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general,
the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
<Deprecated version="2.8.0">
`kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
</Deprecated>
null_placement (`str`, defaults to `at_end`):
Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last`
keep_in_memory (`bool`, defaults to `False`):
Keep the sorted indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the sorted indices
can be identified, use it instead of recomputing.
indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files, a lower value consumes less temporary memory.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes')
>>> ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> sorted_ds = ds.sort('label')
>>> sorted_ds['train']['label'][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
>>> another_sorted_ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
"""
self._check_values_type()
if indices_cache_file_names is None:
indices_cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.sort(
column_names=column_names,
reverse=reverse,
kind=kind,
null_placement=null_placement,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def shuffle(
self,
seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None,
seed: Optional[int] = None,
generators: Optional[Dict[str, np.random.Generator]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new Dataset where the rows are shuffled.
The transformation is applied to all the datasets of the dataset dictionary.
Currently shuffling uses numpy random generators.
You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
Args:
seeds (`Dict[str, int]` or `int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
You can provide one `seed` per dataset in the dataset dictionary.
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
generators (`Dict[str, np.random.Generator]`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
You have to provide one `generator` per dataset in the dataset dictionary.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
indices_cache_file_names (`Dict[str, str]`, *optional*):
Provide the name of a path for the cache file. It is used to store the
indices mappings instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"]["label"][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# set a seed
>>> shuffled_ds = ds.shuffle(seed=42)
>>> shuffled_ds["train"]["label"][:10]
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
```
"""
self._check_values_type()
if seed is not None and seeds is not None:
raise ValueError("Please specify seed or seeds, but not both")
seeds = seed if seed is not None else seeds
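# Normalize `seeds` to one (possibly None) seed per split: a single int is broadcast to every split.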
if seeds is None:
seeds = {k: None for k in self}
elif not isinstance(seeds, dict):
seeds = {k: seeds for k in self}
if generators is None:
generators = {k: None for k in self}
if indices_cache_file_names is None:
indices_cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.shuffle(
seed=seeds[k],
generator=generators[k],
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def save_to_disk(
self,
dataset_dict_path: PathLike,
fs="deprecated",
max_shard_size: Optional[Union[str, int]] = None,
num_shards: Optional[Dict[str, int]] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
):
"""
Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
For [`Image`] and [`Audio`] data:
All the `Image()` and `Audio()` data are stored in the Arrow files.
If you want to store paths or URLs, please use the `Value("string")` type.
Args:
dataset_dict_path (`str`):
Path (e.g. `dataset/train`) or remote URI
(e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be
saved to.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem where the dataset will be saved to.
<Deprecated version="2.8.0">
`fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
</Deprecated>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be saved to the filesystem. If expressed as a string, needs to be digits followed by a unit
(like `"50MB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
You need to provide the number of shards for each dataset in the dataset dictionary.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
num_proc (`int`, *optional*, default `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.8.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Example:
```python
>>> dataset_dict.save_to_disk("path/to/dataset/directory")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs_token_paths = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options)
fs: fsspec.AbstractFileSystem = fs_token_paths[0]
is_local = not is_remote_filesystem(fs)
path_join = os.path.join if is_local else posixpath.join
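# Normalize `num_shards` to one (possibly None) value per split; only a dict mapping split names to ints is accepted.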
if num_shards is None:
num_shards = {k: None for k in self}
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
if is_local:
Path(dataset_dict_path).resolve().mkdir(parents=True, exist_ok=True)
else:
fs.makedirs(dataset_dict_path, exist_ok=True)
with fs.open(path_join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f:
json.dump({"splits": list(self)}, f)
for k, dataset in self.items():
dataset.save_to_disk(
path_join(dataset_dict_path, k),
num_shards=num_shards.get(k),
max_shard_size=max_shard_size,
num_proc=num_proc,
storage_options=storage_options,
)
@staticmethod
def load_from_disk(
dataset_dict_path: PathLike,
fs="deprecated",
keep_in_memory: Optional[bool] = None,
storage_options: Optional[dict] = None,
) -> "DatasetDict":
"""
Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
Args:
dataset_dict_path (`str`):
Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`)
of the dataset dict directory where the dataset dict will be loaded from.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem where the dataset dict will be loaded from.
<Deprecated version="2.8.0">
`fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
</Deprecated>
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the
dataset will not be copied in-memory unless explicitly enabled by setting
`datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
[improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Returns:
[`DatasetDict`]
Example:
```py
>>> ds = load_from_disk('path/to/dataset/directory')
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs_token_paths = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options)
fs: fsspec.AbstractFileSystem = fs_token_paths[0]
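# For remote URIs, strip the protocol and join paths with POSIX separators; for local paths, use the OS separator.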
if is_remote_filesystem(fs):
dest_dataset_dict_path = extract_path_from_uri(dataset_dict_path)
path_join = posixpath.join
else:
fs = fsspec.filesystem("file")
dest_dataset_dict_path = dataset_dict_path
path_join = os.path.join
dataset_dict_json_path = path_join(dest_dataset_dict_path, config.DATASETDICT_JSON_FILENAME)
dataset_state_json_path = path_join(dest_dataset_dict_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = path_join(dest_dataset_dict_path, config.DATASET_INFO_FILENAME)
if not fs.isfile(dataset_dict_json_path):
if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path):
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`."
)
with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f:
splits = json.load(f)["splits"]
dataset_dict = DatasetDict()
for k in splits:
dataset_dict_split_path = (
dataset_dict_path.split("://")[0] + "://" + path_join(dest_dataset_dict_path, k)
if is_remote_filesystem(fs)
else path_join(dest_dataset_dict_path, k)
)
dataset_dict[k] = Dataset.load_from_disk(
dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options
)
return dataset_dict
@staticmethod
def from_csv(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from CSV file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the CSV file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`pandas.read_csv`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetReader
return CsvDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@staticmethod
def from_json(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from JSON Lines file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the JSON Lines file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`JsonConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetReader
return JsonDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@staticmethod
def from_parquet(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
columns: Optional[List[str]] = None,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from Parquet file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the Parquet file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
columns (`List[str]`, *optional*):
If not `None`, only these columns will be read from the file.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`ParquetConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'})
```
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetReader
return ParquetDatasetReader(
path_or_paths,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
columns=columns,
**kwargs,
).read()
@staticmethod
def from_text(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from text file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the text file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`TextConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
```
"""
# Dynamic import to avoid circular dependency
from .io.text import TextDatasetReader
return TextDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@deprecated()
@is_documented_by(Dataset.prepare_for_task)
def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "DatasetDict":
self._check_values_type()
return DatasetDict({k: dataset.prepare_for_task(task=task, id=id) for k, dataset in self.items()})
@is_documented_by(Dataset.align_labels_with_mapping)
def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict":
self._check_values_type()
return DatasetDict(
{
k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
for k, dataset in self.items()
}
)
def push_to_hub(
self,
repo_id,
config_name: str = "default",
private: Optional[bool] = False,
token: Optional[str] = None,
branch: Optional[str] = None,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[Dict[str, int]] = None,
embed_external_files: bool = True,
):
"""Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to False.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
private (`bool`, *optional*):
Whether the dataset repository should be set to private or not. Only affects repository creation:
a repository that already exists will not be affected by that parameter.
config_name (`str`):
Configuration name of a dataset. Defaults to "default".
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
branch (`str`, *optional*):
The git branch on which to push the dataset.
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
(like `"500MB"` or `"1GB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size`.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- [`Audio`] and [`Image`]: removes local path information and embeds file content in the Parquet files.
Example:
```python
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
"""
if num_shards is None:
num_shards = {k: None for k in self}
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
self._check_values_type()
self._check_values_features()
total_uploaded_size = 0
total_dataset_nbytes = 0
info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict()
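# Validate every split name against the allowed pattern before uploading anything.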
for split in self.keys():
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
for split in self.keys():
logger.info(f"Pushing split {split} to the Hub.")
# The split=key needs to be removed before merging
repo_id, split, uploaded_size, dataset_nbytes, _, _ = self[split]._push_parquet_shards_to_hub(
repo_id,
data_dir=data_dir,
split=split,
private=private,
token=token,
branch=branch,
max_shard_size=max_shard_size,
num_shards=num_shards.get(split),
embed_external_files=embed_external_files,
)
total_uploaded_size += uploaded_size
total_dataset_nbytes += dataset_nbytes
info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
info_to_dump.download_checksums = None
info_to_dump.download_size = total_uploaded_size
info_to_dump.dataset_size = total_dataset_nbytes
info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
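# Each split is uploaded as Parquet shards under "<data_dir>/<split>-*"; record that layout in the card metadata.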
metadata_config_to_dump = {
"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
}
api = HfApi(endpoint=config.HF_ENDPOINT)
repo_files = api.list_repo_files(repo_id, repo_type="dataset", revision=branch, token=token)
# get the info from the README to update them
if "README.md" in repo_files:
download_config = DownloadConfig()
download_config.download_desc = "Downloading metadata"
download_config.token = token
dataset_readme_path = cached_path(
hf_hub_url(repo_id, "README.md", revision=branch),
download_config=download_config,
)
dataset_card = DatasetCard.load(Path(dataset_readme_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
# get the deprecated dataset_infos.json to update them
elif config.DATASETDICT_INFOS_FILENAME in repo_files:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs:
_matched_paths = [
p
for p in repo_files
if fnmatch(p, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*"))
]
if len(_matched_paths) > 0:
# it was uploaded with push_to_hub before metadata configs existed
_resolved_splits = {
string_to_dict(
p, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED)
)["split"]
for p in _matched_paths
}
default_metadata_configs_to_dump = {
"data_files": [
{"split": _resolved_split, "path": f"data/{_resolved_split}-*"}
for _resolved_split in _resolved_splits
]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
# push to the deprecated dataset_infos.json
if config.DATASETDICT_INFOS_FILENAME in repo_files:
download_config = DownloadConfig()
download_config.download_desc = "Downloading metadata"
download_config.token = token
dataset_infos_path = cached_path(
hf_hub_url(repo_id, config.DATASETDICT_INFOS_FILENAME, revision=branch),
download_config=download_config,
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
buffer = BytesIO()
buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
HfApi(endpoint=config.HF_ENDPOINT).upload_file(
path_or_fileobj=buffer.getvalue(),
path_in_repo=config.DATASETDICT_INFOS_FILENAME,
repo_id=repo_id,
token=token,
repo_type="dataset",
revision=branch,
)
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
dataset_card = (
DatasetCard(
"---\n"
+ str(dataset_card_data)
+ "\n---\n"
+ f'# Dataset Card for "{repo_id.split("/")[-1]}"\n\n[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)'
)
if dataset_card is None
else dataset_card
)
HfApi(endpoint=config.HF_ENDPOINT).upload_file(
path_or_fileobj=str(dataset_card).encode(),
path_in_repo="README.md",
repo_id=repo_id,
token=token,
repo_type="dataset",
revision=branch,
)
class IterableDatasetDict(dict):
def with_format(
self,
type: Optional[str] = None,
) -> "IterableDatasetDict":
"""
Return a dataset with the specified format.
This method only supports the "torch" format for now.
The format is set to all the datasets of the dataset dictionary.
Args:
type (`str`, *optional*, defaults to `None`):
If set to "torch", the returned dataset
will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        >>> def encode(examples):
        ...     return tokenizer(examples["text"], truncation=True, padding="max_length")
>>> ds = ds.map(encode, batched=True, remove_columns=["text"])
>>> ds = ds.with_format("torch")
```
"""
return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: int = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
The function is applied on-the-fly on the examples when iterating over the dataset.
The transformation is applied to all the datasets of the dataset dictionary.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have less than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
Args:
function (`Callable`, *optional*, defaults to `None`):
Function applied on-the-fly on the examples when you iterate on the dataset.
It must have one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the `batch_size` should be
dropped instead of being processed by the function.
remove_columns (`[List[str]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> next(iter(ds["train"]))
{'label': 1,
'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict(
{
k: dataset.map(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
The filtering is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`):
Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
If no function is provided, defaults to an always True function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.filter(lambda x: x["label"] == 0)
>>> list(ds["train"].take(3))
[{'label': 0, 'text': 'Review: simplistic , silly and tedious .'},
{'label': 0,
'text': "Review: it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
{'label': 0,
'text': 'Review: exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
```
"""
return IterableDatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def shuffle(
self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
) -> "IterableDatasetDict":
"""
Randomly shuffles the elements of this dataset.
The shuffling is applied to all the datasets of the dataset dictionary.
This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
equal to the full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
initially select a random element from only the first 1000 elements in the buffer. Once an element is
selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1000 element buffer.
        If the dataset is made of several shards, it also shuffles the order of the shards.
However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
then the order of the shards is kept unchanged.
Args:
seed (`int`, *optional*, defaults to `None`):
Random seed that will be used to shuffle the dataset.
                It is used to sample from the shuffle buffer and also to shuffle the data shards.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
buffer_size (`int`, defaults to `1000`):
Size of the buffer.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.shuffle(seed=42)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
{'label': 1,
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
{'label': 1,
'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
```
"""
return IterableDatasetDict(
{
k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size)
for k, dataset in self.items()
}
)
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_column("text", "movie_review")
>>> next(iter(ds["train"]))
{'label': 1,
'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with renamed columns
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
>>> next(iter(ds["train"]))
{'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'rating': 1}
```
"""
return IterableDatasetDict(
{k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
)
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""
Remove one or several column(s) in the dataset and the features associated to them.
The removal is done on-the-fly on the examples when iterating over the dataset.
The removal is applied to all the datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.remove_columns("label")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""Select one or several column(s) in the dataset and the features
associated to them. The selection is done on-the-fly on the examples
when iterating over the dataset. The selection is applied to all the
datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
        >>> ds = ds.select_columns("text")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
"""Cast column to feature for decoding.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`IterableDatasetDict`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return IterableDatasetDict(
{k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
)
def cast(
self,
features: Features,
) -> "IterableDatasetDict":
"""
Cast the dataset to a new set of features.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
features (`Features`):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
| datasets-main | src/datasets/dataset_dict.py |
import inspect
import json
import os
import random
import shutil
import tempfile
import weakref
from functools import wraps
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import xxhash
from .info import DatasetInfo
from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH
from .table import ConcatenationTable, InMemoryTable, MemoryMappedTable, Table
from .utils.deprecation_utils import deprecated
from .utils.logging import get_logger
from .utils.py_utils import asdict, dumps
if TYPE_CHECKING:
from .arrow_dataset import Dataset
logger = get_logger(__name__)
# Fingerprinting allows to have one deterministic fingerprint per dataset state.
# A dataset fingerprint is updated after each transform.
# Re-running the same transforms on a dataset in a different session results in the same fingerprint.
# This is possible thanks to a custom hashing function that works with most python objects.
# Fingerprinting is the main mechanism that enables caching.
# The caching mechanism allows to reload an existing cache file if it's already been computed.
#################
# Caching
#################
_CACHING_ENABLED = True
_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempDirWithCustomCleanup"] = None
_DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None
class _TempDirWithCustomCleanup:
"""
A temporary directory with a custom cleanup function.
We need a custom temporary directory cleanup in order to delete the dataset objects that have
    cache files in the temporary directory before deleting the directory itself.
"""
def __init__(self, cleanup_func=None, *cleanup_func_args, **cleanup_func_kwargs):
self.name = tempfile.mkdtemp()
self._finalizer = weakref.finalize(self, self._cleanup)
self._cleanup_func = cleanup_func
self._cleanup_func_args = cleanup_func_args
self._cleanup_func_kwargs = cleanup_func_kwargs
def _cleanup(self):
self._cleanup_func(*self._cleanup_func_args, **self._cleanup_func_kwargs)
if os.path.exists(self.name):
shutil.rmtree(self.name)
def cleanup(self):
if self._finalizer.detach():
self._cleanup()
def maybe_register_dataset_for_temp_dir_deletion(dataset):
"""
This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order
to properly delete them before deleting the temporary directory.
The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled.
"""
if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
return
global _DATASETS_WITH_TABLE_IN_TEMP_DIR
if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None:
_DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet()
if any(
Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents
for cache_file in dataset.cache_files
):
_DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset)
def get_datasets_with_cache_file_in_temp_dir():
return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else []
def enable_caching():
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows to reload an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
the `download_mode` parameter in [`~datasets.load_dataset`].
"""
global _CACHING_ENABLED
_CACHING_ENABLED = True
def disable_caching():
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows to reload an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
the `download_mode` parameter in [`~datasets.load_dataset`].
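    Example (a minimal sketch using the caching toggles defined in this module):
    ```py
    >>> import datasets
    >>> datasets.disable_caching()
    >>> datasets.is_caching_enabled()
    False
    >>> datasets.enable_caching()
    ```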
"""
global _CACHING_ENABLED
_CACHING_ENABLED = False
@deprecated(
"Use datasets.enable_caching() or datasets.disable_caching() instead. This function will be removed in a future version of datasets."
)
def set_caching_enabled(boolean: bool):
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows to reload an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
- use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use
the ``download_mode`` parameter in :func:`datasets.load_dataset`.
"""
global _CACHING_ENABLED
_CACHING_ENABLED = bool(boolean)
def is_caching_enabled() -> bool:
"""
When applying transforms on a dataset, the data are stored in cache files.
The caching mechanism allows to reload an existing cache file if it's already been computed.
Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
after each transform.
If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
More precisely, if the caching is disabled:
- cache files are always recreated
- cache files are written to a temporary directory that is deleted when session closes
- cache files are named using a random hash instead of the dataset fingerprint
    - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
- caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
the `download_mode` parameter in [`~datasets.load_dataset`].
"""
global _CACHING_ENABLED
return bool(_CACHING_ENABLED)
def get_temporary_cache_files_directory() -> str:
"""Return a directory that is deleted when session closes."""
global _TEMP_DIR_FOR_TEMP_CACHE_FILES
if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
# Avoids a PermissionError on Windows caused by the datasets referencing
# the files from the cache directory on clean-up
def cleanup_func():
for dset in get_datasets_with_cache_file_in_temp_dir():
dset.__del__()
_TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempDirWithCustomCleanup(cleanup_func=cleanup_func)
return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name
#################
# Hashing
#################
def hashregister(*types):
def proxy(func):
for t in types:
Hasher.dispatch[t] = func
return func
return proxy
class Hasher:
"""Hasher that accepts python objects as inputs."""
dispatch: Dict = {}
def __init__(self):
self.m = xxhash.xxh64()
@classmethod
def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:
value = [value] if isinstance(value, bytes) else value
m = xxhash.xxh64()
for x in value:
m.update(x)
return m.hexdigest()
@classmethod
def hash_default(cls, value: Any) -> str:
return cls.hash_bytes(dumps(value))
@classmethod
def hash(cls, value: Any) -> str:
if type(value) in cls.dispatch:
return cls.dispatch[type(value)](cls, value)
else:
return cls.hash_default(value)
def update(self, value: Any) -> None:
header_for_update = f"=={type(value)}=="
value_for_update = self.hash(value)
self.m.update(header_for_update.encode("utf8"))
self.m.update(value_for_update.encode("utf-8"))
def hexdigest(self) -> str:
return self.m.hexdigest()
# Register a new hasher can be useful for two possible reasons:
# 1 - optimize the hashing of large amount of data (e.g. pa.Table)
# 2 - take advantage of a custom serialization method (e.g. DatasetInfo)
@hashregister(pa.Table, Table, InMemoryTable, MemoryMappedTable, ConcatenationTable)
def _hash_pa_table(hasher, value):
def _hash_pa_array(value):
if isinstance(value, pa.ChunkedArray):
return hasher.hash_bytes(c.to_string().encode("utf-8") for c in value.chunks)
else:
return hasher.hash_bytes(value.to_string().encode("utf-8"))
value = "-".join(col + "-" + _hash_pa_array(value[col]) for col in sorted(value.column_names))
return hasher.hash_bytes(value.encode("utf-8"))
@hashregister(DatasetInfo)
def _hash_dataset_info(hasher, value):
return hasher.hash_bytes(json.dumps(asdict(value), sort_keys=True).encode("utf-8"))
#################
# Fingerprinting
#################
# we show a warning only once when fingerprinting fails to avoid spam
fingerprint_warnings: Dict[str, bool] = {}
def generate_fingerprint(dataset) -> str:
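    # Hash the full dataset state (everything except the fingerprint itself) together with the
    # last modification times of its cache files to obtain a deterministic fingerprint.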
state = dataset.__dict__
hasher = Hasher()
for key in sorted(state):
if key == "_fingerprint":
continue
hasher.update(key)
hasher.update(state[key])
# hash data files last modification timestamps as well
for cache_file in dataset.cache_files:
hasher.update(os.path.getmtime(cache_file["filename"]))
return hasher.hexdigest()
def generate_random_fingerprint(nbits=64) -> str:
return f"{random.getrandbits(nbits):0{nbits//4}x}"
def update_fingerprint(fingerprint, transform, transform_args):
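    # Chain the previous fingerprint with the hashes of the transform and of each of its (sorted)
    # arguments; if anything fails to hash (e.g. an unpicklable object), fall back to a random
    # fingerprint so that no stale cache file can be reused for this call.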
global fingerprint_warnings
hasher = Hasher()
hasher.update(fingerprint)
try:
hasher.update(transform)
except: # noqa various errors might raise here from pickle or dill
if _CACHING_ENABLED:
if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
logger.warning(
f"Transform {transform} couldn't be hashed properly, a random hash was used instead. "
"Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
"If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
"This warning is only showed once. Subsequent hashing failures won't be showed."
)
fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
else:
logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.")
else:
logger.info(
f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
)
return generate_random_fingerprint()
for key in sorted(transform_args):
hasher.update(key)
try:
hasher.update(transform_args[key])
except: # noqa various errors might raise here from pickle or dill
if _CACHING_ENABLED:
if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
logger.warning(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. "
"Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
"If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
"This warning is only showed once. Subsequent hashing failures won't be showed."
)
fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
else:
logger.info(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead."
)
else:
logger.info(
f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
)
return generate_random_fingerprint()
return hasher.hexdigest()
def validate_fingerprint(fingerprint: str, max_length=64):
"""
    Make sure the fingerprint is a non-empty string that is not longer than max_length=64 by default,
so that the fingerprint can be used to name cache files without issues.
"""
if not isinstance(fingerprint, str) or not fingerprint:
raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.")
for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
if invalid_char in fingerprint:
raise ValueError(
f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. "
f"They could create issues when creating cache files."
)
if len(fingerprint) > max_length:
raise ValueError(
f"Invalid fingerprint. Maximum lenth is {max_length} but '{fingerprint}' has length {len(fingerprint)}."
"It could create issues when creating cache files."
)
def format_transform_for_fingerprint(func: Callable, version: Optional[str] = None) -> str:
"""
Format a transform to the format that will be used to update the fingerprint.
"""
transform = f"{func.__module__}.{func.__qualname__}"
if version is not None:
transform += f"@{version}"
return transform
def format_kwargs_for_fingerprint(
func: Callable,
args: Tuple,
kwargs: Dict[str, Any],
use_kwargs: Optional[List[str]] = None,
ignore_kwargs: Optional[List[str]] = None,
randomized_function: bool = False,
) -> Dict[str, Any]:
"""
Format the kwargs of a transform to the format that will be used to update the fingerprint.
"""
kwargs_for_fingerprint = kwargs.copy()
if args:
        params = [p.name for p in inspect.signature(func).parameters.values() if p.kind != p.VAR_KEYWORD]
args = args[1:] # assume the first argument is the dataset
params = params[1:]
kwargs_for_fingerprint.update(zip(params, args))
else:
del kwargs_for_fingerprint[
next(iter(inspect.signature(func).parameters))
] # assume the first key is the dataset
# keep the right kwargs to be hashed to generate the fingerprint
if use_kwargs:
kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs}
if ignore_kwargs:
kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs}
if randomized_function: # randomized functions have `seed` and `generator` parameters
if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None:
_, seed, pos, *_ = np.random.get_state()
seed = seed[pos] if pos < 624 else seed[0]
kwargs_for_fingerprint["generator"] = np.random.default_rng(seed)
# remove kwargs that are the default values
default_values = {
p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty
}
for default_varname, default_value in default_values.items():
if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value:
kwargs_for_fingerprint.pop(default_varname)
return kwargs_for_fingerprint
def fingerprint_transform(
inplace: bool,
use_kwargs: Optional[List[str]] = None,
ignore_kwargs: Optional[List[str]] = None,
fingerprint_names: Optional[List[str]] = None,
randomized_function: bool = False,
version: Optional[str] = None,
):
"""
Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint``
Args:
inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace.
Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of
setting the fingerprint of the returned Dataset.
        use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account
            to update the fingerprint. By default all the arguments are used.
        ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to exclude when
            updating the fingerprint. Note that ignore_kwargs takes precedence over use_kwargs.
fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]):
            If the dataset transform is not inplace and returns a DatasetDict, then it can require
several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names,
one fingerprint named after each element of fingerprint_names is going to be passed.
randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has
optional parameters "seed" and "generator", then you can set randomized_function to True.
This way, even if users set "seed" and "generator" to None, then the fingerprint is
going to be randomly generated depending on numpy's current state. In this case, the
generator is set to np.random.default_rng(np.random.get_state()[1][0]).
version (:obj:`str`, optional): version of the transform. The version is taken into account when
            computing the fingerprint. If a dataset transform changes (or at least if its cached output
            changes), then one should increase the version. If the version stays the same, old cached
            data that is not compatible with the new transform could be reused.
It should be in the format "MAJOR.MINOR.PATCH".
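    Example (illustrative sketch only; the decorated method below is hypothetical, not part of `Dataset`)::
        @fingerprint_transform(inplace=False)
        def add_prefix_to_text(self, prefix: str, new_fingerprint: Optional[str] = None) -> "Dataset":
            # `new_fingerprint` is required by the decorator when `inplace=False`;
            # the wrapper fills it in automatically from the previous fingerprint.
            ...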
"""
if use_kwargs is not None and not isinstance(use_kwargs, list):
raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}")
if ignore_kwargs is not None and not isinstance(ignore_kwargs, list):
raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(use_kwargs)}")
if inplace and fingerprint_names:
raise ValueError("fingerprint_names are only used when inplace is False")
fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"]
def _fingerprint(func):
if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names):
raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature")
if randomized_function: # randomized function have seed and generator parameters
if "seed" not in func.__code__.co_varnames:
raise ValueError(f"'seed' must be in {func}'s signature")
if "generator" not in func.__code__.co_varnames:
raise ValueError(f"'generator' must be in {func}'s signature")
        # this call has to be outside the wrapper since __qualname__ changes in multiprocessing
transform = format_transform_for_fingerprint(func, version=version)
@wraps(func)
def wrapper(*args, **kwargs):
kwargs_for_fingerprint = format_kwargs_for_fingerprint(
func,
args,
kwargs,
use_kwargs=use_kwargs,
ignore_kwargs=ignore_kwargs,
randomized_function=randomized_function,
)
if args:
dataset: Dataset = args[0]
args = args[1:]
else:
dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters)))
# compute new_fingerprint and add it to the args of not in-place transforms
if inplace:
new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint)
else:
for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes
if kwargs.get(fingerprint_name) is None:
kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name
kwargs[fingerprint_name] = update_fingerprint(
dataset._fingerprint, transform, kwargs_for_fingerprint
)
else:
validate_fingerprint(kwargs[fingerprint_name])
# Call actual function
out = func(dataset, *args, **kwargs)
# Update fingerprint of in-place transforms + update in-place history of transforms
if inplace: # update after calling func so that the fingerprint doesn't change if the function fails
dataset._fingerprint = new_fingerprint
return out
wrapper._decorator_name_ = "fingerprint"
return wrapper
return _fingerprint
| datasets-main | src/datasets/fingerprint.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
name = _uppercase_uppercase_re.sub(r"\1_\2", name)
name = _lowercase_uppercase_re.sub(r"\1_\2", name)
return name.lower()
def snakecase_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
name = _single_underscore_re.split(name)
name = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
filepath = os.path.join(data_dir, prefix)
return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
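    # e.g. dataset "squad", split "train", two shard lengths and filetype_suffix="arrow" yields
    # ["<path>/squad-train-00000-of-00002.arrow", "<path>/squad-train-00001-of-00002.arrow"]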
prefix = filename_prefix_for_split(dataset_name, split)
prefix = os.path.join(path, prefix)
if shard_lengths:
num_shards = len(shard_lengths)
filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
if filetype_suffix:
filenames = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
filename = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
| datasets-main | src/datasets/naming.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""To write records into Parquet files."""
import errno
import json
import os
import sys
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import fsspec
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from . import config
from .features import Features, Image, Value
from .features.features import (
FeatureType,
_ArrayXDExtensionType,
cast_to_python_objects,
generate_from_arrow_type,
get_nested_type,
list_of_np_array_to_pyarrow_listarray,
numpy_to_pyarrow_listarray,
to_pyarrow_listarray,
)
from .filesystems import is_remote_filesystem
from .info import DatasetInfo
from .keyhash import DuplicatedKeysError, KeyHasher
from .table import array_cast, array_concat, cast_array_to_feature, embed_table_storage, table_cast
from .utils import logging
from .utils.file_utils import hash_url_to_filename
from .utils.py_utils import asdict, first_non_null_value
logger = logging.get_logger(__name__)
type_ = type # keep python's type function
class SchemaInferenceError(ValueError):
pass
class TypedSequence:
"""
This data container generalizes the typing when instantiating pyarrow arrays, tables or batches.
More specifically it adds several features:
- Support extension types like ``datasets.features.Array2DExtensionType``:
By default pyarrow arrays don't return extension arrays. One has to call
``pa.ExtensionArray.from_storage(type, pa.array(data, type.storage_type))``
in order to get an extension array.
- Support for ``try_type`` parameter that can be used instead of ``type``:
When an array is transformed, we like to keep the same type as before if possible.
For example when calling :func:`datasets.Dataset.map`, we don't want to change the type
of each column by default.
- Better error message when a pyarrow array overflows.
Example::
from datasets.features import Array2D, Array2DExtensionType, Value
from datasets.arrow_writer import TypedSequence
import pyarrow as pa
arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
assert arr.type == pa.int32()
arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
assert arr.type == pa.int32()
arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int32")))
assert arr.type == pa.string()
arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
assert arr.type == Array2DExtensionType((1, 3), "int64")
table = pa.Table.from_pydict({
"image": TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))
})
assert table["image"].type == Array2DExtensionType((1, 3), "int64")
"""
def __init__(
self,
data: Iterable,
type: Optional[FeatureType] = None,
try_type: Optional[FeatureType] = None,
optimized_int_type: Optional[FeatureType] = None,
):
        # type and try_type are mutually exclusive
if type is not None and try_type is not None:
raise ValueError("You cannot specify both type and try_type")
# set attributes
self.data = data
self.type = type
self.try_type = try_type # is ignored if it doesn't match the data
self.optimized_int_type = optimized_int_type
# when trying a type (is ignored if data is not compatible)
self.trying_type = self.try_type is not None
self.trying_int_optimization = optimized_int_type is not None and type is None and try_type is None
# used to get back the inferred type after __arrow_array__() is called once
self._inferred_type = None
def get_inferred_type(self) -> FeatureType:
"""Return the inferred feature type.
This is done by converting the sequence to an Arrow array, and getting the corresponding
feature type.
Since building the Arrow array can be expensive, the value of the inferred type is cached
as soon as pa.array is called on the typed sequence.
Returns:
FeatureType: inferred feature type of the sequence.
"""
if self._inferred_type is None:
self._inferred_type = generate_from_arrow_type(pa.array(self).type)
return self._inferred_type
@staticmethod
def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]:
"""Implement type inference for custom objects like PIL.Image.Image -> Image type.
        This function is only used for custom python objects that can't be directly passed to build
        an Arrow array. In such cases it infers the feature type to use, and it encodes the data so
that they can be passed to an Arrow array.
Args:
data (Iterable): array of data to infer the type, e.g. a list of PIL images.
Returns:
Tuple[Iterable, Optional[FeatureType]]: a tuple with:
- the (possibly encoded) array, if the inferred feature type requires encoding
- the inferred feature type if the array is made of supported custom objects like
PIL images, else None.
"""
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
non_null_idx, non_null_value = first_non_null_value(data)
if isinstance(non_null_value, PIL.Image.Image):
return [Image().encode_example(value) if value is not None else None for value in data], Image()
return data, None
def __arrow_array__(self, type: Optional[pa.DataType] = None):
"""This function is called when calling pa.array(typed_sequence)"""
if type is not None:
raise ValueError("TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)")
del type # make sure we don't use it
data = self.data
# automatic type inference for custom objects
if self.type is None and self.try_type is None:
data, self._inferred_type = self._infer_custom_type_and_encode(data)
if self._inferred_type is None:
type = self.try_type if self.trying_type else self.type
else:
type = self._inferred_type
pa_type = get_nested_type(type) if type is not None else None
optimized_int_pa_type = (
get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None
)
trying_cast_to_python_objects = False
try:
# custom pyarrow types
if isinstance(pa_type, _ArrayXDExtensionType):
storage = to_pyarrow_listarray(data, pa_type)
return pa.ExtensionArray.from_storage(pa_type, storage)
# efficient np array to pyarrow array
if isinstance(data, np.ndarray):
out = numpy_to_pyarrow_listarray(data)
elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray):
out = list_of_np_array_to_pyarrow_listarray(data)
else:
trying_cast_to_python_objects = True
out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
# use smaller integer precisions if possible
if self.trying_int_optimization:
if pa.types.is_int64(out.type):
out = out.cast(optimized_int_pa_type)
elif pa.types.is_list(out.type):
if pa.types.is_int64(out.type.value_type):
out = array_cast(out, pa.list_(optimized_int_pa_type))
elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type):
out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type)))
# otherwise we can finally use the user's type
elif type is not None:
# We use cast_array_to_feature to support casting to custom types like Audio and Image
# Also, when trying type "string", we don't want to convert integers or floats to "string".
# We only do it if trying_type is False - since this is what the user asks for.
out = cast_array_to_feature(out, type, allow_number_to_str=not self.trying_type)
return out
except (
TypeError,
pa.lib.ArrowInvalid,
pa.lib.ArrowNotImplementedError,
) as e: # handle type errors and overflows
# Ignore ArrowNotImplementedError caused by trying type, otherwise re-raise
if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError):
raise
if self.trying_type:
try: # second chance
if isinstance(data, np.ndarray):
return numpy_to_pyarrow_listarray(data)
elif isinstance(data, list) and data and any(isinstance(value, np.ndarray) for value in data):
return list_of_np_array_to_pyarrow_listarray(data)
else:
trying_cast_to_python_objects = True
return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
except pa.lib.ArrowInvalid as e:
if "overflow" in str(e):
raise OverflowError(
f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
) from None
elif self.trying_int_optimization and "not in range" in str(e):
optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
logger.info(
f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64."
)
return out
elif trying_cast_to_python_objects and "Could not convert" in str(e):
out = pa.array(
cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)
)
if type is not None:
out = cast_array_to_feature(out, type, allow_number_to_str=True)
return out
else:
raise
elif "overflow" in str(e):
raise OverflowError(
f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
) from None
elif self.trying_int_optimization and "not in range" in str(e):
optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
logger.info(f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.")
return out
elif trying_cast_to_python_objects and "Could not convert" in str(e):
out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False))
if type is not None:
out = cast_array_to_feature(out, type, allow_number_to_str=True)
return out
else:
raise
class OptimizedTypedSequence(TypedSequence):
def __init__(
self,
data,
type: Optional[FeatureType] = None,
try_type: Optional[FeatureType] = None,
col: Optional[str] = None,
optimized_int_type: Optional[FeatureType] = None,
):
optimized_int_type_by_col = {
"attention_mask": Value("int8"), # binary tensor
"special_tokens_mask": Value("int8"),
"input_ids": Value("int32"), # typical vocab size: 0-50k (max ~500k, never > 1M)
"token_type_ids": Value(
"int8"
), # binary mask; some (XLNetModel) use an additional token represented by a 2
}
if type is None and try_type is None:
optimized_int_type = optimized_int_type_by_col.get(col, None)
super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type)
class ArrowWriter:
"""Shuffles and writes Examples to Arrow files."""
_WRITER_CLASS = pa.RecordBatchStreamWriter
def __init__(
self,
schema: Optional[pa.Schema] = None,
features: Optional[Features] = None,
path: Optional[str] = None,
stream: Optional[pa.NativeFile] = None,
fingerprint: Optional[str] = None,
writer_batch_size: Optional[int] = None,
hash_salt: Optional[str] = None,
check_duplicates: Optional[bool] = False,
disable_nullable: bool = False,
update_features: bool = False,
with_metadata: bool = True,
unit: str = "examples",
embed_local_files: bool = False,
storage_options: Optional[dict] = None,
):
if path is None and stream is None:
raise ValueError("At least one of path and stream must be provided.")
if features is not None:
self._features = features
self._schema = None
elif schema is not None:
self._schema: pa.Schema = schema
self._features = Features.from_arrow_schema(self._schema)
else:
self._features = None
self._schema = None
if hash_salt is not None:
# Create KeyHasher instance using split name as hash salt
self._hasher = KeyHasher(hash_salt)
else:
self._hasher = KeyHasher("")
self._check_duplicates = check_duplicates
self._disable_nullable = disable_nullable
if stream is None:
fs_token_paths = fsspec.get_fs_token_paths(path, storage_options=storage_options)
self._fs: fsspec.AbstractFileSystem = fs_token_paths[0]
self._path = (
fs_token_paths[2][0]
if not is_remote_filesystem(self._fs)
else self._fs.unstrip_protocol(fs_token_paths[2][0])
)
self.stream = self._fs.open(fs_token_paths[2][0], "wb")
self._closable_stream = True
else:
self._fs = None
self._path = None
self.stream = stream
self._closable_stream = False
self.fingerprint = fingerprint
self.disable_nullable = disable_nullable
self.writer_batch_size = writer_batch_size or config.DEFAULT_MAX_BATCH_SIZE
self.update_features = update_features
self.with_metadata = with_metadata
self.unit = unit
self.embed_local_files = embed_local_files
self._num_examples = 0
self._num_bytes = 0
self.current_examples: List[Tuple[Dict[str, Any], str]] = []
self.current_rows: List[pa.Table] = []
self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None
self.hkey_record = []
def __len__(self):
"""Return the number of writed and staged examples"""
return self._num_examples + len(self.current_examples) + len(self.current_rows)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
# Try closing if opened; if closed: pyarrow.lib.ArrowInvalid: Invalid operation on closed file
if self.pa_writer: # it might be None
try:
self.pa_writer.close()
except Exception: # pyarrow.lib.ArrowInvalid, OSError
pass
if self._closable_stream and not self.stream.closed:
self.stream.close() # This also closes self.pa_writer if it is opened
def _build_writer(self, inferred_schema: pa.Schema):
schema = self.schema
inferred_features = Features.from_arrow_schema(inferred_schema)
if self._features is not None:
            if self.update_features:  # keep original features if they match, or update them
fields = {field.name: field for field in self._features.type}
for inferred_field in inferred_features.type:
name = inferred_field.name
if name in fields:
if inferred_field == fields[name]:
inferred_features[name] = self._features[name]
self._features = inferred_features
schema: pa.Schema = inferred_schema
else:
self._features = inferred_features
schema: pa.Schema = inferred_features.arrow_schema
if self.disable_nullable:
schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in schema)
if self.with_metadata:
schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint))
else:
schema = schema.with_metadata({})
self._schema = schema
self.pa_writer = self._WRITER_CLASS(self.stream, schema)
@property
def schema(self):
_schema = (
self._schema
if self._schema is not None
else (pa.schema(self._features.type) if self._features is not None else None)
)
if self._disable_nullable and _schema is not None:
_schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in _schema)
return _schema if _schema is not None else []
@staticmethod
def _build_metadata(info: DatasetInfo, fingerprint: Optional[str] = None) -> Dict[str, str]:
info_keys = ["features"] # we can add support for more DatasetInfo keys in the future
info_as_dict = asdict(info)
metadata = {}
metadata["info"] = {key: info_as_dict[key] for key in info_keys}
if fingerprint is not None:
metadata["fingerprint"] = fingerprint
return {"huggingface": json.dumps(metadata)}
def write_examples_on_file(self):
"""Write stored examples from the write-pool of examples. It makes a table out of the examples and write it."""
if not self.current_examples:
return
# order the columns properly
cols = (
[col for col in self.schema.names if col in self.current_examples[0][0]]
+ [col for col in self.current_examples[0][0].keys() if col not in self.schema.names]
if self.schema
else self.current_examples[0][0].keys()
)
batch_examples = {}
for col in cols:
# We use row[0][col] since current_examples contains (example, key) tuples.
            # Moreover, examples could be Arrow arrays of 1 element.
# This can happen in `.map()` when we want to re-write the same Arrow data
if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
arrays = [row[0][col] for row in self.current_examples]
batch_examples[col] = array_concat(arrays)
else:
batch_examples[col] = [
row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
for row in self.current_examples
]
self.write_batch(batch_examples=batch_examples)
self.current_examples = []
def write_rows_on_file(self):
"""Write stored rows from the write-pool of rows. It concatenates the single-row tables and it writes the resulting table."""
if not self.current_rows:
return
table = pa.concat_tables(self.current_rows)
self.write_table(table)
self.current_rows = []
def write(
self,
example: Dict[str, Any],
key: Optional[Union[str, int, bytes]] = None,
writer_batch_size: Optional[int] = None,
):
"""Add a given (Example,Key) pair to the write-pool of examples which is written to file.
Args:
example: the Example to add.
key: Optional, a unique identifier (str, int or bytes) associated with each example.
"""
# Utilize the keys and duplicate checking when `self._check_duplicates` is passed True
if self._check_duplicates:
# Create unique hash from key and store as (key, example) pairs
hash = self._hasher.hash(key)
self.current_examples.append((example, hash))
# Maintain record of keys and their respective hashes for checking duplicates
self.hkey_record.append((hash, key))
else:
# Store example as a tuple so as to keep the structure of `self.current_examples` uniform
self.current_examples.append((example, ""))
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size:
if self._check_duplicates:
self.check_duplicate_keys()
# Re-initializing to empty list for next batch
self.hkey_record = []
self.write_examples_on_file()
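# A minimal usage sketch (not part of this class), assuming a writer created with a local
# path and with duplicate checking enabled via the constructor (arguments not shown in this
# excerpt):
#
#     with ArrowWriter(path="out.arrow") as writer:
#         writer.write({"text": "hello"}, key=0)
#         writer.write({"text": "world"}, key=1)
#         num_examples, num_bytes = writer.finalize()
#
# Writing the same key twice within a batch raises DuplicatedKeysError when duplicate
# checking is enabled (see check_duplicate_keys below).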
def check_duplicate_keys(self):
"""Raises error if duplicates found in a batch"""
tmp_record = set()
for hash, key in self.hkey_record:
if hash in tmp_record:
duplicate_key_indices = [
str(self._num_examples + index)
for index, (duplicate_hash, _) in enumerate(self.hkey_record)
if duplicate_hash == hash
]
raise DuplicatedKeysError(key, duplicate_key_indices)
else:
tmp_record.add(hash)
def write_row(self, row: pa.Table, writer_batch_size: Optional[int] = None):
"""Add a given single-row Table to the write-pool of rows which is written to file.
Args:
row: the row to add.
"""
if len(row) != 1:
raise ValueError(f"Only single-row pyarrow tables are allowed but got table with {len(row)} rows.")
self.current_rows.append(row)
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size:
self.write_rows_on_file()
def write_batch(
self,
batch_examples: Dict[str, List],
writer_batch_size: Optional[int] = None,
):
"""Write a batch of Example to file.
Ignores the batch if it appears to be empty,
preventing a potential schema update of unknown types.
Args:
batch_examples: the batch of examples to add.
"""
if batch_examples and len(next(iter(batch_examples.values()))) == 0:
return
features = None if self.pa_writer is None and self.update_features else self._features
try_features = self._features if self.pa_writer is None and self.update_features else None
arrays = []
inferred_features = Features()
cols = (
[col for col in self.schema.names if col in batch_examples]
+ [col for col in batch_examples.keys() if col not in self.schema.names]
if self.schema
else batch_examples.keys()
)
for col in cols:
col_values = batch_examples[col]
col_type = features[col] if features else None
if isinstance(col_values, (pa.Array, pa.ChunkedArray)):
array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values
arrays.append(array)
inferred_features[col] = generate_from_arrow_type(col_values.type)
else:
col_try_type = try_features[col] if try_features is not None and col in try_features else None
typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)
arrays.append(pa.array(typed_sequence))
inferred_features[col] = typed_sequence.get_inferred_type()
schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
self.write_table(pa_table, writer_batch_size)
def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None):
"""Write a Table to file.
Args:
pa_table: the Table to add.
"""
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if self.pa_writer is None:
self._build_writer(inferred_schema=pa_table.schema)
pa_table = pa_table.combine_chunks()
pa_table = table_cast(pa_table, self._schema)
if self.embed_local_files:
pa_table = embed_table_storage(pa_table)
self._num_bytes += pa_table.nbytes
self._num_examples += pa_table.num_rows
self.pa_writer.write_table(pa_table, writer_batch_size)
def finalize(self, close_stream=True):
self.write_rows_on_file()
# In case len(current_examples) < writer_batch_size, but the user calls finalize()
if self._check_duplicates:
self.check_duplicate_keys()
# Re-initializing to empty list for next batch
self.hkey_record = []
self.write_examples_on_file()
# If schema is known, infer features even if no examples were written
if self.pa_writer is None and self.schema:
self._build_writer(self.schema)
if self.pa_writer is not None:
self.pa_writer.close()
self.pa_writer = None
if close_stream:
self.stream.close()
else:
if close_stream:
self.stream.close()
raise SchemaInferenceError("Please pass `features` or at least one example when writing data")
logger.debug(
f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {self._path if self._path else ''}."
)
return self._num_examples, self._num_bytes
class ParquetWriter(ArrowWriter):
_WRITER_CLASS = pq.ParquetWriter
class BeamWriter:
"""
Shuffles and writes Examples to Arrow files.
The Arrow files are converted from Parquet files that are the output of Apache Beam pipelines.
"""
def __init__(
self,
features: Optional[Features] = None,
schema: Optional[pa.Schema] = None,
path: Optional[str] = None,
namespace: Optional[str] = None,
cache_dir: Optional[str] = None,
):
if features is None and schema is None:
raise ValueError("At least one of features and schema must be provided.")
if path is None:
raise ValueError("Path must be provided.")
if features is not None:
self._features: Features = features
self._schema: pa.Schema = features.arrow_schema
else:
self._schema: pa.Schema = schema
self._features: Features = Features.from_arrow_schema(schema)
self._path = path
self._parquet_path = os.path.splitext(path)[0] # remove extension
self._namespace = namespace or "default"
self._num_examples = None
self._cache_dir = cache_dir or config.HF_DATASETS_CACHE
def write_from_pcollection(self, pcoll_examples):
"""Add the final steps of the beam pipeline: write to parquet files."""
import apache_beam as beam
def inc_num_examples(example):
beam.metrics.Metrics.counter(self._namespace, "num_examples").inc()
# count examples
_ = pcoll_examples | "Count N. Examples" >> beam.Map(inc_num_examples)
# save dataset
return (
pcoll_examples
| "Get values" >> beam.Values()
| "Save to parquet"
>> beam.io.parquetio.WriteToParquet(
self._parquet_path, self._schema, shard_name_template="-SSSSS-of-NNNNN.parquet"
)
)
def finalize(self, metrics_query_result: dict):
"""
Run after the pipeline has finished.
It converts the resulting parquet files to arrow and it completes the info from the pipeline metrics.
Args:
metrics_query_result: `dict` obtained from pipeline_results.metrics().query(m_filter). Make sure
that the filter keeps only the metrics for the considered split, under the namespace `split_name`.
"""
import apache_beam as beam
from .utils import beam_utils
shards_metadata = list(
beam.io.filesystems.FileSystems.match([self._parquet_path + "*.parquet"])[0].metadata_list
)
shards = [metadata.path for metadata in shards_metadata]
num_bytes = sum([metadata.size_in_bytes for metadata in shards_metadata])
shard_lengths = get_parquet_lengths(shards)
# Convert to arrow
if self._path.endswith(".arrow"):
logger.info(f"Converting parquet files {self._parquet_path} to arrow {self._path}")
shards = [
metadata.path
for metadata in beam.io.filesystems.FileSystems.match([self._parquet_path + "*.parquet"])[
0
].metadata_list
]
try: # stream conversion
disable = not logging.is_progress_bar_enabled()
num_bytes = 0
for shard in logging.tqdm(shards, unit="shards", disable=disable):
with beam.io.filesystems.FileSystems.open(shard) as source:
with beam.io.filesystems.FileSystems.create(
shard.replace(".parquet", ".arrow")
) as destination:
shard_num_bytes, _ = parquet_to_arrow(source, destination)
num_bytes += shard_num_bytes
except OSError as e: # broken pipe can happen if the connection is unstable, do local conversion instead
if e.errno != errno.EPIPE: # not a broken pipe
raise
logger.warning(
"Broken Pipe during stream conversion from parquet to arrow. Using local convert instead"
)
local_convert_dir = os.path.join(self._cache_dir, "beam_convert")
os.makedirs(local_convert_dir, exist_ok=True)
disable = not logging.is_progress_bar_enabled()
num_bytes = 0
for shard in logging.tqdm(shards, unit="shards", disable=disable):
local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(shard) + ".parquet")
beam_utils.download_remote_to_local(shard, local_parquet_path)
local_arrow_path = local_parquet_path.replace(".parquet", ".arrow")
shard_num_bytes, _ = parquet_to_arrow(local_parquet_path, local_arrow_path)
num_bytes += shard_num_bytes
remote_arrow_path = shard.replace(".parquet", ".arrow")
beam_utils.upload_local_to_remote(local_arrow_path, remote_arrow_path)
# Save metrics
counters_dict = {metric.key.metric.name: metric.result for metric in metrics_query_result["counters"]}
self._num_examples = counters_dict["num_examples"]
self._num_bytes = num_bytes
self._shard_lengths = shard_lengths
return self._num_examples, self._num_bytes
def get_parquet_lengths(sources) -> List[int]:
shard_lengths = []
disable = not logging.is_progress_bar_enabled()
for source in logging.tqdm(sources, unit="parquet files", disable=disable):
parquet_file = pa.parquet.ParquetFile(source)
shard_lengths.append(parquet_file.metadata.num_rows)
return shard_lengths
def parquet_to_arrow(source, destination) -> List[int]:
"""Convert parquet file to arrow file. Inputs can be str paths or file-like objects"""
stream = None if isinstance(destination, str) else destination
with ArrowWriter(path=destination, stream=stream) as writer:
parquet_file = pa.parquet.ParquetFile(source)
for record_batch in parquet_file.iter_batches():
pa_table = pa.Table.from_batches([record_batch])
writer.write_table(pa_table)
num_bytes, num_examples = writer.finalize()
return num_bytes, num_examples
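# A minimal usage sketch for the helper above (hypothetical file names, not part of this
# module); the returned values come from ArrowWriter.finalize():
#
#     num_bytes, num_examples = parquet_to_arrow("shard-00000.parquet", "shard-00000.arrow")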
| datasets-main | src/datasets/arrow_writer.py |
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.6.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| datasets-main | src/datasets/__init__.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" Arrow ArrowReader."""
import copy
import math
import os
import re
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .download.download_config import DownloadConfig
from .naming import _split_re, filenames_for_dataset_split
from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables
from .utils import logging
from .utils.file_utils import cached_path
if TYPE_CHECKING:
from .info import DatasetInfo # noqa: F401
from .splits import Split, SplitInfo # noqa: F401
logger = logging.get_logger(__name__)
HF_GCP_BASE_URL = "https://storage.googleapis.com/huggingface-nlp/cache/datasets"
_SUB_SPEC_RE = re.compile(
rf"""
^
(?P<split>{_split_re[1:-1]})
(\[
((?P<from>-?\d+)
(?P<from_pct>%)?)?
:
((?P<to>-?\d+)
(?P<to_pct>%)?)?
\])?(\((?P<rounding>[^\)]*)\))?
$
""", # remove ^ and $
re.X,
)
_ADDITION_SEP_RE = re.compile(r"\s*\+\s*")
class DatasetNotOnHfGcsError(ConnectionError):
"""When you can't get the dataset from the Hf google cloud storage"""
pass
class MissingFilesOnHfGcsError(ConnectionError):
"""When some files are missing on the Hf oogle cloud storage"""
pass
@dataclass(frozen=True)
class FileInstructions:
"""The file instructions associated with a split ReadInstruction.
Attributes:
num_examples: `int`, The total number of examples
file_instructions: List[dict(filename, skip, take)], the files information.
The filenames contain the relative path, not the absolute one.
skip/take indicate which examples to read in the file: `ds.slice(skip, take)`
"""
num_examples: int
file_instructions: List[dict]
def make_file_instructions(
name: str,
split_infos: List["SplitInfo"],
instruction: Union[str, "ReadInstruction"],
filetype_suffix: Optional[str] = None,
prefix_path: Optional[str] = None,
) -> FileInstructions:
"""Returns instructions of the split dict.
Args:
name (`str`): Name of the dataset.
split_infos (`list` of `[SplitInfo]`): Dataset splits information.
instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset.
filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'.
prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name.
Returns:
[`FileInstructions`]
"""
if not isinstance(name, str):
raise TypeError(f"Expected str 'name', but got: {type(name).__name__}")
elif not name:
raise ValueError("Expected non-empty str 'name'")
name2len = {info.name: info.num_examples for info in split_infos}
name2shard_lengths = {info.name: info.shard_lengths for info in split_infos}
name2filenames = {
info.name: filenames_for_dataset_split(
path=prefix_path,
dataset_name=name,
split=info.name,
filetype_suffix=filetype_suffix,
shard_lengths=name2shard_lengths[info.name],
)
for info in split_infos
}
if not isinstance(instruction, ReadInstruction):
instruction = ReadInstruction.from_spec(instruction)
# Create the absolute instruction (per split)
absolute_instructions = instruction.to_absolute(name2len)
# For each split, return the files instruction (skip/take)
file_instructions = []
num_examples = 0
for abs_instr in absolute_instructions:
split_length = name2len[abs_instr.splitname]
filenames = name2filenames[abs_instr.splitname]
shard_lengths = name2shard_lengths[abs_instr.splitname]
from_ = 0 if abs_instr.from_ is None else abs_instr.from_
to = split_length if abs_instr.to is None else abs_instr.to
if shard_lengths is None: # not sharded
for filename in filenames:
num_examples += to - from_
file_instructions.append({"filename": filename, "skip": from_, "take": to - from_})
else: # sharded
index_start = 0 # Beginning (included) of moving window.
index_end = 0 # End (excluded) of moving window.
for filename, shard_length in zip(filenames, shard_lengths):
index_end += shard_length
if from_ < index_end and to > index_start: # There is something to take.
skip = from_ - index_start if from_ > index_start else 0
take = to - index_start - skip if to < index_end else -1
if take == 0:
continue
file_instructions.append({"filename": filename, "skip": skip, "take": take})
num_examples += shard_length - skip if take == -1 else take
index_start += shard_length
return FileInstructions(
num_examples=num_examples,
file_instructions=file_instructions,
)
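# A worked example (hypothetical numbers, not part of this module): for a "train" split of
# 1000 examples stored as two shards of 500 rows each, the instruction "train[200:800]"
# yields two file instructions:
#     {"filename": <shard 0>, "skip": 200, "take": -1}   # -1 means "until the end of the shard" (300 rows)
#     {"filename": <shard 1>, "skip": 0, "take": 300}
# and FileInstructions.num_examples == 600.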
class BaseReader:
"""
Build a Dataset object out of Instruction instance(s).
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ArrowReader.
Args:
path (str): path where the dataset files are stored.
info (DatasetInfo): info about the dataset.
"""
self._path: str = path
self._info: Optional["DatasetInfo"] = info
self._filetype_suffix: Optional[str] = None
def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
"""Returns a Dataset instance from given (filename, skip, take)."""
raise NotImplementedError
def _read_files(self, files, in_memory=False) -> Table:
"""Returns Dataset for given file instructions.
Args:
files: List[dict(filename, skip, take)], the files information.
The filenames contain the absolute path, not the relative one.
skip/take indicate which examples to read in the file: `ds.slice(skip, take)`
in_memory (bool, default False): Whether to copy the data in-memory.
"""
if len(files) == 0 or not all(isinstance(f, dict) for f in files):
raise ValueError("please provide valid file informations")
pa_tables = []
files = copy.deepcopy(files)
for f in files:
f["filename"] = os.path.join(self._path, f["filename"])
for f_dict in files:
pa_table: Table = self._get_table_from_filename(f_dict, in_memory=in_memory)
pa_tables.append(pa_table)
pa_tables = [t for t in pa_tables if len(t) > 0]
if not pa_tables and (self._info is None or self._info.features is None):
raise ValueError(
"Tried to read an empty table. Please specify at least info.features to create an empty table with the right type."
)
pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))]
pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0]
return pa_table
def get_file_instructions(self, name, instruction, split_infos):
"""Return list of dict {'filename': str, 'skip': int, 'take': int}"""
file_instructions = make_file_instructions(
name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path
)
files = file_instructions.file_instructions
return files
def read(
self,
name,
instructions,
split_infos,
in_memory=False,
):
"""Returns Dataset instance(s).
Args:
name (str): name of the dataset.
instructions (ReadInstruction): instructions to read.
Instruction can be a string, in which case it is passed to the Instruction
constructor as is.
split_infos (list of SplitInfo proto): the available splits for dataset.
in_memory (bool, default False): Whether to copy the data in-memory.
Returns:
kwargs to build a single Dataset instance.
"""
files = self.get_file_instructions(name, instructions, split_infos)
if not files:
msg = f'Instruction "{instructions}" corresponds to no data!'
raise ValueError(msg)
return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory)
def read_files(
self,
files: List[dict],
original_instructions: Union[None, "ReadInstruction", "Split"] = None,
in_memory=False,
):
"""Returns single Dataset instance for the set of file instructions.
Args:
files: List[dict(filename, skip, take)], the files information.
The filenames contain the relative path, not the absolute one.
skip/take indicate which examples to read in the file: `ds.skip().take()`
original_instructions: store the original instructions used to build the dataset split in the dataset.
in_memory (bool, default False): Whether to copy the data in-memory.
Returns:
kwargs to build a Dataset instance.
"""
# Prepend path to filename
pa_table = self._read_files(files, in_memory=in_memory)
# If original_instructions is not None, convert it to a human-readable NamedSplit
if original_instructions is not None:
from .splits import Split # noqa
split = Split(str(original_instructions))
else:
split = None
dataset_kwargs = {"arrow_table": pa_table, "info": self._info, "split": split}
return dataset_kwargs
def download_from_hf_gcs(self, download_config: DownloadConfig, relative_data_dir):
"""
Download the dataset files from the Hf GCS
Args:
download_config: `DownloadConfig`, the configuration used to download the prepared files
relative_data_dir: `str`, the relative directory of the remote files from
the `datasets` directory on GCS.
"""
remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
try:
remote_dataset_info = os.path.join(remote_cache_dir, "dataset_info.json")
downloaded_dataset_info = cached_path(remote_dataset_info.replace(os.sep, "/"))
shutil.move(downloaded_dataset_info, os.path.join(self._path, "dataset_info.json"))
if self._info is not None:
self._info.update(self._info.from_directory(self._path))
except FileNotFoundError as err:
raise DatasetNotOnHfGcsError(err) from None
try:
for split in self._info.splits:
file_instructions = self.get_file_instructions(
name=self._info.builder_name,
instruction=split,
split_infos=self._info.splits.values(),
)
for file_instruction in file_instructions:
file_to_download = str(Path(file_instruction["filename"]).relative_to(self._path))
remote_prepared_filename = os.path.join(remote_cache_dir, file_to_download)
downloaded_prepared_filename = cached_path(
remote_prepared_filename.replace(os.sep, "/"), download_config=download_config
)
shutil.move(downloaded_prepared_filename, file_instruction["filename"])
except FileNotFoundError as err:
raise MissingFilesOnHfGcsError(err) from None
class ArrowReader(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This Reader uses either memory mapping or file descriptors (in-memory) on arrow files.
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ArrowReader.
Args:
path (str): path where Arrow files are stored.
info (DatasetInfo): info about the dataset.
"""
super().__init__(path, info)
self._filetype_suffix = "arrow"
def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
table = ArrowReader.read_table(filename, in_memory=in_memory)
if take == -1:
take = len(table) - skip
# here we don't want to slice an empty table, or it may segfault
if skip is not None and take is not None and not (skip == 0 and take == len(table)):
table = table.slice(skip, take)
return table
@staticmethod
def read_table(filename, in_memory=False) -> Table:
"""
Read table from file.
Args:
filename (str): File name of the table.
in_memory (bool, default=False): Whether to copy the data in-memory.
Returns:
pyarrow.Table
"""
table_cls = InMemoryTable if in_memory else MemoryMappedTable
return table_cls.from_file(filename)
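# A minimal usage sketch (hypothetical path and info, not part of this class):
#
#     reader = ArrowReader("/path/to/prepared/dataset", info)
#     dataset_kwargs = reader.read(
#         name="my_dataset", instructions="train[:100]", split_infos=list(info.splits.values())
#     )
#     ds = Dataset(**dataset_kwargs)  # Dataset accepts arrow_table, info and split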
class ParquetReader(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This Reader reads parquet files; note that the parquet data is loaded in memory when read (see `_get_table_from_filename`).
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ParquetReader.
Args:
path (str): path where parquet files are stored.
info (DatasetInfo): info about the dataset.
"""
super().__init__(path, info)
self._filetype_suffix = "parquet"
def _get_table_from_filename(self, filename_skip_take, **kwargs):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
# Parquet read_table always loads data in memory, independently of memory_map
pa_table = pq.read_table(filename, memory_map=True)
# here we don't want to slice an empty table, or it may segfault
if skip is not None and take is not None and not (skip == 0 and take == len(pa_table)):
pa_table = pa_table.slice(skip, take)
return pa_table
@dataclass(frozen=True)
class _AbsoluteInstruction:
"""A machine friendly slice: defined absolute positive boundaries."""
splitname: str
from_: int # uint (starting index).
to: int # uint (ending index).
@dataclass(frozen=True)
class _RelativeInstruction:
"""Represents a single parsed slicing instruction, can use % and negatives."""
splitname: str
from_: Optional[int] = None # int (starting index) or None if no lower boundary.
to: Optional[int] = None # int (ending index) or None if no upper boundary.
unit: Optional[str] = None
rounding: Optional[str] = None
def __post_init__(self):
if self.unit is not None and self.unit not in ["%", "abs"]:
raise ValueError("unit must be either % or abs")
if self.rounding is not None and self.rounding not in ["closest", "pct1_dropremainder"]:
raise ValueError("rounding must be either closest or pct1_dropremainder")
if self.unit != "%" and self.rounding is not None:
raise ValueError("It is forbidden to specify rounding if not using percent slicing.")
if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100:
raise ValueError("Percent slice boundaries must be > -100 and < 100.")
if self.unit == "%" and self.to is not None and abs(self.to) > 100:
raise ValueError("Percent slice boundaries must be > -100 and < 100.")
# Update via __dict__ due to instance being "frozen"
self.__dict__["rounding"] = "closest" if self.rounding is None and self.unit == "%" else self.rounding
def _str_to_read_instruction(spec):
"""Returns ReadInstruction for given string."""
res = _SUB_SPEC_RE.match(spec)
if not res:
raise ValueError(f"Unrecognized instruction format: {spec}")
unit = "%" if res.group("from_pct") or res.group("to_pct") else "abs"
return ReadInstruction(
split_name=res.group("split"),
rounding=res.group("rounding"),
from_=int(res.group("from")) if res.group("from") else None,
to=int(res.group("to")) if res.group("to") else None,
unit=unit,
)
def _pct_to_abs_pct1(boundary, num_examples):
# Using math.trunc here, since -99.5% should give -99%, not -100%.
if num_examples < 100:
msg = (
'Using "pct1_dropremainder" rounding on a split with less than 100 '
"elements is forbidden: it always results in an empty dataset."
)
raise ValueError(msg)
return boundary * math.trunc(num_examples / 100.0)
def _pct_to_abs_closest(boundary, num_examples):
return int(round(boundary * num_examples / 100.0))
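# Worked example of the two rounding modes (hypothetical numbers): for a split of 8530
# examples and a 33% boundary,
#     _pct_to_abs_closest(33, 8530) == round(33 * 8530 / 100) == 2815
#     _pct_to_abs_pct1(33, 8530) == 33 * math.trunc(8530 / 100) == 33 * 85 == 2805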
def _rel_to_abs_instr(rel_instr, name2len):
"""Returns _AbsoluteInstruction instance for given RelativeInstruction.
Args:
rel_instr: RelativeInstruction instance.
name2len: dict {split_name: num_examples}.
"""
pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == "closest" else _pct_to_abs_pct1
split = rel_instr.splitname
if split not in name2len:
raise ValueError(f'Unknown split "{split}". Should be one of {list(name2len)}.')
num_examples = name2len[split]
from_ = rel_instr.from_
to = rel_instr.to
if rel_instr.unit == "%":
from_ = 0 if from_ is None else pct_to_abs(from_, num_examples)
to = num_examples if to is None else pct_to_abs(to, num_examples)
else:
from_ = 0 if from_ is None else from_
to = num_examples if to is None else to
if abs(from_) > num_examples or abs(to) > num_examples:
msg = f'Requested slice [{from_ or ""}:{to or ""}] incompatible with {num_examples} examples.'
raise ValueError(msg)
if from_ < 0:
from_ = num_examples + from_
elif from_ == 0:
from_ = None
if to < 0:
to = num_examples + to
elif to == num_examples:
to = None
return _AbsoluteInstruction(split, from_, to)
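# A worked example (hypothetical split size, not part of this module): with
# name2len == {"train": 10000}, the relative instruction "train[-1000:]" (unit "abs")
# becomes _AbsoluteInstruction("train", 9000, None), i.e. the last 1000 examples;
# a `to` equal to the split length is normalized to None.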
class ReadInstruction:
"""Reading instruction for a dataset.
Examples::
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%]')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
'test', from_=0, to=33, unit='%'))
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
'test[:33%]+train[1:-1]'))
ds = datasets.load_dataset('mnist', split=(
datasets.ReadInstruction('test', to=33, unit='%') +
datasets.ReadInstruction('train', from_=1, to=-1, unit='abs')))
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
'test[:33%](pct1_dropremainder)'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder"))
# 10-fold validation:
tests = datasets.load_dataset(
'mnist',
[datasets.ReadInstruction('train', from_=k, to=k+10, unit='%')
for k in range(0, 100, 10)])
trains = datasets.load_dataset(
'mnist',
[datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%')
for k in range(0, 100, 10)])
"""
def _init(self, relative_instructions):
# Private initializer.
self._relative_instructions = relative_instructions
@classmethod
def _read_instruction_from_relative_instructions(cls, relative_instructions):
"""Returns ReadInstruction obj initialized with relative_instructions."""
# Use __new__ to bypass __init__, which is used by the public API and not convenient here.
result = cls.__new__(cls)
result._init(relative_instructions) # pylint: disable=protected-access
return result
def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None):
"""Initialize ReadInstruction.
Args:
split_name (str): name of the split to read. Eg: 'train'.
rounding (str, optional): The rounding behaviour to use when percent slicing is
used. Ignored when slicing with absolute indices.
Possible values:
- 'closest' (default): The specified percentages are rounded to the
closest value. Use this if you want the specified percents to be as
exact as possible.
- 'pct1_dropremainder': the specified percentages are treated as
multiple of 1%. Use this option if you want consistency. Eg:
len(5%) == 5 * len(1%).
Using this option, one might not be able to use the full set of
examples if the number of examples is not a multiple of 100.
from_ (int):
to (int): alternative way of specifying slicing boundaries. If any of
the {from_, to, unit} arguments is used, slicing cannot be specified as
a string.
unit (str): optional, one of:
'%': to set the slicing unit as percents of the split size.
'abs': to set the slicing unit as absolute numbers.
"""
# This constructor is not always called. See factory method
# `_read_instruction_from_relative_instructions`. Common init instructions
# MUST be placed in the _init method.
self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)])
@classmethod
def from_spec(cls, spec):
"""Creates a `ReadInstruction` instance out of a string spec.
Args:
spec (`str`):
Split(s) + optional slice(s) to read + optional rounding
if percents are used as the slicing unit. A slice can be specified,
using absolute numbers (`int`) or percentages (`int`).
Examples:
```
test: test split.
test + validation: test split + validation split.
test[10:]: test split, minus its first 10 records.
test[:10%]: first 10% records of test split.
test[:20%](pct1_dropremainder): first 20% of records, rounded with the pct1_dropremainder rounding.
test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train.
```
Returns:
ReadInstruction instance.
"""
spec = str(spec) # Need to convert to str in case of NamedSplit instance.
subs = _ADDITION_SEP_RE.split(spec)
if not subs:
raise ValueError(f"No instructions could be built out of {spec}")
instruction = _str_to_read_instruction(subs[0])
return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction)
def to_spec(self):
rel_instr_specs = []
for rel_instr in self._relative_instructions:
rel_instr_spec = rel_instr.splitname
if rel_instr.from_ is not None or rel_instr.to is not None:
from_ = rel_instr.from_
to = rel_instr.to
unit = rel_instr.unit
rounding = rel_instr.rounding
unit = unit if unit == "%" else ""
from_ = str(from_) + unit if from_ is not None else ""
to = str(to) + unit if to is not None else ""
slice_str = f"[{from_}:{to}]"
rounding_str = (
f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else ""
)
rel_instr_spec += slice_str + rounding_str
rel_instr_specs.append(rel_instr_spec)
return "+".join(rel_instr_specs)
def __add__(self, other):
"""Returns a new ReadInstruction obj, result of appending other to self."""
if not isinstance(other, ReadInstruction):
msg = "ReadInstruction can only be added to another ReadInstruction obj."
raise TypeError(msg)
self_ris = self._relative_instructions
other_ris = other._relative_instructions # pylint: disable=protected-access
if (
self_ris[0].unit != "abs"
and other_ris[0].unit != "abs"
and self._relative_instructions[0].rounding != other_ris[0].rounding
):
raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.")
return self._read_instruction_from_relative_instructions(self_ris + other_ris)
def __str__(self):
return self.to_spec()
def __repr__(self):
return f"ReadInstruction({self._relative_instructions})"
def to_absolute(self, name2len):
"""Translate instruction into a list of absolute instructions.
Those absolute instructions are then to be added together.
Args:
name2len (`dict`):
Associating split names to number of examples.
Returns:
list of _AbsoluteInstruction instances (corresponds to the + in spec).
"""
return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions]
| datasets-main | src/datasets/arrow_reader.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""DatasetBuilder base class."""
import abc
import contextlib
import copy
import inspect
import os
import posixpath
import shutil
import textwrap
import time
import urllib
import warnings
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from typing import Dict, Iterable, Mapping, Optional, Tuple, Union
import fsspec
import pyarrow as pa
from multiprocess import Pool
from tqdm.contrib.concurrent import thread_map
from . import config, utils
from .arrow_dataset import Dataset
from .arrow_reader import (
HF_GCP_BASE_URL,
ArrowReader,
DatasetNotOnHfGcsError,
MissingFilesOnHfGcsError,
ReadInstruction,
)
from .arrow_writer import ArrowWriter, BeamWriter, ParquetWriter, SchemaInferenceError
from .data_files import DataFilesDict, sanitize_patterns
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadManager, DownloadMode
from .download.mock_download_manager import MockDownloadManager
from .download.streaming_download_manager import StreamingDownloadManager, xopen
from .features import Features
from .filesystems import (
is_remote_filesystem,
rename,
)
from .fingerprint import Hasher
from .info import DatasetInfo, DatasetInfosDict, PostProcessedInfo
from .iterable_dataset import ArrowExamplesIterable, ExamplesIterable, IterableDataset
from .keyhash import DuplicatedKeysError
from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase
from .splits import Split, SplitDict, SplitGenerator, SplitInfo
from .streaming import extend_dataset_builder_for_streaming
from .utils import logging
from .utils.file_utils import cached_path, is_remote_url
from .utils.filelock import FileLock
from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits
from .utils.py_utils import (
classproperty,
convert_file_size_to_int,
has_sufficient_disk_space,
iflatmap_unordered,
map_nested,
memoize,
size_str,
temporary_assignment,
)
from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs
logger = logging.get_logger(__name__)
class InvalidConfigName(ValueError):
pass
class DatasetBuildError(Exception):
pass
class ManualDownloadError(DatasetBuildError):
pass
class DatasetGenerationError(DatasetBuildError):
pass
class FileFormatError(DatasetBuildError):
pass
@dataclass
class BuilderConfig:
"""Base class for `DatasetBuilder` data configuration.
`DatasetBuilder` subclasses with data configuration options should subclass
`BuilderConfig` and add their own properties.
Attributes:
name (`str`, defaults to `default`):
The name of the configuration.
version (`Version` or `str`, defaults to `0.0.0`):
The version of the configuration.
data_dir (`str`, *optional*):
Path to the directory containing the source data.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
description (`str`, *optional*):
A human description of the configuration.
"""
name: str = "default"
version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
data_dir: Optional[str] = None
data_files: Optional[DataFilesDict] = None
description: Optional[str] = None
def __post_init__(self):
# The config name is used to name the cache directory.
for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
if invalid_char in self.name:
raise InvalidConfigName(
f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
f"They could create issues when creating a directory for this config on Windows filesystem."
)
if self.data_files is not None and not isinstance(self.data_files, DataFilesDict):
raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
def __eq__(self, o):
# we need to override the default dataclass __eq__ since it doesn't check for
# other attributes than the ones of the signature.
if set(self.__dict__.keys()) != set(o.__dict__.keys()):
return False
return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[Features] = None,
) -> str:
"""
The config id is used to build the cache directory.
By default it is equal to the config name.
However the name of a config is not sufficient to have a unique identifier for the dataset being generated
since it doesn't take into account:
- the config kwargs that can be used to overwrite attributes
- the custom features used to write the dataset
- the data_files for json/text/csv/pandas datasets
Therefore the config id is just the config name with an optional suffix based on these.
"""
# Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
suffix: Optional[str] = None
config_kwargs_to_add_to_suffix = config_kwargs.copy()
# name and version are already used to build the cache directory
config_kwargs_to_add_to_suffix.pop("name", None)
config_kwargs_to_add_to_suffix.pop("version", None)
# data dir handling (when specified it points to the manually downloaded data):
# it was previously ignored before the introduction of config id because we didn't want
# to change the config name. Now it's fine to take it into account for the config id.
# config_kwargs_to_add_to_suffix.pop("data_dir", None)
if "data_dir" in config_kwargs_to_add_to_suffix:
if config_kwargs_to_add_to_suffix["data_dir"] is None:
config_kwargs_to_add_to_suffix.pop("data_dir", None)
else:
# canonicalize the data dir to avoid two paths to the same location having different
# hashes
data_dir = config_kwargs_to_add_to_suffix["data_dir"]
data_dir = os.path.normpath(data_dir)
config_kwargs_to_add_to_suffix["data_dir"] = data_dir
if config_kwargs_to_add_to_suffix:
# we don't care about the order of the kwargs
config_kwargs_to_add_to_suffix = {
k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
}
if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
suffix = ",".join(
str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
)
if len(suffix) > 32: # hash if too long
suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
else:
suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
if custom_features is not None:
m = Hasher()
if suffix:
m.update(suffix)
m.update(custom_features)
suffix = m.hexdigest()
if suffix:
config_id = self.name + "-" + suffix
if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
config_id = self.name + "-" + Hasher.hash(suffix)
return config_id
else:
return self.name
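# A sketch of the resulting config id (hypothetical kwargs, not part of this class): for the
# default config created with config_kwargs == {"data_dir": "./data"}, the data dir is
# normalized to "data" and the id becomes "default-data_dir=data". Non-primitive kwargs, or a
# suffix longer than 32 characters, are hashed with Hasher.hash() instead.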
class DatasetBuilder:
"""Abstract base class for all datasets.
`DatasetBuilder` has 3 key methods:
- [`DatasetBuilder.info`]: Documents the dataset, including feature
names, types, shapes, version, splits, citation, etc.
- [`DatasetBuilder.download_and_prepare`]: Downloads the source data
and writes it to disk.
- [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
Some `DatasetBuilder`s expose multiple variants of the
dataset by defining a [`BuilderConfig`] subclass and accepting a
config object (or name) on construction. Configurable datasets expose a
pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
Args:
cache_dir (`str`, *optional*):
Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
dataset_name (`str`, *optional*):
Name of the dataset, if different from the builder name. Useful for packaged builders
like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
that use the same packaged builder.
config_name (`str`, *optional*):
Name of the dataset configuration.
It affects the data generated on disk. Different configurations will have their own subdirectories and
versions.
If not provided, the default configuration is used (if it exists).
<Added version="2.3.0">
Parameter `name` was renamed to `config_name`.
</Added>
hash (`str`, *optional*):
Hash specific to the dataset code. Used to update the caching directory when the
dataset loading script code is updated (to avoid reusing old data).
The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
base_path (`str`, *optional*):
Base path for relative paths that are used to download files.
This can be a remote URL.
features ([`Features`], *optional*):
Features types to use with this dataset.
It can be used to change the [`Features`] types of a dataset, for example.
token (`str` or `bool`, *optional*):
String or boolean to use as Bearer token for remote files on the
Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
repo_id (`str`, *optional*):
ID of the dataset repository.
Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
For builders like "csv" or "json" that need the user to specify data files. They can be either
local or remote files. For convenience, you can use a `DataFilesDict`.
data_dir (`str`, *optional*):
Path to directory containing source data file(s).
Use only if `data_files` is not passed, in which case it is equivalent to passing
`os.path.join(data_dir, "**")` as `data_files`.
For builders that require manual download, it must be the path to the local directory containing the
manually downloaded data.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
writer_batch_size (`int`, *optional*):
Batch size used by the ArrowWriter.
It defines the number of samples that are kept in memory before writing them
and also the length of the arrow chunks.
None means that the ArrowWriter will use its default value.
name (`str`): Configuration name for the dataset.
<Deprecated version="2.3.0">
Use `config_name` instead.
</Deprecated>
**config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
configuration class is [`BuilderConfig`] or a subclass of it.
"""
# Default version
VERSION = None # Default version set in BuilderConfig
# Class for the builder config.
BUILDER_CONFIG_CLASS = BuilderConfig
# Named configurations that modify the data generated by download_and_prepare.
BUILDER_CONFIGS = []
# Optional default config name to be used when name is None
DEFAULT_CONFIG_NAME = None
# Default batch size used by the ArrowWriter
# It defines the number of samples that are kept in memory before writing them
# and also the length of the arrow chunks
# None means that the ArrowWriter will use its default value
DEFAULT_WRITER_BATCH_SIZE = None
def __init__(
self,
cache_dir: Optional[str] = None,
dataset_name: Optional[str] = None,
config_name: Optional[str] = None,
hash: Optional[str] = None,
base_path: Optional[str] = None,
info: Optional[DatasetInfo] = None,
features: Optional[Features] = None,
token: Optional[Union[bool, str]] = None,
use_auth_token="deprecated",
repo_id: Optional[str] = None,
data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
data_dir: Optional[str] = None,
storage_options: Optional[dict] = None,
writer_batch_size: Optional[int] = None,
name="deprecated",
**config_kwargs,
):
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
if name != "deprecated":
warnings.warn(
"Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
category=FutureWarning,
)
config_name = name
# DatasetBuilder name
self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
self.hash: Optional[str] = hash
self.base_path = base_path
self.token = token
# For backwards compatibility (e.g. if accessed in a dataset script)
self.use_auth_token = token
self.repo_id = repo_id
self.storage_options = storage_options or {}
self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
if data_files is not None and not isinstance(data_files, DataFilesDict):
data_files = DataFilesDict.from_patterns(
sanitize_patterns(data_files),
base_path=base_path,
download_config=DownloadConfig(token=token, storage_options=self.storage_options),
)
# Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset
if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
config_kwargs["features"] = features
if data_files is not None:
config_kwargs["data_files"] = data_files
if data_dir is not None:
config_kwargs["data_dir"] = data_dir
self.config, self.config_id = self._create_builder_config(
config_name=config_name,
custom_features=features,
**config_kwargs,
)
# prepare info: DatasetInfo are a standardized dataclass across all datasets
# Prefill datasetinfo
if info is None:
# TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
info = self.get_exported_dataset_info()
info.update(self._info())
info.builder_name = self.name
info.dataset_name = self.dataset_name
info.config_name = self.config.name
info.version = self.config.version
self.info = info
# update info with user specified infos
if features is not None:
self.info.features = features
# Prepare data dirs:
# cache_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
self._cache_dir_root = (
self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
)
path_join = posixpath.join if is_remote_url(self._cache_dir_root) else os.path.join
self._cache_downloaded_dir = (
path_join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
if cache_dir
else str(config.DOWNLOADED_DATASETS_PATH)
)
self._cache_downloaded_dir = (
self._cache_downloaded_dir
if is_remote_url(self._cache_downloaded_dir)
else os.path.expanduser(self._cache_downloaded_dir)
)
self._cache_dir = self._build_cache_dir()
if not is_remote_url(self._cache_dir_root):
os.makedirs(self._cache_dir_root, exist_ok=True)
lock_path = os.path.join(self._cache_dir_root, self._cache_dir.replace(os.sep, "_") + ".lock")
with FileLock(lock_path):
if os.path.exists(self._cache_dir): # check if data exist
if len(os.listdir(self._cache_dir)) > 0:
if os.path.exists(path_join(self._cache_dir, config.DATASET_INFO_FILENAME)):
logger.info("Overwrite dataset info from restored data version if exists.")
self.info = DatasetInfo.from_directory(self._cache_dir)
else: # dir exists but no data, remove the empty dir as data aren't available anymore
logger.warning(
f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. "
)
os.rmdir(self._cache_dir)
# Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
self._output_dir = self._cache_dir
self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
# Set download manager
self.dl_manager = None
# Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
self._record_infos = False
# Set in `.download_and_prepare` once the format of the generated dataset is known
self._file_format = None
# Enable streaming (e.g. it patches "open" to work with remote files)
extend_dataset_builder_for_streaming(self)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
# Re-enable streaming, since patched functions are not kept when pickling
extend_dataset_builder_for_streaming(self)
# Must be set for datasets that use 'data_dir' functionality - the ones
# that require users to do additional steps to download the data
# (this is usually due to some external regulations / rules).
# This field should contain a string with user instructions, including
# the list of files that should be present. It will be
# displayed in the dataset documentation.
@property
def manual_download_instructions(self) -> Optional[str]:
return None
def _has_legacy_cache(self) -> bool:
"""Check for the old cache directory template {cache_dir}/{namespace}___{builder_name}"""
if (
self.__module__.startswith("datasets.")
and not is_remote_url(self._cache_dir_root)
and self.config.name == "default"
):
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
legacy_config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
legacy_config_id = legacy_config_name + self.config_id[len(self.config.name) :]
legacy_cache_dir = os.path.join(
self._cache_dir_root,
self.name if namespace is None else f"{namespace}___{self.name}",
legacy_config_id,
)
return os.path.isdir(legacy_cache_dir)
return False
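# Example of the legacy template checked above (hypothetical repo, not part of this class):
# for repo_id "username/my_dataset" loaded with the packaged "csv" builder and the "default"
# config, the legacy directory would be
#     {cache_dir_root}/username___csv/username--my_dataset<config_id suffix>
# whereas the typical current directory follows the `name/version/hash/` layout described in
# the class docstring (`self._relative_data_dir`).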
@classmethod
def get_all_exported_dataset_infos(cls) -> DatasetInfosDict:
"""Empty dict if doesn't exist
Example:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder('rotten_tomatoes')
>>> ds_builder.get_all_exported_dataset_infos()
{'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)}
```
"""
return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
def get_exported_dataset_info(self) -> DatasetInfo:
"""Empty `DatasetInfo` if doesn't exist
Example:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder('rotten_tomatoes')
>>> ds_builder.get_exported_dataset_info()
DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)
```
"""
return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
def _create_builder_config(
self, config_name=None, custom_features=None, **config_kwargs
) -> Tuple[BuilderConfig, str]:
"""Create and validate BuilderConfig object as well as a unique config id for this config.
Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None.
config_kwargs override the default kwargs of the config.
"""
builder_config = None
# try default config
if config_name is None and self.BUILDER_CONFIGS and not config_kwargs:
if self.DEFAULT_CONFIG_NAME is not None:
builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}")
else:
if len(self.BUILDER_CONFIGS) > 1:
example_of_usage = f"load_dataset('{self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')"
raise ValueError(
"Config name is missing."
f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
+ f"\nExample of usage:\n\t`{example_of_usage}`"
)
builder_config = self.BUILDER_CONFIGS[0]
logger.info(
f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}"
)
# try to get config by name
if isinstance(config_name, str):
builder_config = self.builder_configs.get(config_name)
if builder_config is None and self.BUILDER_CONFIGS:
raise ValueError(
f"BuilderConfig '{config_name}' not found. Available: {list(self.builder_configs.keys())}"
)
# if not using an existing config, then create a new config on the fly
if not builder_config:
if config_name is not None:
config_kwargs["name"] = config_name
elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
# Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
config_kwargs["version"] = self.VERSION
builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
# otherwise use the config_kwargs to overwrite the attributes
else:
builder_config = copy.deepcopy(builder_config)
for key, value in config_kwargs.items():
if value is not None:
if not hasattr(builder_config, key):
raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
setattr(builder_config, key, value)
if not builder_config.name:
raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
# compute the config id that is going to be used for caching
config_id = builder_config.create_config_id(
config_kwargs,
custom_features=custom_features,
)
is_custom = (config_id not in self.builder_configs) and config_id != "default"
if is_custom:
logger.info(f"Using custom data configuration {config_id}")
else:
if (
builder_config.name in self.builder_configs
and builder_config != self.builder_configs[builder_config.name]
):
raise ValueError(
"Cannot name a custom BuilderConfig the same as an available "
f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
)
if not builder_config.version:
raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
return builder_config, config_id
@classproperty
@classmethod
@memoize()
def builder_configs(cls):
"""Dictionary of pre-defined configurations for this builder class."""
configs = {config.name: config for config in cls.BUILDER_CONFIGS}
if len(configs) != len(cls.BUILDER_CONFIGS):
names = [config.name for config in cls.BUILDER_CONFIGS]
raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
return configs
@property
def cache_dir(self):
return self._cache_dir
def _relative_data_dir(self, with_version=True, with_hash=True, is_local=True) -> str:
"""Relative path of this dataset in cache_dir:
Will be:
self.dataset_name/self.config_id/self.config.version/self.hash/
or if a repo_id with a namespace has been specified:
self.namespace___self.dataset_name/self.config_id/self.config.version/self.hash/
If any of these elements is missing or if ``with_version=False`` the corresponding subfolders are dropped.
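For example, the default config of `rotten_tomatoes` resolves to a directory like `rotten_tomatoes_movie_review/default/1.0.0/<hash>` (illustrative; the hash is a placeholder).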
"""
# Check for the legacy cache directory template (datasets<3.0.0)
if self._has_legacy_cache():
# use legacy names
dataset_name = self.name
config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
config_id = config_name + self.config_id[len(self.config.name) :]
else:
dataset_name = self.dataset_name
config_name = self.config.name
config_id = self.config_id
path_join = os.path.join if is_local else posixpath.join
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
builder_data_dir = dataset_name if namespace is None else f"{namespace}___{dataset_name}"
builder_data_dir = path_join(builder_data_dir, config_id)
if with_version:
builder_data_dir = path_join(builder_data_dir, str(self.config.version))
if with_hash and self.hash and isinstance(self.hash, str):
builder_data_dir = path_join(builder_data_dir, self.hash)
return builder_data_dir
def _build_cache_dir(self):
"""Return the data directory for the current version."""
is_local = not is_remote_url(self._cache_dir_root)
path_join = os.path.join if is_local else posixpath.join
builder_data_dir = path_join(
self._cache_dir_root, self._relative_data_dir(with_version=False, is_local=is_local)
)
version_data_dir = path_join(
self._cache_dir_root, self._relative_data_dir(with_version=True, is_local=is_local)
)
def _other_versions_on_disk():
"""Returns previous versions on disk."""
if not os.path.exists(builder_data_dir):
return []
version_dirnames = []
for dir_name in os.listdir(builder_data_dir):
try:
version_dirnames.append((utils.Version(dir_name), dir_name))
except ValueError: # Invalid version (ex: incomplete data dir)
pass
version_dirnames.sort(reverse=True)
return version_dirnames
# Check and warn if other versions exist
if not is_remote_url(builder_data_dir):
version_dirs = _other_versions_on_disk()
if version_dirs:
other_version = version_dirs[0][0]
if other_version != self.config.version:
warn_msg = (
f"Found a different version {str(other_version)} of dataset {self.dataset_name} in "
f"cache_dir {self._cache_dir_root}. Using currently defined version "
f"{str(self.config.version)}."
)
logger.warning(warn_msg)
return version_data_dir
@abc.abstractmethod
def _info(self) -> DatasetInfo:
"""Construct the DatasetInfo object. See `DatasetInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (DatasetInfo) The dataset information
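Example (an illustrative sketch; the description and feature names are hypothetical, and `datasets` is assumed to be imported):
```py
def _info(self):
    return datasets.DatasetInfo(
        description="A hypothetical binary text classification dataset.",
        features=datasets.Features(
            {
                "text": datasets.Value("string"),
                "label": datasets.ClassLabel(names=["neg", "pos"]),
            }
        ),
    )
```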
"""
raise NotImplementedError
@classmethod
def get_imported_module_dir(cls):
"""Return the path of the module of this class or subclass."""
return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
def _rename(self, src: str, dst: str):
rename(self._fs, src, dst)
def download_and_prepare(
self,
output_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
verification_mode: Optional[Union[VerificationMode, str]] = None,
ignore_verifications="deprecated",
try_from_hf_gcs: bool = True,
dl_manager: Optional[DownloadManager] = None,
base_path: Optional[str] = None,
use_auth_token="deprecated",
file_format: str = "arrow",
max_shard_size: Optional[Union[int, str]] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**download_and_prepare_kwargs,
):
"""Downloads and prepares dataset for reading.
Args:
output_dir (`str`, *optional*):
Output directory for the dataset.
Default to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
<Added version="2.5.0"/>
download_config (`DownloadConfig`, *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, *optional*):
Select the download/generate mode, default to `REUSE_DATASET_IF_EXISTS`.
verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/>
ignore_verifications (`bool`, defaults to `False`):
Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
<Deprecated version="2.9.1">
`ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
Please use `verification_mode` instead.
</Deprecated>
try_from_hf_gcs (`bool`):
If `True`, it will try to download the already prepared dataset from the HF Google cloud storage.
dl_manager (`DownloadManager`, *optional*):
Specific `DownloadManager` to use.
base_path (`str`, *optional*):
Base path for relative paths that are used to download files. This can be a remote url.
If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
use_auth_token (`Union[str, bool]`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If True, or not specified, will get token from ~/.huggingface.
<Deprecated version="2.7.1">
Pass `use_auth_token` to `load_dataset_builder` instead.
</Deprecated>
file_format (`str`, *optional*):
Format of the data files in which the dataset will be written.
Supported formats: "arrow", "parquet". Default to "arrow" format.
If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
<Added version="2.5.0"/>
max_shard_size (`Union[str, int]`, *optional*):
Maximum number of bytes written per shard, default is "500MB".
The size is based on uncompressed data size, so in practice your shard files may be smaller than
`max_shard_size` thanks to Parquet compression for example.
<Added version="2.5.0"/>
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.7.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the caching file-system backend, if any.
<Added version="2.5.0"/>
**download_and_prepare_kwargs (additional keyword arguments): Keyword arguments.
Example:
Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare()
```
Download and prepare the dataset as sharded Parquet files locally:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare("./output_dir", file_format="parquet")
```
Download and prepare the dataset as sharded Parquet files in a cloud storage:
```py
>>> from datasets import load_dataset_builder
>>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key}
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet")
```
"""
if ignore_verifications != "deprecated":
verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
warnings.warn(
"'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
FutureWarning,
)
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass `token` to `load_dataset_builder` instead.",
FutureWarning,
)
token = use_auth_token
else:
token = self.token
output_dir = output_dir if output_dir is not None else self._cache_dir
# output_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
fs_token_paths = fsspec.get_fs_token_paths(output_dir, storage_options=storage_options)
self._fs: fsspec.AbstractFileSystem = fs_token_paths[0]
is_local = not is_remote_filesystem(self._fs)
self._output_dir = fs_token_paths[2][0] if is_local else self._fs.unstrip_protocol(fs_token_paths[2][0])
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
base_path = base_path if base_path is not None else self.base_path
if file_format is not None and file_format not in ["arrow", "parquet"]:
raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'")
self._file_format = file_format
if self._fs._strip_protocol(self._output_dir) == "":
# We don't support the root directory, because it has no dirname,
# and we need a dirname to use a <dirname>.incomplete directory
# when the dataset is being written
raise RuntimeError(
f"Unable to download and prepare the dataset at the root {self._output_dir}. "
f"Please specify a subdirectory, e.g. '{self._output_dir + self.dataset_name}'"
)
if dl_manager is None:
if download_config is None:
download_config = DownloadConfig(
cache_dir=self._cache_downloaded_dir,
force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD,
force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD,
use_etag=False,
num_proc=num_proc,
token=token,
storage_options=self.storage_options,
) # We don't use etag for data files to speed up the process
dl_manager = DownloadManager(
dataset_name=self.dataset_name,
download_config=download_config,
data_dir=self.config.data_dir,
base_path=base_path,
record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS),
)
if (
isinstance(dl_manager, MockDownloadManager)
or not is_local
or file_format != "arrow"
or max_shard_size is not None
):
try_from_hf_gcs = False
self.dl_manager = dl_manager
# Prevent parallel local disk operations
if is_local:
# Create parent directory of the output_dir to put the lock file in there
Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
lock_path = self._output_dir + "_builder.lock"
# File locking only with local paths; no file locking on GCS or S3
with FileLock(lock_path) if is_local else contextlib.nullcontext():
# Check if the data already exists
path_join = os.path.join if is_local else posixpath.join
data_exists = self._fs.exists(path_join(self._output_dir, config.DATASET_INFO_FILENAME))
if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS:
logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})")
# We need to update the info in case some splits were added in the meantime
# for example when calling load_dataset from multiple workers.
self.info = self._load_info()
self.download_post_processing_resources(dl_manager)
return
logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})")
if is_local: # if cache dir is local, check for available space
if not has_sufficient_disk_space(
self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent
):
raise OSError(
f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})"
)
@contextlib.contextmanager
def incomplete_dir(dirname):
"""Create temporary dir for dirname and rename on exit."""
if not is_local:
self._fs.makedirs(dirname, exist_ok=True)
yield dirname
else:
tmp_dir = dirname + ".incomplete"
os.makedirs(tmp_dir, exist_ok=True)
try:
yield tmp_dir
if os.path.isdir(dirname):
shutil.rmtree(dirname)
# LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory
shutil.move(tmp_dir, dirname)
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
# This log is intentional: it gives the user the information needed to cancel the
# download/preparation if needed. It comes right before the progress bar.
if self.info.size_in_bytes:
logger.info(
f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} "
f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, "
f"post-processed: {size_str(self.info.post_processing_size)}, "
f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..."
)
else:
_dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir
logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...")
self._check_manual_download(dl_manager)
# Create a tmp dir and rename to self._output_dir on successful exit.
with incomplete_dir(self._output_dir) as tmp_output_dir:
# Temporarily assign _output_dir to tmp_output_dir to avoid having to forward
# it to every sub function.
with temporary_assignment(self, "_output_dir", tmp_output_dir):
# Try to download the already prepared dataset files
downloaded_from_gcs = False
if try_from_hf_gcs:
try:
self._download_prepared_from_hf_gcs(dl_manager.download_config)
downloaded_from_gcs = True
except (DatasetNotOnHfGcsError, MissingFilesOnHfGcsError):
logger.info("Dataset not on Hf google storage. Downloading and preparing it from source")
except ConnectionError:
logger.warning("HF google storage unreachable. Downloading and preparing it from source")
if not downloaded_from_gcs:
prepare_split_kwargs = {"file_format": file_format}
if max_shard_size is not None:
prepare_split_kwargs["max_shard_size"] = max_shard_size
if num_proc is not None:
prepare_split_kwargs["num_proc"] = num_proc
self._download_and_prepare(
dl_manager=dl_manager,
verification_mode=verification_mode,
**prepare_split_kwargs,
**download_and_prepare_kwargs,
)
# Sync info
self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
# Save info
self._save_info()
# Download post processing resources
self.download_post_processing_resources(dl_manager)
logger.info(
f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. "
f"Subsequent calls will reuse this data."
)
def _check_manual_download(self, dl_manager):
if self.manual_download_instructions is not None and dl_manager.manual_dir is None:
raise ManualDownloadError(
textwrap.dedent(
f"""\
The dataset {self.dataset_name} with config {self.config.name} requires manual data.
Please follow the manual download instructions:
{self.manual_download_instructions}
Manual data can be loaded with:
datasets.load_dataset("{self.dataset_name}", data_dir="<path/to/manual/data>")"""
)
)
def _download_prepared_from_hf_gcs(self, download_config: DownloadConfig):
relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
reader = ArrowReader(self._output_dir, self.info)
# use reader instructions to download the right files
reader.download_from_hf_gcs(download_config, relative_data_dir)
downloaded_info = DatasetInfo.from_directory(self._output_dir)
self.info.update(downloaded_info)
# download post processing resources
remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
for split in self.info.splits:
for resource_file_name in self._post_processing_resources(split).values():
if os.sep in resource_file_name:
raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
try:
resource_path = cached_path(remote_cache_dir + "/" + resource_file_name)
shutil.move(resource_path, os.path.join(self._output_dir, resource_file_name))
except ConnectionError:
logger.info(f"Couldn't download resourse file {resource_file_name} from Hf google storage.")
logger.info("Dataset downloaded from Hf google storage.")
def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
"""Downloads and prepares dataset for reading.
This is the internal implementation to override; it is called when the user calls
`download_and_prepare`. It should download all required data and generate
the pre-processed dataset files.
Args:
dl_manager ([`DownloadManager`]):
`DownloadManager` used to download and cache data.
verification_mode ([`VerificationMode`]):
if `ALL_CHECKS`, perform all the verifications including checksums.
if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
if `NO_CHECKS`, do not perform any verification.
prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
"""
# Generating data for all splits
split_dict = SplitDict(dataset_name=self.dataset_name)
split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
# Checksums verification
if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
verify_checksums(
self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
)
# Build splits
for split_generator in split_generators:
if str(split_generator.split_info.name).lower() == "all":
raise ValueError(
"`all` is a special split keyword corresponding to the "
"union of all splits, so cannot be used as key in "
"._split_generator()."
)
logger.info(f"Generating {split_generator.split_info.name} split")
split_dict.add(split_generator.split_info)
try:
# Prepare split will record examples associated to the split
self._prepare_split(split_generator, **prepare_split_kwargs)
except OSError as e:
raise OSError(
"Cannot find data file. "
+ (self.manual_download_instructions or "")
+ "\nOriginal error:\n"
+ str(e)
) from None
# If check_duplicates is set to True, catch DuplicatedKeysError
except DuplicatedKeysError as e:
raise DuplicatedKeysError(
e.key,
e.duplicate_key_indices,
fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
) from None
dl_manager.manage_extracted_files()
if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
verify_splits(self.info.splits, split_dict)
# Update the info object with the splits.
self.info.splits = split_dict
self.info.download_size = dl_manager.downloaded_size
def download_post_processing_resources(self, dl_manager):
for split in self.info.splits or []:
for resource_name, resource_file_name in self._post_processing_resources(split).items():
if is_remote_filesystem(self._fs):
raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
if os.sep in resource_file_name:
raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
resource_path = os.path.join(self._output_dir, resource_file_name)
if not os.path.exists(resource_path):
downloaded_resource_path = self._download_post_processing_resources(
split, resource_name, dl_manager
)
if downloaded_resource_path:
logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
shutil.move(downloaded_resource_path, resource_path)
def _load_info(self) -> DatasetInfo:
return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
def _save_info(self):
is_local = not is_remote_filesystem(self._fs)
if is_local:
lock_path = self._output_dir + "_info.lock"
with FileLock(lock_path) if is_local else contextlib.nullcontext():
self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
def _save_infos(self):
is_local = not is_remote_filesystem(self._fs)
if is_local:
lock_path = self._output_dir + "_infos.lock"
with FileLock(lock_path) if is_local else contextlib.nullcontext():
DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
def _make_split_generators_kwargs(self, prepare_split_kwargs):
"""Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
del prepare_split_kwargs
return {}
def as_dataset(
self,
split: Optional[Split] = None,
run_post_process=True,
verification_mode: Optional[Union[VerificationMode, str]] = None,
ignore_verifications="deprecated",
in_memory=False,
) -> Union[Dataset, DatasetDict]:
"""Return a Dataset for the specified split.
Args:
split (`datasets.Split`):
Which subset of the data to return.
run_post_process (`bool`, defaults to `True`):
Whether to run post-processing dataset transforms and/or add
indexes.
verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
Verification mode determining the checks to run on the
downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/>
ignore_verifications (`bool`, defaults to `False`):
Whether to ignore the verifications of the
downloaded/processed dataset information (checksums/size/splits/...).
<Deprecated version="2.9.1">
`ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
Please use `verification_mode` instead.
</Deprecated>
in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
Returns:
datasets.Dataset
Example:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder('rotten_tomatoes')
>>> builder.download_and_prepare()
>>> ds = builder.as_dataset(split='train')
>>> ds
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
"""
if ignore_verifications != "deprecated":
verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
warnings.warn(
"'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
FutureWarning,
)
if self._file_format is not None and self._file_format != "arrow":
raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.')
is_local = not is_remote_filesystem(self._fs)
if not is_local:
raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
if not os.path.exists(self._output_dir):
raise FileNotFoundError(
f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call "
"builder.download_and_prepare(), or use "
"datasets.load_dataset() before trying to access the Dataset object."
)
logger.debug(f'Constructing Dataset for split {split or ", ".join(self.info.splits)}, from {self._output_dir}')
# By default, return all splits
if split is None:
split = {s: s for s in self.info.splits}
verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
# Create a dataset for each of the given splits
datasets = map_nested(
partial(
self._build_single_dataset,
run_post_process=run_post_process,
verification_mode=verification_mode,
in_memory=in_memory,
),
split,
map_tuple=True,
disable_tqdm=True,
)
if isinstance(datasets, dict):
datasets = DatasetDict(datasets)
return datasets
def _build_single_dataset(
self,
split: Union[str, ReadInstruction, Split],
run_post_process: bool,
verification_mode: VerificationMode,
in_memory: bool = False,
):
"""as_dataset for a single split."""
if not isinstance(split, ReadInstruction):
split = str(split)
if split == "all":
split = "+".join(self.info.splits.keys())
split = Split(split)
# Build base dataset
ds = self._as_dataset(
split=split,
in_memory=in_memory,
)
if run_post_process:
for resource_file_name in self._post_processing_resources(split).values():
if os.sep in resource_file_name:
raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
resources_paths = {
resource_name: os.path.join(self._output_dir, resource_file_name)
for resource_name, resource_file_name in self._post_processing_resources(split).items()
}
post_processed = self._post_process(ds, resources_paths)
if post_processed is not None:
ds = post_processed
recorded_checksums = {}
record_checksums = False
for resource_name, resource_path in resources_paths.items():
size_checksum = get_size_checksum_dict(resource_path)
recorded_checksums[resource_name] = size_checksum
if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
expected_checksums = None
else:
expected_checksums = self.info.post_processed.resources_checksums.get(split)
verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
if self.info.post_processed is None:
self.info.post_processed = PostProcessedInfo()
if self.info.post_processed.resources_checksums is None:
self.info.post_processed.resources_checksums = {}
self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
self.info.post_processing_size = sum(
checksums_dict["num_bytes"]
for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
for checksums_dict in split_checksums_dicts.values()
)
if self.info.dataset_size is not None and self.info.download_size is not None:
self.info.size_in_bytes = (
self.info.dataset_size + self.info.download_size + self.info.post_processing_size
)
self._save_info()
ds._info.post_processed = self.info.post_processed
ds._info.post_processing_size = self.info.post_processing_size
ds._info.size_in_bytes = self.info.size_in_bytes
if self.info.post_processed.features is not None:
if self.info.post_processed.features.type != ds.features.type:
raise ValueError(
f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}"
)
else:
ds.info.features = self.info.post_processed.features
return ds
def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset:
"""Constructs a `Dataset`.
This is the internal implementation to override; it is called when the user calls
`as_dataset`. It should read the pre-processed dataset files and generate
the `Dataset` object.
Args:
split (`datasets.Split`):
which subset of the data to read.
in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
Returns:
`Dataset`
"""
cache_dir = self._fs._strip_protocol(self._output_dir)
dataset_name = self.dataset_name
if self._has_legacy_cache():
dataset_name = self.name
dataset_kwargs = ArrowReader(cache_dir, self.info).read(
name=dataset_name,
instructions=split,
split_infos=self.info.splits.values(),
in_memory=in_memory,
)
fingerprint = self._get_dataset_fingerprint(split)
return Dataset(fingerprint=fingerprint, **dataset_kwargs)
def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str:
"""The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs."""
hasher = Hasher()
hasher.update(self._relative_data_dir().replace(os.sep, "/"))
hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder)
fingerprint = hasher.hexdigest()
return fingerprint
def as_streaming_dataset(
self,
split: Optional[str] = None,
base_path: Optional[str] = None,
) -> Union[Dict[str, IterableDataset], IterableDataset]:
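"""Create a streaming version of the dataset for the given split, without downloading and preparing the data first.
Returns an [`IterableDataset`] for the requested split, or an [`IterableDatasetDict`] with all splits if `split` is `None`.
Example (illustrative usage):
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder('rotten_tomatoes')
>>> ds = builder.as_streaming_dataset(split='train')
```
"""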
is_local = not is_remote_filesystem(self._fs)
if not is_local:
raise NotImplementedError(
f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet."
)
dl_manager = StreamingDownloadManager(
base_path=base_path or self.base_path,
download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
dataset_name=self.dataset_name,
data_dir=self.config.data_dir,
)
self._check_manual_download(dl_manager)
splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
# By default, return all splits
if split is None:
splits_generator = splits_generators
elif split in splits_generators:
splits_generator = splits_generators[split]
else:
raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}")
# Create a dataset for each of the given splits
datasets = map_nested(
self._as_streaming_dataset_single,
splits_generator,
map_tuple=True,
)
if isinstance(datasets, dict):
datasets = IterableDatasetDict(datasets)
return datasets
def _as_streaming_dataset_single(
self,
splits_generator,
) -> IterableDataset:
ex_iterable = self._get_examples_iterable_for_split(splits_generator)
# add auth to be able to access and decode audio/image files from private repositories.
token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {}
return IterableDataset(
ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id
)
def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]:
"""Run dataset transforms or add indexes"""
return None
def _post_processing_resources(self, split: str) -> Dict[str, str]:
"""Mapping resource_name -> resource_file_name"""
return {}
def _download_post_processing_resources(
self, split: str, resource_name: str, dl_manager: DownloadManager
) -> Optional[str]:
"""Download the resource using the download manager and return the downloaded path."""
return None
@abc.abstractmethod
def _split_generators(self, dl_manager: DownloadManager):
"""Specify feature dictionary generators and dataset splits.
This function returns a list of `SplitGenerator`s defining how to generate
data and what splits to use.
Example:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={'file': 'train_data.zip'},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={'file': 'test_data.zip'},
),
]
The above code will first call `_generate_examples(file='train_data.zip')`
to write the train data, then `_generate_examples(file='test_data.zip')` to
write the test data.
Datasets are typically split into different subsets to be used at various
stages of training and evaluation.
Note that for datasets without a `VALIDATION` split, you can use a
fraction of the `TRAIN` data for evaluation as you iterate on your model
so as not to overfit to the `TEST` data.
For downloads and extractions, use the given `download_manager`.
Note that the `DownloadManager` caches downloads, so it is fine to have each
generator attempt to download the source data.
A good practice is to download all data in this function, and then
distribute the relevant parts to each split with the `gen_kwargs` argument.
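For example, a minimal sketch (the URL and file names below are hypothetical, and `datasets` and `os` are assumed to be imported):
```py
def _split_generators(self, dl_manager):
    # Download and extract a hypothetical archive containing one file per split
    archive_dir = dl_manager.download_and_extract("https://example.com/data.zip")
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepath": os.path.join(archive_dir, "train.jsonl")},
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"filepath": os.path.join(archive_dir, "test.jsonl")},
        ),
    ]
```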
Args:
dl_manager (`DownloadManager`):
Download manager to download the data
Returns:
`list<SplitGenerator>`.
"""
raise NotImplementedError()
@abc.abstractmethod
def _prepare_split(
self,
split_generator: SplitGenerator,
file_format: str = "arrow",
max_shard_size: Optional[Union[str, int]] = None,
num_proc: Optional[int] = None,
**kwargs,
):
"""Generate the examples and record them on disk.
Args:
split_generator (`SplitGenerator`):
Split generator to process
file_format (`str`, *optional*):
format of the data files in which the dataset will be written.
Supported formats: "arrow", "parquet". Default to "arrow" format.
max_shard_size (`Union[str, int]`, *optional*):
Maximum number of bytes written per shard, default is "500MB".
The size is based on uncompressed data size, so in practice your shard files may be smaller than
`max_shard_size` thanks to Parquet compression for example.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.7.0"/>
**kwargs: Additional kwargs forwarded from _download_and_prepare (ex:
beam pipeline)
"""
raise NotImplementedError()
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
"""Generate the examples on the fly.
Args:
split_generator (`SplitGenerator`):
Split generator to process
"""
raise NotImplementedError()
class GeneratorBasedBuilder(DatasetBuilder):
"""Base class for datasets with data generation based on dict generators.
`GeneratorBasedBuilder` is a convenience class that abstracts away much
of the data writing and reading of `DatasetBuilder`. It expects subclasses to
implement generators of feature dictionaries across the dataset splits
(`_split_generators`). See the method docstrings for details.
"""
@abc.abstractmethod
def _generate_examples(self, **kwargs):
"""Default function generating examples for each `SplitGenerator`.
This function preprocesses the examples from the raw data into the preprocessed
dataset files.
This function is called once for each `SplitGenerator` defined in
`_split_generators`. The examples yielded here will be written on
disk.
Args:
**kwargs (additional keyword arguments):
Arguments forwarded from the SplitGenerator.gen_kwargs
Yields:
key: `str` or `int`, a unique deterministic example identification key.
* Unique: An error will be raised if two examples are yielded with the
same key.
* Deterministic: When generating the dataset twice, the same example
should have the same key.
Good keys can be the image id, or line number if examples are extracted
from a text file.
The key will be hashed and sorted to shuffle examples deterministically,
so that generating the dataset multiple times keeps the examples in the
same order.
example: `dict<str feature_name, feature_value>`, a feature dictionary
ready to be encoded and written to disk. The example will be
encoded with `self.info.features.encode_example({...})`.
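Example (an illustrative sketch; the JSON Lines format and field names are hypothetical):
```py
import json
def _generate_examples(self, filepath):
    # One example per line, keyed by its line number
    with open(filepath, encoding="utf-8") as f:
        for line_number, line in enumerate(f):
            record = json.loads(line)
            yield line_number, {"text": record["text"], "label": record["label"]}
```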
"""
raise NotImplementedError()
def _prepare_split(
self,
split_generator: SplitGenerator,
check_duplicate_keys: bool,
file_format="arrow",
num_proc: Optional[int] = None,
max_shard_size: Optional[Union[int, str]] = None,
):
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
is_local = not is_remote_filesystem(self._fs)
path_join = os.path.join if is_local else posixpath.join
if self.info.splits is not None:
split_info = self.info.splits[split_generator.name]
else:
split_info = split_generator.split_info
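# Shard file name pattern: JJJJJ is the job id, SSSSS the shard id within the job, and NNNNN the total number of shards; the placeholders are filled in when the shards are renamed at the end.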
SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = path_join(self._output_dir, fname)
if num_proc and num_proc > 1:
num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
if num_input_shards <= 1 and num_proc is not None:
logger.warning(
f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
)
num_proc = 1
elif num_proc is not None and num_input_shards < num_proc:
logger.warning(
f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
)
num_proc = num_input_shards
pbar = logging.tqdm(
disable=not logging.is_progress_bar_enabled(),
unit=" examples",
total=split_info.num_examples,
desc=f"Generating {split_info.name} split",
)
_prepare_split_args = {
"fpath": fpath,
"file_format": file_format,
"max_shard_size": max_shard_size,
"split_info": split_info,
"check_duplicate_keys": check_duplicate_keys,
}
if num_proc is None or num_proc == 1:
result = None
gen_kwargs = split_generator.gen_kwargs
job_id = 0
with pbar:
for job_id, done, content in self._prepare_split_single(
gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
):
if done:
result = content
else:
pbar.update(content)
# wrapping everything into lists for consistency with the multiprocessed code path
assert result is not None, "Failed to retrieve results from prepare_split"
examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
[item] for item in result
]
else:
kwargs_per_job = [
{"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
for job_id, gen_kwargs in enumerate(
_split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
)
]
num_jobs = len(kwargs_per_job)
examples_per_job = [None] * num_jobs
bytes_per_job = [None] * num_jobs
features_per_job = [None] * num_jobs
shards_per_job = [None] * num_jobs
shard_lengths_per_job = [None] * num_jobs
with Pool(num_proc) as pool:
with pbar:
for job_id, done, content in iflatmap_unordered(
pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
):
if done:
# the content is the result of the job
(
examples_per_job[job_id],
bytes_per_job[job_id],
features_per_job[job_id],
shards_per_job[job_id],
shard_lengths_per_job[job_id],
) = content
else:
# the content is the number of examples progress update
pbar.update(content)
assert (
None not in examples_per_job
), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
total_shards = sum(shards_per_job)
total_num_examples = sum(examples_per_job)
total_num_bytes = sum(bytes_per_job)
features = features_per_job[0]
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(shard_and_job: Tuple[int]):
shard_id, job_id = shard_and_job
global_shard_id = sum(shards_per_job[:job_id]) + shard_id
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
shards_and_jobs = [
(shard_id, job_id)
for job_id, num_shards in enumerate(shards_per_job)
for shard_id in range(num_shards)
]
thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64)
split_generator.split_info.shard_lengths = [
shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
]
else:
# don't use any pattern
shard_id, job_id = 0, 0
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace(SUFFIX, ""),
)
if self.info.features is None:
self.info.features = features
def _prepare_split_single(
self,
gen_kwargs: dict,
fpath: str,
file_format: str,
max_shard_size: int,
split_info: SplitInfo,
check_duplicate_keys: bool,
job_id: int,
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
generator = self._generate_examples(**gen_kwargs)
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
embed_local_files = file_format == "parquet"
shard_lengths = []
total_num_examples, total_num_bytes = 0, 0
shard_id = 0
num_examples_progress_update = 0
try:
writer = writer_class(
features=self.info.features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
hash_salt=split_info.name,
check_duplicates=check_duplicate_keys,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
try:
_time = time.time()
for key, record in generator:
if max_shard_size is not None and writer._num_bytes > max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
shard_id += 1
writer = writer_class(
features=writer._features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
hash_salt=split_info.name,
check_duplicates=check_duplicate_keys,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
example = self.info.features.encode_example(record) if self.info.features is not None else record
writer.write(example, key)
num_examples_progress_update += 1
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield job_id, False, num_examples_progress_update
num_examples_progress_update = 0
finally:
yield job_id, False, num_examples_progress_update
num_shards = shard_id + 1
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
except Exception as e:
# Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
e = e.__context__
raise DatasetGenerationError("An error occurred while generating the dataset") from e
yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
super()._download_and_prepare(
dl_manager,
verification_mode,
check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
or verification_mode == VerificationMode.ALL_CHECKS,
**prepare_splits_kwargs,
)
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs)
class ArrowBasedBuilder(DatasetBuilder):
"""Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet)."""
@abc.abstractmethod
def _generate_tables(self, **kwargs):
"""Default function generating examples for each `SplitGenerator`.
This function preprocesses the examples from the raw data into the preprocessed
dataset files.
This function is called once for each `SplitGenerator` defined in
`_split_generators`. The examples yielded here will be written on
disk.
Args:
**kwargs (additional keyword arguments):
Arguments forwarded from the SplitGenerator.gen_kwargs
Yields:
key: `str` or `int`, a unique deterministic example identification key.
* Unique: An error will be raised if two examples are yielded with the
same key.
* Deterministic: When generating the dataset twice, the same example
should have the same key.
Good keys can be the image id, or line number if examples are extracted
from a text file.
The key will be hashed and sorted to shuffle examples deterministically,
so that generating the dataset multiple times keeps the examples in the
same order.
example: `pyarrow.Table`, a feature table
ready to be encoded and written to disk.
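Example (an illustrative sketch assuming CSV input files):
```py
import pyarrow.csv as pac
def _generate_tables(self, files):
    # Yield one pyarrow.Table per input file, keyed by the file index
    for file_idx, file in enumerate(files):
        yield file_idx, pac.read_csv(file)
```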
"""
raise NotImplementedError()
def _prepare_split(
self,
split_generator: SplitGenerator,
file_format: str = "arrow",
num_proc: Optional[int] = None,
max_shard_size: Optional[Union[str, int]] = None,
):
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
is_local = not is_remote_filesystem(self._fs)
path_join = os.path.join if is_local else posixpath.join
try:
split_info = self.info.splits[split_generator.name]
except Exception:
split_info = split_generator.split_info
SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = path_join(self._output_dir, fname)
if num_proc and num_proc > 1:
num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
if num_input_shards <= 1 and num_proc is not None:
logger.warning(
f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
)
num_proc = 1
elif num_proc is not None and num_input_shards < num_proc:
logger.warning(
f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
)
num_proc = num_input_shards
pbar = logging.tqdm(
disable=not logging.is_progress_bar_enabled(),
unit=" examples",
total=split_info.num_examples,
desc=f"Generating {split_info.name} split",
)
_prepare_split_args = {
"fpath": fpath,
"file_format": file_format,
"max_shard_size": max_shard_size,
}
if num_proc is None or num_proc == 1:
result = None
gen_kwargs = split_generator.gen_kwargs
job_id = 0
with pbar:
for job_id, done, content in self._prepare_split_single(
gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
):
if done:
result = content
else:
pbar.update(content)
# wrapping everything into lists for consistency with the multiprocessed code path
assert result is not None, "Failed to retrieve results from prepare_split"
examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
[item] for item in result
]
else:
kwargs_per_job = [
{"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
for job_id, gen_kwargs in enumerate(
_split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
)
]
num_jobs = len(kwargs_per_job)
examples_per_job = [None] * num_jobs
bytes_per_job = [None] * num_jobs
features_per_job = [None] * num_jobs
shards_per_job = [None] * num_jobs
shard_lengths_per_job = [None] * num_jobs
with Pool(num_proc) as pool:
with pbar:
for job_id, done, content in iflatmap_unordered(
pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
):
if done:
# the content is the result of the job
(
examples_per_job[job_id],
bytes_per_job[job_id],
features_per_job[job_id],
shards_per_job[job_id],
shard_lengths_per_job[job_id],
) = content
else:
# the content is the number of examples progress update
pbar.update(content)
assert (
None not in examples_per_job
), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
total_shards = sum(shards_per_job)
total_num_examples = sum(examples_per_job)
total_num_bytes = sum(bytes_per_job)
features = features_per_job[0]
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(shard_id_and_job: Tuple[int]):
shard_id, job_id = shard_id_and_job
global_shard_id = sum(shards_per_job[:job_id]) + shard_id
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
shard_ids_and_jobs = [
(shard_id, job_id)
for job_id, num_shards in enumerate(shards_per_job)
for shard_id in range(num_shards)
]
thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64)
split_generator.split_info.shard_lengths = [
shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
]
else:
# don't use any pattern
shard_id, job_id = 0, 0
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace(SUFFIX, ""),
)
if self.info.features is None:
self.info.features = features
def _prepare_split_single(
self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
generator = self._generate_tables(**gen_kwargs)
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
embed_local_files = file_format == "parquet"
shard_lengths = []
total_num_examples, total_num_bytes = 0, 0
shard_id = 0
num_examples_progress_update = 0
try:
writer = writer_class(
features=self.info.features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
try:
_time = time.time()
for _, table in generator:
if max_shard_size is not None and writer._num_bytes > max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
shard_id += 1
writer = writer_class(
features=writer._features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
writer.write_table(table)
num_examples_progress_update += len(table)
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield job_id, False, num_examples_progress_update
num_examples_progress_update = 0
finally:
yield job_id, False, num_examples_progress_update
num_shards = shard_id + 1
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
except Exception as e:
# Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_tables before the first table was yielded
if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
e = e.__context__
raise DatasetGenerationError("An error occurred while generating the dataset") from e
yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs)
class MissingBeamOptions(ValueError):
pass
class BeamBasedBuilder(DatasetBuilder):
"""Beam-based Builder."""
def __init__(self, *args, beam_runner=None, beam_options=None, **kwargs):
self._beam_runner = beam_runner
self._beam_options = beam_options
self._beam_writers = {} # {split: beam_writer} mapping.
super().__init__(*args, **kwargs)
def _make_split_generators_kwargs(self, prepare_split_kwargs):
# Pass `pipeline` into `_split_generators()` from `prepare_split_kwargs` if
# it's in the call signature of `_split_generators()`.
# This allows for global preprocessing in beam.
split_generators_kwargs = {}
split_generators_arg_names = inspect.signature(self._split_generators).parameters.keys()
if "pipeline" in split_generators_arg_names:
split_generators_kwargs["pipeline"] = prepare_split_kwargs["pipeline"]
return split_generators_kwargs
@abc.abstractmethod
def _build_pcollection(self, pipeline, **kwargs):
"""Build the beam pipeline examples for each `SplitGenerator`.
This function extracts examples from the raw data with parallel transforms
in a Beam pipeline. It is called once for each `SplitGenerator` defined in
`_split_generators`. The examples from the PCollection will be
encoded and written to disk.
<Tip warning={true}>
Warning: When running in a distributed setup, make sure that the data
which will be read (download_dir, manual_dir,...) and written (cache_dir)
can be accessed by the workers jobs. The data should be located in a
shared filesystem, like GCS.
</Tip>
Args:
pipeline ([`utils.beam_utils.BeamPipeline`]):
Apache Beam pipeline.
**kwargs (additional keyword arguments):
Arguments forwarded from the SplitGenerator.gen_kwargs.
Returns:
`beam.PCollection`: Apache Beam PCollection containing the
example to send to `self.info.features.encode_example(...)`.
Example:
```
def _build_pcollection(pipeline, extracted_dir=None):
return (
pipeline
| beam.Create(gfile.io.listdir(extracted_dir))
| beam.Map(_process_file)
)
```
"""
raise NotImplementedError()
def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
# Create the Beam pipeline and forward it to `_prepare_split`
import apache_beam as beam
import datasets.utils.beam_utils as beam_utils
beam_runner = self._beam_runner
beam_options = self._beam_options
if not beam_runner and not beam_options:
usage_example = f"load_dataset('{self.name}', '{self.config.name}', beam_runner='DirectRunner')"
raise MissingBeamOptions(
"Trying to generate a dataset using Apache Beam, yet no Beam Runner "
"or PipelineOptions() has been provided in `load_dataset` or in the "
"builder arguments. For big datasets it has to run on large-scale data "
"processing tools like Dataflow, Spark, etc. More information about "
"Apache Beam runners at "
"https://beam.apache.org/documentation/runners/capability-matrix/"
"\nIf you really want to run it locally because you feel like the "
"Dataset is small enough, you can use the local beam runner called "
"`DirectRunner` (you may run out of memory). \nExample of usage: "
f"\n\t`{usage_example}`"
)
if self._writer_batch_size is not None:
logger.warning(
"`writer_batch_size` is not supported for beam pipelines yet. Using the default chunk size for writing."
)
# Beam type checking assumes that a transform's multiple outputs are of the same type,
# which is not our case. Plus it doesn't correctly handle all types, so we
# are better off without it.
pipeline_options = {"pipeline_type_check": False}
if "num_proc" in prepare_splits_kwargs:
num_workers = prepare_splits_kwargs.pop("num_proc")
pipeline_options["direct_num_workers"] = num_workers
pipeline_options["num_workers"] = num_workers
pipeline_options["direct_running_mode"] = "multi_processing"
# TODO: Fix ModuleNotFoundError: No module named 'datasets_modules' when running multiprocessed DirectRunner
raise NotImplementedError("Using a DirectRunner with `num_proc` for multiprocessing it not supported yet.")
beam_options = beam_options or beam.options.pipeline_options.PipelineOptions.from_dictionary(pipeline_options)
# Use a single pipeline for all splits
pipeline = beam_utils.BeamPipeline(
runner=beam_runner,
options=beam_options,
)
super()._download_and_prepare(
dl_manager, verification_mode=VerificationMode.NO_CHECKS, pipeline=pipeline, **prepare_splits_kwargs
) # TODO handle verification_mode in beam datasets
# Run pipeline
pipeline_results = pipeline.run()
pipeline_results.wait_until_finish()
metrics = pipeline_results.metrics()
# Update `info.splits`.
split_dict = self.info.splits
for split_name, beam_writer in self._beam_writers.items():
m_filter = beam.metrics.MetricsFilter().with_namespace(namespace=split_name)
num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter))
split_info = split_dict[split_name]
split_info.num_examples = num_examples
split_info.num_bytes = num_bytes
if hasattr(beam_writer, "_shard_lengths") and len(beam_writer._shard_lengths) > 1:
# keep the -SSSSS-of-NNNNN pattern
split_info.shard_lengths = beam_writer._shard_lengths
else:
# don't use any pattern
file_format = prepare_splits_kwargs.get("file_format", "arrow")
src_fname = f"{self.dataset_name}-{split_name}-00000-of-00001.{file_format}"
dst_fname = f"{self.dataset_name}-{split_name}.{file_format}"
path_join = os.path.join if not is_remote_filesystem(self._fs) else posixpath.join
src_fpath = path_join(self._output_dir, src_fname)
dst_fpath = path_join(self._output_dir, dst_fname)
self._rename(src_fpath, dst_fpath)
def _save_info(self):
import apache_beam as beam
fs = beam.io.filesystems.FileSystems
path_join = os.path.join if not is_remote_filesystem(self._fs) else posixpath.join
with fs.create(path_join(self._output_dir, config.DATASET_INFO_FILENAME)) as f:
self.info._dump_info(f)
if self.info.license:
with fs.create(path_join(self._output_dir, config.LICENSE_FILENAME)) as f:
self.info._dump_license(f)
def _prepare_split(
self, split_generator, pipeline, file_format="arrow", max_shard_size: Optional[Union[str, int]] = None
):
import apache_beam as beam
if max_shard_size is not None:
raise NotImplementedError(
"max_shard_size is not supported for Beam datasets."
"Please set it to None to use the default Apache Beam sharding and get the best performance."
)
# To write examples in filesystem:
split_name = split_generator.split_info.name
fname = f"{self.dataset_name}-{split_name}.{file_format}"
path_join = os.path.join if not is_remote_filesystem(self._fs) else posixpath.join
fpath = path_join(self._output_dir, fname)
beam_writer = BeamWriter(
features=self.info.features, path=fpath, namespace=split_name, cache_dir=self._output_dir
)
self._beam_writers[split_name] = beam_writer
encode_example = self.info.features.encode_example
# Note: We need to wrap the pipeline in a PTransform to avoid re-using the
# same label names for each split
@beam.ptransform_fn
def _build_pcollection(pipeline):
"""PTransformation which build a single split."""
# Encode the PCollection
pcoll_examples = self._build_pcollection(pipeline, **split_generator.gen_kwargs)
pcoll_examples |= "Encode" >> beam.Map(lambda key_ex: (key_ex[0], encode_example(key_ex[1])))
return beam_writer.write_from_pcollection(pcoll_examples)
# Add the PCollection to the pipeline
_ = pipeline | split_name >> _build_pcollection()  # pylint: disable=no-value-for-parameter
def as_streaming_dataset(
self,
split: Optional[str] = None,
) -> Union[Dict[str, IterableDataset], IterableDataset]:
self._request_info_from_hf_gcs()
datasets = {
split.name: IterableDataset(self._get_examples_iterable_for_split(split), info=self.info, split=split.name)
for split in self.info.splits.values()
}
if split:
try:
datasets = datasets[split]
except KeyError:
raise ValueError(f"Bad split: {split}. Available splits: {list(datasets)}")
if isinstance(datasets, dict):
datasets = IterableDatasetDict(datasets)
return datasets
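# Illustrative end-user usage of the streaming path above (assuming a Beam dataset whose
# preprocessed Arrow files are hosted on the HF GCS bucket, e.g. "wikipedia"; names are assumptions):
#
#     from datasets import load_dataset
#     ds = load_dataset("wikipedia", "20220301.en", split="train", streaming=True)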
def _get_examples_iterable_for_split(self, split: SplitInfo) -> ExamplesIterable:
return ExamplesIterable(self._generate_examples_from_hf_gcs, {"split": split})
def _generate_examples_from_hf_gcs(self, split: SplitInfo):
if split.shard_lengths:
num_shards = len(split.shard_lengths)
remote_prepared_urls = [
f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}-{shard_id:05d}-of-{num_shards:05d}.arrow"
for shard_id in range(num_shards)
]
else:
remote_prepared_urls = [f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}.arrow"]
key = 0
for remote_prepared_url in remote_prepared_urls:
with xopen(remote_prepared_url, "rb") as f:
with pa.ipc.open_stream(f) as reader:
for record_batch in reader:
for record in record_batch.to_pylist():
yield key, record
key += 1
def _request_info_from_hf_gcs(self):
from .download.streaming_download_manager import xopen
remote_dataset_info = f"{self._remote_cache_dir_from_hf_gcs}/{config.DATASET_INFO_FILENAME}"
try:
with xopen(remote_dataset_info) as f:
import json
_info = json.load(f)
except FileNotFoundError as err:
raise DatasetNotOnHfGcsError(err) from None
self.info.update(DatasetInfo.from_dict(_info))
@property
def _remote_cache_dir_from_hf_gcs(self):
relative_data_dir = self._relative_data_dir(with_hash=False)
return HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
| datasets-main | src/datasets/builder.py |
# Copyright 2020 The HuggingFace Datasets Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" Metrics base class."""
import os
import types
import uuid
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
from . import config
from .arrow_dataset import Dataset
from .arrow_reader import ArrowReader
from .arrow_writer import ArrowWriter
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadManager
from .features import Features
from .info import DatasetInfo, MetricInfo
from .naming import camelcase_to_snakecase
from .utils.deprecation_utils import deprecated
from .utils.filelock import BaseFileLock, FileLock, Timeout
from .utils.logging import get_logger
from .utils.py_utils import copyfunc, temp_seed
logger = get_logger(__name__)
class FileFreeLock(BaseFileLock):
"""Thread lock until a file **cannot** be locked"""
def __init__(self, lock_file, *args, **kwargs):
self.filelock = FileLock(lock_file)
super().__init__(lock_file, *args, **kwargs)
def _acquire(self):
try:
self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
except Timeout:
# We couldn't acquire the lock, the file is locked!
self._lock_file_fd = self.filelock.lock_file
else:
# We were able to acquire the lock, the file is not yet locked!
self.filelock.release()
self._lock_file_fd = None
def _release(self):
self._lock_file_fd = None
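# Illustrative sketch of how `FileFreeLock` is used further below: a process can wait until
# *another* process holds "file.arrow.lock" before moving on (the file name is a placeholder).
#
#     nofilelock = FileFreeLock("file.arrow.lock")
#     nofilelock.acquire(timeout=100)  # succeeds only once the underlying FileLock is held elsewhere
#     nofilelock.release()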
# lists - summarize long lists similarly to NumPy
# arrays/tensors - let the frameworks control formatting
def summarize_if_long_list(obj):
if not type(obj) == list or len(obj) <= 6: # noqa: E721
return f"{obj}"
def format_chunk(chunk):
return ", ".join(repr(x) for x in chunk)
return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
class MetricInfoMixin:
"""This base class exposes some attributes of MetricInfo
at the base level of the Metric for easy access.
<Deprecated version="2.5.0">
Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
</Deprecated>
"""
def __init__(self, info: MetricInfo):
self._metric_info = info
@property
def info(self):
""":class:`datasets.MetricInfo` object containing all the metadata in the metric."""
return self._metric_info
@property
def name(self) -> str:
return self._metric_info.metric_name
@property
def experiment_id(self) -> Optional[str]:
return self._metric_info.experiment_id
@property
def description(self) -> str:
return self._metric_info.description
@property
def citation(self) -> str:
return self._metric_info.citation
@property
def features(self) -> Features:
return self._metric_info.features
@property
def inputs_description(self) -> str:
return self._metric_info.inputs_description
@property
def homepage(self) -> Optional[str]:
return self._metric_info.homepage
@property
def license(self) -> str:
return self._metric_info.license
@property
def codebase_urls(self) -> Optional[List[str]]:
return self._metric_info.codebase_urls
@property
def reference_urls(self) -> Optional[List[str]]:
return self._metric_info.reference_urls
@property
def streamable(self) -> bool:
return self._metric_info.streamable
@property
def format(self) -> Optional[str]:
return self._metric_info.format
class Metric(MetricInfoMixin):
"""A Metric is the base class and common API for all metrics.
<Deprecated version="2.5.0">
Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
</Deprecated>
Args:
config_name (``str``): This is used to define a hash specific to a metric computation script and prevents the metric's data
from being overridden when the metric loading script is modified.
keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (``int``): specify the total number of nodes in a distributed setting.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
timeout (``Union[int, float]``): Timeout in seconds for distributed setting synchronization.
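Example (illustrative sketch of a distributed run; each process runs the same code with its own
`process_id`, and "accuracy" is assumed to be an available metric script):
```py
>>> from datasets import load_metric
>>> metric = load_metric("accuracy", num_process=2, process_id=0, experiment_id="my_exp")
>>> metric.add_batch(predictions=[0, 1, 1], references=[0, 1, 0])
>>> result = metric.compute()  # dict on process 0, None on the other processes
```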
"""
@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
def __init__(
self,
config_name: Optional[str] = None,
keep_in_memory: bool = False,
cache_dir: Optional[str] = None,
num_process: int = 1,
process_id: int = 0,
seed: Optional[int] = None,
experiment_id: Optional[str] = None,
max_concurrent_cache_files: int = 10000,
timeout: Union[int, float] = 100,
**kwargs,
):
# prepare info
self.config_name = config_name or "default"
info = self._info()
info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
info.config_name = self.config_name
info.experiment_id = experiment_id or "default_experiment"
MetricInfoMixin.__init__(self, info) # For easy access on low level
# Safety checks on num_process and process_id
if not isinstance(process_id, int) or process_id < 0:
raise ValueError("'process_id' should be a number greater than 0")
if not isinstance(num_process, int) or num_process <= process_id:
raise ValueError("'num_process' should be a number greater than process_id")
if keep_in_memory and num_process != 1:
raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
self.num_process = num_process
self.process_id = process_id
self.max_concurrent_cache_files = max_concurrent_cache_files
self.keep_in_memory = keep_in_memory
self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
self.data_dir = self._build_data_dir()
if seed is None:
_, seed, pos, *_ = np.random.get_state()
self.seed: int = seed[pos] if pos < 624 else seed[0]
else:
self.seed: int = seed
self.timeout: Union[int, float] = timeout
# Update 'compute' and 'add' docstring
# methods need to be copied otherwise it changes the docstrings of every instance
self.compute = types.MethodType(copyfunc(self.compute), self)
self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
self.add = types.MethodType(copyfunc(self.add), self)
self.compute.__func__.__doc__ += self.info.inputs_description
self.add_batch.__func__.__doc__ += self.info.inputs_description
self.add.__func__.__doc__ += self.info.inputs_description
# self.arrow_schema = pa.schema(field for field in self.info.features.type)
self.buf_writer = None
self.writer = None
self.writer_batch_size = None
self.data = None
# This is the cache file we store our predictions/references in
# Keep it None for now so we can (cloud)pickle the object
self.cache_file_name = None
self.filelock = None
self.rendez_vous_lock = None
# This is all the cache files on which we have a lock when we are in a distributed setting
self.file_paths = None
self.filelocks = None
def __len__(self):
"""Return the number of examples (predictions or predictions/references pair)
currently stored in the metric's cache.
"""
return 0 if self.writer is None else len(self.writer)
def __repr__(self):
return (
f'Metric(name: "{self.name}", features: {self.features}, '
f'usage: """{self.inputs_description}""", '
f"stored examples: {len(self)})"
)
def _build_data_dir(self):
"""Path of this metric in cache_dir:
Will be:
self._data_dir_root/self.name/self.config_name/
If any of these elements is missing, the corresponding subfolder is dropped.
"""
builder_data_dir = self._data_dir_root
builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
os.makedirs(builder_data_dir, exist_ok=True)
return builder_data_dir
def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
"""Create a new cache file. If the default cache file is used, we generated a new hash."""
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
filelock = None
for i in range(self.max_concurrent_cache_files):
filelock = FileLock(file_path + ".lock")
try:
filelock.acquire(timeout=timeout)
except Timeout:
# If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup)
# We raise an error
if self.num_process != 1:
raise ValueError(
f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
f"between distributed metric instances."
) from None
if i == self.max_concurrent_cache_files - 1:
raise ValueError(
f"Cannot acquire lock, too many metric instance are operating concurrently on this file system."
f"You should set a larger value of max_concurrent_cache_files when creating the metric "
f"(current value is {self.max_concurrent_cache_files})."
) from None
# In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name.
file_uuid = str(uuid.uuid4())
file_path = os.path.join(
self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
)
else:
break
return file_path, filelock
def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
"""Get a lock on all the cache files in a distributed setup.
We wait up to `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
"""
if self.num_process == 1:
if self.cache_file_name is None:
raise ValueError(
"Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
"at least once before calling `compute`."
)
file_paths = [self.cache_file_name]
else:
file_paths = [
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
for process_id in range(self.num_process)
]
# Let's acquire a lock on each process files to be sure they are finished writing
filelocks = []
for process_id, file_path in enumerate(file_paths):
if process_id == 0: # process 0 already has its lock file
filelocks.append(self.filelock)
else:
filelock = FileLock(file_path + ".lock")
try:
filelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Cannot acquire lock on cached file {file_path} for process {process_id}."
) from None
else:
filelocks.append(filelock)
return file_paths, filelocks
def _check_all_processes_locks(self):
expected_lock_file_names = [
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
for process_id in range(self.num_process)
]
for expected_lock_file_name in expected_lock_file_names:
nofilelock = FileFreeLock(expected_lock_file_name)
try:
nofilelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
) from None
else:
nofilelock.release()
def _check_rendez_vous(self):
expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
nofilelock = FileFreeLock(expected_lock_file_name)
try:
nofilelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
) from None
else:
nofilelock.release()
lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
rendez_vous_lock = FileLock(lock_file_name)
try:
rendez_vous_lock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
else:
rendez_vous_lock.release()
def _finalize(self):
"""Close all the writing process and load/gather the data
from all the nodes if main node or all_process is True.
"""
if self.writer is not None:
self.writer.finalize()
self.writer = None
# release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
if self.filelock is not None and self.process_id > 0:
self.filelock.release()
if self.keep_in_memory:
# Read the predictions and references
reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
self.data = Dataset.from_buffer(self.buf_writer.getvalue())
elif self.process_id == 0:
# Let's acquire a lock on each node files to be sure they are finished writing
file_paths, filelocks = self._get_all_cache_files()
# Read the predictions and references
try:
reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
except FileNotFoundError:
raise ValueError(
"Error in finalize: another metric instance is already using the local cache file. "
"Please specify an experiment_id to avoid collision between distributed metric instances."
) from None
# Store file paths and locks and we will release/delete them after the computation.
self.file_paths = file_paths
self.filelocks = filelocks
def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
"""Compute the metrics.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (list/array/tensor, optional): Predictions.
references (list/array/tensor, optional): References.
**kwargs (optional): Keyword arguments that will be forwarded to the metrics :meth:`_compute`
method (see details in the docstring).
Return:
dict or None
- Dictionary with the metrics if this metric is run on the main process (``process_id == 0``).
- None if the metric is not run on the main process (``process_id != 0``).
Example:
```py
>>> from datasets import load_metric
>>> metric = load_metric("accuracy")
>>> accuracy = metric.compute(predictions=model_prediction, references=labels)
```
"""
all_kwargs = {"predictions": predictions, "references": references, **kwargs}
if predictions is None and references is None:
missing_kwargs = {k: None for k in self.features if k not in all_kwargs}
all_kwargs.update(missing_kwargs)
else:
missing_inputs = [k for k in self.features if k not in all_kwargs]
if missing_inputs:
raise ValueError(
f"Metric inputs are missing: {missing_inputs}. All required inputs are {list(self.features)}"
)
inputs = {input_name: all_kwargs[input_name] for input_name in self.features}
compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features}
if any(v is not None for v in inputs.values()):
self.add_batch(**inputs)
self._finalize()
self.cache_file_name = None
self.filelock = None
if self.process_id == 0:
self.data.set_format(type=self.info.format)
inputs = {input_name: self.data[input_name] for input_name in self.features}
with temp_seed(self.seed):
output = self._compute(**inputs, **compute_kwargs)
if self.buf_writer is not None:
self.buf_writer = None
del self.data
self.data = None
else:
# Release locks and delete all the cache files. Process 0 is released last.
for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
logger.info(f"Removing {file_path}")
del self.data
self.data = None
del self.writer
self.writer = None
os.remove(file_path)
filelock.release()
return output
else:
return None
def add_batch(self, *, predictions=None, references=None, **kwargs):
"""Add a batch of predictions and references for the metric's stack.
Args:
predictions (list/array/tensor, optional): Predictions.
references (list/array/tensor, optional): References.
Example:
```py
>>> from datasets import load_metric
>>> metric = load_metric("accuracy")
>>> metric.add_batch(predictions=model_prediction, references=labels)
```
"""
bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
if bad_inputs:
raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
batch = {"predictions": predictions, "references": references, **kwargs}
batch = {input_name: batch[input_name] for input_name in self.features}
batch = self.info.features.encode_batch(batch)
if self.writer is None:
self._init_writer()
try:
self.writer.write_batch(batch)
except pa.ArrowInvalid:
if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
col0 = next(iter(batch))
bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
error_msg = (
f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
)
elif sorted(self.features) != ["predictions", "references"]:
error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
error_msg_inputs = ",\n".join(
f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features
)
error_msg += error_msg_inputs
else:
error_msg = (
f"Predictions and/or references don't match the expected format.\n"
f"Expected format: {self.features},\n"
f"Input predictions: {summarize_if_long_list(predictions)},\n"
f"Input references: {summarize_if_long_list(references)}"
)
raise ValueError(error_msg) from None
def add(self, *, prediction=None, reference=None, **kwargs):
"""Add one prediction and reference for the metric's stack.
Args:
prediction (list/array/tensor, optional): Predictions.
reference (list/array/tensor, optional): References.
Example:
```py
>>> from datasets import load_metric
>>> metric = load_metric("accuracy")
>>> metric.add(predictions=model_predictions, references=labels)
```
"""
bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
if bad_inputs:
raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
example = {"predictions": prediction, "references": reference, **kwargs}
example = {input_name: example[input_name] for input_name in self.features}
example = self.info.features.encode_example(example)
if self.writer is None:
self._init_writer()
try:
self.writer.write(example)
except pa.ArrowInvalid:
error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
error_msg_inputs = ",\n".join(
f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features
)
error_msg += error_msg_inputs
raise ValueError(error_msg) from None
def _init_writer(self, timeout=1):
if self.num_process > 1:
if self.process_id == 0:
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
self.rendez_vous_lock = FileLock(file_path)
try:
self.rendez_vous_lock.acquire(timeout=timeout)
except Timeout:
raise ValueError(
f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
f"between distributed metric instances."
) from None
if self.keep_in_memory:
self.buf_writer = pa.BufferOutputStream()
self.writer = ArrowWriter(
features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
)
else:
self.buf_writer = None
# Get cache file name and lock it
if self.cache_file_name is None or self.filelock is None:
cache_file_name, filelock = self._create_cache_file() # get ready
self.cache_file_name = cache_file_name
self.filelock = filelock
self.writer = ArrowWriter(
features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
)
# Setup rendez-vous here if we are in a distributed setting
if self.num_process > 1:
if self.process_id == 0:
self._check_all_processes_locks() # wait for everyone to be ready
self.rendez_vous_lock.release() # let everyone go
else:
self._check_rendez_vous() # wait for master to be ready and to let everyone go
def _info(self) -> MetricInfo:
"""Construct the MetricInfo object. See `MetricInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (MetricInfo) The metrics information
"""
raise NotImplementedError
def download_and_prepare(
self,
download_config: Optional[DownloadConfig] = None,
dl_manager: Optional[DownloadManager] = None,
):
"""Downloads and prepares dataset for reading.
Args:
download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
dl_manager (:class:`DownloadManager`, optional): Specific download manager to use.
"""
if dl_manager is None:
if download_config is None:
download_config = DownloadConfig()
download_config.cache_dir = os.path.join(self.data_dir, "downloads")
download_config.force_download = False
dl_manager = DownloadManager(
dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
)
self._download_and_prepare(dl_manager)
def _download_and_prepare(self, dl_manager):
"""Downloads and prepares resources for the metric.
This is the internal implementation to overwrite called when user calls
`download_and_prepare`. It should download all required resources for the metric.
Args:
dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
"""
return None
def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
"""This method defines the common API for all the metrics in the library"""
raise NotImplementedError
def __del__(self):
if hasattr(self, "filelock") and self.filelock is not None:
self.filelock.release()
if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
self.rendez_vous_lock.release()
if hasattr(self, "writer"): # in case it was already deleted
del self.writer
if hasattr(self, "data"): # in case it was already deleted
del self.data
| datasets-main | src/datasets/metric.py |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" List and inspect datasets."""
import inspect
import os
import shutil
import warnings
from pathlib import PurePath
from typing import Dict, List, Mapping, Optional, Sequence, Union
import huggingface_hub
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadMode
from .download.streaming_download_manager import StreamingDownloadManager
from .info import DatasetInfo
from .load import (
dataset_module_factory,
get_dataset_builder_class,
import_main_class,
load_dataset_builder,
metric_module_factory,
)
from .utils.deprecation_utils import deprecated
from .utils.file_utils import relative_to_absolute_path
from .utils.logging import get_logger
from .utils.version import Version
logger = get_logger(__name__)
class SplitsNotFoundError(ValueError):
pass
@deprecated("Use 'huggingface_hub.list_datasets' instead.")
def list_datasets(with_community_datasets=True, with_details=False):
"""List all the datasets scripts available on the Hugging Face Hub.
Args:
with_community_datasets (`bool`, *optional*, defaults to `True`):
Include the community provided datasets.
with_details (`bool`, *optional*, defaults to `False`):
Return the full details on the datasets instead of only the short name.
Example:
```py
>>> from datasets import list_datasets
>>> list_datasets()
['acronym_identification',
'ade_corpus_v2',
'adversarial_qa',
'aeslc',
'afrikaans_ner_corpus',
'ag_news',
...
]
```
"""
datasets = huggingface_hub.list_datasets(full=with_details)
if not with_community_datasets:
datasets = [dataset for dataset in datasets if "/" not in dataset.id]
if not with_details:
datasets = [dataset.id for dataset in datasets]
return list(datasets)
@deprecated(
"Use 'evaluate.list_evaluation_modules' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
)
def list_metrics(with_community_metrics=True, with_details=False):
"""List all the metrics script available on the Hugging Face Hub.
<Deprecated version="2.5.0">
Use `evaluate.list_evaluation_modules` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
</Deprecated>
Args:
with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
Example:
```py
>>> from datasets import list_metrics
>>> list_metrics()
['accuracy',
'bertscore',
'bleu',
'bleurt',
'cer',
'chrf',
...
]
```
"""
metrics = huggingface_hub.list_metrics()
if not with_community_metrics:
metrics = [metric for metric in metrics if "/" not in metric.id]
if not with_details:
metrics = [metric.id for metric in metrics]
return metrics
@deprecated("Clone the dataset repository from the Hugging Face Hub instead.")
def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
"""
Allow inspection/modification of a dataset script by copying it on the local drive at `local_path`.
Args:
path (`str`): Path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name
as the directory),
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
local_path (`str`):
Path to the local folder to copy the dataset script to.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
**download_kwargs (additional keyword arguments):
Optional arguments for [`DownloadConfig`] which will override
the attributes of `download_config` if supplied.
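Example (illustrative; assumes `'squad'` hosts a loading script and the local folder path is a placeholder):
```py
>>> from datasets import inspect_dataset
>>> inspect_dataset("squad", "path/to/my/local/folder")
```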
"""
dataset_module = dataset_module_factory(path, download_config=download_config, **download_kwargs)
builder_cls = get_dataset_builder_class(dataset_module)
module_source_path = inspect.getsourcefile(builder_cls)
module_source_dirpath = os.path.dirname(module_source_path)
for dirpath, dirnames, filenames in os.walk(module_source_dirpath):
dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath))
os.makedirs(dst_dirpath, exist_ok=True)
# skipping hidden directories; prune the search
# [:] for the in-place list modification required by os.walk
dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))]
for filename in filenames:
shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename))
shutil.copystat(dirpath, dst_dirpath)
local_path = relative_to_absolute_path(local_path)
print(
f"The processing script for dataset {path} can be inspected at {local_path}. "
f"The main class is in {module_source_dirpath}. "
f'You can modify this processing script and use it with `datasets.load_dataset("{PurePath(local_path).as_posix()}")`.'
)
@deprecated(
"Use 'evaluate.inspect_evaluation_module' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
)
def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
r"""
Allow inspection/modification of a metric script by copying it on local drive at local_path.
<Deprecated version="2.5.0">
Use `evaluate.inspect_evaluation_module` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
</Deprecated>
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
local_path (``str``): path to the local folder to copy the metric script to.
download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
**download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
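Example (illustrative; assumes the `'accuracy'` metric script is available and the local folder path is a placeholder):
```py
>>> from datasets import inspect_metric
>>> inspect_metric("accuracy", "path/to/my/local/folder")
```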
"""
metric_module = metric_module_factory(path, download_config=download_config, **download_kwargs)
metric_cls = import_main_class(metric_module.module_path, dataset=False)
module_source_path = inspect.getsourcefile(metric_cls)
module_source_dirpath = os.path.dirname(module_source_path)
for dirpath, dirnames, filenames in os.walk(module_source_dirpath):
dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath))
os.makedirs(dst_dirpath, exist_ok=True)
# skipping hidden directories; prune the search
dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))]
for filename in filenames:
shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename))
shutil.copystat(dirpath, dst_dirpath)
local_path = relative_to_absolute_path(local_path)
print(
f"The processing scripts for metric {path} can be inspected at {local_path}. "
f"The main class is in {module_source_dirpath}. "
f'You can modify this processing scripts and use it with `datasets.load_metric("{PurePath(local_path).as_posix()}")`.'
)
def get_dataset_infos(
path: str,
data_files: Optional[Union[Dict, List, str]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
use_auth_token="deprecated",
**config_kwargs,
):
"""Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.
Args:
path (`str`): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
e.g. `'squad'`, `'glue'` or `'openai/webtext'`
revision (`Union[str, datasets.Version]`, *optional*):
If specified, the dataset module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
- it will also try to load it from the main branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
data_files (`Union[Dict, List, str]`, *optional*):
Defining the data_files of the dataset configuration.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
**config_kwargs (additional keyword arguments):
Optional attributes for builder class which will override the attributes if supplied.
Example:
```py
>>> from datasets import get_dataset_infos
>>> get_dataset_infos('rotten_tomatoes')
{'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
```
"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
config_names = get_dataset_config_names(
path=path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
data_files=data_files,
token=token,
)
return {
config_name: get_dataset_config_info(
path=path,
config_name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
token=token,
**config_kwargs,
)
for config_name in config_names
}
def get_dataset_config_names(
path: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
data_files: Optional[Union[Dict, List, str]] = None,
**download_kwargs,
):
"""Get the list of available config names for a particular dataset.
Args:
path (`str`): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
e.g. `'squad'`, `'glue'` or `'openai/webtext'`
revision (`Union[str, datasets.Version]`, *optional*):
If specified, the dataset module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
- it will also try to load it from the main branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
By default the datasets and metrics are stored inside the `datasets_modules` module.
data_files (`Union[Dict, List, str]`, *optional*):
Defining the data_files of the dataset configuration.
**download_kwargs (additional keyword arguments):
Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
for example `token`.
Example:
```py
>>> from datasets import get_dataset_config_names
>>> get_dataset_config_names("glue")
['cola',
'sst2',
'mrpc',
'qqp',
'stsb',
'mnli',
'mnli_mismatched',
'mnli_matched',
'qnli',
'rte',
'wnli',
'ax']
```
"""
dataset_module = dataset_module_factory(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
data_files=data_files,
**download_kwargs,
)
builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
return list(builder_cls.builder_configs.keys()) or [dataset_module.builder_kwargs.get("config_name", "default")]
def get_dataset_config_info(
path: str,
config_name: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
use_auth_token="deprecated",
**config_kwargs,
) -> DatasetInfo:
"""Get the meta information (DatasetInfo) about a dataset for a particular config
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If True, or not specified, will get token from `"~/.huggingface"`.
use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If True, or not specified, will get token from `"~/.huggingface"`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
**config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
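Example (illustrative sketch; assumes the `'rotten_tomatoes'` dataset is available on the Hub):
```py
>>> from datasets import get_dataset_config_info
>>> info = get_dataset_config_info("rotten_tomatoes")
>>> list(info.splits)
['train', 'validation', 'test']
```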
"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
builder = load_dataset_builder(
path,
name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
token=token,
**config_kwargs,
)
info = builder.info
if info.splits is None:
download_config = download_config.copy() if download_config else DownloadConfig()
if token is not None:
download_config.token = token
builder._check_manual_download(
StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
)
try:
info.splits = {
split_generator.name: {"name": split_generator.name, "dataset_name": path}
for split_generator in builder._split_generators(
StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
)
}
except Exception as err:
raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
return info
def get_dataset_split_names(
path: str,
config_name: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
use_auth_token="deprecated",
**config_kwargs,
):
"""Get the list of available splits for a particular config and dataset.
Args:
path (`str`): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
e.g. `'squad'`, `'glue'` or `'openai/webtext'`
config_name (`str`, *optional*):
Defining the name of the dataset configuration.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
revision ([`Version`] or `str`, *optional*):
Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
**config_kwargs (additional keyword arguments):
Optional attributes for builder class which will override the attributes if supplied.
Example:
```py
>>> from datasets import get_dataset_split_names
>>> get_dataset_split_names('rotten_tomatoes')
['train', 'validation', 'test']
```
"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
info = get_dataset_config_info(
path,
config_name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
token=token,
**config_kwargs,
)
return list(info.splits.keys())
| datasets-main | src/datasets/inspect.py |
from typing import TypeVar
from .arrow_dataset import Dataset, _split_by_node_map_style_dataset
from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
"""
Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
For map-style datasets:
Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
For iterable datasets:
If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
then the shards are evenly assigned across the nodes, which is the most optimized.
Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
Args:
dataset ([`Dataset`] or [`IterableDataset`]):
The dataset to split by node.
rank (`int`):
Rank of the current node.
world_size (`int`):
Total number of nodes.
Returns:
[`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
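Example (illustrative sketch; the dataset name, `rank` and `world_size` values are placeholders):
```py
>>> from datasets import load_dataset
>>> from datasets.distributed import split_dataset_by_node
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> ds_for_this_node = split_dataset_by_node(ds, rank=0, world_size=8)
```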
"""
if isinstance(dataset, Dataset):
return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
else:
return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
| datasets-main | src/datasets/distributed.py |
import importlib.util
import os
import tempfile
from pathlib import PurePath
from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union
import fsspec
import numpy as np
from .utils import logging
if TYPE_CHECKING:
from .arrow_dataset import Dataset # noqa: F401
try:
from elasticsearch import Elasticsearch # noqa: F401
except ImportError:
pass
try:
import faiss # noqa: F401
except ImportError:
pass
_has_elasticsearch = importlib.util.find_spec("elasticsearch") is not None
_has_faiss = importlib.util.find_spec("faiss") is not None
logger = logging.get_logger(__name__)
class MissingIndex(Exception):
pass
class SearchResults(NamedTuple):
scores: List[float]
indices: List[int]
class BatchedSearchResults(NamedTuple):
total_scores: List[List[float]]
total_indices: List[List[int]]
class NearestExamplesResults(NamedTuple):
scores: List[float]
examples: dict
class BatchedNearestExamplesResults(NamedTuple):
total_scores: List[List[float]]
total_examples: List[dict]
class BaseIndex:
"""Base class for indexing"""
def search(self, query, k: int = 10, **kwargs) -> SearchResults:
"""
To implement.
This method has to return the scores and the indices of the retrieved examples given a certain query.
"""
raise NotImplementedError
def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults:
"""Find the nearest examples indices to the query.
Args:
queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.
k (`int`): The number of examples to retrieve per query.
Output:
total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
"""
total_scores, total_indices = [], []
for query in queries:
scores, indices = self.search(query, k)
total_scores.append(scores)
total_indices.append(indices)
return BatchedSearchResults(total_scores, total_indices)
def save(self, file: Union[str, PurePath]):
"""Serialize the index on disk"""
raise NotImplementedError
@classmethod
def load(cls, file: Union[str, PurePath]) -> "BaseIndex":
"""Deserialize the index from disk"""
raise NotImplementedError
class ElasticSearchIndex(BaseIndex):
"""
Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.
An Elasticsearch server needs to be accessible, and a python client is declared with
```
es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
```
for example.
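A typical end-to-end usage goes through [`Dataset.add_elasticsearch_index`] (illustrative sketch;
assumes a local Elasticsearch server and a dataset with a `'text'` column):
```
ds = load_dataset("rotten_tomatoes", split="train")
ds.add_elasticsearch_index("text", host="localhost", port="9200")
scores, retrieved_examples = ds.get_nearest_examples("text", "my query", k=10)
```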
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
if not _has_elasticsearch:
raise ImportError(
"You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1 for example`"
)
if es_client is not None and (host is not None or port is not None):
raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.")
host = host or "localhost"
port = port or 9200
import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features
from elasticsearch import Elasticsearch # noqa: F811
self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}])
self.es_index_name = (
es_index_name
if es_index_name is not None
else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name)
)
self.es_index_config = (
es_index_config
if es_index_config is not None
else {
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
}
)
def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None):
"""
Add documents to the index.
If the documents are inside a certain column, you can specify it using the `column` argument.
"""
index_name = self.es_index_name
index_config = self.es_index_config
self.es_client.indices.create(index=index_name, body=index_config)
number_of_docs = len(documents)
progress = logging.tqdm(unit="docs", total=number_of_docs, disable=not logging.is_progress_bar_enabled())
successes = 0
def passage_generator():
if column is not None:
for i, example in enumerate(documents):
yield {"text": example[column], "_id": i}
else:
for i, example in enumerate(documents):
yield {"text": example, "_id": i}
# populate the ES index created above
import elasticsearch as es
for ok, action in es.helpers.streaming_bulk(
client=self.es_client,
index=index_name,
actions=passage_generator(),
):
progress.update(1)
successes += ok
if successes != len(documents):
logger.warning(
f"Some documents failed to be added to ElasticSearch. Failures: {len(documents)-successes}/{len(documents)}"
)
logger.info(f"Indexed {successes:d} documents")
def search(self, query: str, k=10, **kwargs) -> SearchResults:
"""Find the nearest examples indices to the query.
Args:
query (`str`): The query as a string.
k (`int`): The number of examples to retrieve.
Output:
scores (`List[float]`): The retrieval scores of the retrieved examples.
indices (`List[int]`): The indices of the retrieved examples.
"""
response = self.es_client.search(
index=self.es_index_name,
body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k},
**kwargs,
)
hits = response["hits"]["hits"]
return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits])
def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults:
import concurrent.futures
total_scores, total_indices = [None] * len(queries), [None] * len(queries)
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)}
for future in concurrent.futures.as_completed(future_to_index):
index = future_to_index[future]
results: SearchResults = future.result()
total_scores[index] = results.scores
total_indices[index] = results.indices
return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores)
class FaissIndex(BaseIndex):
"""
Dense index using Faiss. It is used to index vectors.
Faiss is a library for efficient similarity search and clustering of dense vectors.
It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.
You can find more information about Faiss here:
- For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory
- For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU
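Illustrative sketch of direct usage (the index factory string and the random data are placeholders):
```
import numpy as np
index = FaissIndex(string_factory="Flat")
index.add_vectors(np.random.rand(100, 5).astype(np.float32))
scores, indices = index.search(np.random.rand(5).astype(np.float32), k=3)
```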
"""
def __init__(
self,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
):
"""
Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
"""
if string_factory is not None and custom_index is not None:
raise ValueError("Please specify either `string_factory` or `custom_index` but not both.")
if device is not None and custom_index is not None:
raise ValueError(
"Cannot pass both 'custom_index' and 'device'. "
"Pass 'custom_index' already transferred to the target device instead."
)
self.device = device
self.string_factory = string_factory
self.metric_type = metric_type
self.faiss_index = custom_index
if not _has_faiss:
raise ImportError(
"You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. "
"A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. "
"Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available."
)
def add_vectors(
self,
vectors: Union[np.array, "Dataset"],
column: Optional[str] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: Optional[bool] = None,
):
"""
Add vectors to the index.
If the arrays are inside a certain column, you can specify it using the `column` argument.
"""
import faiss # noqa: F811
# Create index
if self.faiss_index is None:
size = len(vectors[0]) if column is None else len(vectors[0][column])
if self.string_factory is not None:
if self.metric_type is None:
index = faiss.index_factory(size, self.string_factory)
else:
index = faiss.index_factory(size, self.string_factory, self.metric_type)
else:
if self.metric_type is None:
index = faiss.IndexFlat(size)
else:
index = faiss.IndexFlat(size, self.metric_type)
self.faiss_index = self._faiss_index_to_device(index, self.device)
logger.info(f"Created faiss index of type {type(self.faiss_index)}")
# Set verbosity level
if faiss_verbose is not None:
self.faiss_index.verbose = faiss_verbose
if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None:
self.faiss_index.index.verbose = faiss_verbose
if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None:
self.faiss_index.quantizer.verbose = faiss_verbose
if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None:
self.faiss_index.clustering_index.verbose = faiss_verbose
# Train
if train_size is not None:
train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]
logger.info(f"Training the index with the first {len(train_vecs)} vectors")
self.faiss_index.train(train_vecs)
else:
logger.info("Ignored the training step of the faiss index as `train_size` is None.")
# Add vectors
logger.info(f"Adding {len(vectors)} vectors to the faiss index")
for i in logging.tqdm(range(0, len(vectors), batch_size), disable=not logging.is_progress_bar_enabled()):
vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]
self.faiss_index.add(vecs)
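    # Minimal sketch of an index that needs the training step above before vectors can be
    # added; the array shape and the "IVF4,Flat" factory string are illustrative assumptions.
    #
    #     vectors = np.random.rand(1000, 64).astype(np.float32)
    #     index = FaissIndex(string_factory="IVF4,Flat")
    #     index.add_vectors(vectors, train_size=1000, batch_size=256)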
@staticmethod
def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index":
"""
Sends a faiss index to a device.
        A device can be a positive integer (GPU id), a negative integer (all GPUs),
        a list of positive integers (select GPUs to use), or `None` for CPU.
"""
# If device is not specified, then it runs on CPU.
if device is None:
return index
import faiss # noqa: F811
# If the device id is given as an integer
if isinstance(device, int):
# Positive integers are directly mapped to GPU ids
if device > -1:
faiss_res = faiss.StandardGpuResources()
index = faiss.index_cpu_to_gpu(faiss_res, device, index)
# And negative integers mean using all GPUs
else:
index = faiss.index_cpu_to_all_gpus(index)
# Device ids given as a list mean mapping to those devices specified.
elif isinstance(device, (list, tuple)):
index = faiss.index_cpu_to_gpus_list(index, gpus=list(device))
else:
raise TypeError(
f"The argument type: {type(device)} is not expected. "
+ "Please pass in either nothing, a positive int, a negative int, or a list of positive ints."
)
return index
def search(self, query: np.array, k=10, **kwargs) -> SearchResults:
"""Find the nearest examples indices to the query.
Args:
query (`np.array`): The query as a numpy array.
k (`int`): The number of examples to retrieve.
        Output:
            scores (`List[float]`): The retrieval scores of the retrieved examples.
            indices (`List[int]`): The indices of the retrieved examples.
"""
if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1):
raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)")
queries = query.reshape(1, -1)
if not queries.flags.c_contiguous:
queries = np.asarray(queries, order="C")
scores, indices = self.faiss_index.search(queries, k, **kwargs)
return SearchResults(scores[0], indices[0].astype(int))
def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults:
"""Find the nearest examples indices to the queries.
Args:
queries (`np.array`): The queries as a numpy array.
k (`int`): The number of examples to retrieve.
        Output:
            total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
"""
if len(queries.shape) != 2:
raise ValueError("Shape of query must be 2D")
if not queries.flags.c_contiguous:
queries = np.asarray(queries, order="C")
scores, indices = self.faiss_index.search(queries, k, **kwargs)
return BatchedSearchResults(scores, indices.astype(int))
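    # Shape conventions, continuing the sketch above (64-dimensional vectors are an assumption):
    # `search` takes a single 1D query and returns k scores/indices, while `search_batch`
    # takes a 2D array of n queries and returns n rows of k results each.
    #
    #     scores, ids = index.search(np.random.rand(64).astype(np.float32), k=5)
    #     all_scores, all_ids = index.search_batch(np.random.rand(8, 64).astype(np.float32), k=5)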
def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
"""Serialize the FaissIndex on disk"""
import faiss # noqa: F811
if self.device is not None and isinstance(self.device, (int, list, tuple)):
index = faiss.index_gpu_to_cpu(self.faiss_index)
else:
index = self.faiss_index
with fsspec.open(str(file), "wb", **(storage_options or {})) as f:
faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write)))
@classmethod
def load(
cls,
file: Union[str, PurePath],
device: Optional[Union[int, List[int]]] = None,
storage_options: Optional[Dict] = None,
) -> "FaissIndex":
"""Deserialize the FaissIndex from disk"""
import faiss # noqa: F811
        # An instance of FaissIndex is essentially just a wrapper around a faiss index.
faiss_index = cls(device=device)
with fsspec.open(str(file), "rb", **(storage_options or {})) as f:
index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read)))
faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device)
return faiss_index
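# Hedged sketch of a save/load round trip; "index.faiss" is a placeholder local path
# (remote URIs such as "s3://bucket/index.faiss" go through fsspec via `storage_options`).
#
#     index.save("index.faiss")
#     restored = FaissIndex.load("index.faiss")
#     assert restored.faiss_index.ntotal == index.faiss_index.ntotal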
class IndexableMixin:
"""Add indexing features to `datasets.Dataset`"""
def __init__(self):
self._indexes: Dict[str, BaseIndex] = {}
def __len__(self):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError
def is_index_initialized(self, index_name: str) -> bool:
return index_name in self._indexes
def _check_index_is_initialized(self, index_name: str):
if not self.is_index_initialized(index_name):
raise MissingIndex(
f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first."
)
def list_indexes(self) -> List[str]:
"""List the `colindex_nameumns`/identifiers of all the attached indexes."""
return list(self._indexes)
def get_index(self, index_name: str) -> BaseIndex:
"""List the `index_name`/identifiers of all the attached indexes.
Args:
index_name (`str`): Index name.
Returns:
[`BaseIndex`]
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name]
def add_faiss_index(
self,
column: str,
index_name: Optional[str] = None,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of the specified column.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
column (`str`): The column of the vectors to add to the index.
index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
By default it corresponds to `column`.
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
            string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`.
metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
"""
index_name = index_name if index_name is not None else column
faiss_index = FaissIndex(
device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
)
faiss_index.add_vectors(
self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
)
self._indexes[index_name] = faiss_index
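    # Hedged usage sketch on a `datasets.Dataset` (which mixes in `IndexableMixin`); the
    # "embeddings" column name and `query_vector` (a float32 numpy array) are assumptions.
    #
    #     ds.add_faiss_index(column="embeddings")
    #     scores, examples = ds.get_nearest_examples("embeddings", query_vector, k=10)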
def add_faiss_index_from_external_arrays(
self,
external_arrays: np.array,
index_name: str,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of `external_arrays`.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
            string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`.
metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
"""
faiss_index = FaissIndex(
device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
)
faiss_index.add_vectors(
external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
)
self._indexes[index_name] = faiss_index
def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
"""Save a FaissIndex on disk.
Args:
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.11.0"/>
"""
index = self.get_index(index_name)
if not isinstance(index, FaissIndex):
raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'")
index.save(file, storage_options=storage_options)
logger.info(f"Saved FaissIndex {index_name} at {file}")
def load_faiss_index(
self,
index_name: str,
file: Union[str, PurePath],
device: Optional[Union[int, List[int]]] = None,
storage_options: Optional[Dict] = None,
):
"""Load a FaissIndex from disk.
If you want to do additional configurations, you can have access to the faiss index object by doing
`.get_index(index_name).faiss_index` to make it fit your needs.
Args:
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to
call `.get_nearest` or `.search`.
file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.11.0"/>
"""
index = FaissIndex.load(file, device=device, storage_options=storage_options)
if index.faiss_index.ntotal != len(self):
raise ValueError(
f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples."
)
self._indexes[index_name] = index
logger.info(f"Loaded FaissIndex {index_name} from {file}")
def add_elasticsearch_index(
self,
column: str,
index_name: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
"""Add a text index using ElasticSearch for fast retrieval.
Args:
column (`str`): The column of the documents to add to the index.
index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.
By default it corresponds to `column`.
host (Optional `str`, defaults to localhost):
host of where ElasticSearch is running
            port (Optional `int`, defaults to 9200):
port of where ElasticSearch is running
es_client (Optional `elasticsearch.Elasticsearch`):
The elasticsearch client used to create the index if host and port are None.
es_index_name (Optional `str`): The elasticsearch index name used to create the index.
es_index_config (Optional `dict`):
The configuration of the elasticsearch index.
Default config is:
Config::
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
"""
index_name = index_name if index_name is not None else column
es_index = ElasticSearchIndex(
host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
)
es_index.add_documents(self, column=column)
self._indexes[index_name] = es_index
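    # Hedged sketch of text retrieval over a dataset column; "context" is a hypothetical
    # column name and a running Elasticsearch server is assumed.
    #
    #     ds.add_elasticsearch_index(column="context", host="localhost", port=9200)
    #     scores, examples = ds.get_nearest_examples("context", "machine learning", k=5)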
def load_elasticsearch_index(
self,
index_name: str,
es_index_name: str,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_config: Optional[dict] = None,
):
"""Load an existing text index using ElasticSearch for fast retrieval.
Args:
index_name (`str`):
The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`.
es_index_name (`str`):
The name of elasticsearch index to load.
host (`str`, *optional*, defaults to `localhost`):
Host of where ElasticSearch is running.
            port (`int`, *optional*, defaults to `9200`):
Port of where ElasticSearch is running.
es_client (`elasticsearch.Elasticsearch`, *optional*):
The elasticsearch client used to create the index if host and port are `None`.
es_index_config (`dict`, *optional*):
The configuration of the elasticsearch index.
Default config is:
```
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
```
"""
self._indexes[index_name] = ElasticSearchIndex(
host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
)
def drop_index(self, index_name: str):
"""Drop the index with the specified column.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
"""
del self._indexes[index_name]
def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults:
"""Find the nearest examples indices in the dataset to the query.
Args:
index_name (`str`):
The name/identifier of the index.
query (`Union[str, np.ndarray]`):
The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve.
Returns:
`(scores, indices)`:
A tuple of `(scores, indices)` where:
                - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
                - **indices** (`List[int]`): the indices of the retrieved examples
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name].search(query, k, **kwargs)
def search_batch(
self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
) -> BatchedSearchResults:
"""Find the nearest examples indices in the dataset to the query.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
queries (`Union[List[str], np.ndarray]`):
The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve per query.
Returns:
`(total_scores, total_indices)`:
A tuple of `(total_scores, total_indices)` where:
                - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
- **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name].search_batch(queries, k, **kwargs)
def get_nearest_examples(
self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs
) -> NearestExamplesResults:
"""Find the nearest examples in the dataset to the query.
Args:
index_name (`str`):
The index_name/identifier of the index.
query (`Union[str, np.ndarray]`):
The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve.
Returns:
`(scores, examples)`:
A tuple of `(scores, examples)` where:
- **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
- **examples** (`dict`): the retrieved examples
"""
self._check_index_is_initialized(index_name)
scores, indices = self.search(index_name, query, k, **kwargs)
top_indices = [i for i in indices if i >= 0]
return NearestExamplesResults(scores[: len(top_indices)], self[top_indices])
def get_nearest_examples_batch(
self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
) -> BatchedNearestExamplesResults:
"""Find the nearest examples in the dataset to the query.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
queries (`Union[List[str], np.ndarray]`):
The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve per query.
Returns:
`(total_scores, total_examples)`:
A tuple of `(total_scores, total_examples)` where:
                - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
- **total_examples** (`List[dict]`): the retrieved examples per query
"""
self._check_index_is_initialized(index_name)
total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs)
total_scores = [
scores_i[: len([i for i in indices_i if i >= 0])]
for scores_i, indices_i in zip(total_scores, total_indices)
]
total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices]
return BatchedNearestExamplesResults(total_scores, total_samples)
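    # Hedged sketch of batched retrieval; note how negative indices (missing neighbors)
    # are filtered out above before looking up the examples. `q1` and `q2` stand for
    # hypothetical query vectors with the same dimension as the indexed column.
    #
    #     total_scores, total_examples = ds.get_nearest_examples_batch(
    #         "embeddings", np.stack([q1, q2]), k=10
    #     )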
| datasets-main | src/datasets/search.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Splits related API."""
import abc
import collections
import copy
import dataclasses
import re
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
from .arrow_reader import FileInstructions, make_file_instructions
from .naming import _split_re
from .utils.py_utils import NonMutableDict, asdict
@dataclass
class SplitInfo:
name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True})
num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
shard_lengths: Optional[List[int]] = None
# Deprecated
# For backward compatibility, this field needs to always be included in files like
# dataset_infos.json and dataset_info.json files
# To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info)
dataset_name: Optional[str] = dataclasses.field(
default=None, metadata={"include_in_asdict_even_if_is_default": True}
)
@property
def file_instructions(self):
"""Returns the list of dict(filename, take, skip)."""
# `self.dataset_name` is assigned in `SplitDict.add()`.
instructions = make_file_instructions(
name=self.dataset_name,
split_infos=[self],
instruction=str(self.name),
)
return instructions.file_instructions
@dataclass
class SubSplitInfo:
"""Wrapper around a sub split info.
    This class exposes info on the subsplit:
```
ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True)
info.splits['train[75%:]'].num_examples
```
"""
instructions: FileInstructions
@property
def num_examples(self):
"""Returns the number of example in the subsplit."""
return self.instructions.num_examples
@property
def file_instructions(self):
"""Returns the list of dict(filename, take, skip)."""
return self.instructions.file_instructions
class SplitBase(metaclass=abc.ABCMeta):
# pylint: disable=line-too-long
"""Abstract base class for Split compositionality.
See the
[guide on splits](../loading#slice-splits)
for more information.
There are three parts to the composition:
1) The splits are composed (defined, merged, split,...) together before
calling the `.as_dataset()` function. This is done with the `__add__`,
       `__getitem__`, which return a tree of `SplitBase` (whose leaves
are the `NamedSplit` objects)
```
split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50])
```
2) The `SplitBase` is forwarded to the `.as_dataset()` function
to be resolved into actual read instruction. This is done by the
`.get_read_instruction()` method which takes the real dataset splits
       (name, number of shards,...) and parses the tree to return a
`SplitReadInstruction()` object
```
read_instruction = split.get_read_instruction(self.info.splits)
```
3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
to define which files to read and how to skip examples within file.
"""
# pylint: enable=line-too-long
@abc.abstractmethod
def get_read_instruction(self, split_dict):
"""Parse the descriptor tree and compile all read instructions together.
Args:
split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset
Returns:
split_read_instruction: `SplitReadInstruction`
"""
raise NotImplementedError("Abstract method")
def __eq__(self, other):
"""Equality: datasets.Split.TRAIN == 'train'."""
if isinstance(other, (NamedSplit, str)):
return False
raise NotImplementedError("Equality is not implemented between merged/sub splits.")
def __ne__(self, other):
"""InEquality: datasets.Split.TRAIN != 'test'."""
return not self.__eq__(other)
def __add__(self, other):
"""Merging: datasets.Split.TRAIN + datasets.Split.TEST."""
return _SplitMerged(self, other)
def subsplit(self, arg=None, k=None, percent=None, weighted=None): # pylint: disable=redefined-outer-name
"""Divides this split into subsplits.
There are 3 ways to define subsplits, which correspond to the 3
arguments `k` (get `k` even subsplits), `percent` (get a slice of the
dataset with `datasets.percent`), and `weighted` (get subsplits with proportions
specified by `weighted`).
Example::
```
# 50% train, 50% test
train, test = split.subsplit(k=2)
# 50% train, 25% test, 25% validation
train, test, validation = split.subsplit(weighted=[2, 1, 1])
# Extract last 20%
subsplit = split.subsplit(datasets.percent[-20:])
```
        Warning: k and weighted will be converted into percent which means that
values below the percent will be rounded up or down. The final split may be
bigger to deal with remainders. For instance:
```
train, test, valid = split.subsplit(k=3) # 33%, 33%, 34%
s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18%
```
Args:
arg: If no kwargs are given, `arg` will be interpreted as one of
`k`, `percent`, or `weighted` depending on the type.
For example:
```
split.subsplit(10) # Equivalent to split.subsplit(k=10)
split.subsplit(datasets.percent[:-20]) # percent=datasets.percent[:-20]
split.subsplit([1, 1, 2]) # weighted=[1, 1, 2]
```
k: `int` If set, subdivide the split into `k` equal parts.
percent: `datasets.percent slice`, return a single subsplit corresponding to
a slice of the original split. For example:
`split.subsplit(datasets.percent[-20:]) # Last 20% of the dataset`.
weighted: `list[int]`, return a list of subsplits whose proportions match
the normalized sum of the list. For example:
`split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`.
Returns:
A subsplit or list of subsplits extracted from this split object.
"""
# Note that the percent kwargs redefine the outer name datasets.percent. This
# is done for consistency (.subsplit(percent=datasets.percent[:40]))
if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
raise ValueError("Only one argument of subsplit should be set.")
# Auto deduce k
if isinstance(arg, int):
k = arg
elif isinstance(arg, slice):
percent = arg
elif isinstance(arg, list):
weighted = arg
if not (k or percent or weighted):
raise ValueError(
f"Invalid split argument {arg}. Only list, slice and int supported. "
"One of k, weighted or percent should be set to a non empty value."
)
def assert_slices_coverage(slices):
            # Ensure that the expanded slices cover all percents.
assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100))
if k:
if not 0 < k <= 100:
raise ValueError(f"Subsplit k should be between 0 and 100, got {k}")
shift = 100 // k
slices = [slice(i * shift, (i + 1) * shift) for i in range(k)]
# Round up last element to ensure all elements are taken
slices[-1] = slice(slices[-1].start, 100)
# Internal check to ensure full coverage
assert_slices_coverage(slices)
return tuple(_SubSplit(self, s) for s in slices)
elif percent:
return _SubSplit(self, percent)
elif weighted:
# Normalize the weighted sum
total = sum(weighted)
weighted = [100 * x // total for x in weighted]
# Create the slice for each of the elements
start = 0
stop = 0
slices = []
for v in weighted:
stop += v
slices.append(slice(start, stop))
start = stop
# Round up last element to ensure all elements are taken
slices[-1] = slice(slices[-1].start, 100)
# Internal check to ensure full coverage
assert_slices_coverage(slices)
return tuple(_SubSplit(self, s) for s in slices)
else:
# Should not be possible
raise ValueError("Could not determine the split")
# 2 requirements:
# 1. datasets.percent be sliceable
# 2. datasets.percent be documented
#
# Instances are not documented, so we want datasets.percent to be a class, but to
# have it be sliceable, we need this metaclass.
class PercentSliceMeta(type):
def __getitem__(cls, slice_value):
if not isinstance(slice_value, slice):
raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}")
return slice_value
class PercentSlice(metaclass=PercentSliceMeta):
# pylint: disable=line-too-long
"""Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`.
See the
[guide on splits](../loading#slice-splits)
for more information.
"""
# pylint: enable=line-too-long
pass
percent = PercentSlice # pylint: disable=invalid-name
class _SplitMerged(SplitBase):
"""Represent two split descriptors merged together."""
def __init__(self, split1, split2):
self._split1 = split1
self._split2 = split2
def get_read_instruction(self, split_dict):
read_instruction1 = self._split1.get_read_instruction(split_dict)
read_instruction2 = self._split2.get_read_instruction(split_dict)
return read_instruction1 + read_instruction2
def __repr__(self):
return f"({repr(self._split1)} + {repr(self._split2)})"
class _SubSplit(SplitBase):
"""Represent a sub split of a split descriptor."""
def __init__(self, split, slice_value):
self._split = split
self._slice_value = slice_value
def get_read_instruction(self, split_dict):
return self._split.get_read_instruction(split_dict)[self._slice_value]
def __repr__(self):
slice_str = "{start}:{stop}"
if self._slice_value.step is not None:
slice_str += ":{step}"
slice_str = slice_str.format(
start="" if self._slice_value.start is None else self._slice_value.start,
stop="" if self._slice_value.stop is None else self._slice_value.stop,
step=self._slice_value.step,
)
return f"{repr(self._split)}(datasets.percent[{slice_str}])"
class NamedSplit(SplitBase):
"""Descriptor corresponding to a named split (train, test, ...).
Example:
Each descriptor can be composed with other using addition or slice:
```py
split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST
```
The resulting split will correspond to 25% of the train split merged with
100% of the test split.
A split cannot be added twice, so the following will fail:
```py
split = (
datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
datasets.Split.TRAIN.subsplit(datasets.percent[75:])
) # Error
split = datasets.Split.TEST + datasets.Split.ALL # Error
```
The slices can be applied only one time. So the following are valid:
```py
split = (
datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
datasets.Split.TEST.subsplit(datasets.percent[:50])
)
split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50])
```
But this is not valid:
```py
train = datasets.Split.TRAIN
test = datasets.Split.TEST
split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25])
split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50])
```
"""
def __init__(self, name):
self._name = name
split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")]
for split_name in split_names_from_instruction:
if not re.match(_split_re, split_name):
raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.")
def __str__(self):
return self._name
def __repr__(self):
return f"NamedSplit({self._name!r})"
def __eq__(self, other):
"""Equality: datasets.Split.TRAIN == 'train'."""
if isinstance(other, NamedSplit):
return self._name == other._name # pylint: disable=protected-access
elif isinstance(other, SplitBase):
return False
elif isinstance(other, str): # Other should be string
return self._name == other
else:
raise ValueError(f"Equality not supported between split {self} and {other}")
def __lt__(self, other):
return self._name < other._name # pylint: disable=protected-access
def __hash__(self):
return hash(self._name)
def get_read_instruction(self, split_dict):
return SplitReadInstruction(split_dict[self._name])
class NamedSplitAll(NamedSplit):
"""Split corresponding to the union of all defined dataset splits."""
def __init__(self):
super().__init__("all")
def __repr__(self):
return "NamedSplitAll()"
def get_read_instruction(self, split_dict):
# Merge all dataset split together
read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
return sum(read_instructions, SplitReadInstruction())
class Split:
# pylint: disable=line-too-long
"""`Enum` for dataset splits.
Datasets are typically split into different subsets to be used at various
stages of training and evaluation.
- `TRAIN`: the training data.
- `VALIDATION`: the validation data. If present, this is typically used as
evaluation data while iterating on a model (e.g. changing hyperparameters,
model architecture, etc.).
- `TEST`: the testing data. This is the data to report metrics on. Typically
you do not want to use this during model iteration as you may overfit to it.
- `ALL`: the union of all defined dataset splits.
All splits, including compositions inherit from `datasets.SplitBase`.
See the [guide](../load_hub#splits) on splits for more information.
Example:
```py
>>> datasets.SplitGenerator(
... name=datasets.Split.TRAIN,
    ...     gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
... ),
... datasets.SplitGenerator(
... name=datasets.Split.VALIDATION,
    ...     gen_kwargs={"split_key": "validation", "files": dl_manager.download_and_extract(url)},
... ),
... datasets.SplitGenerator(
... name=datasets.Split.TEST,
    ...     gen_kwargs={"split_key": "test", "files": dl_manager.download_and_extract(url)},
... )
```
"""
# pylint: enable=line-too-long
TRAIN = NamedSplit("train")
TEST = NamedSplit("test")
VALIDATION = NamedSplit("validation")
ALL = NamedSplitAll()
def __new__(cls, name):
"""Create a custom split with datasets.Split('custom_name')."""
return NamedSplitAll() if name == "all" else NamedSplit(name)
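# Hedged usage sketch (not part of the module): split values are typically passed to
# `datasets.load_dataset`; the dataset name below is a placeholder.
#
#     ds = datasets.load_dataset("some_dataset", split=datasets.Split.TRAIN)
#     custom = datasets.Split("my_custom_split")  # dispatches to NamedSplit via __new__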
# Similar to SplitInfo, but contain an additional slice info
SlicedSplitInfo = collections.namedtuple(
"SlicedSplitInfo",
[
"split_info",
"slice_value",
],
) # noqa: E231
class SplitReadInstruction:
"""Object containing the reading instruction for the dataset.
Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
but the resolution happens instantaneously, instead of keeping track of the
    tree, so that all instructions are compiled and flattened into a single
    SplitReadInstruction object containing the list of files and slices to use.
Once resolved, the instructions can be accessed with:
```
read_instructions.get_list_sliced_split_info() # List of splits to use
```
"""
def __init__(self, split_info=None):
self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with " "itself.")
if split_info:
self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))
def add(self, sliced_split):
"""Add a SlicedSplitInfo the read instructions."""
# TODO(epot): Check that the number of examples per shard % 100 == 0
# Otherwise the slices value may be unbalanced and not exactly reflect the
# requested slice.
self._splits[sliced_split.split_info.name] = sliced_split
def __add__(self, other):
"""Merging split together."""
# Will raise error if a split has already be added (NonMutableDict)
# TODO(epot): If a split is already added but there is no overlap between
# the slices, should merge the slices (ex: [:10] + [80:])
split_instruction = SplitReadInstruction()
split_instruction._splits.update(self._splits) # pylint: disable=protected-access
split_instruction._splits.update(other._splits) # pylint: disable=protected-access
return split_instruction
def __getitem__(self, slice_value):
"""Sub-splits."""
# Will raise an error if a split has already been sliced
split_instruction = SplitReadInstruction()
for v in self._splits.values():
if v.slice_value is not None:
raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced")
v = v._asdict()
v["slice_value"] = slice_value
split_instruction.add(SlicedSplitInfo(**v))
return split_instruction
def get_list_sliced_split_info(self):
return list(self._splits.values())
class SplitDict(dict):
"""Split info object."""
def __init__(self, *args, dataset_name=None, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_name = dataset_name
def __getitem__(self, key: Union[SplitBase, str]):
# 1st case: The key exists: `info.splits['train']`
if str(key) in self:
return super().__getitem__(str(key))
# 2nd case: Uses instructions: `info.splits['train[50%]']`
else:
instructions = make_file_instructions(
name=self.dataset_name,
split_infos=self.values(),
instruction=key,
)
return SubSplitInfo(instructions)
def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
if key != value.name:
raise ValueError(f"Cannot add elem. (key mismatch: '{key}' != '{value.name}')")
if key in self:
raise ValueError(f"Split {key} already present")
super().__setitem__(key, value)
def add(self, split_info: SplitInfo):
"""Add the split info."""
if split_info.name in self:
raise ValueError(f"Split {split_info.name} already present")
split_info.dataset_name = self.dataset_name
super().__setitem__(split_info.name, split_info)
@property
def total_num_examples(self):
"""Return the total number of examples."""
return sum(s.num_examples for s in self.values())
@classmethod
def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None):
"""Returns a new SplitDict initialized from a Dict or List of `split_infos`."""
if isinstance(split_infos, dict):
split_infos = list(split_infos.values())
if dataset_name is None:
dataset_name = split_infos[0].get("dataset_name") if split_infos else None
split_dict = cls(dataset_name=dataset_name)
for split_info in split_infos:
if isinstance(split_info, dict):
split_info = SplitInfo(**split_info)
split_dict.add(split_info)
return split_dict
def to_split_dict(self):
"""Returns a list of SplitInfo protos that we have."""
out = []
for split_name, split_info in self.items():
split_info = copy.deepcopy(split_info)
split_info.name = split_name
out.append(split_info)
return out
def copy(self):
return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name)
def _to_yaml_list(self) -> list:
out = [asdict(s) for s in self.to_split_dict()]
# we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc
for split_info_dict in out:
split_info_dict.pop("shard_lengths", None)
# we don't need the dataset_name attribute that is deprecated
for split_info_dict in out:
split_info_dict.pop("dataset_name", None)
return out
@classmethod
def _from_yaml_list(cls, yaml_data: list) -> "SplitDict":
return cls.from_split_dict(yaml_data)
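# Hedged sketch of building a SplitDict and exporting it as the YAML-friendly list used
# in dataset metadata (the names and sizes below are made up):
#
#     splits = SplitDict(dataset_name="demo")
#     splits.add(SplitInfo(name="train", num_bytes=1024, num_examples=100))
#     splits.add(SplitInfo(name="test", num_bytes=256, num_examples=25))
#     yaml_list = splits._to_yaml_list()        # shard_lengths/dataset_name are dropped
#     roundtrip = SplitDict._from_yaml_list(yaml_list)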
@dataclass
class SplitGenerator:
"""Defines the split information for the generator.
This should be used as returned value of
`GeneratorBasedBuilder._split_generators`.
See `GeneratorBasedBuilder._split_generators` for more info and example
of usage.
Args:
name (`str`):
Name of the `Split` for which the generator will
create the examples.
**gen_kwargs (additional keyword arguments):
Keyword arguments to forward to the `DatasetBuilder._generate_examples` method
of the builder.
Example:
```py
>>> datasets.SplitGenerator(
... name=datasets.Split.TRAIN,
... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
... )
```
"""
name: str
gen_kwargs: Dict = dataclasses.field(default_factory=dict)
split_info: SplitInfo = dataclasses.field(init=False)
def __post_init__(self):
self.name = str(self.name) # Make sure we convert NamedSplits in strings
NamedSplit(self.name) # check that it's a valid split name
self.split_info = SplitInfo(name=self.name)
| datasets-main | src/datasets/splits.py |
import copy
import os
import tempfile
import warnings
from functools import partial
from itertools import groupby
from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
from . import config
from .utils.logging import get_logger
if TYPE_CHECKING:
from .features.features import Features, FeatureType
logger = get_logger(__name__)
def inject_arrow_table_documentation(arrow_table_method):
def wrapper(fn):
fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "")
fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table")
if hasattr(arrow_table_method, "__annotations__"):
fn.__annotations__ = arrow_table_method.__annotations__
return fn
return wrapper
def _in_memory_arrow_table_from_file(filename: str) -> pa.Table:
in_memory_stream = pa.input_stream(filename)
opened_stream = pa.ipc.open_stream(in_memory_stream)
pa_table = opened_stream.read_all()
return pa_table
def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
stream = pa.BufferReader(buffer)
opened_stream = pa.ipc.open_stream(stream)
table = opened_stream.read_all()
return table
def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader:
memory_mapped_stream = pa.memory_map(filename)
return pa.ipc.open_stream(memory_mapped_stream)
def read_schema_from_file(filename: str) -> pa.Schema:
"""
    Infer the arrow table schema from a file without loading the whole file into memory.
    Useful especially for very big files.
"""
with pa.memory_map(filename) as memory_mapped_stream:
schema = pa.ipc.open_stream(memory_mapped_stream).schema
return schema
def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:
opened_stream = _memory_mapped_record_batch_reader_from_file(filename)
pa_table = opened_stream.read_all()
return pa_table
def _write_table_to_file(table: pa.Table, filename: str) -> int:
with open(filename, "wb") as sink:
writer = pa.RecordBatchStreamWriter(sink=sink, schema=table.schema)
batches: List[pa.RecordBatch] = table.to_batches()
for batch in batches:
writer.write_batch(batch)
writer.close()
return sum(batch.nbytes for batch in batches)
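# Hedged sketch of the write/read helpers above; "data.arrow" is a placeholder local path.
#
#     pa_table = pa.table({"col": [1, 2, 3]})
#     _write_table_to_file(pa_table, "data.arrow")
#     schema = read_schema_from_file("data.arrow")            # schema only, no full load
#     reloaded = _memory_mapped_arrow_table_from_file("data.arrow")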
def _deepcopy(x, memo: dict):
"""deepcopy a regular class instance"""
cls = x.__class__
result = cls.__new__(cls)
memo[id(x)] = result
for k, v in x.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def _interpolation_search(arr: List[int], x: int) -> int:
"""
Return the position i of a sorted array so that arr[i] <= x < arr[i+1]
Args:
arr (`List[int]`): non-empty sorted list of integers
x (`int`): query
Returns:
`int`: the position i so that arr[i] <= x < arr[i+1]
Raises:
`IndexError`: if the array is empty or if the query is outside the array values
"""
i, j = 0, len(arr) - 1
while i < j and arr[i] <= x < arr[j]:
k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
if arr[k] <= x < arr[k + 1]:
return k
elif arr[k] < x:
i, j = k + 1, j
else:
i, j = i, k
raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
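# Worked example for _interpolation_search (a sketch): with cumulative batch offsets
# [0, 5, 12, 20], the row at absolute position 7 lives in batch 1 since 5 <= 7 < 12.
#
#     _interpolation_search([0, 5, 12, 20], 7)   # -> 1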
class IndexedTableMixin:
def __init__(self, table: pa.Table):
self._schema: pa.Schema = table.schema
self._batches: List[pa.RecordBatch] = [
recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0
]
self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)
def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
"""
        Create a pa.Table by gathering the records at the specified indices. Should be faster
        than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute
        the binary searches in parallel (highly optimized C).
"""
if not len(indices):
raise ValueError("Indices must be non-empty")
batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1
return pa.Table.from_batches(
[
self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1)
for batch_idx, i in zip(batch_indices, indices)
],
schema=self._schema,
)
def fast_slice(self, offset=0, length=None) -> pa.Table:
"""
Slice the Table using interpolation search.
The behavior is the same as `pyarrow.Table.slice` but it's significantly faster.
Interpolation search is used to find the start and end indexes of the batches we want to keep.
The batches to keep are then concatenated to form the sliced Table.
"""
if offset < 0:
raise IndexError("Offset must be non-negative")
elif offset >= self._offsets[-1] or (length is not None and length <= 0):
return pa.Table.from_batches([], schema=self._schema)
i = _interpolation_search(self._offsets, offset)
if length is None or length + offset >= self._offsets[-1]:
batches = self._batches[i:]
batches[0] = batches[0].slice(offset - self._offsets[i])
else:
j = _interpolation_search(self._offsets, offset + length - 1)
batches = self._batches[i : j + 1]
batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
batches[0] = batches[0].slice(offset - self._offsets[i])
return pa.Table.from_batches(batches, schema=self._schema)
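# Hedged sketch of the indexed helpers via a concrete subclass (`InMemoryTable`, defined
# below, wraps a pyarrow table and inherits IndexedTableMixin through Table):
#
#     t = InMemoryTable(pa.table({"col": list(range(10))}))
#     t.fast_slice(2, 3)            # rows 2, 3 and 4
#     t.fast_gather([0, 5, 9])      # rows 0, 5 and 9 in a new pa.Table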
class Table(IndexedTableMixin):
"""
Wraps a pyarrow Table by using composition.
This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`.
It implements all the basic attributes/methods of the pyarrow Table class except
the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column,
append_column, remove_column, set_column, rename_columns` and `drop`.
The implementation of these methods differs for the subclasses.
"""
def __init__(self, table: pa.Table):
super().__init__(table)
self.table = table
def __deepcopy__(self, memo: dict):
# arrow tables are immutable, so there's no need to copy self.table
# moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason
# by adding it to the memo, self.table won't be copied
memo[id(self.table)] = self.table
# same for the recordbatches used by the index
memo[id(self._batches)] = list(self._batches)
return _deepcopy(self, memo)
def __getstate__(self):
# We can't pickle objects that are bigger than 4GiB, or it causes OverflowError
# So we write the table on disk instead
if self.table.nbytes >= config.MAX_TABLE_NBYTES_FOR_PICKLING:
table = self.table
with tempfile.NamedTemporaryFile("wb", delete=False, suffix=".arrow") as tmp_file:
filename = tmp_file.name
logger.debug(
f"Attempting to pickle a table bigger than 4GiB. Writing it on the disk instead at {filename}"
)
_write_table_to_file(table=table, filename=filename)
return {"path": filename}
else:
return {"table": self.table}
def __setstate__(self, state):
if "path" in state:
filename = state["path"]
logger.debug(f"Unpickling a big table from the disk at {filename}")
table = _in_memory_arrow_table_from_file(filename)
logger.debug(f"Removing temporary table file at {filename}")
os.remove(filename)
else:
table = state["table"]
Table.__init__(self, table)
def validate(self, *args, **kwargs):
"""
Perform validation checks. An exception is raised if validation fails.
By default only cheap validation checks are run. Pass `full=True`
for thorough validation checks (potentially `O(n)`).
Args:
full (`bool`, defaults to `False`):
If `True`, run expensive checks, otherwise cheap checks only.
Raises:
`pa.lib.ArrowInvalid`: if validation fails
"""
return self.table.validate(*args, **kwargs)
def equals(self, *args, **kwargs):
"""
Check if contents of two tables are equal.
Args:
other ([`~datasets.table.Table`]):
Table to compare against.
            check_metadata (`bool`, defaults to `False`):
Whether schema metadata equality should be checked as well.
Returns:
`bool`
"""
args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
        kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs.items()}
return self.table.equals(*args, **kwargs)
def to_batches(self, *args, **kwargs):
"""
Convert Table to list of (contiguous) `RecordBatch` objects.
Args:
max_chunksize (`int`, defaults to `None`):
Maximum size for `RecordBatch` chunks. Individual chunks may be
smaller depending on the chunk layout of individual columns.
Returns:
`List[pyarrow.RecordBatch]`
"""
return self.table.to_batches(*args, **kwargs)
def to_pydict(self, *args, **kwargs):
"""
Convert the Table to a `dict` or `OrderedDict`.
Returns:
`dict`
"""
return self.table.to_pydict(*args, **kwargs)
def to_pylist(self, *args, **kwargs):
"""
Convert the Table to a list
Returns:
`list`
"""
try:
return self.table.to_pylist(*args, **kwargs)
except AttributeError: # pyarrow <7 does not have to_pylist, so we use to_pydict
pydict = self.table.to_pydict(*args, **kwargs)
return [{k: pydict[k][i] for k in pydict} for i in range(len(self.table))]
def to_pandas(self, *args, **kwargs):
"""
Convert to a pandas-compatible NumPy array or DataFrame, as appropriate.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
Arrow MemoryPool to use for allocations. Uses the default memory
                pool if not passed.
strings_to_categorical (`bool`, defaults to `False`):
Encode string (UTF8) and binary types to `pandas.Categorical`.
categories (`list`, defaults to `empty`):
List of fields that should be returned as `pandas.Categorical`. Only
applies to table-like data structures.
zero_copy_only (`bool`, defaults to `False`):
Raise an `ArrowException` if this function call would require copying
the underlying data.
integer_object_nulls (`bool`, defaults to `False`):
Cast integers with nulls to objects.
date_as_object (`bool`, defaults to `True`):
Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype.
timestamp_as_object (`bool`, defaults to `False`):
Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is
useful if you have timestamps that don't fit in the normal date
range of nanosecond timestamps (1678 CE-2262 CE).
If `False`, all timestamps are converted to `datetime64[ns]` dtype.
use_threads (`bool`, defaults to `True`):
Whether to parallelize the conversion using multiple threads.
deduplicate_objects (`bool`, defaults to `False`):
                Do not create multiple copies of Python objects when converting, to save
on memory use. Conversion will be slower.
ignore_metadata (`bool`, defaults to `False`):
If `True`, do not use the 'pandas' metadata to reconstruct the
DataFrame index, if present.
safe (`bool`, defaults to `True`):
For certain data types, a cast is needed in order to store the
data in a pandas DataFrame or Series (e.g. timestamps are always
stored as nanoseconds in pandas). This option controls whether it
is a safe cast or not.
split_blocks (`bool`, defaults to `False`):
If `True`, generate one internal "block" for each column when
creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this
                can temporarily reduce memory, note that various pandas operations
can trigger "consolidation" which may balloon memory use.
self_destruct (`bool`, defaults to `False`):
EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow
memory while converting the Arrow object to pandas. If you use the
object after calling `to_pandas` with this option it will crash your
program.
types_mapper (`function`, defaults to `None`):
A function mapping a pyarrow DataType to a pandas `ExtensionDtype`.
This can be used to override the default pandas type for conversion
of built-in pyarrow types or in absence of `pandas_metadata` in the
Table schema. The function receives a pyarrow DataType and is
expected to return a pandas `ExtensionDtype` or `None` if the
default conversion should be used for that type. If you have
a dictionary mapping, you can pass `dict.get` as function.
Returns:
`pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object
"""
return self.table.to_pandas(*args, **kwargs)
def to_string(self, *args, **kwargs):
return self.table.to_string(*args, **kwargs)
def to_reader(self, max_chunksize: Optional[int] = None):
"""
Convert the Table to a RecordBatchReader.
Note that this method is zero-copy, it merely exposes the same data under a different API.
Args:
            max_chunksize (`int`, defaults to `None`):
Maximum size for RecordBatch chunks. Individual chunks may be smaller depending
on the chunk layout of individual columns.
Returns:
`pyarrow.RecordBatchReader`
"""
return self.table.to_reader(max_chunksize=max_chunksize)
def field(self, *args, **kwargs):
"""
Select a schema field by its column name or numeric index.
Args:
i (`Union[int, str]`):
The index or name of the field to retrieve.
Returns:
`pyarrow.Field`
"""
return self.table.field(*args, **kwargs)
def column(self, *args, **kwargs):
"""
Select a column by its column name, or numeric index.
Args:
i (`Union[int, str]`):
The index or name of the column to retrieve.
Returns:
`pyarrow.ChunkedArray`
"""
return self.table.column(*args, **kwargs)
def itercolumns(self, *args, **kwargs):
"""
Iterator over all columns in their numerical order.
Yields:
`pyarrow.ChunkedArray`
"""
return self.table.itercolumns(*args, **kwargs)
@property
def schema(self):
"""
Schema of the table and its columns.
Returns:
`pyarrow.Schema`
"""
return self.table.schema
@property
def columns(self):
"""
List of all columns in numerical order.
Returns:
`List[pa.ChunkedArray]`
"""
return self.table.columns
@property
def num_columns(self):
"""
Number of columns in this table.
Returns:
int
"""
return self.table.num_columns
@property
def num_rows(self):
"""
Number of rows in this table.
Due to the definition of a table, all columns have the same number of
rows.
Returns:
int
"""
return self.table.num_rows
@property
def shape(self):
"""
Dimensions of the table: (#rows, #columns).
Returns:
`(int, int)`: Number of rows and number of columns.
"""
return self.table.shape
@property
def nbytes(self):
"""
Total number of bytes consumed by the elements of the table.
"""
return self.table.nbytes
@property
def column_names(self):
"""
Names of the table's columns.
"""
return self.table.column_names
def __eq__(self, other):
return self.equals(other)
def __getitem__(self, i):
return self.table[i]
def __len__(self):
return len(self.table)
def __repr__(self):
return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__)
def __str__(self):
return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__)
def slice(self, *args, **kwargs):
"""
Compute zero-copy slice of this Table.
Args:
offset (`int`, defaults to `0`):
Offset from start of table to slice.
length (`int`, defaults to `None`):
Length of slice (default is until end of table starting from
offset).
Returns:
`datasets.table.Table`
"""
raise NotImplementedError()
def filter(self, *args, **kwargs):
"""
Select records from a Table. See `pyarrow.compute.filter` for full usage.
"""
raise NotImplementedError()
def flatten(self, *args, **kwargs):
"""
Flatten this Table. Each column with a struct type is flattened
into one column per struct field. Other columns are left unchanged.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
raise NotImplementedError()
def combine_chunks(self, *args, **kwargs):
"""
Make a new table by combining the chunks this table has.
All the underlying chunks in the `ChunkedArray` of each column are
concatenated into zero or one chunk.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
raise NotImplementedError()
def cast(self, *args, **kwargs):
"""
Cast table values to another schema.
Args:
target_schema (`Schema`):
Schema to cast to, the names and order of fields must match.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`
"""
raise NotImplementedError()
def replace_schema_metadata(self, *args, **kwargs):
"""
EXPERIMENTAL: Create shallow copy of table by replacing schema
        key-value metadata with the indicated new metadata (which may be `None`,
        which deletes any existing metadata).
Args:
metadata (`dict`, defaults to `None`):
Returns:
`datasets.table.Table`: shallow_copy
"""
raise NotImplementedError()
def add_column(self, *args, **kwargs):
"""
Add column to Table at position.
A new table is returned with the column added, the original table
object is left unchanged.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
raise NotImplementedError()
def append_column(self, *args, **kwargs):
"""
Append column at end of columns.
Args:
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
raise NotImplementedError()
def remove_column(self, *args, **kwargs):
"""
Create new Table with the indicated column removed.
Args:
i (`int`):
Index of column to remove.
Returns:
`datasets.table.Table`: New table without the column.
"""
raise NotImplementedError()
def set_column(self, *args, **kwargs):
"""
Replace column in Table at position.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column set.
"""
raise NotImplementedError()
def rename_columns(self, *args, **kwargs):
"""
Create new table with columns renamed to provided names.
"""
raise NotImplementedError()
def drop(self, *args, **kwargs):
"""
Drop one or more columns and return a new table.
Args:
columns (`List[str]`):
List of field names referencing existing columns.
Raises:
`KeyError`: if any of the passed column names do not exist.
Returns:
`datasets.table.Table`: New table without the columns.
"""
raise NotImplementedError()
def select(self, *args, **kwargs):
"""
Select columns of the table.
Returns a new table with the specified columns, and metadata preserved.
Args:
columns (:obj:`Union[List[str], List[int]]`):
The column names or integer indices to select.
Returns:
`datasets.table.Table`: New table with the specified columns, and metadata preserved.
"""
raise NotImplementedError()
class TableBlock(Table):
"""
`TableBlock` is the allowed class inside a `ConcatenationTable`.
Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`.
This is because we don't want a `ConcatenationTable` made out of other `ConcatenationTable` objects.
"""
pass
class InMemoryTable(TableBlock):
"""
The table is said to be in-memory when it is loaded into the user's RAM.
Pickling it copies all the data into memory.
Its implementation is simple and uses the underlying pyarrow Table methods directly.
This is different from the `MemoryMappedTable`, for which pickling doesn't copy all the
data into memory. For a `MemoryMappedTable`, unpickling instead reloads the table from disk.
`InMemoryTable` must be used when data fit in memory, while `MemoryMappedTable` is reserved for
data bigger than memory or for when you want the memory footprint of your application to
stay low.
"""
@classmethod
def from_file(cls, filename: str):
table = _in_memory_arrow_table_from_file(filename)
return cls(table)
@classmethod
def from_buffer(cls, buffer: pa.Buffer):
table = _in_memory_arrow_table_from_buffer(buffer)
return cls(table)
@classmethod
def from_pandas(cls, *args, **kwargs):
"""
Convert pandas.DataFrame to an Arrow Table.
The column types in the resulting Arrow Table are inferred from the
dtypes of the pandas.Series in the DataFrame. In the case of non-object
Series, the NumPy dtype is translated to its Arrow equivalent. In the
case of `object`, we need to guess the datatype by looking at the
Python objects in this Series.
Be aware that Series of the `object` dtype don't carry enough
information to always lead to a meaningful Arrow type. In the case that
we cannot infer a type, e.g. because the DataFrame is of length 0 or
the Series only contains `None/nan` objects, the type is set to
null. This behavior can be avoided by constructing an explicit schema
and passing it to this function.
Args:
df (`pandas.DataFrame`):
schema (`pyarrow.Schema`, *optional*):
The expected schema of the Arrow Table. This can be used to
indicate the type of columns if we cannot infer it automatically.
If passed, the output will have exactly this schema. Columns
specified in the schema that are not found in the DataFrame columns
or its index will raise an error. Additional columns or index
levels in the DataFrame which are not specified in the schema will
be ignored.
preserve_index (`bool`, *optional*):
Whether to store the index as an additional column in the resulting
`Table`. The default of None will store the index as a column,
except for RangeIndex which is stored as metadata only. Use
`preserve_index=True` to force it to be stored as a column.
nthreads (`int`, defaults to `None` (may use up to system CPU count threads)):
If greater than 1, convert columns to Arrow in parallel using
the indicated number of threads.
columns (`List[str]`, *optional*):
List of columns to be converted. If `None`, use all columns.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`:
Examples:
```python
>>> import pandas as pd
>>> import pyarrow as pa
>>> df = pd.DataFrame({
... 'int': [1, 2],
... 'str': ['a', 'b']
... })
>>> pa.Table.from_pandas(df)
<pyarrow.lib.Table object at 0x7f05d1fb1b40>
```
"""
return cls(pa.Table.from_pandas(*args, **kwargs))
@classmethod
def from_arrays(cls, *args, **kwargs):
"""
Construct a Table from Arrow arrays.
Args:
arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`):
Equal-length arrays that should form the table.
names (`List[str]`, *optional*):
Names for the table columns. If not passed, schema must be passed.
schema (`Schema`, defaults to `None`):
Schema for the created table. If not passed, names must be passed.
metadata (`Union[dict, Mapping]`, defaults to `None`):
Optional metadata for the schema (if inferred).
Returns:
`datasets.table.Table`
"""
return cls(pa.Table.from_arrays(*args, **kwargs))
@classmethod
def from_pydict(cls, *args, **kwargs):
"""
Construct a Table from Arrow arrays or columns.
Args:
mapping (`Union[dict, Mapping]`):
A mapping of strings to Arrays or Python lists.
schema (`Schema`, defaults to `None`):
If not passed, will be inferred from the Mapping values
metadata (`Union[dict, Mapping]`, defaults to `None`):
Optional metadata for the schema (if inferred).
Returns:
`datasets.table.Table`
"""
return cls(pa.Table.from_pydict(*args, **kwargs))
@classmethod
def from_pylist(cls, mapping, *args, **kwargs):
"""
Construct a Table from list of rows / dictionaries.
Args:
mapping (`List[dict]`):
A list of dictionaries, each mapping column names to row values.
schema (`Schema`, defaults to `None`):
If not passed, will be inferred from the Mapping values
metadata (`Union[dict, Mapping]`, defaults to `None`):
Optional metadata for the schema (if inferred).
Returns:
`datasets.table.Table`
"""
return cls(pa.Table.from_pylist(mapping, *args, **kwargs))
@classmethod
def from_batches(cls, *args, **kwargs):
"""
Construct a Table from a sequence or iterator of Arrow `RecordBatches`.
Args:
batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`):
Sequence of `RecordBatch` to be converted, all schemas must be equal.
schema (`Schema`, defaults to `None`):
If not passed, will be inferred from the first `RecordBatch`.
Returns:
`datasets.table.Table`:
"""
return cls(pa.Table.from_batches(*args, **kwargs))
def slice(self, offset=0, length=None):
"""
Compute zero-copy slice of this Table.
Args:
offset (`int`, defaults to `0`):
Offset from start of table to slice.
length (`int`, defaults to `None`):
Length of slice (default is until end of table starting from
offset).
Returns:
`datasets.table.Table`
"""
# Use fast slicing here
return InMemoryTable(self.fast_slice(offset=offset, length=length))
def filter(self, *args, **kwargs):
"""
Select records from a Table. See `pyarrow.compute.filter` for full usage.
"""
return InMemoryTable(self.table.filter(*args, **kwargs))
def flatten(self, *args, **kwargs):
"""
Flatten this Table. Each column with a struct type is flattened
into one column per struct field. Other columns are left unchanged.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
return InMemoryTable(table_flatten(self.table, *args, **kwargs))
def combine_chunks(self, *args, **kwargs):
"""
Make a new table by combining the chunks this table has.
All the underlying chunks in the `ChunkedArray` of each column are
concatenated into zero or one chunk.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
return InMemoryTable(self.table.combine_chunks(*args, **kwargs))
def cast(self, *args, **kwargs):
"""
Cast table values to another schema.
Args:
target_schema (`Schema`):
Schema to cast to, the names and order of fields must match.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`
"""
return InMemoryTable(table_cast(self.table, *args, **kwargs))
def replace_schema_metadata(self, *args, **kwargs):
"""
EXPERIMENTAL: Create shallow copy of table by replacing schema
key-value metadata with the indicated new metadata (which may be `None`,
which deletes any existing metadata).
Args:
metadata (`dict`, defaults to `None`):
Returns:
`datasets.table.Table`: shallow_copy
"""
return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs))
def add_column(self, *args, **kwargs):
"""
Add column to Table at position.
A new table is returned with the column added, the original table
object is left unchanged.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
return InMemoryTable(self.table.add_column(*args, **kwargs))
def append_column(self, *args, **kwargs):
"""
Append column at end of columns.
Args:
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column added.
"""
return InMemoryTable(self.table.append_column(*args, **kwargs))
def remove_column(self, *args, **kwargs):
"""
Create new Table with the indicated column removed.
Args:
i (`int`):
Index of column to remove.
Returns:
`datasets.table.Table`:
New table without the column.
"""
return InMemoryTable(self.table.remove_column(*args, **kwargs))
def set_column(self, *args, **kwargs):
"""
Replace column in Table at position.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column set.
"""
return InMemoryTable(self.table.set_column(*args, **kwargs))
def rename_columns(self, *args, **kwargs):
"""
Create new table with columns renamed to provided names.
"""
return InMemoryTable(self.table.rename_columns(*args, **kwargs))
def drop(self, *args, **kwargs):
"""
Drop one or more columns and return a new table.
Args:
columns (`List[str]`):
List of field names referencing existing columns.
Raises:
`KeyError`: if any of the passed column names do not exist.
Returns:
`datasets.table.Table`:
New table without the columns.
"""
return InMemoryTable(self.table.drop(*args, **kwargs))
def select(self, *args, **kwargs):
"""
Select columns of the table.
Returns a new table with the specified columns, and metadata preserved.
Args:
columns (:obj:`Union[List[str], List[int]]`):
The column names or integer indices to select.
Returns:
:class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
"""
return InMemoryTable(self.table.select(*args, **kwargs))
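# Illustrative sketch (not part of the library API): a minimal demonstration of how an
# `InMemoryTable` wraps a plain `pyarrow.Table` and, as described in its docstring, copies all of
# its data when pickled. The helper name `_example_in_memory_table` is hypothetical.
def _example_in_memory_table():
    import pickle

    # Wrap a small pyarrow table.
    table = InMemoryTable(pa.table({"text": ["a", "b"], "label": [0, 1]}))
    # Pickling serializes the full data, so the restored copy is independent of the original.
    restored = pickle.loads(pickle.dumps(table))
    assert restored.column_names == ["text", "label"]
    assert restored.num_rows == 2
    return restored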
# The MemoryMappedTable needs replays to properly reload tables from the disk
Replay = Tuple[str, tuple, dict]
class MemoryMappedTable(TableBlock):
"""
The table is said to be memory mapped when it doesn't use the user's RAM but loads the data
from the disk instead.
Pickling it doesn't copy the data into memory.
Instead, only the path to the memory mapped arrow file is pickled, as well as the list
of transforms to "replay" when reloading the table from the disk.
Its implementation requires storing a history of all the transforms that were applied
to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table
from the disk.
This is different from the `InMemoryTable`, for which pickling does copy all the
data into memory.
`InMemoryTable` must be used when data fit in memory, while `MemoryMappedTable` is reserved for
data bigger than memory or for when you want the memory footprint of your application to
stay low.
"""
def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None):
super().__init__(table)
self.path = os.path.abspath(path)
self.replays: List[Replay] = replays if replays is not None else []
@classmethod
def from_file(cls, filename: str, replays=None):
table = _memory_mapped_arrow_table_from_file(filename)
table = cls._apply_replays(table, replays)
return cls(table, filename, replays)
def __getstate__(self):
return {"path": self.path, "replays": self.replays}
def __setstate__(self, state):
path = state["path"]
replays = state["replays"]
table = _memory_mapped_arrow_table_from_file(path)
table = self._apply_replays(table, replays)
MemoryMappedTable.__init__(self, table, path=path, replays=replays)
@staticmethod
def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table:
if replays is not None:
for name, args, kwargs in replays:
if name == "cast":
table = table_cast(table, *args, **kwargs)
elif name == "flatten":
table = table_flatten(table, *args, **kwargs)
else:
table = getattr(table, name)(*args, **kwargs)
return table
def _append_replay(self, replay: Replay) -> List[Replay]:
replays = copy.deepcopy(self.replays)
replays.append(replay)
return replays
def slice(self, offset=0, length=None):
"""
Compute zero-copy slice of this Table.
Args:
offset (`int`, defaults to `0`):
Offset from start of table to slice.
length (`int`, defaults to `None`):
Length of slice (default is until end of table starting from
offset).
Returns:
`datasets.table.Table`
"""
replay = ("slice", (offset, length), {})
replays = self._append_replay(replay)
# Use fast slicing here
return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays)
def filter(self, *args, **kwargs):
"""
Select records from a Table. See `pyarrow.compute.filter` for full usage.
"""
replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays)
def flatten(self, *args, **kwargs):
"""
Flatten this Table. Each column with a struct type is flattened
into one column per struct field. Other columns are left unchanged.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays)
def combine_chunks(self, *args, **kwargs):
"""
Make a new table by combining the chunks this table has.
All the underlying chunks in the ChunkedArray of each column are
concatenated into zero or one chunk.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays)
def cast(self, *args, **kwargs):
"""
Cast table values to another schema.
Args:
target_schema (`Schema`):
Schema to cast to, the names and order of fields must match.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`
"""
replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays)
def replace_schema_metadata(self, *args, **kwargs):
"""
EXPERIMENTAL: Create shallow copy of table by replacing schema
key-value metadata with the indicated new metadata (which may be `None`,
which deletes any existing metadata).
Args:
metadata (`dict`, defaults to `None`):
Returns:
`datasets.table.Table`: shallow_copy
"""
replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays)
def add_column(self, *args, **kwargs):
"""
Add column to Table at position.
A new table is returned with the column added, the original table
object is left unchanged.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays)
def append_column(self, *args, **kwargs):
"""
Append column at end of columns.
Args:
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column added.
"""
replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays)
def remove_column(self, *args, **kwargs):
"""
Create new Table with the indicated column removed.
Args:
i (`int`):
Index of column to remove.
Returns:
`datasets.table.Table`:
New table without the column.
"""
replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays)
def set_column(self, *args, **kwargs):
"""
Replace column in Table at position.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column set.
"""
replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays)
def rename_columns(self, *args, **kwargs):
"""
Create new table with columns renamed to provided names.
"""
replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays)
def drop(self, *args, **kwargs):
"""
Drop one or more columns and return a new table.
Args:
columns (`List[str]`):
List of field names referencing existing columns.
Raises:
`KeyError`: if any of the passed column names do not exist.
Returns:
`datasets.table.Table`:
New table without the columns.
"""
replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays)
def select(self, *args, **kwargs):
"""
Select columns of the table.
Returns a new table with the specified columns, and metadata preserved.
Args:
columns (:obj:`Union[List[str], List[int]]`):
The column names or integer indices to select.
Returns:
:class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
"""
replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays)
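# Illustrative sketch (not part of the library API): how a `MemoryMappedTable` records transforms
# as "replays", so that pickling only ships the arrow file path plus the operations to re-apply.
# `_example_memory_mapped_replays` and its `arrow_path` argument are hypothetical; `arrow_path` is
# assumed to point to a valid Arrow file on disk.
def _example_memory_mapped_replays(arrow_path: str):
    import pickle

    table = MemoryMappedTable.from_file(arrow_path)
    # Transforms return new MemoryMappedTable objects and append to the replay history
    # instead of copying data into memory.
    sliced = table.slice(0, 10)
    assert sliced.replays == [("slice", (0, 10), {})]
    # Unpickling re-opens the memory mapped file and re-applies the recorded transforms.
    restored = pickle.loads(pickle.dumps(sliced))
    assert restored.path == sliced.path and restored.replays == sliced.replays
    return restored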
# A ConcatenationTable is the concatenation of several tables.
# The ``blocks`` attribute stores a list of lists of blocks.
# The first axis concatenates the tables along axis 0 (it appends rows),
# while the second axis concatenates tables along axis 1 (it appends columns).
TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]])
class ConcatenationTable(Table):
"""
The table comes from the concatenation of several tables called blocks.
It enables concatenation on both axis 0 (append rows) and axis 1 (append columns).
The underlying tables are called "blocks" and can be either `InMemoryTable`
or `MemoryMappedTable` objects.
This makes it possible to combine tables that come from memory with tables that are memory mapped.
When a `ConcatenationTable` is pickled, each block is pickled:
- the `InMemoryTable` objects are pickled by copying all the data in memory.
- the `MemoryMappedTable` objects are pickled without copying the data into memory.
Instead, only the path to the memory mapped arrow file is pickled, as well as the list
of transforms to "replay" when reloading the table from the disk.
Its implementation requires storing each block separately.
The `blocks` attribute stores a list of lists of blocks.
The first axis concatenates the tables along axis 0 (it appends rows),
while the second axis concatenates tables along axis 1 (it appends columns).
If some columns are missing when concatenating on axis 0, they are filled with null values.
This is done using `pyarrow.concat_tables(tables, promote=True)`.
You can access the fully combined table via the `ConcatenationTable.table` attribute,
and the blocks via the `ConcatenationTable.blocks` attribute.
"""
def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]):
super().__init__(table)
self.blocks = blocks
# Check that all the blocks have the right type.
# Only InMemoryTable and MemoryMappedTable are allowed.
for subtables in blocks:
for subtable in subtables:
if not isinstance(subtable, TableBlock):
raise TypeError(
"The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects"
f", but got {subtable}."
)
def __getstate__(self):
return {"blocks": self.blocks}
def __setstate__(self, state):
blocks = state["blocks"]
table = self._concat_blocks_horizontally_and_vertically(blocks)
ConcatenationTable.__init__(self, table, blocks=blocks)
@staticmethod
def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table:
pa_tables = [table.table if hasattr(table, "table") else table for table in blocks]
if axis == 0:
# we set promote=True to fill missing columns with null values
return pa.concat_tables(pa_tables, promote=True)
elif axis == 1:
for i, table in enumerate(pa_tables):
if i == 0:
pa_table = table
else:
for name, col in zip(table.column_names, table.columns):
pa_table = pa_table.append_column(name, col)
return pa_table
else:
raise ValueError("'axis' must be either 0 or 1")
@classmethod
def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table:
pa_tables_to_concat_vertically = []
for i, tables in enumerate(blocks):
if not tables:
continue
pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1)
pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated)
return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0)
@classmethod
def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer:
if axis is not None:
merged_blocks = []
for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)):
if is_in_memory:
block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]
merged_blocks += list(block_group)
else: # both
merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks]
if all(len(row_block) == 1 for row_block in merged_blocks):
merged_blocks = cls._merge_blocks(
[block for row_block in merged_blocks for block in row_block], axis=0
)
return merged_blocks
@classmethod
def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer:
if isinstance(blocks, TableBlock):
return blocks
elif isinstance(blocks[0], TableBlock):
return cls._merge_blocks(blocks, axis=0)
else:
return cls._merge_blocks(blocks)
@classmethod
def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable":
blocks = cls._consolidate_blocks(blocks)
if isinstance(blocks, TableBlock):
table = blocks
return cls(table.table, [[table]])
elif isinstance(blocks[0], TableBlock):
table = cls._concat_blocks(blocks, axis=0)
blocks = [[t] for t in blocks]
return cls(table, blocks)
else:
table = cls._concat_blocks_horizontally_and_vertically(blocks)
return cls(table, blocks)
@classmethod
def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable":
"""Create `ConcatenationTable` from list of tables.
Args:
tables (list of `Table` or list of `pyarrow.Table`):
List of tables.
axis (`{0, 1}`, defaults to `0`, meaning over rows):
Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
(horizontally).
<Added version="1.6.0"/>
"""
def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]:
if isinstance(table, pa.Table):
return [[InMemoryTable(table)]]
elif isinstance(table, ConcatenationTable):
return copy.deepcopy(table.blocks)
else:
return [[table]]
def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]:
sliced = [table.slice(0, length) for table in row_block]
remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block]
return sliced, remainder
def _split_both_like(
result: List[List[TableBlock]], blocks: List[List[TableBlock]]
) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]:
"""
Make sure each row_block contains the same num_rows so they can be concatenated on axis=1.
To do so, we modify both block sets to have the same row_block boundaries.
For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows,
we modify both to have 4 row_blocks of sizes 2, 1, 1 and 2:
[ x x x | x x x ]
+ [ y y | y y | y y ]
-----------------------------
= [ x x | x | x | x x ]
[ y y | y | y | y y ]
"""
result, blocks = list(result), list(blocks)
new_result, new_blocks = [], []
while result and blocks:
# we slice the longest row block to save two row blocks of same length
# and we replace the long row block by its remainder if necessary
if len(result[0][0]) > len(blocks[0][0]):
new_blocks.append(blocks[0])
sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0]))
new_result.append(sliced)
elif len(result[0][0]) < len(blocks[0][0]):
new_result.append(result[0])
sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0]))
new_blocks.append(sliced)
else:
new_result.append(result.pop(0))
new_blocks.append(blocks.pop(0))
if result or blocks:
raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows")
return new_result, new_blocks
def _extend_blocks(
result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0
) -> List[List[TableBlock]]:
if axis == 0:
result.extend(blocks)
elif axis == 1:
# We make sure each row_block has the same num_rows
result, blocks = _split_both_like(result, blocks)
for i, row_block in enumerate(blocks):
result[i].extend(row_block)
return result
blocks = to_blocks(tables[0])
for table in tables[1:]:
table_blocks = to_blocks(table)
blocks = _extend_blocks(blocks, table_blocks, axis=axis)
return cls.from_blocks(blocks)
@property
def _slices(self):
offset = 0
for tables in self.blocks:
length = len(tables[0])
yield (offset, length)
offset += length
def slice(self, offset=0, length=None):
"""
Compute zero-copy slice of this Table.
Args:
offset (`int`, defaults to `0`):
Offset from start of table to slice.
length (`int`, defaults to `None`):
Length of slice (default is until end of table starting from
offset).
Returns:
`datasets.table.Table`
"""
table = self.table.slice(offset, length=length)
length = length if length is not None else self.num_rows - offset
blocks = []
for tables in self.blocks:
n_rows = len(tables[0])
if length == 0:
break
elif n_rows <= offset:
offset = offset - n_rows
elif n_rows <= offset + length:
blocks.append([t.slice(offset) for t in tables])
length, offset = length + offset - n_rows, 0
else:
blocks.append([t.slice(offset, length) for t in tables])
length, offset = 0, 0
return ConcatenationTable(table, blocks)
def filter(self, mask, *args, **kwargs):
"""
Select records from a Table. See `pyarrow.compute.filter` for full usage.
"""
table = self.table.filter(mask, *args, **kwargs)
blocks = []
for (offset, length), tables in zip(self._slices, self.blocks):
submask = mask.slice(offset, length)
blocks.append([t.filter(submask, *args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def flatten(self, *args, **kwargs):
"""
Flatten this Table. Each column with a struct type is flattened
into one column per struct field. Other columns are left unchanged.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
table = table_flatten(self.table, *args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.flatten(*args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def combine_chunks(self, *args, **kwargs):
"""
Make a new table by combining the chunks this table has.
All the underlying chunks in the `ChunkedArray` of each column are
concatenated into zero or one chunk.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
table = self.table.combine_chunks(*args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.combine_chunks(*args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def cast(self, target_schema, *args, **kwargs):
"""
Cast table values to another schema.
Args:
target_schema (`Schema`):
Schema to cast to, the names and order of fields must match.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`
"""
from .features import Features
table = table_cast(self.table, target_schema, *args, **kwargs)
target_features = Features.from_arrow_schema(target_schema)
blocks = []
for subtables in self.blocks:
new_tables = []
fields = list(target_schema)
for subtable in subtables:
subfields = []
for name in subtable.column_names:
subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields})
subschema = subfeatures.arrow_schema
new_tables.append(subtable.cast(subschema, *args, **kwargs))
blocks.append(new_tables)
return ConcatenationTable(table, blocks)
def replace_schema_metadata(self, *args, **kwargs):
"""
EXPERIMENTAL: Create shallow copy of table by replacing schema
key-value metadata with the indicated new metadata (which may be `None`,
which deletes any existing metadata).
Args:
metadata (`dict`, defaults to `None`):
Returns:
`datasets.table.Table`: shallow_copy
"""
table = self.table.replace_schema_metadata(*args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def add_column(self, *args, **kwargs):
"""
Add column to Table at position.
A new table is returned with the column added, the original table
object is left unchanged.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
raise NotImplementedError()
def append_column(self, *args, **kwargs):
"""
Append column at end of columns.
Args:
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column added.
"""
raise NotImplementedError()
def remove_column(self, i, *args, **kwargs):
"""
Create new Table with the indicated column removed.
Args:
i (`int`):
Index of column to remove.
Returns:
`datasets.table.Table`:
New table without the column.
"""
table = self.table.remove_column(i, *args, **kwargs)
name = self.table.column_names[i]
blocks = []
for tables in self.blocks:
blocks.append(
[
t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
for t in tables
]
)
return ConcatenationTable(table, blocks)
def set_column(self, *args, **kwargs):
"""
Replace column in Table at position.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column set.
"""
raise NotImplementedError()
def rename_columns(self, names, *args, **kwargs):
"""
Create new table with columns renamed to provided names.
"""
table = self.table.rename_columns(names, *args, **kwargs)
names = dict(zip(self.table.column_names, names))
blocks = []
for tables in self.blocks:
blocks.append(
[t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]
)
return ConcatenationTable(table, blocks)
def drop(self, columns, *args, **kwargs):
"""
Drop one or more columns and return a new table.
Args:
columns (`List[str]`):
List of field names referencing existing columns.
Raises:
`KeyError`: if any of the passed column names do not exist.
Returns:
`datasets.table.Table`:
New table without the columns.
"""
table = self.table.drop(columns, *args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def select(self, columns, *args, **kwargs):
"""
Select columns of the table.
Returns a new table with the specified columns, and metadata preserved.
Args:
columns (:obj:`Union[List[str], List[int]]`):
The column names or integer indices to select.
Returns:
:class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
"""
table = self.table.select(columns, *args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
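# Illustrative sketch (not part of the library API): concatenating two in-memory tables on axis=0
# with `ConcatenationTable.from_tables` appends rows; adjacent in-memory blocks may be merged by
# the consolidation step. `_example_concatenation_table` is a hypothetical helper name.
def _example_concatenation_table():
    t1 = InMemoryTable(pa.table({"a": [1, 2]}))
    t2 = InMemoryTable(pa.table({"a": [3, 4, 5]}))
    combined = ConcatenationTable.from_tables([t1, t2], axis=0)
    assert isinstance(combined, ConcatenationTable)
    assert combined.num_rows == 5
    # The combined pyarrow table and the underlying blocks are both accessible.
    assert combined.table.num_rows == sum(len(block[0]) for block in combined.blocks)
    return combined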
def concat_tables(tables: List[Table], axis: int = 0) -> Table:
"""
Concatenate tables.
Args:
tables (list of `Table`):
List of tables to be concatenated.
axis (`{0, 1}`, defaults to `0`, meaning over rows):
Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
(horizontally).
<Added version="1.6.0"/>
Returns:
`datasets.table.Table`:
If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`.
Otherwise if there's only one table, it is returned as is.
"""
tables = list(tables)
if len(tables) == 1:
return tables[0]
return ConcatenationTable.from_tables(tables, axis=axis)
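# Illustrative sketch (not part of the library API): `concat_tables` returns the single input
# unchanged and only builds a `ConcatenationTable` when more than one table is given.
# `_example_concat_tables` is a hypothetical helper name.
def _example_concat_tables():
    t1 = InMemoryTable(pa.table({"a": [1], "b": ["x"]}))
    t2 = InMemoryTable(pa.table({"a": [2], "b": ["y"]}))
    single = concat_tables([t1])  # only one table: returned as-is
    stacked = concat_tables([t1, t2])  # rows of t2 appended below the rows of t1
    assert single is t1
    assert stacked.column_names == ["a", "b"] and stacked.num_rows == 2
    return stacked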
def list_table_cache_files(table: Table) -> List[str]:
"""
Get the cache files that are loaded by the table.
Cache files are used when parts of the table come from the disk via memory mapping.
Returns:
`List[str]`:
A list of paths to the cache files loaded by the table.
"""
if isinstance(table, ConcatenationTable):
cache_files = []
for subtables in table.blocks:
for subtable in subtables:
cache_files += list_table_cache_files(subtable)
return cache_files
elif isinstance(table, MemoryMappedTable):
return [table.path]
else:
return []
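# Illustrative sketch (not part of the library API): only memory mapped blocks contribute cache
# files, so a purely in-memory table reports an empty list. `_example_cache_files` and its
# `arrow_path` argument are hypothetical; `arrow_path` is assumed to be a valid Arrow file.
def _example_cache_files(arrow_path: str):
    in_memory = InMemoryTable(pa.table({"a": [1, 2, 3]}))
    on_disk = MemoryMappedTable.from_file(arrow_path)
    # In-memory data has no backing file, while a memory mapped table reports its arrow file path.
    assert list_table_cache_files(in_memory) == []
    assert list_table_cache_files(on_disk) == [on_disk.path]
    return on_disk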
def _wrap_for_chunked_arrays(func):
"""Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly"""
def wrapper(array, *args, **kwargs):
if isinstance(array, pa.ChunkedArray):
return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
else:
return func(array, *args, **kwargs)
return wrapper
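# Illustrative sketch (not part of the library API): the decorator above lets a function written
# for a plain `pa.Array` transparently handle `pa.ChunkedArray` inputs, chunk by chunk.
# `_example_chunked_wrapper` is a hypothetical helper name.
def _example_chunked_wrapper():
    @_wrap_for_chunked_arrays
    def double(array):
        return pc.multiply(array, 2)

    chunked = pa.chunked_array([[1, 2], [3]])
    doubled = double(chunked)  # applied per chunk; the result is a ChunkedArray again
    assert isinstance(doubled, pa.ChunkedArray)
    assert doubled.to_pylist() == [2, 4, 6]
    return doubled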
def _is_extension_type(pa_type: pa.DataType) -> bool:
"""
Check (recursively) if a pyarrow type is an extension type.
"""
if isinstance(pa_type, pa.StructType):
return any(_is_extension_type(field.type) for field in pa_type)
elif isinstance(pa_type, (pa.ListType, pa.FixedSizeListType, pa.LargeListType)):
return _is_extension_type(pa_type.value_type)
elif isinstance(pa_type, pa.ExtensionType):
return True
else:
return False
def array_concat(arrays: List[pa.Array]):
"""Improved version of pa.concat_arrays
It supports concatenating pa.ExtensionArray objects by concatenating the underlying storages.
Args:
arrays (List[pa.Array]): List of arrays to contatenate
Raises:
pa.ArrowInvalid: if the arrow array concatenation fails
ValueError: if the list of arrays is empty
TypeError: if the arrays to be concatenated have different types
Returns:
array (:obj:`pyarrow.Array`): the concatenated array
"""
arrays = list(arrays)
array_types = {array.type for array in arrays}
if not array_types:
raise ValueError("Couldn't concatenate empty list of arrays")
if len(array_types) > 1:
array_types = list(array_types)
raise TypeError(f"Couldn't concatenate arrays with different types {array_types[0]} and {array_types[1]}")
array_type = arrays[0].type
arrays = [chunk for arr in arrays for chunk in (arr.chunks if isinstance(arr, pa.ChunkedArray) else (arr,))]
if not _is_extension_type(array_type):
return pa.concat_arrays(arrays)
def _offsets_concat(offsets):
offset = offsets[0]
concatenated_offsets = offset
for offset in offsets[1:]:
offset = pc.subtract(offset, offset[0])
offset = pc.add(offset[1:], concatenated_offsets[-1])
concatenated_offsets = pa.concat_arrays([concatenated_offsets, offset])
return concatenated_offsets
def _concat_arrays(arrays):
array_type = arrays[0].type
if isinstance(array_type, pa.PyExtensionType):
return array_type.wrap_array(_concat_arrays([array.storage for array in arrays]))
elif pa.types.is_struct(array_type):
return pa.StructArray.from_arrays(
[_concat_arrays([array.field(field.name) for array in arrays]) for field in array_type],
fields=list(array_type),
mask=pa.concat_arrays([array.is_null() for array in arrays]),
)
elif pa.types.is_list(array_type):
if any(array.null_count > 0 for array in arrays):
if config.PYARROW_VERSION.major < 10:
warnings.warn(
"None values are converted to empty lists in `pyarrow<10.0.0` when concatenating list arrays with None values. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
)
else:
return pa.ListArray.from_arrays(
_offsets_concat([array.offsets for array in arrays]),
_concat_arrays([array.values for array in arrays]),
mask=pa.concat_arrays([array.is_null() for array in arrays]),
)
return pa.ListArray.from_arrays(
_offsets_concat([array.offsets for array in arrays]),
_concat_arrays([array.values for array in arrays]),
)
elif pa.types.is_fixed_size_list(array_type):
if config.PYARROW_VERSION.major < 14:
# PyArrow bug: https://github.com/apache/arrow/issues/35360
return pa.FixedSizeListArray.from_arrays(
_concat_arrays([array.values[array.offset * array.type.list_size :] for array in arrays]),
array_type.list_size,
)
else:
return pa.FixedSizeListArray.from_arrays(
_concat_arrays([array.values for array in arrays]),
array_type.value_type,
array_type.list_size,
)
return pa.concat_arrays(arrays)
return _concat_arrays(arrays)
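# Illustrative sketch (not part of the library API): unlike `pa.concat_arrays`, `array_concat`
# flattens `pa.ChunkedArray` inputs into their chunks before concatenating, and it also supports
# extension arrays. `_example_array_concat` is a hypothetical helper name.
def _example_array_concat():
    plain = pa.array([1, 2])
    chunked = pa.chunked_array([[3], [4, 5]])
    result = array_concat([plain, chunked])  # Array and ChunkedArray inputs can be mixed
    assert result.to_pylist() == [1, 2, 3, 4, 5]
    return result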
@_wrap_for_chunked_arrays
def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True):
"""Improved version of `pa.Array.cast`
It supports casting `pa.StructArray` objects to re-order the fields.
It also let you control certain aspects of the casting, e.g. whether
to disable numbers (`floats` or `ints`) to strings.
Args:
array (`pa.Array`):
PyArrow array to cast
pa_type (`pa.DataType`):
Target PyArrow type
allow_number_to_str (`bool`, defaults to `True`):
Whether to allow casting numbers to strings.
Defaults to `True`.
Raises:
`pa.ArrowInvalidError`: if the arrow data casting fails
`TypeError`: if the target type is not supported, e.g.
- if a field is missing
- if casting from numbers to strings and `allow_number_to_str` is `False`
Returns:
`pyarrow.Array`: the casted array
"""
_c = partial(array_cast, allow_number_to_str=allow_number_to_str)
if isinstance(array, pa.ExtensionArray):
array = array.storage
if isinstance(pa_type, pa.ExtensionType):
return pa_type.wrap_array(array)
elif array.type == pa_type:
return array
elif pa.types.is_struct(array.type):
if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}):
if array.type.num_fields == 0:
return array
arrays = [_c(array.field(field.name), field.type) for field in pa_type]
return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null())
elif pa.types.is_list(array.type):
if pa.types.is_fixed_size_list(pa_type):
if pa_type.list_size * len(array) == len(array.values):
return pa.FixedSizeListArray.from_arrays(
_c(array.values, pa_type.value_type),
pa_type.list_size,
)
elif pa.types.is_list(pa_type):
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {pa_type}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
)
else:
return pa.ListArray.from_arrays(
array.offsets, _c(array.values, pa_type.value_type), mask=array.is_null()
)
return pa.ListArray.from_arrays(array.offsets, _c(array.values, pa_type.value_type))
elif pa.types.is_fixed_size_list(array.type):
array_values = array.values
if config.PYARROW_VERSION.major < 14:
# PyArrow bug: https://github.com/apache/arrow/issues/35360
array_values = array.values[array.offset * array.type.list_size :]
if pa.types.is_fixed_size_list(pa_type):
return pa.FixedSizeListArray.from_arrays(
_c(array_values, pa_type.value_type),
pa_type.list_size,
)
elif pa.types.is_list(pa_type):
offsets_arr = pa.array(range(len(array) + 1), pa.int32())
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {pa_type}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
)
else:
return pa.ListArray.from_arrays(
offsets_arr, _c(array_values, pa_type.value_type), mask=array.is_null()
)
return pa.ListArray.from_arrays(offsets_arr, _c(array_values, pa_type.value_type))
else:
if (
not allow_number_to_str
and pa.types.is_string(pa_type)
and (pa.types.is_floating(array.type) or pa.types.is_integer(array.type))
):
raise TypeError(
f"Couldn't cast array of type {array.type} to {pa_type} since allow_number_to_str is set to {allow_number_to_str}"
)
if pa.types.is_null(pa_type) and not pa.types.is_null(array.type):
raise TypeError(f"Couldn't cast array of type {array.type} to {pa_type}")
return array.cast(pa_type)
raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{pa_type}")
@_wrap_for_chunked_arrays
def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True):
"""Cast an array to the arrow type that corresponds to the requested feature type.
For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods
they define to enable casting from other arrow types.
Args:
array (`pa.Array`):
The PyArrow array to cast.
feature (`datasets.features.FeatureType`):
The target feature type.
allow_number_to_str (`bool`, defaults to `True`):
Whether to allow casting numbers to strings.
Defaults to `True`.
Raises:
`pa.ArrowInvalidError`: if the arrow data casting fails
`TypeError`: if the target type is not supported, e.g.
- if a field is missing
- if casting from numbers to strings and `allow_number_to_str` is `False`
Returns:
array (`pyarrow.Array`): the casted array
"""
from .features.features import Sequence, get_nested_type
_c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str)
if isinstance(array, pa.ExtensionArray):
array = array.storage
if hasattr(feature, "cast_storage"):
return feature.cast_storage(array)
elif pa.types.is_struct(array.type):
# feature must be a dict or Sequence(subfeatures_dict)
if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
feature = {
name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
}
if isinstance(feature, dict) and {field.name for field in array.type} == set(feature):
if array.type.num_fields == 0:
return array
arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
elif pa.types.is_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
if isinstance(feature, list):
casted_values = _c(array.values, feature[0])
if casted_values.type == array.values.type:
return array
else:
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
)
else:
return pa.ListArray.from_arrays(array.offsets, casted_values, mask=array.is_null())
return pa.ListArray.from_arrays(array.offsets, casted_values)
elif isinstance(feature, Sequence):
if feature.length > -1:
if feature.length * len(array) == len(array.values):
return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length)
else:
casted_values = _c(array.values, feature.feature)
if casted_values.type == array.values.type:
return array
else:
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
)
else:
return pa.ListArray.from_arrays(
array.offsets, _c(array.values, feature.feature), mask=array.is_null()
)
return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature.feature))
elif pa.types.is_fixed_size_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
array_values = array.values
if config.PYARROW_VERSION.major < 14:
# PyArrow bug: https://github.com/apache/arrow/issues/35360
array_values = array.values[array.offset * array.type.list_size :]
if isinstance(feature, list):
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
)
else:
return pa.ListArray.from_arrays(array.offsets, _c(array_values, feature[0]), mask=array.is_null())
return pa.ListArray.from_arrays(array.offsets, _c(array_values, feature[0]))
elif isinstance(feature, Sequence):
if feature.length > -1:
if feature.length * len(array) == len(array_values):
return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
else:
offsets_arr = pa.array(range(len(array) + 1), pa.int32())
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
)
else:
return pa.ListArray.from_arrays(
offsets_arr, _c(array_values, feature.feature), mask=array.is_null()
)
return pa.ListArray.from_arrays(offsets_arr, _c(array_values, feature.feature))
if pa.types.is_null(array.type):
return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str)
elif not isinstance(feature, (Sequence, dict, list, tuple)):
return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
@_wrap_for_chunked_arrays
def embed_array_storage(array: pa.Array, feature: "FeatureType"):
"""Embed data into an arrays's storage.
For custom features like Audio or Image, it takes into account the "embed_storage" methods
they defined to enable embedding external data (e.g. an image file) into an other arrow types.
<Added version="2.4.0"/>
Args:
array (`pa.Array`):
The PyArrow array in which to embed data.
feature (`datasets.features.FeatureType`):
Array features.
Raises:
`TypeError`: if the target type is not supported, e.g.
- if a field is missing
Returns:
array (`pyarrow.Array`): the casted array
"""
from .features import Sequence
_e = embed_array_storage
if isinstance(array, pa.ExtensionArray):
array = array.storage
if hasattr(feature, "embed_storage"):
return feature.embed_storage(array)
elif pa.types.is_struct(array.type):
# feature must be a dict or Sequence(subfeatures_dict)
if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
feature = {
name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
}
if isinstance(feature, dict):
arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()]
return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
elif pa.types.is_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
if isinstance(feature, list):
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
)
else:
return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature[0]), mask=array.is_null())
return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature[0]))
elif isinstance(feature, Sequence):
if feature.length > -1:
if feature.length * len(array) == len(array.values):
return pa.FixedSizeListArray.from_arrays(_e(array.values, feature.feature), feature.length)
else:
casted_values = _e(array.values, feature.feature)
if casted_values.type == array.values.type:
return array
else:
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
)
else:
return pa.ListArray.from_arrays(
array.offsets, _e(array.values, feature.feature), mask=array.is_null()
)
return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature.feature))
elif pa.types.is_fixed_size_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
array_values = array.values
if config.PYARROW_VERSION.major < 14:
# PyArrow bug: https://github.com/apache/arrow/issues/35360
array_values = array.values[array.offset * array.type.list_size :]
if isinstance(feature, list):
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
)
else:
return pa.ListArray.from_arrays(array.offsets, _e(array_values, feature[0]), mask=array.is_null())
return pa.ListArray.from_arrays(array.offsets, _e(array_values, feature[0]))
elif isinstance(feature, Sequence):
if feature.length > -1:
if feature.length * len(array) == len(array_values):
return pa.FixedSizeListArray.from_arrays(_e(array_values, feature.feature), feature.length)
else:
offsets_arr = pa.array(range(len(array) + 1), pa.int32())
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
)
else:
return pa.ListArray.from_arrays(
offsets_arr, _e(array_values, feature.feature), mask=array.is_null()
)
return pa.ListArray.from_arrays(offsets_arr, _e(array_values, feature.feature))
if not isinstance(feature, (Sequence, dict, list, tuple)):
return array
raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}")
def cast_table_to_features(table: pa.Table, features: "Features"):
"""Cast a table to the arrow schema that corresponds to the requested features.
Args:
table (`pyarrow.Table`):
PyArrow table to cast.
features ([`Features`]):
Target features.
Returns:
table (`pyarrow.Table`): the casted table
"""
if sorted(table.column_names) != sorted(features):
raise ValueError(f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match")
arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
def cast_table_to_schema(table: pa.Table, schema: pa.Schema):
"""Cast a table to the arrow schema. Different from `cast_table_to_features`, this method can preserve nullability.
Args:
table (`pa.Table`):
PyArrow table to cast.
schema (`pyarrow.Schema`):
Target schema.
Returns:
`pa.Table`: the casted table
"""
from .features import Features
features = Features.from_arrow_schema(schema)
if sorted(table.column_names) != sorted(features):
raise ValueError(f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match")
arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
return pa.Table.from_arrays(arrays, schema=schema)
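# Illustrative sketch (not part of the library API): `cast_table_to_schema` casts column by column
# through the corresponding feature types. `_example_cast_table_to_schema` is a hypothetical
# helper name.
def _example_cast_table_to_schema():
    table = pa.table({"id": [1, 2]})  # int64 by default
    schema = pa.schema([("id", pa.int32())])
    casted = cast_table_to_schema(table, schema)
    assert casted.schema == schema
    return casted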
def embed_table_storage(table: pa.Table):
"""Embed external data into a table's storage.
<Added version="2.4.0"/>
Args:
table (`pyarrow.Table`):
PyArrow table in which to embed data.
Returns:
table (`pyarrow.Table`): the table with embedded data
"""
from .features.features import Features, require_storage_embed
features = Features.from_arrow_schema(table.schema)
arrays = [
embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name]
for name, feature in features.items()
]
return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
def table_cast(table: pa.Table, schema: pa.Schema):
"""Improved version of `pa.Table.cast`.
It supports casting to feature types stored in the schema metadata.
Args:
table (`pyarrow.Table`):
PyArrow table to cast.
schema (`pyarrow.Schema`):
Target PyArrow schema.
Returns:
table (`pyarrow.Table`): the casted table
"""
if table.schema != schema:
return cast_table_to_schema(table, schema)
elif table.schema.metadata != schema.metadata:
return table.replace_schema_metadata(schema.metadata)
else:
return table
def table_flatten(table: pa.Table):
"""Improved version of `pa.Table.flatten`.
It behaves like `pa.Table.flatten` in the sense that it does a one-step flatten of the columns with a struct type into one column per struct field,
but it also updates the metadata and skips decodable features unless the `decode` attribute of these features is set to `False`.
Args:
table (`pa.Table`):
PyArrow table to flatten.
Returns:
`Table`: the flattened table
"""
from .features import Features
features = Features.from_arrow_schema(table.schema)
if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()):
flat_arrays = []
flat_column_names = []
for field in table.schema:
array = table.column(field.name)
subfeature = features[field.name]
if pa.types.is_struct(field.type) and (
not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature
):
flat_arrays.extend(array.flatten())
flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type])
else:
flat_arrays.append(array)
flat_column_names.append(field.name)
flat_table = pa.Table.from_arrays(
flat_arrays,
names=flat_column_names,
)
else:
flat_table = table.flatten()
# Preserve complex types in the metadata
flat_features = features.flatten(max_depth=2)
flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names})
return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata)
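# A minimal usage sketch (illustrative, not part of the original module): a struct
# column is flattened into one column per field, and the features metadata is
# updated to match.
#
#     >>> import pyarrow as pa
#     >>> t = pa.table({"info": [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]})
#     >>> table_flatten(t).column_names
#     ['info.id', 'info.name']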
def table_visitor(table: pa.Table, function: Callable[[pa.Array], None]):
"""Visit all arrays in a table and apply a function to them.
Args:
table (`pyarrow.Table`):
PyArrow table to visit.
function (`Callable[[pa.Array], None]`):
            Function to apply to each array; it is called with the array and its corresponding feature.
"""
from .features import Features, Sequence
features = Features.from_arrow_schema(table.schema)
def _visit(array, feature):
if isinstance(array, pa.ChunkedArray):
for chunk in array.chunks:
_visit(chunk, feature)
else:
if isinstance(array, pa.ExtensionArray):
array = array.storage
function(array, feature)
if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"):
if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
feature = {
name: Sequence(subfeature, length=feature.length)
for name, subfeature in feature.feature.items()
}
for name, subfeature in feature.items():
_visit(array.field(name), subfeature)
elif pa.types.is_list(array.type):
if isinstance(feature, list):
_visit(array.values, feature[0])
elif isinstance(feature, Sequence):
_visit(array.values, feature.feature)
for name, feature in features.items():
_visit(table[name], feature)
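# A minimal usage sketch (illustrative): the visitor is called once per (possibly
# nested) array together with its feature, e.g. a list column yields the list
# array first and then its values.
#
#     >>> import pyarrow as pa
#     >>> seen = []
#     >>> table_visitor(pa.table({"a": [[1, 2], [3]]}), lambda array, feature: seen.append(str(array.type)))
#     >>> seen
#     ['list<item: int64>', 'int64']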
def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]:
"""Iterate over sub-tables of size `batch_size`.
Args:
table (`pyarrow.Table`):
PyArrow table to iterate over.
batch_size (`int`):
Size of each sub-table to yield.
drop_last_batch (`bool`, defaults to `False`):
Drop the last batch if it is smaller than `batch_size`.
"""
chunks_buffer = []
chunks_buffer_size = 0
for chunk in table.to_reader(max_chunksize=batch_size):
if len(chunk) == 0:
continue
elif chunks_buffer_size + len(chunk) < batch_size:
chunks_buffer.append(chunk)
chunks_buffer_size += len(chunk)
continue
elif chunks_buffer_size + len(chunk) == batch_size:
chunks_buffer.append(chunk)
yield pa.Table.from_batches(chunks_buffer)
chunks_buffer = []
chunks_buffer_size = 0
else:
cropped_chunk_length = batch_size - chunks_buffer_size
chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
yield pa.Table.from_batches(chunks_buffer)
chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
chunks_buffer_size = len(chunk) - cropped_chunk_length
if not drop_last_batch and chunks_buffer:
yield pa.Table.from_batches(chunks_buffer)
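# A minimal usage sketch (illustrative): iterating over an in-memory table in
# fixed-size batches, with and without dropping the last incomplete batch.
#
#     >>> import pyarrow as pa
#     >>> t = pa.table({"a": list(range(10))})
#     >>> [len(subtable) for subtable in table_iter(t, batch_size=4)]
#     [4, 4, 2]
#     >>> [len(subtable) for subtable in table_iter(t, batch_size=4, drop_last_batch=True)]
#     [4, 4]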
| datasets-main | src/datasets/table.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Access datasets."""
import filecmp
import importlib
import inspect
import json
import os
import posixpath
import shutil
import time
import warnings
from collections import Counter
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
import fsspec
import requests
from huggingface_hub import DatasetCard, DatasetCardData, HfApi
from . import config
from .arrow_dataset import Dataset
from .builder import BuilderConfig, DatasetBuilder
from .data_files import (
DEFAULT_PATTERNS_ALL,
DataFilesDict,
DataFilesList,
EmptyDatasetError,
get_data_patterns,
get_metadata_patterns,
sanitize_patterns,
)
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadMode
from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin
from .features import Features
from .filesystems import extract_path_from_uri, is_remote_filesystem
from .fingerprint import Hasher
from .info import DatasetInfo, DatasetInfosDict
from .iterable_dataset import IterableDataset
from .metric import Metric
from .naming import camelcase_to_snakecase, snakecase_to_camelcase
from .packaged_modules import (
_EXTENSION_TO_MODULE,
_MODULE_SUPPORTS_METADATA,
_MODULE_TO_EXTENSIONS,
_PACKAGED_DATASETS_MODULES,
_hash_python_lines,
)
from .splits import Split
from .utils.deprecation_utils import deprecated
from .utils.file_utils import (
OfflineModeIsEnabled,
_raise_if_offline_mode_is_enabled,
cached_path,
head_hf_s3,
hf_github_url,
init_hf_modules,
is_relative_path,
relative_to_absolute_path,
url_or_path_join,
)
from .utils.filelock import FileLock
from .utils.hub import hf_hub_url
from .utils.info_utils import VerificationMode, is_small_dataset
from .utils.logging import get_logger
from .utils.metadata import MetadataConfigs
from .utils.py_utils import get_imports
from .utils.version import Version
logger = get_logger(__name__)
ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + [".zip"]
def init_dynamic_modules(
name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
):
"""
Create a module with name `name` in which you can add dynamic modules
such as metrics or datasets. The module can be imported using its name.
    The module is created in the HF_MODULES_CACHE directory by default (~/.cache/huggingface/modules) but it can
be overridden by specifying a path to another directory in `hf_modules_cache`.
"""
hf_modules_cache = init_hf_modules(hf_modules_cache)
dynamic_modules_path = os.path.join(hf_modules_cache, name)
os.makedirs(dynamic_modules_path, exist_ok=True)
if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
pass
return dynamic_modules_path
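# A minimal usage sketch (illustrative; a temporary directory is used only to
# avoid touching the real cache): the returned directory is importable because
# it contains an __init__.py file.
#
#     >>> import os, tempfile
#     >>> path = init_dynamic_modules(hf_modules_cache=tempfile.mkdtemp())
#     >>> os.path.isfile(os.path.join(path, "__init__.py"))
#     True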
def import_main_class(module_path, dataset=True) -> Optional[Union[Type[DatasetBuilder], Type[Metric]]]:
"""Import a module at module_path and return its main class:
- a DatasetBuilder if dataset is True
- a Metric if dataset is False
"""
module = importlib.import_module(module_path)
if dataset:
main_cls_type = DatasetBuilder
else:
main_cls_type = Metric
# Find the main class in our imported module
module_main_cls = None
for name, obj in module.__dict__.items():
if inspect.isclass(obj) and issubclass(obj, main_cls_type):
if inspect.isabstract(obj):
continue
module_main_cls = obj
obj_module = inspect.getmodule(obj)
if obj_module is not None and module == obj_module:
break
return module_main_cls
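# A minimal usage sketch (illustrative): importing the packaged CSV builder that
# ships with `datasets` and retrieving its main DatasetBuilder subclass.
#
#     >>> import_main_class("datasets.packaged_modules.csv.csv").__name__
#     'Csv'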
class _InitializeConfiguredDatasetBuilder:
"""
From https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class
See also ConfiguredDatasetBuilder.__reduce__
    When called with the builder class and its configuration parameters, returns an
    uninitialized instance of the configured class. Pickle will then call __setstate__
    on that instance.
"""
def __call__(self, builder_cls, metadata_configs, default_config_name, name):
# make a simple object which has no complex __init__ (this one will do)
obj = _InitializeConfiguredDatasetBuilder()
obj.__class__ = configure_builder_class(
builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name
)
return obj
def configure_builder_class(
builder_cls: Type[DatasetBuilder],
builder_configs: List[BuilderConfig],
default_config_name: Optional[str],
dataset_name: str,
) -> Type[DatasetBuilder]:
"""
Dynamically create a builder class with custom builder configs parsed from README.md file,
i.e. set BUILDER_CONFIGS class variable of a builder class to custom configs list.
"""
class ConfiguredDatasetBuilder(builder_cls):
BUILDER_CONFIGS = builder_configs
DEFAULT_CONFIG_NAME = default_config_name
__module__ = builder_cls.__module__ # so that the actual packaged builder can be imported
        def __reduce__(self):  # to make the dynamically created class picklable, see _InitializeConfiguredDatasetBuilder
parent_builder_cls = self.__class__.__mro__[1]
return (
_InitializeConfiguredDatasetBuilder(),
(
parent_builder_cls,
self.BUILDER_CONFIGS,
self.DEFAULT_CONFIG_NAME,
self.dataset_name,
),
self.__dict__.copy(),
)
ConfiguredDatasetBuilder.__name__ = (
f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
)
ConfiguredDatasetBuilder.__qualname__ = (
f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
)
return ConfiguredDatasetBuilder
def get_dataset_builder_class(
dataset_module: "DatasetModule", dataset_name: Optional[str] = None
) -> Type[DatasetBuilder]:
builder_cls = import_main_class(dataset_module.module_path)
if dataset_module.builder_configs_parameters.builder_configs:
builder_cls = configure_builder_class(
builder_cls,
builder_configs=dataset_module.builder_configs_parameters.builder_configs,
default_config_name=dataset_module.builder_configs_parameters.default_config_name,
dataset_name=dataset_name,
)
return builder_cls
def files_to_hash(file_paths: List[str]) -> str:
"""
Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
"""
# List all python files in directories if directories are supplied as part of external imports
to_use_files: List[Union[Path, str]] = []
for file_path in file_paths:
if os.path.isdir(file_path):
to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
else:
to_use_files.append(file_path)
# Get the code from all these files
lines = []
for file_path in to_use_files:
with open(file_path, encoding="utf-8") as f:
lines.extend(f.readlines())
return _hash_python_lines(lines)
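# A minimal usage sketch (illustrative; the temporary script stands in for a real
# dataset script): the result is a 64-character hex digest of the code content.
#
#     >>> import tempfile
#     >>> with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
#     ...     _ = f.write("print('hello')\n")
#     >>> len(files_to_hash([f.name]))
#     64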
def increase_load_count(name: str, resource_type: str):
"""Update the download count of a dataset or metric."""
if not config.HF_DATASETS_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
try:
head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
except Exception:
pass
def _download_additional_modules(
name: str, base_path: str, imports: Tuple[str, str, str, str], download_config: Optional[DownloadConfig]
) -> List[Tuple[str, str]]:
"""
    Download the additional modules needed by a module <name>.py at URL (or local path) <base_path>/<name>.py
    The imports must have been parsed first using ``get_imports``.
    If some modules need to be installed with pip, an error is raised showing how to install them.
    This function returns the list of downloaded modules as tuples (import_name, module_file_path).
    The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
"""
local_imports = []
library_imports = []
download_config = download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading extra modules"
for import_type, import_name, import_path, sub_directory in imports:
if import_type == "library":
library_imports.append((import_name, import_path)) # Import from a library
continue
if import_name == name:
raise ValueError(
f"Error in the {name} script, importing relative {import_name} module "
f"but {import_name} is the name of the script. "
f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
f"comment pointing to the original relative import file path."
)
if import_type == "internal":
url_or_filename = url_or_path_join(base_path, import_path + ".py")
elif import_type == "external":
url_or_filename = import_path
else:
raise ValueError("Wrong import_type")
local_import_path = cached_path(
url_or_filename,
download_config=download_config,
)
if sub_directory is not None:
local_import_path = os.path.join(local_import_path, sub_directory)
local_imports.append((import_name, local_import_path))
# Check library imports
needs_to_be_installed = {}
for library_import_name, library_import_path in library_imports:
try:
lib = importlib.import_module(library_import_name) # noqa F841
except ImportError:
if library_import_name not in needs_to_be_installed or library_import_path != library_import_name:
needs_to_be_installed[library_import_name] = library_import_path
if needs_to_be_installed:
_dependencies_str = "dependencies" if len(needs_to_be_installed) > 1 else "dependency"
_them_str = "them" if len(needs_to_be_installed) > 1 else "it"
if "sklearn" in needs_to_be_installed.keys():
needs_to_be_installed["sklearn"] = "scikit-learn"
raise ImportError(
f"To be able to use {name}, you need to install the following {_dependencies_str}: "
f"{', '.join(needs_to_be_installed)}.\nPlease install {_them_str} using 'pip install "
f"{' '.join(needs_to_be_installed.values())}' for instance."
)
return local_imports
def _copy_script_and_other_resources_in_importable_dir(
name: str,
importable_directory_path: str,
subdirectory_name: str,
original_local_path: str,
local_imports: List[Tuple[str, str]],
additional_files: List[Tuple[str, str]],
download_mode: Optional[Union[DownloadMode, str]],
) -> str:
"""Copy a script and its required imports to an importable directory
Args:
name (str): name of the resource to load
importable_directory_path (str): path to the loadable folder in the dynamic modules directory
subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
original_local_path (str): local path to the resource script
local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
download_mode (Optional[Union[DownloadMode, str]]): download mode
Return:
importable_local_file: path to an importable module with importlib.import_module
"""
# Define a directory with a unique name in our dataset or metric folder
# path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
# we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
importable_local_file = os.path.join(importable_subdirectory, name + ".py")
# Prevent parallel disk operations
lock_path = importable_directory_path + ".lock"
with FileLock(lock_path):
# Create main dataset/metrics folder if needed
if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
shutil.rmtree(importable_directory_path)
os.makedirs(importable_directory_path, exist_ok=True)
# add an __init__ file to the main dataset folder if needed
init_file_path = os.path.join(importable_directory_path, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Create hash dataset folder if needed
os.makedirs(importable_subdirectory, exist_ok=True)
# add an __init__ file to the hash dataset folder if needed
init_file_path = os.path.join(importable_subdirectory, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Copy dataset.py file in hash folder if needed
if not os.path.exists(importable_local_file):
shutil.copyfile(original_local_path, importable_local_file)
# Record metadata associating original dataset path with local unique folder
# Use os.path.splitext to split extension from importable_local_file
meta_path = os.path.splitext(importable_local_file)[0] + ".json"
if not os.path.exists(meta_path):
meta = {"original file path": original_local_path, "local file path": importable_local_file}
# the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
with open(meta_path, "w", encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
# Copy all the additional imports
for import_name, import_path in local_imports:
if os.path.isfile(import_path):
full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
if not os.path.exists(full_path_local_import):
shutil.copyfile(import_path, full_path_local_import)
elif os.path.isdir(import_path):
full_path_local_import = os.path.join(importable_subdirectory, import_name)
if not os.path.exists(full_path_local_import):
shutil.copytree(import_path, full_path_local_import)
else:
raise ImportError(f"Error with local import at {import_path}")
# Copy additional files like dataset_infos.json file if needed
for file_name, original_path in additional_files:
destination_additional_path = os.path.join(importable_subdirectory, file_name)
if not os.path.exists(destination_additional_path) or not filecmp.cmp(
original_path, destination_additional_path
):
shutil.copyfile(original_path, destination_additional_path)
return importable_local_file
def _create_importable_file(
local_path: str,
local_imports: List[Tuple[str, str]],
additional_files: List[Tuple[str, str]],
dynamic_modules_path: str,
module_namespace: str,
name: str,
download_mode: DownloadMode,
) -> Tuple[str, str]:
importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
(Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
importable_local_file = _copy_script_and_other_resources_in_importable_dir(
name=name.split("/")[-1],
importable_directory_path=importable_directory_path,
subdirectory_name=hash,
original_local_path=local_path,
local_imports=local_imports,
additional_files=additional_files,
download_mode=download_mode,
)
logger.debug(f"Created importable dataset file at {importable_local_file}")
module_path = ".".join(
[os.path.basename(dynamic_modules_path), module_namespace, name.replace("/", "--"), hash, name.split("/")[-1]]
)
return module_path, hash
def infer_module_for_data_files_list(
data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
) -> Optional[Tuple[str, str]]:
"""Infer module (and builder kwargs) from list of data files.
It picks the module based on the most common file extension.
    In case of a tie, ".parquet" is preferred; remaining ties are broken by extension name.
Args:
data_files_list (DataFilesList): List of data files.
        download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass a token or `storage_options` to support different platforms and auth types.
Returns:
tuple[str, dict[str, Any]]: Tuple with
- inferred module name
- dict of builder kwargs
"""
extensions_counter = Counter(
"." + suffix.lower()
for filepath in data_files_list[: config.DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE]
for suffix in xbasename(filepath).split(".")[1:]
)
if extensions_counter:
        def sort_key(ext_count: Tuple[str, int]) -> Tuple[int, bool, str]:
            """Sort by count, preferring ".parquet" in case of a tie."""
ext, count = ext_count
return (count, ext == ".parquet", ext)
for ext, _ in sorted(extensions_counter.items(), key=sort_key, reverse=True):
if ext in _EXTENSION_TO_MODULE:
return _EXTENSION_TO_MODULE[ext]
elif ext == ".zip":
return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config)
return None, {}
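# A minimal usage sketch (illustrative; the file paths are hypothetical and a
# plain list of paths is used in place of a DataFilesList): CSV files map to the
# packaged "csv" module with no extra builder kwargs.
#
#     >>> infer_module_for_data_files_list(["data/train-00.csv", "data/train-01.csv"])
#     ('csv', {})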
def infer_module_for_data_files_list_in_archives(
data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
) -> Optional[Tuple[str, str]]:
"""Infer module (and builder kwargs) from list of archive data files.
Args:
data_files_list (DataFilesList): List of data files.
        download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass a token or `storage_options` to support different platforms and auth types.
Returns:
tuple[str, dict[str, Any]]: Tuple with
- inferred module name
- dict of builder kwargs
"""
archived_files = []
archive_files_counter = 0
for filepath in data_files_list:
if str(filepath).endswith(".zip"):
archive_files_counter += 1
if archive_files_counter > config.GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE:
break
extracted = xjoin(StreamingDownloadManager().extract(filepath), "**")
archived_files += [
f.split("::")[0]
for f in xglob(extracted, recursive=True, download_config=download_config)[
: config.ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE
]
]
extensions_counter = Counter(
"." + suffix.lower() for filepath in archived_files for suffix in xbasename(filepath).split(".")[1:]
)
if extensions_counter:
most_common = extensions_counter.most_common(1)[0][0]
if most_common in _EXTENSION_TO_MODULE:
return _EXTENSION_TO_MODULE[most_common]
return None, {}
def infer_module_for_data_files(
data_files: DataFilesDict, path: Optional[str] = None, download_config: Optional[DownloadConfig] = None
) -> Tuple[Optional[str], Dict[str, Any]]:
"""Infer module (and builder kwargs) from data files. Raise if module names for different splits don't match.
Args:
data_files (DataFilesDict): List of data files.
path (str, optional): Dataset name or path.
        download_config (`DownloadConfig`, *optional*): Download configuration used to authenticate on the Hugging Face Hub for private remote files.
Returns:
tuple[str, dict[str, Any]]: Tuple with
- inferred module name
- builder kwargs
"""
split_modules = {
split: infer_module_for_data_files_list(data_files_list, download_config=download_config)
for split, data_files_list in data_files.items()
}
module_name, default_builder_kwargs = next(iter(split_modules.values()))
if any((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values()):
raise ValueError(f"Couldn't infer the same data file format for all splits. Got {split_modules}")
if not module_name:
path = f" in {path}. " if path else ". "
raise FileNotFoundError(f"No (supported) data files or dataset script found{path}")
return module_name, default_builder_kwargs
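# A minimal usage sketch (illustrative; hypothetical file names, and a plain dict
# of lists standing in for a DataFilesDict): both splits resolve to the same
# packaged module, so the inference succeeds.
#
#     >>> infer_module_for_data_files({"train": ["train.jsonl"], "test": ["test.jsonl"]})
#     ('json', {})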
def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str:
"""
    Update the hash of a packaged module so that it reflects the config parameters passed via the README metadata.
    This is used to create unique cache directories for different configurations.
"""
params_to_exclude = {"config_name", "version", "description"}
params_to_add_to_hash = {
param: value for param, value in sorted(config_parameters.items()) if param not in params_to_exclude
}
m = Hasher()
m.update(hash)
m.update(params_to_add_to_hash)
return m.hexdigest()
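# A minimal usage sketch (illustrative values): "config_name", "version" and
# "description" are excluded from the update, so changing only the config name
# leaves the resulting hash unchanged.
#
#     >>> h1 = update_hash_with_config_parameters("0123abcd", {"config_name": "default", "sep": ";"})
#     >>> h2 = update_hash_with_config_parameters("0123abcd", {"config_name": "custom", "sep": ";"})
#     >>> h1 == h2
#     True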
def create_builder_configs_from_metadata_configs(
module_path: str,
metadata_configs: MetadataConfigs,
supports_metadata: bool,
base_path: Optional[str] = None,
default_builder_kwargs: Dict[str, Any] = None,
download_config: Optional[DownloadConfig] = None,
) -> Tuple[List[BuilderConfig], str]:
builder_cls = import_main_class(module_path)
builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS
default_config_name = metadata_configs.get_default_config_name()
builder_configs = []
base_path = base_path if base_path is not None else ""
for config_name, config_params in metadata_configs.items():
config_data_files = config_params.get("data_files")
config_data_dir = config_params.get("data_dir")
config_base_path = base_path + "/" + config_data_dir if config_data_dir else base_path
try:
config_patterns = (
sanitize_patterns(config_data_files)
if config_data_files is not None
else get_data_patterns(config_base_path)
)
config_data_files_dict = DataFilesDict.from_patterns(
config_patterns,
base_path=config_base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
download_config=download_config,
)
except EmptyDatasetError as e:
raise EmptyDatasetError(
f"Dataset at '{base_path}' doesn't contain data files matching the patterns for config '{config_name}',"
f" check `data_files` and `data_fir` parameters in the `configs` YAML field in README.md. "
) from e
if config_data_files is None and supports_metadata and config_patterns != DEFAULT_PATTERNS_ALL:
try:
config_metadata_patterns = get_metadata_patterns(base_path)
except FileNotFoundError:
config_metadata_patterns = None
if config_metadata_patterns is not None:
config_metadata_data_files_list = DataFilesList.from_patterns(
config_metadata_patterns, base_path=base_path
)
if config_metadata_data_files_list:
config_data_files_dict = DataFilesDict(
{
split: data_files_list + config_metadata_data_files_list
for split, data_files_list in config_data_files_dict.items()
}
)
ignored_params = [
param for param in config_params if not hasattr(builder_config_cls, param) and param != "default"
]
if ignored_params:
logger.warning(
f"Some datasets params were ignored: {ignored_params}. "
"Make sure to use only valid params for the dataset builder and to have "
"a up-to-date version of the `datasets` library."
)
builder_configs.append(
builder_config_cls(
name=config_name,
data_files=config_data_files_dict,
data_dir=config_data_dir,
**{
param: value
for param, value in {**default_builder_kwargs, **config_params}.items()
if hasattr(builder_config_cls, param) and param not in ("default", "data_files", "data_dir")
},
)
)
return builder_configs, default_config_name
@dataclass
class BuilderConfigsParameters:
"""Dataclass containing objects related to creation of builder configurations from yaml's metadata content.
Attributes:
metadata_configs (`MetadataConfigs`, *optional*):
Configs parsed from yaml's metadata.
builder_configs (`list[BuilderConfig]`, *optional*):
List of BuilderConfig objects created from metadata_configs above.
default_config_name (`str`):
Name of default config taken from yaml's metadata.
"""
metadata_configs: Optional[MetadataConfigs] = None
builder_configs: Optional[List[BuilderConfig]] = None
default_config_name: Optional[str] = None
@dataclass
class DatasetModule:
module_path: str
hash: str
builder_kwargs: dict
builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters)
dataset_infos: Optional[DatasetInfosDict] = None
@dataclass
class MetricModule:
module_path: str
hash: str
class _DatasetModuleFactory:
def get_module(self) -> DatasetModule:
raise NotImplementedError
class _MetricModuleFactory:
def get_module(self) -> MetricModule:
raise NotImplementedError
class GithubMetricModuleFactory(_MetricModuleFactory):
"""Get the module of a metric. The metric script is downloaded from GitHub.
<Deprecated version="2.5.0">
Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
</Deprecated>
"""
@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
def __init__(
self,
name: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.revision = revision
self.download_config = download_config.copy() if download_config else DownloadConfig()
if self.download_config.max_retries < 3:
self.download_config.max_retries = 3
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
assert self.name.count("/") == 0
increase_load_count(name, resource_type="metric")
def download_loading_script(self, revision: Optional[str]) -> str:
file_path = hf_github_url(path=self.name, name=self.name + ".py", revision=revision, dataset=False)
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading builder script"
return cached_path(file_path, download_config=download_config)
def get_module(self) -> MetricModule:
# get script and other files
revision = self.revision
try:
local_path = self.download_loading_script(revision)
revision = self.revision
except FileNotFoundError:
if revision is not None:
raise
else:
revision = "main"
local_path = self.download_loading_script(revision)
logger.warning(
f"Couldn't find a directory or a metric named '{self.name}' in this version. "
f"It was picked from the main branch on github instead."
)
imports = get_imports(local_path)
local_imports = _download_additional_modules(
name=self.name,
base_path=hf_github_url(path=self.name, name="", revision=revision, dataset=False),
imports=imports,
download_config=self.download_config,
)
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
module_path, hash = _create_importable_file(
local_path=local_path,
local_imports=local_imports,
additional_files=[],
dynamic_modules_path=dynamic_modules_path,
module_namespace="metrics",
name=self.name,
download_mode=self.download_mode,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
return MetricModule(module_path, hash)
class LocalMetricModuleFactory(_MetricModuleFactory):
"""Get the module of a local metric. The metric script is loaded from a local script.
<Deprecated version="2.5.0">
Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
</Deprecated>
"""
@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
def __init__(
self,
path: str,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
):
self.path = path
self.name = Path(path).stem
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
def get_module(self) -> MetricModule:
# get script and other files
imports = get_imports(self.path)
local_imports = _download_additional_modules(
name=self.name,
base_path=str(Path(self.path).parent),
imports=imports,
download_config=self.download_config,
)
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
module_path, hash = _create_importable_file(
local_path=self.path,
local_imports=local_imports,
additional_files=[],
dynamic_modules_path=dynamic_modules_path,
module_namespace="metrics",
name=self.name,
download_mode=self.download_mode,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
return MetricModule(module_path, hash)
class LocalDatasetModuleFactoryWithScript(_DatasetModuleFactory):
"""Get the module of a local dataset. The dataset script is loaded from a local script."""
def __init__(
self,
path: str,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
):
self.path = path
self.name = Path(path).stem
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
def get_module(self) -> DatasetModule:
# get script and other files
dataset_infos_path = Path(self.path).parent / config.DATASETDICT_INFOS_FILENAME
dataset_readme_path = Path(self.path).parent / "README.md"
imports = get_imports(self.path)
local_imports = _download_additional_modules(
name=self.name,
base_path=str(Path(self.path).parent),
imports=imports,
download_config=self.download_config,
)
additional_files = []
if dataset_infos_path.is_file():
additional_files.append((config.DATASETDICT_INFOS_FILENAME, str(dataset_infos_path)))
if dataset_readme_path.is_file():
additional_files.append(("README.md", dataset_readme_path))
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
module_path, hash = _create_importable_file(
local_path=self.path,
local_imports=local_imports,
additional_files=additional_files,
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
name=self.name,
download_mode=self.download_mode,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {"hash": hash, "base_path": str(Path(self.path).parent)}
return DatasetModule(module_path, hash, builder_kwargs)
class LocalDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
"""Get the module of a dataset loaded from the user's data files. The dataset builder module to use is inferred
from the data files extensions."""
def __init__(
self,
path: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
):
if data_dir and os.path.isabs(data_dir):
raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}")
self.path = Path(path).as_posix()
self.name = Path(path).stem
self.data_files = data_files
self.data_dir = data_dir
self.download_mode = download_mode
def get_module(self) -> DatasetModule:
readme_path = os.path.join(self.path, "README.md")
dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData()
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
# we need a set of data files to find which dataset builder to use
# because we need to infer module name by files extensions
base_path = Path(self.path, self.data_dir or "").expanduser().resolve().as_posix()
if self.data_files is not None:
patterns = sanitize_patterns(self.data_files)
elif metadata_configs and "data_files" in next(iter(metadata_configs.values())):
patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
else:
patterns = get_data_patterns(base_path)
data_files = DataFilesDict.from_patterns(
patterns,
base_path=base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
)
module_name, default_builder_kwargs = infer_module_for_data_files(
data_files=data_files,
path=self.path,
)
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
try:
metadata_patterns = get_metadata_patterns(base_path)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, base_path=base_path)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, hash = _PACKAGED_DATASETS_MODULES[module_name]
if metadata_configs:
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
base_path=base_path,
supports_metadata=supports_metadata,
default_builder_kwargs=default_builder_kwargs,
)
else:
builder_configs, default_config_name = None, None
builder_kwargs = {
"hash": hash,
"base_path": self.path,
"dataset_name": camelcase_to_snakecase(Path(self.path).name),
}
if self.data_files is not None or not metadata_configs:
builder_kwargs["data_files"] = data_files
builder_kwargs.update(default_builder_kwargs) # from _EXTENSION_TO_MODULE
# this file is deprecated and was created automatically in old versions of push_to_hub
if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)):
with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
legacy_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
if len(legacy_dataset_infos) == 1:
# old config e.g. named "username--dataset_name"
legacy_config_name = next(iter(legacy_dataset_infos))
legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
legacy_dataset_infos.update(dataset_infos)
dataset_infos = legacy_dataset_infos
if default_config_name is None and len(dataset_infos) == 1:
default_config_name = next(iter(dataset_infos))
return DatasetModule(
module_path,
hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
)
class PackagedDatasetModuleFactory(_DatasetModuleFactory):
"""Get the dataset builder module from the ones that are packaged with the library: csv, json, etc."""
def __init__(
self,
name: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
):
self.name = name
self.data_files = data_files
self.data_dir = data_dir
self.download_config = download_config
self.download_mode = download_mode
increase_load_count(name, resource_type="dataset")
def get_module(self) -> DatasetModule:
base_path = Path(self.data_dir or "").expanduser().resolve().as_posix()
patterns = sanitize_patterns(self.data_files) if self.data_files is not None else get_data_patterns(base_path)
data_files = DataFilesDict.from_patterns(
patterns,
download_config=self.download_config,
base_path=base_path,
)
supports_metadata = self.name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
try:
metadata_patterns = get_metadata_patterns(base_path)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(
metadata_patterns, download_config=self.download_config, base_path=base_path
)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, hash = _PACKAGED_DATASETS_MODULES[self.name]
builder_kwargs = {
"hash": hash,
"data_files": data_files,
"dataset_name": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs)
class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
"""
Get the module of a dataset loaded from data files of a dataset repository.
The dataset builder module to use is inferred from the data files extensions.
"""
def __init__(
self,
name: str,
revision: Optional[Union[str, Version]] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
):
self.name = name
self.revision = revision
self.data_files = data_files
self.data_dir = data_dir
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
increase_load_count(name, resource_type="dataset")
def get_module(self) -> DatasetModule:
hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(
self.name,
revision=self.revision,
token=self.download_config.token,
timeout=100.0,
)
# even if metadata_configs is not None (which means that we will resolve files for each config later)
# we cannot skip resolving all files because we need to infer module name by files extensions
revision = hfh_dataset_info.sha # fix the revision in case there are new commits in the meantime
base_path = f"hf://datasets/{self.name}@{revision}/{self.data_dir or ''}".rstrip("/")
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading readme"
try:
dataset_readme_path = cached_path(
hf_hub_url(self.name, "README.md", revision=revision),
download_config=download_config,
)
dataset_card_data = DatasetCard.load(Path(dataset_readme_path)).data
except FileNotFoundError:
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
# we need a set of data files to find which dataset builder to use
# because we need to infer module name by files extensions
if self.data_files is not None:
patterns = sanitize_patterns(self.data_files)
elif metadata_configs and "data_files" in next(iter(metadata_configs.values())):
patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
else:
patterns = get_data_patterns(base_path, download_config=self.download_config)
data_files = DataFilesDict.from_patterns(
patterns,
base_path=base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
download_config=self.download_config,
)
module_name, default_builder_kwargs = infer_module_for_data_files(
data_files=data_files,
path=self.name,
download_config=self.download_config,
)
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
try:
metadata_patterns = get_metadata_patterns(base_path)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(
metadata_patterns, download_config=self.download_config, base_path=base_path
)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, hash = _PACKAGED_DATASETS_MODULES[module_name]
if metadata_configs:
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
base_path=base_path,
supports_metadata=supports_metadata,
default_builder_kwargs=default_builder_kwargs,
download_config=self.download_config,
)
else:
builder_configs, default_config_name = None, None
builder_kwargs = {
"hash": hash,
"base_path": hf_hub_url(self.name, "", revision=self.revision),
"repo_id": self.name,
"dataset_name": camelcase_to_snakecase(Path(self.name).name),
}
if self.data_files is not None or not metadata_configs:
builder_kwargs["data_files"] = data_files
builder_kwargs.update(default_builder_kwargs) # from _EXTENSION_TO_MODULE
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading metadata"
try:
# this file is deprecated and was created automatically in old versions of push_to_hub
dataset_infos_path = cached_path(
hf_hub_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.revision),
download_config=download_config,
)
with open(dataset_infos_path, encoding="utf-8") as f:
legacy_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
if len(legacy_dataset_infos) == 1:
# old config e.g. named "username--dataset_name"
legacy_config_name = next(iter(legacy_dataset_infos))
legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
legacy_dataset_infos.update(dataset_infos)
dataset_infos = legacy_dataset_infos
except FileNotFoundError:
pass
if default_config_name is None and len(dataset_infos) == 1:
default_config_name = next(iter(dataset_infos))
return DatasetModule(
module_path,
hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
)
class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory):
"""
Get the module of a dataset from a dataset repository.
The dataset script comes from the script inside the dataset repository.
"""
def __init__(
self,
name: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.revision = revision
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
increase_load_count(name, resource_type="dataset")
def download_loading_script(self) -> str:
file_path = hf_hub_url(repo_id=self.name, path=self.name.split("/")[-1] + ".py", revision=self.revision)
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading builder script"
return cached_path(file_path, download_config=download_config)
def download_dataset_infos_file(self) -> str:
dataset_infos = hf_hub_url(repo_id=self.name, path=config.DATASETDICT_INFOS_FILENAME, revision=self.revision)
# Download the dataset infos file if available
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading metadata"
try:
return cached_path(
dataset_infos,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
return None
def download_dataset_readme_file(self) -> str:
readme_url = hf_hub_url(repo_id=self.name, path="README.md", revision=self.revision)
        # Download the dataset README file if available
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading readme"
try:
return cached_path(
readme_url,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
return None
def get_module(self) -> DatasetModule:
# get script and other files
local_path = self.download_loading_script()
dataset_infos_path = self.download_dataset_infos_file()
dataset_readme_path = self.download_dataset_readme_file()
imports = get_imports(local_path)
local_imports = _download_additional_modules(
name=self.name,
base_path=hf_hub_url(repo_id=self.name, path="", revision=self.revision),
imports=imports,
download_config=self.download_config,
)
additional_files = []
if dataset_infos_path:
additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path))
if dataset_readme_path:
additional_files.append(("README.md", dataset_readme_path))
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
module_path, hash = _create_importable_file(
local_path=local_path,
local_imports=local_imports,
additional_files=additional_files,
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
name=self.name,
download_mode=self.download_mode,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {
"hash": hash,
"base_path": hf_hub_url(self.name, "", revision=self.revision),
"repo_id": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs)
class CachedDatasetModuleFactory(_DatasetModuleFactory):
"""
Get the module of a dataset that has been loaded once already and cached.
The script that is loaded from the cache is the most recent one with a matching name.
"""
def __init__(
self,
name: str,
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.dynamic_modules_path = dynamic_modules_path
assert self.name.count("/") <= 1
def get_module(self) -> DatasetModule:
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
importable_directory_path = os.path.join(dynamic_modules_path, "datasets", self.name.replace("/", "--"))
hashes = (
[h for h in os.listdir(importable_directory_path) if len(h) == 64]
if os.path.isdir(importable_directory_path)
else None
)
if not hashes:
raise FileNotFoundError(f"Dataset {self.name} is not cached in {dynamic_modules_path}")
# get most recent
def _get_modification_time(module_hash):
return (Path(importable_directory_path) / module_hash / (self.name.split("/")[-1] + ".py")).stat().st_mtime
hash = sorted(hashes, key=_get_modification_time)[-1]
warning_msg = (
f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
f"couldn't be found locally at {self.name}."
)
if not config.HF_DATASETS_OFFLINE:
warning_msg += ", or remotely on the Hugging Face Hub."
logger.warning(warning_msg)
# make the new module to be noticed by the import system
module_path = ".".join(
[
os.path.basename(dynamic_modules_path),
"datasets",
self.name.replace("/", "--"),
hash,
self.name.split("/")[-1],
]
)
importlib.invalidate_caches()
builder_kwargs = {
"hash": hash,
"repo_id": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs)
class CachedMetricModuleFactory(_MetricModuleFactory):
"""
Get the module of a metric that has been loaded once already and cached.
The script that is loaded from the cache is the most recent one with a matching name.
<Deprecated version="2.5.0">
Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
</Deprecated>
"""
@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
def __init__(
self,
name: str,
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.dynamic_modules_path = dynamic_modules_path
assert self.name.count("/") == 0
def get_module(self) -> MetricModule:
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
importable_directory_path = os.path.join(dynamic_modules_path, "metrics", self.name)
hashes = (
[h for h in os.listdir(importable_directory_path) if len(h) == 64]
if os.path.isdir(importable_directory_path)
else None
)
if not hashes:
raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}")
# get most recent
def _get_modification_time(module_hash):
return (Path(importable_directory_path) / module_hash / (self.name + ".py")).stat().st_mtime
hash = sorted(hashes, key=_get_modification_time)[-1]
logger.warning(
f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub."
)
# make the new module to be noticed by the import system
module_path = ".".join([os.path.basename(dynamic_modules_path), "metrics", self.name, hash, self.name])
importlib.invalidate_caches()
return MetricModule(module_path, hash)
def dataset_module_factory(
path: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[Dict, List, str, DataFilesDict]] = None,
**download_kwargs,
) -> DatasetModule:
"""
Download/extract/cache a dataset module.
    Dataset scripts are cached inside the dynamic modules cache to allow easy imports (avoiding ugly sys.path tweaks).
Args:
path (str): Path or name of the dataset.
Depending on ``path``, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
For local datasets:
- if ``path`` is a local directory (containing data files only)
-> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
e.g. ``'./path/to/directory/with/my/csv/data'``.
- if ``path`` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory):
-> load the dataset builder from the dataset script
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
For datasets on the Hugging Face Hub (list all available datasets with ``huggingface_hub.list_datasets()``)
- if ``path`` is a dataset repository on the HF hub (containing data files only)
-> load a generic dataset builder (csv, text etc.) based on the content of the repository
e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
- if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
-> load the dataset builder from the dataset script in the dataset repository
e.g. ``glue``, ``squad``, ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
By default, the datasets and metrics are stored inside the `datasets_modules` module.
data_dir (:obj:`str`, optional): Directory with the data files. Used only if `data_files` is not specified,
            in which case it is equivalent to passing `os.path.join(data_dir, "**")` as `data_files`.
data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
**download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
the attributes in download_config if supplied.
Returns:
DatasetModule
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
download_config.extract_compressed_file = True
download_config.force_extract = True
download_config.force_download = download_mode == DownloadMode.FORCE_REDOWNLOAD
filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
if not filename.endswith(".py"):
filename = filename + ".py"
combined_path = os.path.join(path, filename)
# We have several ways to get a dataset builder:
#
# - if path is the name of a packaged dataset module
# -> use the packaged module (json, csv, etc.)
#
# - if os.path.join(path, name) is a local python file
# -> use the module from the python file
# - if path is a local directory (but no python file)
# -> use a packaged module (csv, text etc.) based on content of the directory
#
# - if path has one "/" and is dataset repository on the HF hub with a python file
# -> the module from the python file in the dataset repository
# - if path has one "/" and is dataset repository on the HF hub without a python file
# -> use a packaged module (csv, text etc.) based on content of the repository
# Try packaged
if path in _PACKAGED_DATASETS_MODULES:
return PackagedDatasetModuleFactory(
path,
data_dir=data_dir,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
).get_module()
# Try locally
elif path.endswith(filename):
if os.path.isfile(path):
return LocalDatasetModuleFactoryWithScript(
path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
).get_module()
else:
raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(path)}")
elif os.path.isfile(combined_path):
return LocalDatasetModuleFactoryWithScript(
combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
).get_module()
elif os.path.isdir(path):
return LocalDatasetModuleFactoryWithoutScript(
path, data_dir=data_dir, data_files=data_files, download_mode=download_mode
).get_module()
# Try remotely
elif is_relative_path(path) and path.count("/") <= 1:
try:
_raise_if_offline_mode_is_enabled()
hf_api = HfApi(config.HF_ENDPOINT)
try:
dataset_info = hf_api.dataset_info(
repo_id=path,
revision=revision,
token=download_config.token,
timeout=100.0,
)
except Exception as e: # noqa catch any exception of hf_hub and consider that the dataset doesn't exist
if isinstance(
e,
(
OfflineModeIsEnabled,
requests.exceptions.ConnectTimeout,
requests.exceptions.ConnectionError,
),
):
raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({type(e).__name__})")
                elif "404" in str(e):
                    msg = f"Dataset '{path}' doesn't exist on the Hub"
                    msg = msg + f" at revision '{revision}'" if revision else msg
                    raise FileNotFoundError(msg)
elif "401" in str(e):
msg = f"Dataset '{path}' doesn't exist on the Hub"
msg = msg + f" at revision '{revision}'" if revision else msg
raise FileNotFoundError(
msg + ". If the repo is private or gated, make sure to log in with `huggingface-cli login`."
)
else:
raise e
if filename in [sibling.rfilename for sibling in dataset_info.siblings]:
return HubDatasetModuleFactoryWithScript(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
).get_module()
else:
return HubDatasetModuleFactoryWithoutScript(
path,
revision=revision,
data_dir=data_dir,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
).get_module()
except (
Exception
) as e1: # noqa all the attempts failed, before raising the error we should check if the module is already cached.
try:
return CachedDatasetModuleFactory(path, dynamic_modules_path=dynamic_modules_path).get_module()
except Exception: # noqa if it's not in the cache, then it doesn't exist.
if isinstance(e1, OfflineModeIsEnabled):
raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
if isinstance(e1, EmptyDatasetError):
raise e1 from None
if isinstance(e1, FileNotFoundError):
raise FileNotFoundError(
f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory. "
f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}"
) from None
raise e1 from None
else:
raise FileNotFoundError(
f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory."
)
@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
def metric_module_factory(
path: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
**download_kwargs,
) -> MetricModule:
"""
Download/extract/cache a metric module.
<Deprecated version="2.5.0">
Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
</Deprecated>
    Metric scripts are cached inside the dynamic modules cache to allow easy import (avoiding ugly sys.path tweaks).
Args:
path (str): Path or name of the metric script.
- if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory):
-> load the module from the metric script
e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``.
            - if ``path`` is the name of a canonical metric hosted in the GitHub repository at huggingface/datasets
              -> load the module from the metric script in that repository
              e.g. ``'accuracy'``, ``'rouge'``, ``'glue'`` or ``'squad'``.
revision (Optional ``Union[str, datasets.Version]``):
If specified, the module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
- it will also try to load it from the main branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
By default, the datasets and metrics are stored inside the `datasets_modules` module.
**download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
the attributes in download_config if supplied.
Returns:
MetricModule
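    Example:
    A minimal usage sketch; ``'accuracy'`` refers to a canonical metric script, so an internet connection is needed the first time it is loaded:
    ```py
    >>> from datasets.load import metric_module_factory
    >>> metric_module = metric_module_factory('accuracy')
    >>> metric_module.module_path  # path of the cached metric module
    ```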
"""
with warnings.catch_warnings():
# Ignore equivalent warnings to the one already issued
warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
download_config.extract_compressed_file = True
download_config.force_extract = True
filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
if not filename.endswith(".py"):
filename = filename + ".py"
combined_path = os.path.join(path, filename)
# Try locally
if path.endswith(filename):
if os.path.isfile(path):
return LocalMetricModuleFactory(
path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
).get_module()
else:
raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}")
elif os.path.isfile(combined_path):
return LocalMetricModuleFactory(
combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
).get_module()
elif is_relative_path(path) and path.count("/") == 0:
try:
return GithubMetricModuleFactory(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
).get_module()
except (
Exception
) as e1: # noqa all the attempts failed, before raising the error we should check if the module is already cached.
try:
return CachedMetricModuleFactory(path, dynamic_modules_path=dynamic_modules_path).get_module()
except Exception: # noqa if it's not in the cache, then it doesn't exist.
if not isinstance(e1, FileNotFoundError):
raise e1 from None
raise FileNotFoundError(
f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}. "
f"Metric '{path}' doesn't exist on the Hugging Face Hub either."
) from None
else:
raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}.")
@deprecated("Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate")
def load_metric(
path: str,
config_name: Optional[str] = None,
process_id: int = 0,
num_process: int = 1,
cache_dir: Optional[str] = None,
experiment_id: Optional[str] = None,
keep_in_memory: bool = False,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
**metric_init_kwargs,
) -> Metric:
"""Load a `datasets.Metric`.
<Deprecated version="2.5.0">
Use `evaluate.load` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
</Deprecated>
Args:
path (``str``):
path to the metric processing script with the metric builder. Can be either:
                - a local path to a processing script or the directory containing the script (if the script has the same name as the directory),
                    e.g. ``'./metrics/rouge'`` or ``'./metrics/rouge/rouge.py'``
- a metric identifier on the HuggingFace datasets repo (list all available metrics with ``datasets.list_metrics()``)
e.g. ``'rouge'`` or ``'bleu'``
config_name (:obj:`str`, optional): selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset)
process_id (:obj:`int`, optional): for distributed evaluation: id of the process
num_process (:obj:`int`, optional): for distributed evaluation: total number of processes
        cache_dir (Optional str): path to store the temporary predictions and references (defaults to `~/.cache/huggingface/metrics/`)
experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
keep_in_memory (bool): Whether to store the temporary results in memory (defaults to False)
        download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
revision (Optional ``Union[str, datasets.Version]``): if specified, the module will be loaded from the datasets repository
at this version. By default, it is set to the local version of the lib. Specifying a version that is different from
your local version of the lib might cause compatibility issues.
Returns:
`datasets.Metric`
Example:
```py
>>> from datasets import load_metric
>>> accuracy = load_metric('accuracy')
>>> accuracy.compute(references=[1, 0], predictions=[1, 1])
{'accuracy': 0.5}
```
"""
with warnings.catch_warnings():
# Ignore equivalent warnings to the one already issued
warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
metric_module = metric_module_factory(
path, revision=revision, download_config=download_config, download_mode=download_mode
).module_path
metric_cls = import_main_class(metric_module, dataset=False)
metric = metric_cls(
config_name=config_name,
process_id=process_id,
num_process=num_process,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
experiment_id=experiment_id,
**metric_init_kwargs,
)
# Download and prepare resources for the metric
metric.download_and_prepare(download_config=download_config)
return metric
def load_dataset_builder(
path: str,
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
cache_dir: Optional[str] = None,
features: Optional[Features] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
use_auth_token="deprecated",
storage_options: Optional[Dict] = None,
**config_kwargs,
) -> DatasetBuilder:
"""Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.)
without downloading the dataset itself.
You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
A dataset is a directory that contains:
- some data files in generic formats (JSON, CSV, Parquet, text, etc.)
    - and optionally a dataset script, if it requires some code to read the data files. This is used to load files in any kind of format or structure.
Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
Args:
path (`str`):
Path or name of the dataset.
Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
For local datasets:
- if `path` is a local directory (containing data files only)
-> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
e.g. `'./path/to/directory/with/my/csv/data'`.
- if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
-> load the dataset builder from the dataset script
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
- if `path` is a dataset repository on the HF hub (containing data files only)
-> load a generic dataset builder (csv, text etc.) based on the content of the repository
e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
- if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
-> load the dataset builder from the dataset script in the dataset repository
e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
name (`str`, *optional*):
Defining the name of the dataset configuration.
data_dir (`str`, *optional*):
Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
            the behavior is equal to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
cache_dir (`str`, *optional*):
Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
features ([`Features`], *optional*):
Set the features type to use for this dataset.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
revision ([`Version`] or `str`, *optional*):
Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
storage_options (`dict`, *optional*, defaults to `None`):
**Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
<Added version="2.11.0"/>
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the [`BuilderConfig`]
and used in the [`DatasetBuilder`].
Returns:
[`DatasetBuilder`]
Example:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder('rotten_tomatoes')
>>> ds_builder.info.features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
if token is not None:
download_config = download_config.copy() if download_config else DownloadConfig()
download_config.token = token
if storage_options is not None:
download_config = download_config.copy() if download_config else DownloadConfig()
download_config.storage_options.update(storage_options)
dataset_module = dataset_module_factory(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
data_dir=data_dir,
data_files=data_files,
)
# Get dataset builder class from the processing script
builder_kwargs = dataset_module.builder_kwargs
data_dir = builder_kwargs.pop("data_dir", data_dir)
data_files = builder_kwargs.pop("data_files", data_files)
config_name = builder_kwargs.pop(
"config_name", name or dataset_module.builder_configs_parameters.default_config_name
)
dataset_name = builder_kwargs.pop("dataset_name", None)
hash = builder_kwargs.pop("hash")
info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None
if (
dataset_module.builder_configs_parameters.metadata_configs
and config_name in dataset_module.builder_configs_parameters.metadata_configs
):
hash = update_hash_with_config_parameters(
hash, dataset_module.builder_configs_parameters.metadata_configs[config_name]
)
if path in _PACKAGED_DATASETS_MODULES and data_files is None:
error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder."
example_extensions = [
extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path
]
if example_extensions:
error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`'
raise ValueError(error_msg)
builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
# Instantiate the dataset builder
builder_instance: DatasetBuilder = builder_cls(
cache_dir=cache_dir,
dataset_name=dataset_name,
config_name=config_name,
data_dir=data_dir,
data_files=data_files,
hash=hash,
info=info,
features=features,
token=token,
storage_options=storage_options,
**builder_kwargs,
**config_kwargs,
)
return builder_instance
def load_dataset(
path: str,
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
split: Optional[Union[str, Split]] = None,
cache_dir: Optional[str] = None,
features: Optional[Features] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
verification_mode: Optional[Union[VerificationMode, str]] = None,
ignore_verifications="deprecated",
keep_in_memory: Optional[bool] = None,
save_infos: bool = False,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
use_auth_token="deprecated",
task="deprecated",
streaming: bool = False,
num_proc: Optional[int] = None,
storage_options: Optional[Dict] = None,
**config_kwargs,
) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
"""Load a dataset from the Hugging Face Hub, or a local dataset.
You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
A dataset is a directory that contains:
- some data files in generic formats (JSON, CSV, Parquet, text, etc.).
    - and optionally a dataset script, if it requires some code to read the data files. This is used to load files in any kind of format or structure.
Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
This function does the following under the hood:
    1. Download and import the dataset script from `path` into the library, if it's not already cached inside the library.
If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.)
Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset,
contain the path or URL to the original data files and the code to load examples from the original data files.
You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets).
2. Run the dataset script which will:
* Download the dataset file from the original URL (see the script) if it's not already available locally or cached.
        * Process and cache the dataset in typed Arrow tables.
            Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types.
They can be directly accessed from disk, loaded in RAM or even streamed over the web.
3. Return a dataset built from the requested splits in `split` (default: all).
    It is also possible to load a dataset from a local directory or a dataset repository on the Hugging Face Hub without a dataset script.
In this case, it automatically loads all the data files from the directory or the dataset repository.
Args:
path (`str`):
Path or name of the dataset.
Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
For local datasets:
- if `path` is a local directory (containing data files only)
-> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
e.g. `'./path/to/directory/with/my/csv/data'`.
- if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
-> load the dataset builder from the dataset script
e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
- if `path` is a dataset repository on the HF hub (containing data files only)
-> load a generic dataset builder (csv, text etc.) based on the content of the repository
e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
- if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
-> load the dataset builder from the dataset script in the dataset repository
e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
name (`str`, *optional*):
Defining the name of the dataset configuration.
data_dir (`str`, *optional*):
Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
            the behavior is equal to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
split (`Split` or `str`):
Which split of the data to load.
If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
If given, will return a single Dataset.
Splits can be combined and specified like in tensorflow-datasets.
cache_dir (`str`, *optional*):
Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
features (`Features`, *optional*):
Set the features type to use for this dataset.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/>
ignore_verifications (`bool`, defaults to `False`):
Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
<Deprecated version="2.9.1">
`ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
Please use `verification_mode` instead.
</Deprecated>
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the dataset
will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
nonzero. See more details in the [improve performance](../cache#improve-performance) section.
save_infos (`bool`, defaults to `False`):
Save the dataset information (checksums/size/splits/...).
revision ([`Version`] or `str`, *optional*):
Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
task (`str`):
The task to prepare the dataset for during training and evaluation. Casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
<Deprecated version="2.13.0">
`task` was deprecated in version 2.13.0 and will be removed in 3.0.0.
</Deprecated>
streaming (`bool`, defaults to `False`):
If set to `True`, don't download the data files. Instead, it streams the data progressively while
iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case.
            Note that streaming works for datasets that use data formats which support being iterated over, such as txt, csv and jsonl.
            JSON files may be downloaded completely. Streaming from remote zip or gzip files is also supported, but other compressed formats
            like rar and xz are not yet supported. The tgz format doesn't allow streaming.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.7.0"/>
storage_options (`dict`, *optional*, defaults to `None`):
**Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
<Added version="2.11.0"/>
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the `BuilderConfig`
and used in the [`DatasetBuilder`].
Returns:
[`Dataset`] or [`DatasetDict`]:
- if `split` is not `None`: the dataset requested,
- if `split` is `None`, a [`~datasets.DatasetDict`] with each split.
or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True`
        - if `split` is not `None`, the dataset requested,
- if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split.
Example:
Load a dataset from the Hugging Face Hub:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes', split='train')
# Map data files to splits
>>> data_files = {'train': 'train.csv', 'test': 'test.csv'}
>>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files)
```
Load a local dataset:
```py
# Load a CSV file
>>> from datasets import load_dataset
>>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv')
# Load a JSON file
>>> from datasets import load_dataset
>>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json')
# Load from a local loading script
>>> from datasets import load_dataset
>>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train')
```
Load an [`~datasets.IterableDataset`]:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True)
```
Load an image dataset with the `ImageFolder` dataset builder:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train')
```
"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
if ignore_verifications != "deprecated":
verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
warnings.warn(
"'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
FutureWarning,
)
if task != "deprecated":
warnings.warn(
"'task' was deprecated in version 2.13.0 and will be removed in 3.0.0.\n",
FutureWarning,
)
else:
task = None
if data_files is not None and not data_files:
raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).")
if Path(path, config.DATASET_STATE_JSON_FILENAME).exists():
raise ValueError(
"You are trying to load a dataset that was saved using `save_to_disk`. "
"Please use `load_from_disk` instead."
)
if streaming and num_proc is not None:
raise NotImplementedError(
"Loading a streaming dataset in parallel with `num_proc` is not implemented. "
"To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead."
)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
verification_mode = VerificationMode(
(verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS
)
# Create a dataset builder
builder_instance = load_dataset_builder(
path=path,
name=name,
data_dir=data_dir,
data_files=data_files,
cache_dir=cache_dir,
features=features,
download_config=download_config,
download_mode=download_mode,
revision=revision,
token=token,
storage_options=storage_options,
**config_kwargs,
)
# Return iterable dataset in case of streaming
if streaming:
return builder_instance.as_streaming_dataset(split=split)
# Some datasets are already processed on the HF google storage
# Don't try downloading from Google storage for the packaged datasets as text, json, csv or pandas
try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
# Download and prepare data
builder_instance.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
try_from_hf_gcs=try_from_hf_gcs,
num_proc=num_proc,
storage_options=storage_options,
)
# Build dataset for splits
keep_in_memory = (
keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
)
ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory)
# Rename and cast features to match task schema
if task is not None:
# To avoid issuing the same warning twice
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
ds = ds.prepare_for_task(task)
if save_infos:
builder_instance._save_infos()
return ds
def load_from_disk(
dataset_path: str, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None
) -> Union[Dataset, DatasetDict]:
"""
Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or
from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
Args:
dataset_path (`str`):
Path (e.g. `"dataset/train"`) or remote URI (e.g.
`"s3://my-bucket/dataset/train"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset will be
loaded from.
fs (`~filesystems.S3FileSystem` or `fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem used to download the files from.
<Deprecated version="2.9.0">
`fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
</Deprecated>
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the dataset
will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
nonzero. See more details in the [improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Returns:
[`Dataset`] or [`DatasetDict`]:
- If `dataset_path` is a path of a dataset directory: the dataset requested.
- If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split.
Example:
```py
>>> from datasets import load_from_disk
>>> ds = load_from_disk('path/to/dataset/directory')
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs_token_paths = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
fs: fsspec.AbstractFileSystem = fs_token_paths[0]
    # Get the filesystem of the dataset, either remote (e.g. s3://) or local (file://), and the adjusted dataset_path
if is_remote_filesystem(fs):
dest_dataset_path = extract_path_from_uri(dataset_path)
path_join = posixpath.join
else:
fs = fsspec.filesystem("file")
dest_dataset_path = dataset_path
path_join = os.path.join
if not fs.exists(dest_dataset_path):
raise FileNotFoundError(f"Directory {dataset_path} not found")
if fs.isfile(path_join(dest_dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile(
path_join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
):
return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
elif fs.isfile(path_join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME)):
return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
else:
raise FileNotFoundError(
f"Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory."
)
| datasets-main | src/datasets/load.py |
import importlib
import inspect
from functools import wraps
from typing import TYPE_CHECKING, Optional
from .download.download_config import DownloadConfig
from .download.streaming_download_manager import (
xbasename,
xdirname,
xet_parse,
xexists,
xgetsize,
xglob,
xgzip_open,
xisdir,
xisfile,
xjoin,
xlistdir,
xnumpy_load,
xopen,
xpandas_read_csv,
xpandas_read_excel,
xPath,
xrelpath,
xsio_loadmat,
xsplit,
xsplitext,
xwalk,
xxml_dom_minidom_parse,
)
from .utils.logging import get_logger
from .utils.patching import patch_submodule
from .utils.py_utils import get_imports
logger = get_logger(__name__)
if TYPE_CHECKING:
from .builder import DatasetBuilder
def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None):
"""Extend the module to support streaming.
We patch some functions in the module to use `fsspec` to support data streaming:
- We use `fsspec.open` to open and read remote files. We patch the module function:
- `open`
- We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module
functions:
- `os.path.join`
- `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator)
The patched functions are replaced with custom functions defined to work with the
:class:`~download.streaming_download_manager.StreamingDownloadManager`.
Args:
module_path: Path to the module to be extended.
        download_config: mainly uses `token` or `storage_options` to support different platforms and auth types.
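    Example:
    A minimal sketch; the module name below is hypothetical and stands for a dynamically loaded dataset script module:
    ```py
    >>> extend_module_for_streaming("datasets_modules.datasets.my_dataset.my_dataset")
    >>> # `open`, `os.path.join`, `glob.glob`, etc. used inside that module now handle remote URLs via fsspec
    ```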
"""
module = importlib.import_module(module_path)
# TODO(QL): always update the module to add subsequent new authentication without removing old ones
if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming:
if isinstance(module._patched_for_streaming, DownloadConfig):
module._patched_for_streaming.token = download_config.token
module._patched_for_streaming.storage_options = download_config.storage_options
return
def wrap_auth(function):
@wraps(function)
def wrapper(*args, **kwargs):
return function(*args, download_config=download_config, **kwargs)
wrapper._decorator_name_ = "wrap_auth"
return wrapper
# open files in a streaming fashion
patch_submodule(module, "open", wrap_auth(xopen)).start()
patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start()
patch_submodule(module, "os.walk", wrap_auth(xwalk)).start()
patch_submodule(module, "glob.glob", wrap_auth(xglob)).start()
# allow to navigate in remote zip files
patch_submodule(module, "os.path.join", xjoin).start()
patch_submodule(module, "os.path.dirname", xdirname).start()
patch_submodule(module, "os.path.basename", xbasename).start()
patch_submodule(module, "os.path.relpath", xrelpath).start()
patch_submodule(module, "os.path.split", xsplit).start()
patch_submodule(module, "os.path.splitext", xsplitext).start()
# allow checks on paths
patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start()
patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start()
patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start()
patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start()
patch_submodule(module, "pathlib.Path", xPath).start()
# file readers
patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start()
patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start()
patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start()
patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start()
patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start()
patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start()
patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start()
module._patched_for_streaming = download_config
def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"):
"""Extend the dataset builder module and the modules imported by it to support streaming.
Args:
builder (:class:`DatasetBuilder`): Dataset builder instance.
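    Example:
    A minimal sketch using the packaged ``csv`` builder; the data file name is hypothetical:
    ```py
    >>> from datasets import load_dataset_builder
    >>> builder = load_dataset_builder("csv", data_files={"train": "train.csv"})
    >>> extend_dataset_builder_for_streaming(builder)
    >>> # the builder's module can now read "train.csv" lazily when generating a streaming dataset
    ```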
"""
# this extends the open and os.path.join functions for data streaming
download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token)
extend_module_for_streaming(builder.__module__, download_config=download_config)
# if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils)
if not builder.__module__.startswith("datasets."): # check that it's not a packaged builder like csv
for imports in get_imports(inspect.getfile(builder.__class__)):
if imports[0] == "internal":
internal_import_name = imports[1]
internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name])
extend_module_for_streaming(internal_module_name, download_config=download_config)
# builders can inherit from other builders that might use streaming functionality
# (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation)
    # but these parent builders are not patched automatically as they are not instantiated, so we patch them here
from .builder import DatasetBuilder
parent_builder_modules = [
cls.__module__
for cls in type(builder).__mro__[1:] # make sure it's not the same module we've already patched
if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__
] # check it's not a standard builder from datasets.builder
for module in parent_builder_modules:
extend_module_for_streaming(module, download_config=download_config)
| datasets-main | src/datasets/streaming.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" DatasetInfo and MetricInfo record information we know about a dataset and a metric.
This includes things that we know about the dataset statically, i.e.:
- description
- canonical location
- does it have validation and tests splits
- size
- etc.
This also includes the things that can and should be computed once we've
processed the dataset:
- number of examples (in each split)
- etc.
"""
import copy
import dataclasses
import json
import os
import posixpath
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import ClassVar, Dict, List, Optional, Union
import fsspec
from huggingface_hub import DatasetCard, DatasetCardData
from . import config
from .features import Features, Value
from .filesystems import is_remote_filesystem
from .splits import SplitDict
from .tasks import TaskTemplate, task_template_from_dict
from .utils import Version
from .utils.logging import get_logger
from .utils.py_utils import asdict, unique_values
logger = get_logger(__name__)
@dataclass
class SupervisedKeysData:
input: str = ""
output: str = ""
@dataclass
class DownloadChecksumsEntryData:
key: str = ""
value: str = ""
class MissingCachedSizesConfigError(Exception):
"""The expected cached sizes of the download file are missing."""
class NonMatchingCachedSizesError(Exception):
"""The prepared split doesn't have expected sizes."""
@dataclass
class PostProcessedInfo:
features: Optional[Features] = None
resources_checksums: Optional[dict] = None
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
@classmethod
def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names})
@dataclass
class DatasetInfo:
"""Information about a dataset.
    `DatasetInfo` documents a dataset, including its name, version, and features.
See the constructor arguments and properties for a full list.
Not all fields are known on construction and may be updated later.
Attributes:
description (`str`):
A description of the dataset.
citation (`str`):
A BibTeX citation of the dataset.
homepage (`str`):
A URL to the official homepage for the dataset.
license (`str`):
The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
features ([`Features`], *optional*):
The features used to specify the dataset's column types.
post_processed (`PostProcessedInfo`, *optional*):
Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
supervised_keys (`SupervisedKeysData`, *optional*):
Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
builder_name (`str`, *optional*):
The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
config_name (`str`, *optional*):
The name of the configuration derived from [`BuilderConfig`].
version (`str` or [`Version`], *optional*):
The version of the dataset.
splits (`dict`, *optional*):
The mapping between split name and metadata.
download_checksums (`dict`, *optional*):
The mapping between the URL to download the dataset's checksums and corresponding metadata.
download_size (`int`, *optional*):
The size of the files to download to generate the dataset, in bytes.
post_processing_size (`int`, *optional*):
Size of the dataset in bytes after post-processing, if any.
dataset_size (`int`, *optional*):
The combined size in bytes of the Arrow tables for all splits.
size_in_bytes (`int`, *optional*):
The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
task_templates (`List[TaskTemplate]`, *optional*):
The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
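    Example:
    A minimal sketch of constructing a `DatasetInfo` by hand; the description and feature names are made up:
    ```py
    >>> from datasets import DatasetInfo, Features, Value
    >>> info = DatasetInfo(
    ...     description="A toy dataset",
    ...     features=Features({"text": Value("string"), "label": Value("int64")}),
    ... )
    >>> info.description
    'A toy dataset'
    ```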
"""
# Set in the dataset scripts
description: str = dataclasses.field(default_factory=str)
citation: str = dataclasses.field(default_factory=str)
homepage: str = dataclasses.field(default_factory=str)
license: str = dataclasses.field(default_factory=str)
features: Optional[Features] = None
post_processed: Optional[PostProcessedInfo] = None
supervised_keys: Optional[SupervisedKeysData] = None
task_templates: Optional[List[TaskTemplate]] = None
# Set later by the builder
builder_name: Optional[str] = None
dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
config_name: Optional[str] = None
version: Optional[Union[str, Version]] = None
# Set later by `download_and_prepare`
splits: Optional[dict] = None
download_checksums: Optional[dict] = None
download_size: Optional[int] = None
post_processing_size: Optional[int] = None
dataset_size: Optional[int] = None
size_in_bytes: Optional[int] = None
_INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
"config_name",
"download_size",
"dataset_size",
"features",
"splits",
]
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
if self.version is not None and not isinstance(self.version, Version):
if isinstance(self.version, str):
self.version = Version(self.version)
else:
self.version = Version.from_dict(self.version)
if self.splits is not None and not isinstance(self.splits, SplitDict):
self.splits = SplitDict.from_split_dict(self.splits)
if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
if isinstance(self.supervised_keys, (tuple, list)):
self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
else:
self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
# Parse and make a list of templates
if self.task_templates is not None:
if isinstance(self.task_templates, (list, tuple)):
templates = [
template if isinstance(template, TaskTemplate) else task_template_from_dict(template)
for template in self.task_templates
]
self.task_templates = [template for template in templates if template is not None]
elif isinstance(self.task_templates, TaskTemplate):
self.task_templates = [self.task_templates]
else:
template = task_template_from_dict(self.task_templates)
self.task_templates = [template] if template is not None else []
# Align task templates with features
if self.task_templates is not None:
self.task_templates = list(self.task_templates)
if self.features is not None:
self.task_templates = [
template.align_with_features(self.features) for template in (self.task_templates)
]
def write_to_directory(
self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None
):
"""Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
Args:
dataset_info_dir (`str`):
Destination directory.
pretty_print (`bool`, defaults to `False`):
If `True`, the JSON will be pretty-printed with the indent level of 4.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem used to download the files from.
<Deprecated version="2.9.0">
`fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
</Deprecated>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.info.write_to_directory("/path/to/directory/")
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs_token_paths = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options)
fs: fsspec.AbstractFileSystem = fs_token_paths[0]
is_local = not is_remote_filesystem(fs)
path_join = os.path.join if is_local else posixpath.join
with fs.open(path_join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
self._dump_info(f, pretty_print=pretty_print)
if self.license:
with fs.open(path_join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
self._dump_license(f)
def _dump_info(self, file, pretty_print=False):
"""Dump info in `file` file-like object open in bytes mode (to support remote files)"""
file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8"))
def _dump_license(self, file):
"""Dump license in `file` file-like object open in bytes mode (to support remote files)"""
file.write(self.license.encode("utf-8"))
@classmethod
def from_merge(cls, dataset_infos: List["DatasetInfo"]):
dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
features = None
supervised_keys = None
task_templates = None
# Find common task templates across all dataset infos
all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None]
if len(all_task_templates) > 1:
task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:]))
elif len(all_task_templates):
task_templates = list(set(all_task_templates[0]))
# If no common task templates found, replace empty list with None
task_templates = task_templates if task_templates else None
return cls(
description=description,
citation=citation,
homepage=homepage,
license=license,
features=features,
supervised_keys=supervised_keys,
task_templates=task_templates,
)
@classmethod
def from_directory(
cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None
) -> "DatasetInfo":
"""Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the [`DatasetInfo`].
This will overwrite all previous metadata.
Args:
dataset_info_dir (`str`):
The directory containing the metadata file. This
should be the root directory of a specific dataset version.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem used to download the files from.
<Deprecated version="2.9.0">
`fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
</Deprecated>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example:
```py
>>> from datasets import DatasetInfo
>>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs_token_paths = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options)
fs: fsspec.AbstractFileSystem = fs_token_paths[0]
logger.info(f"Loading Dataset info from {dataset_info_dir}")
if not dataset_info_dir:
raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
is_local = not is_remote_filesystem(fs)
path_join = os.path.join if is_local else posixpath.join
with fs.open(path_join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
dataset_info_dict = json.load(f)
return cls.from_dict(dataset_info_dict)
@classmethod
def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
self_dict = self.__dict__
self_dict.update(
**{
k: copy.deepcopy(v)
for k, v in other_dataset_info.__dict__.items()
if (v is not None or not ignore_none)
}
)
def copy(self) -> "DatasetInfo":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def _to_yaml_dict(self) -> dict:
yaml_dict = {}
dataset_info_dict = asdict(self)
for key in dataset_info_dict:
if key in self._INCLUDED_INFO_IN_YAML:
value = getattr(self, key)
if hasattr(value, "_to_yaml_list"): # Features, SplitDict
yaml_dict[key] = value._to_yaml_list()
elif hasattr(value, "_to_yaml_string"): # Version
yaml_dict[key] = value._to_yaml_string()
else:
yaml_dict[key] = value
return yaml_dict
@classmethod
def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
yaml_data = copy.deepcopy(yaml_data)
if yaml_data.get("features") is not None:
yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
if yaml_data.get("splits") is not None:
yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in yaml_data.items() if k in field_names})
class DatasetInfosDict(Dict[str, DatasetInfo]):
def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
total_dataset_infos = {}
dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
dataset_readme_path = os.path.join(dataset_infos_dir, "README.md")
if not overwrite:
total_dataset_infos = self.from_directory(dataset_infos_dir)
total_dataset_infos.update(self)
if os.path.exists(dataset_infos_path):
# for backward compatibility, let's update the JSON file if it exists
with open(dataset_infos_path, "w", encoding="utf-8") as f:
dataset_infos_dict = {
config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
}
json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
# Dump the infos in the YAML part of the README.md file
if os.path.exists(dataset_readme_path):
dataset_card = DatasetCard.load(dataset_readme_path)
dataset_card_data = dataset_card.data
else:
dataset_card = None
dataset_card_data = DatasetCardData()
if total_dataset_infos:
total_dataset_infos.to_dataset_card_data(dataset_card_data)
dataset_card = (
DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
)
dataset_card.save(Path(dataset_readme_path))
@classmethod
def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
# Load the info from the YAML part of README.md
if os.path.exists(os.path.join(dataset_infos_dir, "README.md")):
dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / "README.md").data
if "dataset_info" in dataset_card_data:
return cls.from_dataset_card_data(dataset_card_data)
if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
# this is just to have backward compatibility with dataset_infos.json files
with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
return cls(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
else:
return cls()
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
if isinstance(dataset_card_data["dataset_info"], list):
return cls(
{
dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
dataset_info_yaml_dict
)
for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
}
)
else:
dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
return cls({dataset_info.config_name: dataset_info})
else:
return cls()
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
# first get existing metadata info
if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
dataset_metadata_infos = {
dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
}
elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
dataset_metadata_infos = {
config_metadata["config_name"]: config_metadata
for config_metadata in dataset_card_data["dataset_info"]
}
else:
dataset_metadata_infos = {}
# update/rewrite existing metadata info with the one to dump
total_dataset_infos = {
**dataset_metadata_infos,
**{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
}
            # the config_name from the dataset_infos_dict takes precedence over the config_name of the DatasetInfo
for config_name, dset_info_yaml_dict in total_dataset_infos.items():
dset_info_yaml_dict["config_name"] = config_name
if len(total_dataset_infos) == 1:
# use a struct instead of a list of configurations, since there's only one
dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
config_name = dataset_card_data["dataset_info"].pop("config_name", None)
if config_name != "default":
                    # if config_name is not "default", preserve it and put it in first position
dataset_card_data["dataset_info"] = {
"config_name": config_name,
**dataset_card_data["dataset_info"],
}
else:
dataset_card_data["dataset_info"] = []
for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
# add the config_name field in first position
dataset_info_yaml_dict.pop("config_name", None)
dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
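# Illustrative sketch (not part of the original module): how a DatasetInfosDict with a single
# "default" config round-trips through the "dataset_info" field of a dataset card. The feature
# name used below is an assumption made only for this example.
def _example_dataset_infos_round_trip():  # pragma: no cover - documentation-only sketch
    card_data = DatasetCardData()  # same huggingface_hub class used by write_to_directory above
    infos = DatasetInfosDict({"default": DatasetInfo(features=Features({"text": Value("string")}))})
    infos.to_dataset_card_data(card_data)  # a single config is stored as a struct, not a list
    recovered = DatasetInfosDict.from_dataset_card_data(card_data)
    assert "text" in recovered["default"].features
    return recovered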
@dataclass
class MetricInfo:
"""Information about a metric.
`MetricInfo` documents a metric, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
# Set in the dataset scripts
description: str
citation: str
features: Features
inputs_description: str = dataclasses.field(default_factory=str)
homepage: str = dataclasses.field(default_factory=str)
license: str = dataclasses.field(default_factory=str)
codebase_urls: List[str] = dataclasses.field(default_factory=list)
reference_urls: List[str] = dataclasses.field(default_factory=list)
streamable: bool = False
format: Optional[str] = None
# Set later by the builder
metric_name: Optional[str] = None
config_name: Optional[str] = None
experiment_id: Optional[str] = None
def __post_init__(self):
if self.format is not None:
for key, value in self.features.items():
if not isinstance(value, Value):
                    raise ValueError(
                        f"When using '{self.format}' format, all features should be a `datasets.Value` feature. "
                        f"Here {key} is an instance of {value.__class__.__name__}"
)
def write_to_directory(self, metric_info_dir, pretty_print=False):
"""Write `MetricInfo` as JSON to `metric_info_dir`.
        Also save the license separately in LICENSE.
        If `pretty_print` is `True`, the JSON will be pretty-printed with an indent level of 4.
Example:
```py
>>> from datasets import load_metric
>>> metric = load_metric("accuracy")
>>> metric.info.write_to_directory("/path/to/directory/")
```
"""
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
json.dump(asdict(self), f, indent=4 if pretty_print else None)
if self.license:
with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
f.write(self.license)
@classmethod
def from_directory(cls, metric_info_dir) -> "MetricInfo":
"""Create MetricInfo from the JSON file in `metric_info_dir`.
Args:
            metric_info_dir: `str` The directory containing the metadata file. This
                should be the root directory of a specific metric version.
Example:
```py
>>> from datasets import MetricInfo
>>> metric_info = MetricInfo.from_directory("/path/to/directory/")
```
"""
logger.info(f"Loading Metric info from {metric_info_dir}")
if not metric_info_dir:
raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.")
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
metric_info_dict = json.load(f)
return cls.from_dict(metric_info_dict)
@classmethod
def from_dict(cls, metric_info_dict: dict) -> "MetricInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
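# Illustrative sketch (not part of the original module): MetricInfo.from_dict silently drops
# unknown keys, so older serialized files with extra fields still load. The key names below
# are assumptions made only for this example.
def _example_metric_info_from_dict():  # pragma: no cover - documentation-only sketch
    serialized = {
        "description": "demo metric",
        "citation": "",
        "features": Features({"predictions": Value("int32"), "references": Value("int32")}),
        "some_legacy_field": "ignored",  # not a MetricInfo field, filtered out by from_dict
    }
    info = MetricInfo.from_dict(serialized)
    assert info.description == "demo metric"
    return info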
| datasets-main | src/datasets/info.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""
Hashing function for dataset keys using `hashlib.md5`
Requirements for the hash function:
- Provide a uniformly distributed hash over the hash space
- Be adequately fast
- Work with multiple input types (in this case, `str`, `int` or `bytes`)
- Be platform independent (generate the same hash on different OSes and systems)
The hashing function provides a unique 128-bit integer hash of the key provided.
The split name is used as the hash salt to avoid identical hashes
in different splits caused by identical keys
"""
import hashlib
from typing import Union
def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
"""
Returns the input hash_data in its bytes form
Args:
hash_data: the hash salt/key to be converted to bytes
"""
if isinstance(hash_data, bytes):
        # Data is already in bytes, return it as is
return hash_data
elif isinstance(hash_data, str):
        # We keep the data as it is so it can later be encoded to UTF-8
# However replace `\\` with `/` for Windows compatibility
hash_data = hash_data.replace("\\", "/")
elif isinstance(hash_data, int):
hash_data = str(hash_data)
else:
# If data is not of the required type, raise error
raise InvalidKeyError(hash_data)
return hash_data.encode("utf-8")
class InvalidKeyError(Exception):
    """Raised when a given key has an invalid datatype."""
def __init__(self, hash_data):
self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
self.suffix = "\nKeys should be either str, int or bytes type"
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
class DuplicatedKeysError(Exception):
    """Raised when duplicate keys are found."""
def __init__(self, key, duplicate_key_indices, fix_msg=""):
self.key = key
self.duplicate_key_indices = duplicate_key_indices
self.fix_msg = fix_msg
self.prefix = "Found multiple examples generated with the same key"
if len(duplicate_key_indices) <= 20:
            self.err_msg = f"\nThe examples at indices {', '.join(duplicate_key_indices)} have the key {key}"
        else:
            self.err_msg = f"\nThe examples at indices {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
self.suffix = "\n" + fix_msg if fix_msg else ""
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
class KeyHasher:
    """KeyHasher class for providing hashes using md5"""
def __init__(self, hash_salt: str):
self._split_md5 = hashlib.md5(_as_bytes(hash_salt))
    def hash(self, key: Union[str, int, bytes]) -> int:
        """Returns a unique 128-bit hash of the input key
Args:
key: the input key to be hashed (should be str, int or bytes)
Returns: 128-bit int hash key"""
md5 = self._split_md5.copy()
byte_key = _as_bytes(key)
md5.update(byte_key)
        # Convert the hexadecimal digest to a 128-bit integer
return int(md5.hexdigest(), 16)
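# Illustrative sketch (not part of the original module): the split name salts the hasher,
# so identical keys in different splits produce different hashes. The split and key names
# below are assumptions made only for this example.
def _example_key_hashing():  # pragma: no cover - documentation-only sketch
    train_hasher = KeyHasher("train")
    test_hasher = KeyHasher("test")
    assert train_hasher.hash("example-0") != test_hasher.hash("example-0")
    return train_hasher.hash(42)  # `int` and `bytes` keys are accepted as well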
| datasets-main | src/datasets/keyhash.py |
from .parallel import parallel_backend, parallel_map, ParallelBackendConfig # noqa F401
| datasets-main | src/datasets/parallel/__init__.py |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
"""
**Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either
multiprocessing.Pool or joblib for parallelization.
Args:
function (`Callable[[Any], Any]`): Function to be applied to `iterable`.
iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to.
num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib).
types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements.
disable_tqdm (`bool`): Whether to disable the tqdm progressbar.
desc (`str`): Prefix for the tqdm progressbar.
single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`.
Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an
element of `iterable`, and `rank` is used for progress bar.
"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
)
return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
for index in range(num_proc):
div = len(iterable) // num_proc
mod = len(iterable) % num_proc
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing the input iterable among processes. "
            f"Total number of objects: {len(iterable)}, "
            f"number of objects after splitting: {sum(len(i[1]) for i in split_kwds)}"
)
logger.info(
f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
)
initargs, initializer = None, None
if not disable_tqdm:
initargs, initializer = (RLock(),), tqdm.set_lock
with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
mapped = pool.map(single_map_nested_func, split_kwds)
logger.info(f"Finished {num_proc} processes")
mapped = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"Unpacked {len(mapped)} objects")
return mapped
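# Illustrative sketch (not part of the original module): the contiguous-split arithmetic used
# above, shown on its own. The item and process counts are assumptions made only for this example.
def _example_contiguous_split_sizes(num_items: int = 10, num_proc: int = 3):  # pragma: no cover
    div, mod = divmod(num_items, num_proc)
    # the first `mod` slices get one extra item, so every item is covered exactly once
    sizes = [div + (1 if index < mod else 0) for index in range(num_proc)]
    assert sum(sizes) == num_items  # e.g. [4, 3, 3] for 10 items over 3 processes
    return sizes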
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm cannot be applied accurately to joblib,
    # and doing so would require monkey-patching joblib's internal classes, which are subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
return joblib.Parallel()(
joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
)
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
"""
**Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization
implemented by joblib.
Args:
        backend_name (str): Name of the backend for the parallelization implementation; it has to be supported by joblib.
Example usage:
```py
with parallel_backend('spark'):
dataset = load_dataset(..., num_proc=2)
```
"""
ParallelBackendConfig.backend_name = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
ParallelBackendConfig.backend_name = None
| datasets-main | src/datasets/parallel/parallel.py |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
label_schema: ClassVar[Features] = Features(
{
"answers": Sequence(
{
"text": Value("string"),
"answer_start": Value("int32"),
}
)
}
)
question_column: str = "question"
context_column: str = "context"
answers_column: str = "answers"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
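# Illustrative sketch (not part of the original module): mapping custom column names back to
# the canonical schema. The column names "q"/"ctx"/"ans" are assumptions made for this example.
def _example_question_answering_mapping():  # pragma: no cover - documentation-only sketch
    template = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
    assert template.column_mapping == {"q": "question", "ctx": "context", "ans": "answers"}
    return template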
| datasets-main | src/datasets/tasks/question_answering.py |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"image": Image()})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
image_column: str = "image"
label_column: str = "labels"
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
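# Illustrative sketch (not part of the original module): aligning the template with dataset
# features so the label schema picks up the dataset's own ClassLabel names. The class names
# "cat"/"dog" are assumptions made only for this example.
def _example_image_classification_alignment():  # pragma: no cover - documentation-only sketch
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    template = ImageClassification().align_with_features(features)
    assert template.label_schema["labels"].names == ["cat", "dog"]
    return template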
| datasets-main | src/datasets/tasks/image_classification.py |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({})
text_column: str = "text"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text"}
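# Illustrative sketch (not part of the original module): a dataset whose text lives in a
# differently named column. The column name "content" is an assumption made for this example.
def _example_language_modeling_mapping():  # pragma: no cover - documentation-only sketch
    template = LanguageModeling(text_column="content")
    assert template.column_mapping == {"content": "text"}
    return template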
| datasets-main | src/datasets/tasks/language_modeling.py |
from typing import Optional
from ..utils.logging import get_logger
from .audio_classificiation import AudioClassification
from .automatic_speech_recognition import AutomaticSpeechRecognition
from .base import TaskTemplate
from .image_classification import ImageClassification
from .language_modeling import LanguageModeling
from .question_answering import QuestionAnsweringExtractive
from .summarization import Summarization
from .text_classification import TextClassification
__all__ = [
"AutomaticSpeechRecognition",
"AudioClassification",
"ImageClassification",
"LanguageModeling",
"QuestionAnsweringExtractive",
"Summarization",
"TaskTemplate",
"TextClassification",
]
logger = get_logger(__name__)
NAME2TEMPLATE = {
AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
AudioClassification.task: AudioClassification,
ImageClassification.task: ImageClassification,
LanguageModeling.task: LanguageModeling,
QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
Summarization.task: Summarization,
TextClassification.task: TextClassification,
}
def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
"""Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary."""
    task_name = task_template_dict.get("task")
    template = NAME2TEMPLATE.get(task_name) if task_name is not None else None
    if template is None:
        logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
        return None
    return template.from_dict(task_template_dict)
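# Illustrative sketch (not part of the original module): rebuilding a template from its
# serialized form. The column name "sentence" is an assumption made only for this example.
def _example_task_template_from_dict():  # pragma: no cover - documentation-only sketch
    template = task_template_from_dict({"task": "text-classification", "text_column": "sentence"})
    assert isinstance(template, TextClassification) and template.text_column == "sentence"
    return template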
| datasets-main | src/datasets/tasks/__init__.py |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
text_column: str = "text"
label_column: str = "labels"
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| datasets-main | src/datasets/tasks/text_classification.py |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"audio": Audio()})
label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
audio_column: str = "audio"
transcription_column: str = "transcription"
def align_with_features(self, features):
if self.audio_column not in features:
raise ValueError(f"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column], Audio):
raise ValueError(f"Column {self.audio_column} is not an Audio type.")
task_template = copy.deepcopy(self)
input_schema = self.input_schema.copy()
input_schema["audio"] = features[self.audio_column]
task_template.__dict__["input_schema"] = input_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
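# Illustrative sketch (not part of the original module): aligning the template so the audio
# input schema picks up the dataset's own decoding parameters. The 16 kHz sampling rate is an
# assumption made only for this example.
def _example_automatic_speech_recognition_alignment():  # pragma: no cover - documentation-only sketch
    features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
    template = AutomaticSpeechRecognition().align_with_features(features)
    assert template.input_schema["audio"].sampling_rate == 16_000
    return template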
| datasets-main | src/datasets/tasks/automatic_speech_recognition.py |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({"summary": Value("string")})
text_column: str = "text"
summary_column: str = "summary"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
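# Illustrative sketch (not part of the original module): a dataset with custom column names for
# the document and its summary. The names "article"/"highlights" are assumptions made only for
# this example.
def _example_summarization_mapping():  # pragma: no cover - documentation-only sketch
    template = Summarization(text_column="article", summary_column="highlights")
    assert template.column_mapping == {"article": "text", "highlights": "summary"}
    return template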
| datasets-main | src/datasets/tasks/summarization.py |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"audio": Audio()})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
audio_column: str = "audio"
label_column: str = "labels"
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| datasets-main | src/datasets/tasks/audio_classificiation.py |
import abc
import copy
import dataclasses
from dataclasses import dataclass
from typing import ClassVar, Dict, Type, TypeVar
from ..features import Features
T = TypeVar("T", bound="TaskTemplate")
@dataclass(frozen=True)
class TaskTemplate(abc.ABC):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str
input_schema: ClassVar[Features]
label_schema: ClassVar[Features]
def align_with_features(self: T, features: Features) -> T:
"""
Align features with the task template.
"""
# No-op
return copy.deepcopy(self)
@property
def features(self) -> Features:
return Features(**self.input_schema, **self.label_schema)
@property
@abc.abstractmethod
def column_mapping(self) -> Dict[str, str]:
raise NotImplementedError
@classmethod
def from_dict(cls: Type[T], template_dict: dict) -> T:
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in template_dict.items() if k in field_names})
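# Illustrative sketch (not part of the original module): from_dict keeps only declared dataclass
# fields, so unknown keys in a serialized template dict are ignored. The dummy subclass below is
# a hypothetical example, not part of the library.
@dataclass(frozen=True)
class _ExampleTemplate(TaskTemplate):  # pragma: no cover - documentation-only sketch
    task: str = "example-task"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {}


def _example_template_from_dict_filtering():  # pragma: no cover - documentation-only sketch
    template = _ExampleTemplate.from_dict({"task": "example-task", "unknown_key": 123})
    assert template.task == "example-task"
    return template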
| datasets-main | src/datasets/tasks/base.py |