python_code (string, 0–290k chars) | repo_name (30 values) | file_path (string, 6–125 chars)
---|---|---|
from allennlp.common import Params
from allennlp.data import Instance, Token
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.samplers import MaxTokensBatchSampler
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.data.dataloader import PyTorchDataLoader
from .sampler_test import SamplerTest
class TestMaxTokensSampler(SamplerTest):
def test_create_batches_groups_correctly(self):
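# With max_tokens=8 and padding_noise=0, the five instances (sorted by length) should be
# packed into exactly the three batches listed in expected_groups below.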
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = MaxTokensBatchSampler(
dataset, max_tokens=8, padding_noise=0, sorting_keys=["text"]
)
grouped_instances = []
for indices in sampler:
grouped_instances.append([self.instances[idx] for idx in indices])
expected_groups = [
[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]],
]
for group in grouped_instances:
assert group in expected_groups
expected_groups.remove(group)
assert expected_groups == []
def test_guess_sorting_key_picks_the_longest_key(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = MaxTokensBatchSampler(dataset, max_tokens=8, padding_noise=0)
instances = []
short_tokens = [Token(t) for t in ["what", "is", "this", "?"]]
long_tokens = [Token(t) for t in ["this", "is", "a", "not", "very", "long", "passage"]]
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
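# No sorting key was given, so the sampler has to guess one from the instances;
# the field with the most tokens ("passage") should be chosen.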
assert sampler.sorting_keys is None
sampler._guess_sorting_keys(instances)
assert sampler.sorting_keys == ["passage"]
def test_from_params(self):
dataset = AllennlpDataset(self.instances, self.vocab)
params = Params({})
sorting_keys = ["s1", "s2"]
params["sorting_keys"] = sorting_keys
params["max_tokens"] = 32
sampler = MaxTokensBatchSampler.from_params(params=params, data_source=dataset)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.1
assert sampler.max_tokens == 32
params = Params({"sorting_keys": sorting_keys, "padding_noise": 0.5, "max_tokens": 100})
sampler = MaxTokensBatchSampler.from_params(params=params, data_source=dataset)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.5
assert sampler.max_tokens == 100
def test_batch_count(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = MaxTokensBatchSampler(
dataset, max_tokens=8, padding_noise=0, sorting_keys=["text"]
)
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
dataloader = PyTorchDataLoader(
dataset, batch_sampler=sampler, collate_fn=lambda x: Batch(x)
)
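# As in test_create_batches_groups_correctly, the five instances fit into three
# batches under the 8-token budget, so the data loader reports a length of 3.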
assert len(dataloader) == 3
| allennlp-master | tests/data/samplers/max_tokens_batch_sampler_test.py |
| allennlp-master | tests/data/samplers/__init__.py |
from typing import List, Iterable, Dict, Union
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary, Instance, Token, Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class LazyIterable:
def __init__(self, instances):
self._instances = instances
def __iter__(self):
return (instance for instance in self._instances)
class SamplerTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.token_indexers = {"tokens": SingleIdTokenIndexer()}
self.vocab = Vocabulary()
self.this_index = self.vocab.add_token_to_namespace("this")
self.is_index = self.vocab.add_token_to_namespace("is")
self.a_index = self.vocab.add_token_to_namespace("a")
self.sentence_index = self.vocab.add_token_to_namespace("sentence")
self.another_index = self.vocab.add_token_to_namespace("another")
self.yet_index = self.vocab.add_token_to_namespace("yet")
self.very_index = self.vocab.add_token_to_namespace("very")
self.long_index = self.vocab.add_token_to_namespace("long")
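# Five instances with 4, 4, 3, 9, and 1 tokens respectively; the sampler tests rely on these lengths.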
instances = [
self.create_instance(["this", "is", "a", "sentence"]),
self.create_instance(["this", "is", "another", "sentence"]),
self.create_instance(["yet", "another", "sentence"]),
self.create_instance(
["this", "is", "a", "very", "very", "very", "very", "long", "sentence"]
),
self.create_instance(["sentence"]),
]
self.instances = instances
self.lazy_instances = LazyIterable(instances)
def create_instance(self, str_tokens: List[str]):
tokens = [Token(t) for t in str_tokens]
instance = Instance({"text": TextField(tokens, self.token_indexers)})
return instance
def create_instances_from_token_counts(self, token_counts: List[int]) -> List[Instance]:
return [self.create_instance(["word"] * count) for count in token_counts]
def get_batches_stats(self, batches: Iterable[Batch]) -> Dict[str, Union[int, List[int]]]:
grouped_instances = [batch.instances for batch in batches]
group_lengths = [len(group) for group in grouped_instances]
sample_sizes = []
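# A batch's sample size is its longest padded sequence length multiplied by the
# number of instances it contains.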
for batch in batches:
batch_sequence_length = max(
instance.get_padding_lengths()["text"]["tokens___tokens"]
for instance in batch.instances
)
sample_sizes.append(batch_sequence_length * len(batch.instances))
return {
"batch_lengths": group_lengths,
"total_instances": sum(group_lengths),
"sample_sizes": sample_sizes,
}
def assert_instances_are_correct(self, candidate_instances):
# First we need to remove padding tokens from the candidates.
candidate_instances = [
tuple(w for w in instance if w != 0) for instance in candidate_instances
]
expected_instances = [
tuple(instance.fields["text"]._indexed_tokens["tokens"]["tokens"])
for instance in self.instances
]
assert set(candidate_instances) == set(expected_instances)
| allennlp-master | tests/data/samplers/sampler_test.py |
from allennlp.common import Params
from allennlp.data import Instance, Token
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.samplers import BucketBatchSampler
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.data.dataloader import PyTorchDataLoader
from .sampler_test import SamplerTest
class TestBucketSampler(SamplerTest):
def test_create_batches_groups_correctly(self):
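# With batch_size=2 and no padding noise, bucketing by length should produce the
# three groups listed in expected_groups below.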
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(dataset, batch_size=2, padding_noise=0, sorting_keys=["text"])
grouped_instances = []
for indices in sampler:
grouped_instances.append([self.instances[idx] for idx in indices])
expected_groups = [
[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]],
]
for group in grouped_instances:
assert group in expected_groups
expected_groups.remove(group)
assert expected_groups == []
def test_guess_sorting_key_picks_the_longest_key(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(dataset, batch_size=2, padding_noise=0)
instances = []
short_tokens = [Token(t) for t in ["what", "is", "this", "?"]]
long_tokens = [Token(t) for t in ["this", "is", "a", "not", "very", "long", "passage"]]
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
assert sampler.sorting_keys is None
sampler._guess_sorting_keys(instances)
assert sampler.sorting_keys == ["passage"]
def test_from_params(self):
dataset = AllennlpDataset(self.instances, self.vocab)
params = Params({})
sorting_keys = ["s1", "s2"]
params["sorting_keys"] = sorting_keys
params["batch_size"] = 32
sampler = BucketBatchSampler.from_params(params=params, data_source=dataset)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.1
assert sampler.batch_size == 32
params = Params(
{
"sorting_keys": sorting_keys,
"padding_noise": 0.5,
"batch_size": 100,
"drop_last": True,
}
)
sampler = BucketBatchSampler.from_params(params=params, data_source=dataset)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.5
assert sampler.batch_size == 100
assert sampler.drop_last
def test_drop_last_works(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(
dataset,
batch_size=2,
padding_noise=0,
sorting_keys=["text"],
drop_last=True,
)
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
dataloader = PyTorchDataLoader(
dataset, batch_sampler=sampler, collate_fn=lambda x: Batch(x)
)
batches = [batch for batch in iter(dataloader)]
stats = self.get_batches_stats(batches)
# all batches have length batch_size
assert all(batch_len == 2 for batch_len in stats["batch_lengths"])
# we should have lost one instance by skipping the last batch
assert stats["total_instances"] == len(self.instances) - 1
def test_batch_count(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(dataset, batch_size=2, padding_noise=0, sorting_keys=["text"])
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
dataloader = PyTorchDataLoader(
dataset, batch_sampler=sampler, collate_fn=lambda x: Batch(x)
)
assert len(dataloader) == 3
def test_batch_count_with_drop_last(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(
dataset,
batch_size=2,
padding_noise=0,
sorting_keys=["text"],
drop_last=True,
)
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
dataloader = PyTorchDataLoader(
dataset, batch_sampler=sampler, collate_fn=lambda x: Batch(x)
)
assert len(dataloader) == 2
| allennlp-master | tests/data/samplers/bucket_batch_sampler_test.py |
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token
from allennlp.data.fields import TextField, IndexField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestIndexField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.text = TextField(
[Token(t) for t in ["here", "is", "a", "sentence", "."]],
{"words": SingleIdTokenIndexer("words")},
)
def test_as_tensor_converts_field_correctly(self):
index_field = IndexField(4, self.text)
tensor = index_field.as_tensor(index_field.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_equal(tensor, numpy.array([4]))
def test_index_field_raises_on_incorrect_label_type(self):
with pytest.raises(ConfigurationError):
_ = IndexField("hello", self.text)
def test_index_field_empty_field_works(self):
index_field = IndexField(4, self.text)
empty_index = index_field.empty_field()
assert empty_index.sequence_index == -1
def test_printing_doesnt_crash(self):
print(self.text)
def test_equality(self):
index_field1 = IndexField(4, self.text)
index_field2 = IndexField(4, self.text)
index_field3 = IndexField(
4,
TextField(
[Token(t) for t in ["AllenNLP", "is", "the", "bomb", "!"]],
{"words": SingleIdTokenIndexer("words")},
),
)
assert index_field1 == 4
assert index_field1 == index_field1
assert index_field1 == index_field2
assert index_field1 != index_field3
assert index_field2 != index_field3
assert index_field3 == index_field3
| allennlp-master | tests/data/fields/index_field_test.py |
import numpy
import torch
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data.fields import ArrayField, ListField
class TestArrayField(AllenNlpTestCase):
def test_get_padding_lengths_correctly_returns_ordered_shape(self):
shape = [3, 4, 5, 6]
array = numpy.zeros(shape)
array_field = ArrayField(array)
lengths = array_field.get_padding_lengths()
for i in range(len(lengths)):
assert lengths["dimension_{}".format(i)] == shape[i]
def test_as_tensor_handles_larger_padding_dimensions(self):
shape = [3, 4]
array = numpy.ones(shape)
array_field = ArrayField(array)
padded_tensor = (
array_field.as_tensor({"dimension_0": 5, "dimension_1": 6}).detach().cpu().numpy()
)
numpy.testing.assert_array_equal(padded_tensor[:3, :4], array)
numpy.testing.assert_array_equal(padded_tensor[3:, 4:], 0.0)
def test_padding_handles_list_fields(self):
array1 = ArrayField(numpy.ones([2, 3]))
array2 = ArrayField(numpy.ones([1, 5]))
empty_array = array1.empty_field()
list_field = ListField([array1, array2, empty_array])
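# The three arrays are padded to a common (2, 5) shape, so the stacked tensor is
# (3, 2, 5) with zeros filling the padded positions.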
returned_tensor = (
list_field.as_tensor(list_field.get_padding_lengths()).detach().cpu().numpy()
)
correct_tensor = numpy.array(
[
[[1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0]],
[[1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]],
]
)
numpy.testing.assert_array_equal(returned_tensor, correct_tensor)
def test_padding_handles_list_fields_with_padding_values(self):
array1 = ArrayField(numpy.ones([2, 3]), padding_value=-1)
array2 = ArrayField(numpy.ones([1, 5]), padding_value=-1)
empty_array = array1.empty_field()
list_field = ListField([array1, array2, empty_array])
returned_tensor = (
list_field.as_tensor(list_field.get_padding_lengths()).detach().cpu().numpy()
)
correct_tensor = numpy.array(
[
[[1.0, 1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, -1.0, -1.0]],
[[1.0, 1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]],
[[-1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]],
]
)
numpy.testing.assert_array_equal(returned_tensor, correct_tensor)
def test_printing_doesnt_crash(self):
array = ArrayField(numpy.ones([2, 3]), padding_value=-1)
print(array)
def test_as_tensor_works_with_scalar(self):
array = ArrayField(numpy.asarray(42))
returned_tensor = array.as_tensor(array.get_padding_lengths())
current_tensor = numpy.asarray(42)
numpy.testing.assert_array_equal(returned_tensor, current_tensor)
def test_as_tensor_with_scalar_keeps_dtype(self):
array = ArrayField(numpy.asarray(42, dtype=numpy.float32))
returned_tensor = array.as_tensor(array.get_padding_lengths())
assert returned_tensor.dtype == torch.float32
def test_alternative_dtypes(self):
shape = [3, 4, 5, 6]
array = numpy.zeros(shape)
# Setting dtype to numpy.int64 should produce a torch.LongTensor when field is converted to
# a tensor
array_field1 = ArrayField(array, dtype=numpy.int64)
returned_tensor1 = array_field1.as_tensor(array_field1.get_padding_lengths())
assert returned_tensor1.dtype == torch.int64
# Setting dtype to numpy.uint8 should produce a torch.ByteTensor when field is converted to
# a tensor
array_field2 = ArrayField(array, dtype=numpy.uint8)
returned_tensor2 = array_field2.as_tensor(array_field2.get_padding_lengths())
assert returned_tensor2.dtype == torch.uint8
# Padding should not affect dtype
padding_lengths = {"dimension_" + str(i): 10 for i, _ in enumerate(shape)}
padded_tensor = array_field2.as_tensor(padding_lengths)
assert padded_tensor.dtype == torch.uint8
# Empty fields should have the same dtype
empty_field = array_field2.empty_field()
assert empty_field.dtype == array_field2.dtype
def test_len_works_with_scalar(self):
array = ArrayField(numpy.asarray(42))
assert len(array) == 1
def test_eq(self):
array1 = ArrayField(numpy.asarray([1, 1, 1]))
array2 = ArrayField(numpy.asarray([[1, 1, 1], [1, 1, 1]]))
array3 = ArrayField(numpy.asarray([1, 1, 2]))
array4 = ArrayField(numpy.asarray([1, 1, 1]))
assert array1 != array2
assert array1 != array3
assert array1 == array4
| allennlp-master | tests/data/fields/array_field_test.py |
from typing import Dict
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary, Instance
from allennlp.data.fields import TextField, LabelField, ListField, IndexField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.data.tokenizers import SpacyTokenizer
from allennlp.models import Model
from allennlp.modules import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
class DummyModel(Model):
"""
Performs a common operation (embedding) that won't work on an empty tensor.
Returns an arbitrary loss.
"""
def __init__(self, vocab: Vocabulary) -> None:
super().__init__(vocab)
weight = torch.ones(vocab.get_vocab_size(), 10)
token_embedding = Embedding(
num_embeddings=vocab.get_vocab_size(), embedding_dim=10, weight=weight, trainable=False
)
self.embedder = BasicTextFieldEmbedder({"words": token_embedding})
def forward( # type: ignore
self, list_tensor: Dict[str, torch.LongTensor]
) -> Dict[str, torch.Tensor]:
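# Running the embedder must succeed even when the list field was empty (it is padded
# to a minimally-sized tensor rather than omitted); the loss value itself is irrelevant.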
self.embedder(list_tensor)
return {"loss": 1.0}
class TestListField(AllenNlpTestCase):
def setup_method(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("this", "words")
self.vocab.add_token_to_namespace("is", "words")
self.vocab.add_token_to_namespace("a", "words")
self.vocab.add_token_to_namespace("sentence", "words")
self.vocab.add_token_to_namespace("s", "characters")
self.vocab.add_token_to_namespace("e", "characters")
self.vocab.add_token_to_namespace("n", "characters")
self.vocab.add_token_to_namespace("t", "characters")
self.vocab.add_token_to_namespace("c", "characters")
for label in ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]:
self.vocab.add_token_to_namespace(label, "labels")
self.word_indexer = {"words": SingleIdTokenIndexer("words")}
self.words_and_characters_indexers = {
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
}
self.field1 = TextField(
[Token(t) for t in ["this", "is", "a", "sentence"]], self.word_indexer
)
self.field2 = TextField(
[Token(t) for t in ["this", "is", "a", "different", "sentence"]], self.word_indexer
)
self.field3 = TextField(
[Token(t) for t in ["this", "is", "another", "sentence"]], self.word_indexer
)
self.empty_text_field = self.field1.empty_field()
self.index_field = IndexField(1, self.field1)
self.empty_index_field = self.index_field.empty_field()
self.sequence_label_field = SequenceLabelField([1, 1, 0, 1], self.field1)
self.empty_sequence_label_field = self.sequence_label_field.empty_field()
tokenizer = SpacyTokenizer()
tokens = tokenizer.tokenize("Foo")
text_field = TextField(tokens, self.word_indexer)
empty_list_field = ListField([text_field.empty_field()])
empty_fields = {"list_tensor": empty_list_field}
self.empty_instance = Instance(empty_fields)
non_empty_list_field = ListField([text_field])
non_empty_fields = {"list_tensor": non_empty_list_field}
self.non_empty_instance = Instance(non_empty_fields)
super().setup_method()
def test_get_padding_lengths(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
lengths = list_field.get_padding_lengths()
assert lengths == {"num_fields": 3, "list_words___tokens": 5}
def test_list_field_can_handle_empty_text_fields(self):
list_field = ListField([self.field1, self.field2, self.empty_text_field])
list_field.index(self.vocab)
tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(
tensor_dict["words"]["tokens"].detach().cpu().numpy(),
numpy.array([[2, 3, 4, 5, 0], [2, 3, 4, 1, 5], [0, 0, 0, 0, 0]]),
)
def test_list_field_can_handle_empty_index_fields(self):
list_field = ListField([self.index_field, self.index_field, self.empty_index_field])
list_field.index(self.vocab)
tensor = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(
tensor.detach().cpu().numpy(), numpy.array([[1], [1], [-1]])
)
def test_list_field_can_handle_empty_sequence_label_fields(self):
list_field = ListField(
[self.sequence_label_field, self.sequence_label_field, self.empty_sequence_label_field]
)
list_field.index(self.vocab)
tensor = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(
tensor.detach().cpu().numpy(), numpy.array([[1, 1, 0, 1], [1, 1, 0, 1], [0, 0, 0, 0]])
)
def test_all_fields_padded_to_max_length(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][0].detach().cpu().numpy(), numpy.array([2, 3, 4, 5, 0])
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][1].detach().cpu().numpy(), numpy.array([2, 3, 4, 1, 5])
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][2].detach().cpu().numpy(), numpy.array([2, 3, 1, 5, 0])
)
def test_nested_list_fields_are_padded_correctly(self):
nested_field1 = ListField([LabelField(c) for c in ["a", "b", "c", "d", "e"]])
nested_field2 = ListField([LabelField(c) for c in ["f", "g", "h", "i", "j", "k"]])
list_field = ListField([nested_field1.empty_field(), nested_field1, nested_field2])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
assert padding_lengths == {"num_fields": 3, "list_num_fields": 6}
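# Padded positions are filled with -1, the value of an empty LabelField.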
tensor = list_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_almost_equal(
tensor, [[-1, -1, -1, -1, -1, -1], [0, 1, 2, 3, 4, -1], [5, 6, 7, 8, 9, 10]]
)
def test_fields_can_pad_to_greater_than_max_length(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
padding_lengths["list_words___tokens"] = 7
padding_lengths["num_fields"] = 5
tensor_dict = list_field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][0].detach().cpu().numpy(),
numpy.array([2, 3, 4, 5, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][1].detach().cpu().numpy(),
numpy.array([2, 3, 4, 1, 5, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][2].detach().cpu().numpy(),
numpy.array([2, 3, 1, 5, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][3].detach().cpu().numpy(),
numpy.array([0, 0, 0, 0, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][4].detach().cpu().numpy(),
numpy.array([0, 0, 0, 0, 0, 0, 0]),
)
def test_as_tensor_can_handle_multiple_token_indexers(self):
self.field1._token_indexers = self.words_and_characters_indexers
self.field2._token_indexers = self.words_and_characters_indexers
self.field3._token_indexers = self.words_and_characters_indexers
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
tensor_dict = list_field.as_tensor(padding_lengths)
words = tensor_dict["words"]["tokens"].detach().cpu().numpy()
characters = tensor_dict["characters"]["token_characters"].detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(
words, numpy.array([[2, 3, 4, 5, 0], [2, 3, 4, 1, 5], [2, 3, 1, 5, 0]])
)
numpy.testing.assert_array_almost_equal(
characters[0],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
]
),
)
numpy.testing.assert_array_almost_equal(
characters[1],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 3, 1, 3, 4, 5],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
]
),
)
numpy.testing.assert_array_almost_equal(
characters[2],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 4, 1, 5, 1, 3, 1, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
]
),
)
def test_as_tensor_can_handle_multiple_token_indexers_and_empty_fields(self):
self.field1._token_indexers = self.words_and_characters_indexers
self.field2._token_indexers = self.words_and_characters_indexers
self.field3._token_indexers = self.words_and_characters_indexers
list_field = ListField([self.field1.empty_field(), self.field1, self.field2])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
tensor_dict = list_field.as_tensor(padding_lengths)
words = tensor_dict["words"]["tokens"].detach().cpu().numpy()
characters = tensor_dict["characters"]["token_characters"].detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(
words, numpy.array([[0, 0, 0, 0, 0], [2, 3, 4, 5, 0], [2, 3, 4, 1, 5]])
)
numpy.testing.assert_array_almost_equal(characters[0], numpy.zeros([5, 9]))
numpy.testing.assert_array_almost_equal(
characters[1],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
]
),
)
numpy.testing.assert_array_almost_equal(
characters[2],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 3, 1, 3, 4, 5],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
]
),
)
def test_printing_doesnt_crash(self):
list_field = ListField([self.field1, self.field2])
print(list_field)
def test_sequence_methods(self):
list_field = ListField([self.field1, self.field2, self.field3])
assert len(list_field) == 3
assert list_field[1] == self.field2
assert [f for f in list_field] == [self.field1, self.field2, self.field3]
def test_empty_list_can_be_tensorized(self):
tokenizer = SpacyTokenizer()
tokens = tokenizer.tokenize("Foo")
text_field = TextField(tokens, self.word_indexer)
list_field = ListField([text_field.empty_field()])
fields = {
"list": list_field,
"bar": TextField(tokenizer.tokenize("BAR"), self.word_indexer),
}
instance = Instance(fields)
instance.index_fields(self.vocab)
instance.as_tensor_dict()
def test_batch_with_some_empty_lists_works(self):
dataset = AllennlpDataset([self.empty_instance, self.non_empty_instance], self.vocab)
model = DummyModel(self.vocab)
model.eval()
loader = PyTorchDataLoader(dataset, batch_size=2)
batch = next(iter(loader))
model.forward(**batch)
# This use case may seem a bit peculiar. It's intended for situations where
# you have sparse inputs that are used as additional features for some
# prediction, and they are sparse enough that they can be empty for some
# cases. It would be silly to try to handle these as None in your model; it
# makes a whole lot more sense to just have a minimally-sized tensor that
# gets entirely masked and has no effect on the rest of the model.
def test_batch_of_entirely_empty_lists_works(self):
dataset = AllennlpDataset([self.empty_instance, self.empty_instance], self.vocab)
model = DummyModel(self.vocab)
model.eval()
loader = PyTorchDataLoader(dataset, batch_size=2)
batch = next(iter(loader))
model.forward(**batch)
def test_list_of_text_padding(self):
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.fields import (
TextField,
ListField,
)
from allennlp.data import Vocabulary
word_indexer = {"tokens": PretrainedTransformerIndexer("albert-base-v2")}
text_field = TextField(
[
Token(t, text_id=2, type_id=1)
for t in ["▁allen", "n", "lp", "▁has", "▁no", "▁bugs", "."]
],
word_indexer,
)
list_field = ListField([text_field])
vocab = Vocabulary()
list_field.index(vocab)
padding_lengths = {
"list_tokens___mask": 10,
"list_tokens___token_ids": 10,
"list_tokens___type_ids": 10,
"num_fields": 2,
}
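# Padding asks for 10 tokens per entry and 2 list entries, so the first entry is padded
# from 7 tokens to 10 and the second entry is entirely padding.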
tensors = list_field.as_tensor(padding_lengths)["tokens"]
assert tensors["mask"].size() == (2, 10)
assert tensors["mask"][0, 0] == True # noqa: E712
assert tensors["mask"][0, 9] == False # noqa: E712
assert (tensors["mask"][1, :] == False).all() # noqa: E712
assert tensors["token_ids"].size() == (2, 10)
assert tensors["token_ids"][0, 0] == 2
assert tensors["token_ids"][0, 9] == 0
assert (tensors["token_ids"][1, :] == 0).all()
assert tensors["type_ids"].size() == (2, 10)
assert tensors["type_ids"][0, 0] == 1
assert tensors["type_ids"][0, 9] == 0
assert (tensors["type_ids"][1, :] == 0).all()
| allennlp-master | tests/data/fields/list_field_test.py |
from collections import defaultdict
from typing import Dict, List
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer, TokenIndexer
class DictReturningTokenIndexer(TokenIndexer):
"""
A stub TokenIndexer that returns multiple arrays of different lengths.
"""
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
pass
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary
) -> Dict[str, List[int]]:
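# Two keys of different lengths: "token_ids" has len(tokens) + 3 entries, while
# "additional_key" always has exactly 2.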
return {
"token_ids": (
[10, 15]
+ [vocabulary.get_token_index(token.text, "words") for token in tokens]
+ [25]
),
"additional_key": [22, 29],
}
class TestTextField(AllenNlpTestCase):
def setup_method(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("sentence", namespace="words")
self.vocab.add_token_to_namespace("A", namespace="words")
self.vocab.add_token_to_namespace("A", namespace="characters")
self.vocab.add_token_to_namespace("s", namespace="characters")
self.vocab.add_token_to_namespace("e", namespace="characters")
self.vocab.add_token_to_namespace("n", namespace="characters")
self.vocab.add_token_to_namespace("t", namespace="characters")
self.vocab.add_token_to_namespace("c", namespace="characters")
super().setup_method()
def test_field_counts_vocab_items_correctly(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["words"]["This"] == 1
assert namespace_token_counts["words"]["is"] == 1
assert namespace_token_counts["words"]["a"] == 1
assert namespace_token_counts["words"]["sentence"] == 1
assert namespace_token_counts["words"]["."] == 1
assert list(namespace_token_counts.keys()) == ["words"]
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1)
},
)
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["characters"]["T"] == 1
assert namespace_token_counts["characters"]["h"] == 1
assert namespace_token_counts["characters"]["i"] == 2
assert namespace_token_counts["characters"]["s"] == 3
assert namespace_token_counts["characters"]["a"] == 1
assert namespace_token_counts["characters"]["e"] == 3
assert namespace_token_counts["characters"]["n"] == 2
assert namespace_token_counts["characters"]["t"] == 1
assert namespace_token_counts["characters"]["c"] == 1
assert namespace_token_counts["characters"]["."] == 1
assert list(namespace_token_counts.keys()) == ["characters"]
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
},
)
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["characters"]["T"] == 1
assert namespace_token_counts["characters"]["h"] == 1
assert namespace_token_counts["characters"]["i"] == 2
assert namespace_token_counts["characters"]["s"] == 3
assert namespace_token_counts["characters"]["a"] == 1
assert namespace_token_counts["characters"]["e"] == 3
assert namespace_token_counts["characters"]["n"] == 2
assert namespace_token_counts["characters"]["t"] == 1
assert namespace_token_counts["characters"]["c"] == 1
assert namespace_token_counts["characters"]["."] == 1
assert namespace_token_counts["words"]["This"] == 1
assert namespace_token_counts["words"]["is"] == 1
assert namespace_token_counts["words"]["a"] == 1
assert namespace_token_counts["words"]["sentence"] == 1
assert namespace_token_counts["words"]["."] == 1
assert set(namespace_token_counts.keys()) == {"words", "characters"}
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
sentence_index = vocab.add_token_to_namespace("sentence", namespace="words")
capital_a_index = vocab.add_token_to_namespace("A", namespace="words")
capital_a_char_index = vocab.add_token_to_namespace("A", namespace="characters")
s_index = vocab.add_token_to_namespace("s", namespace="characters")
e_index = vocab.add_token_to_namespace("e", namespace="characters")
n_index = vocab.add_token_to_namespace("n", namespace="characters")
t_index = vocab.add_token_to_namespace("t", namespace="characters")
c_index = vocab.add_token_to_namespace("c", namespace="characters")
field = TextField(
[Token(t) for t in ["A", "sentence"]],
{"words": SingleIdTokenIndexer(namespace="words")},
)
field.index(vocab)
assert field._indexed_tokens["words"]["tokens"] == [capital_a_index, sentence_index]
field1 = TextField(
[Token(t) for t in ["A", "sentence"]],
{"characters": TokenCharactersIndexer(namespace="characters", min_padding_length=1)},
)
field1.index(vocab)
assert field1._indexed_tokens["characters"]["token_characters"] == [
[capital_a_char_index],
[s_index, e_index, n_index, t_index, e_index, n_index, c_index, e_index],
]
field2 = TextField(
[Token(t) for t in ["A", "sentence"]],
token_indexers={
"words": SingleIdTokenIndexer(namespace="words"),
"characters": TokenCharactersIndexer(namespace="characters", min_padding_length=1),
},
)
field2.index(vocab)
assert field2._indexed_tokens["words"]["tokens"] == [capital_a_index, sentence_index]
assert field2._indexed_tokens["characters"]["token_characters"] == [
[capital_a_char_index],
[s_index, e_index, n_index, t_index, e_index, n_index, c_index, e_index],
]
def test_get_padding_lengths_raises_if_no_indexed_tokens(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
with pytest.raises(ConfigurationError):
field.get_padding_lengths()
def test_padding_lengths_are_computed_correctly(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {"words___tokens": 5}
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1)
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
"characters___token_characters": 5,
"characters___num_token_characters": 8,
}
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
"words": SingleIdTokenIndexer("words"),
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
"characters___token_characters": 5,
"characters___num_token_characters": 8,
"words___tokens": 5,
}
def test_as_tensor_handles_words(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"].detach().cpu().numpy(), numpy.array([1, 1, 1, 2, 1])
)
def test_as_tensor_handles_longer_lengths(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths["words___tokens"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"].detach().cpu().numpy(),
numpy.array([1, 1, 1, 2, 1, 0, 0, 0, 0, 0]),
)
def test_as_tensor_handles_characters(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1)
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
expected_character_array = numpy.array(
[
[1, 1, 1, 3, 0, 0, 0, 0],
[1, 3, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4],
[1, 0, 0, 0, 0, 0, 0, 0],
]
)
numpy.testing.assert_array_almost_equal(
tensor_dict["characters"]["token_characters"].detach().cpu().numpy(),
expected_character_array,
)
def test_as_tensor_handles_characters_if_empty_field(self):
field = TextField(
[],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1)
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
expected_character_array = numpy.array([])
numpy.testing.assert_array_almost_equal(
tensor_dict["characters"]["token_characters"].detach().cpu().numpy(),
expected_character_array,
)
def test_as_tensor_handles_words_and_characters_with_longer_lengths(self):
field = TextField(
[Token(t) for t in ["a", "sentence", "."]],
token_indexers={
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths["words___tokens"] = 5
padding_lengths["characters___token_characters"] = 5
padding_lengths["characters___num_token_characters"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"].detach().cpu().numpy(), numpy.array([1, 2, 1, 0, 0])
)
numpy.testing.assert_array_almost_equal(
tensor_dict["characters"]["token_characters"].detach().cpu().numpy(),
numpy.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
),
)
def test_printing_doesnt_crash(self):
field = TextField(
[Token(t) for t in ["A", "sentence"]],
{"words": SingleIdTokenIndexer(namespace="words")},
)
print(field)
def test_token_indexer_returns_dict(self):
field = TextField(
[Token(t) for t in ["A", "sentence"]],
token_indexers={
"field_with_dict": DictReturningTokenIndexer(),
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
"field_with_dict___token_ids": 5,
"field_with_dict___additional_key": 2,
"words___tokens": 2,
"characters___token_characters": 2,
"characters___num_token_characters": 8,
}
padding_lengths["field_with_dict___token_ids"] = 7
padding_lengths["field_with_dict___additional_key"] = 3
padding_lengths["words___tokens"] = 4
padding_lengths["characters___token_characters"] = 4
tensors = field.as_tensor(padding_lengths)
assert list(tensors["field_with_dict"]["token_ids"].shape) == [7]
assert list(tensors["field_with_dict"]["additional_key"].shape) == [3]
assert list(tensors["words"]["tokens"].shape) == [4]
assert list(tensors["characters"]["token_characters"].shape) == [4, 8]
def test_token_padding_lengths_are_computed_correctly(self):
field = TextField(
[Token(t) for t in ["A", "sentence"]],
token_indexers={
"field_with_dict": DictReturningTokenIndexer(token_min_padding_length=3),
"words": SingleIdTokenIndexer("words", token_min_padding_length=3),
"characters": TokenCharactersIndexer(
"characters", min_padding_length=1, token_min_padding_length=3
),
},
)
field.index(self.vocab)
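# Every indexer was constructed with token_min_padding_length=3, so token-level lengths
# are padded up to at least 3 even though the field only has two tokens.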
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
"field_with_dict___token_ids": 5,
"field_with_dict___additional_key": 3,
"words___tokens": 3,
"characters___token_characters": 3,
"characters___num_token_characters": 8,
}
tensors = field.as_tensor(padding_lengths)
assert tensors["field_with_dict"]["additional_key"].tolist()[-1] == 0
assert tensors["words"]["tokens"].tolist()[-1] == 0
assert tensors["characters"]["token_characters"].tolist()[-1] == [0] * 8
def test_sequence_methods(self):
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]], {})
assert len(field) == 5
assert field[1].text == "is"
assert [token.text for token in field] == ["This", "is", "a", "sentence", "."]
| allennlp-master | tests/data/fields/text_field_test.py |
| allennlp-master | tests/data/fields/__init__.py |
import logging
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import MultiLabelField
from allennlp.data.vocabulary import Vocabulary
class TestMultiLabelField(AllenNlpTestCase):
def test_as_tensor_returns_integer_tensor(self):
f = MultiLabelField([2, 3], skip_indexing=True, label_namespace="test1", num_labels=5)
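# With labels 2 and 3 active out of 5, the field becomes the binary indicator vector [0, 0, 1, 1, 0].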
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().tolist()
assert tensor == [0, 0, 1, 1, 0]
assert {type(item) for item in tensor} == {int}
def test_multilabel_field_can_index_with_vocab(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("rel0", namespace="rel_labels")
vocab.add_token_to_namespace("rel1", namespace="rel_labels")
vocab.add_token_to_namespace("rel2", namespace="rel_labels")
f = MultiLabelField(["rel1", "rel0"], label_namespace="rel_labels")
f.index(vocab)
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([1, 1, 0]))
def test_multilabel_field_raises_with_non_integer_labels_and_no_indexing(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField(["non integer field"], skip_indexing=True)
def test_multilabel_field_raises_with_no_indexing_and_missing_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([0, 2], skip_indexing=True, num_labels=None)
def test_multilabel_field_raises_with_no_indexing_and_wrong_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([0, 2, 4], skip_indexing=True, num_labels=3)
def test_multilabel_field_raises_with_incorrect_label_type(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([1, 2], skip_indexing=False)
def test_multilabel_field_raises_with_given_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([1, 2], skip_indexing=False, num_labels=4)
def test_multilabel_field_empty_field_works(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("label1", namespace="test_empty_labels")
vocab.add_token_to_namespace("label2", namespace="test_empty_labels")
f = MultiLabelField([], label_namespace="test_empty_labels")
f.index(vocab)
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0]))
g = f.empty_field()
g.index(vocab)
tensor = g.as_tensor(g.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0]))
h = MultiLabelField(
[0, 0, 1], label_namespace="test_empty_labels", num_labels=3, skip_indexing=True
)
tensor = h.empty_field().as_tensor(None).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0, 0]))
def test_class_variables_for_namespace_warnings_work_correctly(self, caplog):
with caplog.at_level(logging.WARNING, logger="allennlp.data.fields.multilabel_field"):
assert "text" not in MultiLabelField._already_warned_namespaces
_ = MultiLabelField(["test"], label_namespace="text")
assert caplog.records
# We've warned once, so the namespace should now be recorded in the class variable.
assert "text" in MultiLabelField._already_warned_namespaces
caplog.clear()
_ = MultiLabelField(["test2"], label_namespace="text")
assert not caplog.records
# ... but a new namespace should still log a warning.
assert "text2" not in MultiLabelField._already_warned_namespaces
caplog.clear()
_ = MultiLabelField(["test"], label_namespace="text2")
assert caplog.records
def test_printing_doesnt_crash(self):
field = MultiLabelField(["label"], label_namespace="namespace")
print(field)
| allennlp-master | tests/data/fields/multilabel_field_test.py |
import logging
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import LabelField
from allennlp.data import Vocabulary
class TestLabelField(AllenNlpTestCase):
def test_as_tensor_returns_integer_tensor(self):
label = LabelField(5, skip_indexing=True)
tensor = label.as_tensor(label.get_padding_lengths())
assert tensor.item() == 5
def test_label_field_can_index_with_vocab(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("entailment", namespace="labels")
vocab.add_token_to_namespace("contradiction", namespace="labels")
vocab.add_token_to_namespace("neutral", namespace="labels")
label = LabelField("entailment")
label.index(vocab)
tensor = label.as_tensor(label.get_padding_lengths())
assert tensor.item() == 0
def test_label_field_raises_with_non_integer_labels_and_no_indexing(self):
with pytest.raises(ConfigurationError):
_ = LabelField("non integer field", skip_indexing=True)
def test_label_field_raises_with_incorrect_label_type(self):
with pytest.raises(ConfigurationError):
_ = LabelField([], skip_indexing=False)
def test_label_field_empty_field_works(self):
label = LabelField("test")
empty_label = label.empty_field()
assert empty_label.label == -1
def test_class_variables_for_namespace_warnings_work_correctly(self, caplog):
with caplog.at_level(logging.WARNING, logger="allennlp.data.fields.label_field"):
assert "text" not in LabelField._already_warned_namespaces
_ = LabelField("test", label_namespace="text")
assert caplog.records
# We've warned once, so the namespace should now be recorded in the class variable.
assert "text" in LabelField._already_warned_namespaces
caplog.clear()
_ = LabelField("test2", label_namespace="text")
assert not caplog.records
# ... but a new namespace should still log a warning.
assert "text2" not in LabelField._already_warned_namespaces
caplog.clear()
_ = LabelField("test", label_namespace="text2")
assert caplog.records
def test_printing_doesnt_crash(self):
label = LabelField("label", label_namespace="namespace")
print(label)
| allennlp-master | tests/data/fields/label_field_test.py |
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import AdjacencyField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data import Vocabulary, Token
class TestAdjacencyField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.text = TextField(
[Token(t) for t in ["here", "is", "a", "sentence", "."]],
{"words": SingleIdTokenIndexer("words")},
)
def test_adjacency_field_can_index_with_vocab(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("a", namespace="labels")
vocab.add_token_to_namespace("b", namespace="labels")
vocab.add_token_to_namespace("c", namespace="labels")
labels = ["a", "b"]
indices = [(0, 1), (2, 1)]
adjacency_field = AdjacencyField(indices, self.text, labels)
adjacency_field.index(vocab)
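# Cells (0, 1) and (2, 1) hold the indexed labels for "a" and "b"; every other cell is the padding value -1.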
tensor = adjacency_field.as_tensor(adjacency_field.get_padding_lengths())
numpy.testing.assert_equal(
tensor.numpy(),
numpy.array(
[
[-1, 0, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, 1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
]
),
)
def test_adjacency_field_raises_with_out_of_bounds_indices(self):
with pytest.raises(ConfigurationError):
_ = AdjacencyField([(0, 24)], self.text)
def test_adjacency_field_raises_with_mismatching_labels_for_indices(self):
with pytest.raises(ConfigurationError):
_ = AdjacencyField([(0, 1), (0, 2)], self.text, ["label1"])
def test_adjacency_field_raises_with_duplicate_indices(self):
with pytest.raises(ConfigurationError):
_ = AdjacencyField([(0, 1), (0, 1)], self.text, ["label1"])
def test_adjacency_field_empty_field_works(self):
field = AdjacencyField([(0, 1)], self.text)
empty_field = field.empty_field()
assert empty_field.indices == []
def test_printing_doesnt_crash(self):
adjacency_field = AdjacencyField([(0, 1)], self.text, ["label1"])
print(adjacency_field)
| allennlp-master | tests/data/fields/adjacency_field_test.py |
import numpy
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token
from allennlp.data.fields import TextField, SpanField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestSpanField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.indexers = {"words": SingleIdTokenIndexer("words")}
self.text = TextField(
[Token(t) for t in ["here", "is", "a", "sentence", "for", "spans", "."]], self.indexers
)
def test_as_tensor_converts_span_field_correctly(self):
span_field = SpanField(2, 3, self.text)
tensor = span_field.as_tensor(span_field.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_equal(tensor, numpy.array([2, 3]))
def test_span_field_raises_on_incorrect_label_type(self):
with pytest.raises(TypeError):
_ = SpanField("hello", 3, self.text)
def test_span_field_raises_on_ill_defined_span(self):
with pytest.raises(ValueError):
_ = SpanField(4, 1, self.text)
def test_span_field_raises_if_span_end_is_greater_than_sentence_length(self):
with pytest.raises(ValueError):
_ = SpanField(1, 30, self.text)
def test_empty_span_field_works(self):
span_field = SpanField(1, 3, self.text)
empty_span = span_field.empty_field()
assert empty_span.span_start == -1
assert empty_span.span_end == -1
def test_printing_doesnt_crash(self):
span_field = SpanField(2, 3, self.text)
print(span_field)
def test_equality(self):
span_field1 = SpanField(2, 3, self.text)
span_field2 = SpanField(2, 3, self.text)
span_field3 = SpanField(
2, 3, TextField([Token(t) for t in ["not", "the", "same", "tokens"]], self.indexers)
)
assert span_field1 == (2, 3)
assert span_field1 == span_field1
assert span_field1 == span_field2
assert span_field1 != span_field3
assert span_field2 != span_field3
| allennlp-master | tests/data/fields/span_field_test.py |
from allennlp.data.fields import Field
def test_eq_with_inheritance():
class SubField(Field):
__slots__ = ["a"]
def __init__(self, a):
self.a = a
class SubSubField(SubField):
__slots__ = ["b"]
def __init__(self, a, b):
super().__init__(a)
self.b = b
class SubSubSubField(SubSubField):
__slots__ = ["c"]
def __init__(self, a, b, c):
super().__init__(a, b)
self.c = c
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
assert SubSubField(1, 2) == SubSubField(1, 2)
assert SubSubField(1, 2) != SubSubField(1, 1)
assert SubSubField(1, 2) != SubSubField(2, 2)
assert SubSubSubField(1, 2, 3) == SubSubSubField(1, 2, 3)
assert SubSubSubField(1, 2, 3) != SubSubSubField(0, 2, 3)
def test_eq_with_inheritance_for_non_slots_field():
class SubField(Field):
def __init__(self, a):
self.a = a
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
def test_eq_with_inheritance_for_mixed_field():
class SubField(Field):
__slots__ = ["a"]
def __init__(self, a):
self.a = a
class SubSubField(SubField):
def __init__(self, a, b):
super().__init__(a)
self.b = b
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
assert SubSubField(1, 2) == SubSubField(1, 2)
assert SubSubField(1, 2) != SubSubField(1, 1)
assert SubSubField(1, 2) != SubSubField(2, 2)
| allennlp-master | tests/data/fields/field_test.py |
from collections import defaultdict
import logging
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestSequenceLabelField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.text = TextField(
[Token(t) for t in ["here", "are", "some", "words", "."]],
{"words": SingleIdTokenIndexer("words")},
)
def test_tag_length_mismatch_raises(self):
with pytest.raises(ConfigurationError):
wrong_tags = ["B", "O", "O"]
_ = SequenceLabelField(wrong_tags, self.text)
def test_count_vocab_items_correctly_indexes_tags(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
counter = defaultdict(lambda: defaultdict(int))
sequence_label_field.count_vocab_items(counter)
assert counter["labels"]["B"] == 1
assert counter["labels"]["I"] == 1
assert counter["labels"]["O"] == 3
assert set(counter.keys()) == {"labels"}
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
b_index = vocab.add_token_to_namespace("B", namespace="*labels")
i_index = vocab.add_token_to_namespace("I", namespace="*labels")
o_index = vocab.add_token_to_namespace("O", namespace="*labels")
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="*labels")
sequence_label_field.index(vocab)
assert sequence_label_field._indexed_labels == [b_index, i_index, o_index, o_index, o_index]
def test_as_tensor_produces_integer_targets(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("B", namespace="*labels")
vocab.add_token_to_namespace("I", namespace="*labels")
vocab.add_token_to_namespace("O", namespace="*labels")
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="*labels")
sequence_label_field.index(vocab)
padding_lengths = sequence_label_field.get_padding_lengths()
tensor = sequence_label_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 1, 2, 2, 2]))
def test_sequence_label_field_raises_on_incorrect_type(self):
with pytest.raises(ConfigurationError):
_ = SequenceLabelField([[], [], [], [], []], self.text)
def test_class_variables_for_namespace_warnings_work_correctly(self, caplog):
with caplog.at_level(logging.WARNING, logger="allennlp.data.fields.sequence_label_field"):
tags = ["B", "I", "O", "O", "O"]
assert "text" not in SequenceLabelField._already_warned_namespaces
_ = SequenceLabelField(tags, self.text, label_namespace="text")
assert caplog.records
# We've warned once, so the namespace should now be recorded in the class variable.
assert "text" in SequenceLabelField._already_warned_namespaces
caplog.clear()
_ = SequenceLabelField(tags, self.text, label_namespace="text")
assert not caplog.records
# ... but a new namespace should still log a warning.
assert "text2" not in SequenceLabelField._already_warned_namespaces
caplog.clear()
_ = SequenceLabelField(tags, self.text, label_namespace="text2")
assert caplog.records
def test_printing_doesnt_crash(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
print(sequence_label_field)
def test_sequence_methods(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
assert len(sequence_label_field) == 5
assert sequence_label_field[1] == "I"
assert [label for label in sequence_label_field] == tags
| allennlp-master | tests/data/fields/sequence_label_field_test.py |
import pytest
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data.fields import MetadataField
class TestMetadataField(AllenNlpTestCase):
def test_mapping_works_with_dict(self):
field = MetadataField({"a": 1, "b": [0]})
assert "a" in field
assert field["a"] == 1
assert len(field) == 2
keys = {k for k in field}
assert keys == {"a", "b"}
values = [v for v in field.values()]
assert len(values) == 2
assert 1 in values
assert [0] in values
def test_mapping_raises_with_non_dict(self):
field = MetadataField(0)
with pytest.raises(TypeError):
_ = field[0]
with pytest.raises(TypeError):
_ = len(field)
with pytest.raises(TypeError):
_ = [x for x in field]
| allennlp-master | tests/data/fields/metadata_field_test.py |
import pytest
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data.fields import FlagField
class TestFlagField(AllenNlpTestCase):
def test_get_padding_lengths_returns_nothing(self):
flag_field = FlagField(True)
assert flag_field.get_padding_lengths() == {}
def test_as_tensor_just_returns_value(self):
for value in [True, 3.234, "this is a string"]:
assert FlagField(value).as_tensor({}) == value
def test_printing_doesnt_crash(self):
flag = FlagField(True)
print(flag)
def test_batch_tensors_returns_single_value(self):
value = True
fields = [FlagField(value) for _ in range(5)]
values = [field.as_tensor({}) for field in fields]
batched_value = fields[0].batch_tensors(values)
assert batched_value == value
def test_batch_tensors_crashes_with_non_uniform_values(self):
field = FlagField(True)
with pytest.raises(ValueError):
field.batch_tensors([True, False, True])
with pytest.raises(ValueError):
field.batch_tensors([1, 2, 3, 4])
with pytest.raises(ValueError):
field.batch_tensors(["different", "string", "flags"])
| allennlp-master | tests/data/fields/flag_field_test.py |
allennlp-master | benchmarks/__init__.py |
|
import torch
from allennlp.nn import util
from allennlp.common.testing import requires_gpu
@requires_gpu
def bench_add_sentence_boundary_token_ids(benchmark):
device = torch.device("cuda")
# shape: (32, 50)
tensor = torch.tensor([[3] * 50] * 32, device=device)
# shape: (32, 50)
mask = torch.tensor([[True] * 50, [True] * 30 + [False] * 20] * 16, device=device)
begin_token = 1
end_token = 2
benchmark(util.add_sentence_boundary_token_ids, tensor, mask, begin_token, end_token)
@requires_gpu
def bench_remove_sentence_boundaries(benchmark):
device = torch.device("cuda")
# shape: (32, 50, 1)
tensor = torch.tensor([[3] * 50] * 32, device=device).unsqueeze(-1)
# shape: (32, 50)
mask = torch.tensor([[True] * 50, [True] * 30 + [False] * 20] * 16, device=device)
benchmark(util.remove_sentence_boundaries, tensor, mask)
@requires_gpu
def bench_create_tensor_then_send_to_device(benchmark):
device = torch.device("cuda:0")
def create_tensor():
return torch.rand((32, 50)).to(device)
benchmark(create_tensor)
@requires_gpu
def bench_create_tensor_directly_on_device(benchmark):
device = torch.device("cuda:0")
def create_tensor():
return torch.rand((32, 50), device=device)
benchmark(create_tensor)
| allennlp-master | benchmarks/nn/util_bench.py |
allennlp-master | benchmarks/data/__init__.py |
|
allennlp-master | benchmarks/data/tokenizers/__init__.py |
|
from allennlp.data.tokenizers import CharacterTokenizer
tokenizer = CharacterTokenizer()
passage = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "
"incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis "
"nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. "
"Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu "
"fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in "
"culpa qui officia deserunt mollit anim id est laborum."
)
def bench_character_tokenizer(benchmark):
benchmark(tokenizer.tokenize, passage)
| allennlp-master | benchmarks/data/tokenizers/character_tokenizer_bench.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List
from allennlp.version import VERSION
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change_log_notes() -> str:
in_current_section = False
current_section_notes: List[str] = []
with open("CHANGELOG.md") as changelog:
for line in changelog:
if line.startswith("## "):
if line.startswith("## Unreleased"):
continue
if line.startswith(f"## [{TAG}]"):
in_current_section = True
continue
break
if in_current_section:
if line.startswith("### Added"):
line = ADDED_HEADER + "\n"
elif line.startswith("### Changed"):
line = CHANGED_HEADER + "\n"
elif line.startswith("### Fixed"):
line = FIXED_HEADER + "\n"
elif line.startswith("### Removed"):
line = REMOVED_HEADER + "\n"
current_section_notes.append(line)
assert current_section_notes
return "## What's new\n\n" + "".join(current_section_notes).strip() + "\n"
def get_commit_history() -> str:
stream = os.popen(
f"git log $(git describe --always --tags --abbrev=0 {TAG}^^)..{TAG}^ --oneline"
)
return "## Commits\n\n" + stream.read()
def main():
assert TAG == f"v{VERSION}"
print(get_change_log_notes())
print(get_commit_history())
if __name__ == "__main__":
main()
| allennlp-master | scripts/release_notes.py |
#!/usr/bin/env python
"""
This script is used to populate the table of contents for the API in the mkdocs config file.
"""
import argparse
from pathlib import Path
from typing import Any, List
from ruamel.yaml import YAML
from allennlp.version import VERSION
API_TOC_KEY = "API"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("target_yaml", help="Path to the target mkdocs config file.")
parser.add_argument("source_yaml", help="Path to the mkdocs skeleton config file.")
parser.add_argument("docs_root", help="The root of the markdown docs folder.")
parser.add_argument(
"api_docs_path", help="The root of the API docs within the markdown docs root folder."
)
parser.add_argument("--docs-version", type=str, default=f"v{VERSION}")
return parser.parse_args()
def build_api_toc(source_path: Path, docs_root: Path):
nav_entries: List[Any] = []
for child in source_path.iterdir():
if child.is_dir():
nav_subsection = build_api_toc(child, docs_root)
elif child.suffix == ".md":
nav_subsection = str(child.relative_to(docs_root))
nav_entries.append({child.stem: nav_subsection})
nav_entries.sort(key=lambda x: list(x)[0], reverse=False)
return nav_entries
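# Illustrative shape of the nav entries returned above (the file names are hypothetical):
#   [
#       {"commands": [{"evaluate": "api/commands/evaluate.md"}, {"train": "api/commands/train.md"}]},
#       {"version": "api/version.md"},
#   ]
# Each directory becomes a nested list, each markdown file becomes a path relative to the
# docs root, and entries at every level are sorted alphabetically by key.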
def main():
yaml = YAML()
opts = parse_args()
source_yaml = yaml.load(Path(opts.source_yaml))
nav_entries = build_api_toc(Path(opts.api_docs_path), Path(opts.docs_root))
# Add version to name.
source_yaml["site_name"] = f"AllenNLP {opts.docs_version}"
# Find the yaml sub-object corresponding to the API table of contents.
site_nav = source_yaml["nav"]
for nav_obj in site_nav:
if API_TOC_KEY in nav_obj:
break
nav_obj[API_TOC_KEY] = nav_entries
with open(opts.target_yaml, "w") as f:
yaml.dump(source_yaml, f)
print(f"{opts.target_yaml} created")
if __name__ == "__main__":
main()
| allennlp-master | scripts/build_docs_config.py |
#!/usr/bin/env python
import glob
import logging
import os
import re
import shutil
import sys
import tempfile
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
from allennlp.commands.test_install import _get_module_root
from allennlp.commands.train import train_model_from_file, train_model
from allennlp.common import Params
from allennlp.common.util import pushd
logger = logging.getLogger(__name__)
def train_fixture(config_prefix: str, config_filename: str = "experiment.json") -> None:
config_file = config_prefix + config_filename
serialization_dir = config_prefix + "serialization"
    # train_model doesn't like it if we have incomplete serialization
    # directories, so remove them if they exist.
if os.path.exists(serialization_dir):
shutil.rmtree(serialization_dir)
# train the model
train_model_from_file(config_file, serialization_dir)
# remove unnecessary files
shutil.rmtree(os.path.join(serialization_dir, "log"))
for filename in glob.glob(os.path.join(serialization_dir, "*")):
if (
filename.endswith(".log")
or filename.endswith(".json")
or re.search(r"epoch_[0-9]+\.th$", filename)
):
os.remove(filename)
def train_fixture_gpu(config_prefix: str) -> None:
config_file = config_prefix + "experiment.json"
serialization_dir = config_prefix + "serialization"
params = Params.from_file(config_file)
params["trainer"]["cuda_device"] = 0
# train this one to a tempdir
tempdir = tempfile.gettempdir()
train_model(params, tempdir)
    # now copy back the weights and the archived model
shutil.copy(os.path.join(tempdir, "best.th"), os.path.join(serialization_dir, "best_gpu.th"))
shutil.copy(
os.path.join(tempdir, "model.tar.gz"), os.path.join(serialization_dir, "model_gpu.tar.gz")
)
if __name__ == "__main__":
module_root = _get_module_root().parent
with pushd(module_root, verbose=True):
models = [
("basic_classifier", "experiment_seq2seq.jsonnet"),
"simple_tagger",
"simple_tagger_with_elmo",
"simple_tagger_with_span_f1",
]
for model in models:
if isinstance(model, tuple):
model, config_filename = model
train_fixture(f"allennlp/tests/fixtures/{model}/", config_filename)
else:
train_fixture(f"allennlp/tests/fixtures/{model}/")
| allennlp-master | scripts/train_fixtures.py |
#!/usr/bin/env python
# encoding: UTF-8
"""
Goes through all the inline-links in markdown files and reports the breakages.
"""
import re
import sys
import pathlib
import os
from multiprocessing.dummy import Pool
from typing import Tuple, NamedTuple, Optional
import requests
OK_STATUS_CODES = (
200,
401, # the resource exists but may require some sort of login.
403, # ^ same
405, # HEAD method not allowed.
406, # the resource exists, but our default 'Accept-' header may not match what the server can provide.
)
THREADS = 10
http_session = requests.Session()
for resource_prefix in ("http://", "https://"):
http_session.mount(
resource_prefix,
requests.adapters.HTTPAdapter(max_retries=5, pool_connections=20, pool_maxsize=THREADS),
)
class MatchTuple(NamedTuple):
source: str
name: str
link: str
def url_ok(match_tuple: MatchTuple) -> Tuple[bool, str]:
"""Check if a URL is reachable."""
try:
result = http_session.head(match_tuple.link, timeout=5, allow_redirects=True)
return (
result.ok or result.status_code in OK_STATUS_CODES,
f"status code = {result.status_code}",
)
except (requests.ConnectionError, requests.Timeout):
return False, "connection error"
def path_ok(match_tuple: MatchTuple) -> bool:
"""Check if a file in this repository exists."""
relative_path = match_tuple.link.split("#")[0]
full_path = os.path.join(os.path.dirname(str(match_tuple.source)), relative_path)
return os.path.exists(full_path)
def link_ok(match_tuple: MatchTuple) -> Tuple[MatchTuple, bool, Optional[str]]:
reason: Optional[str] = None
if match_tuple.link.startswith("http"):
result_ok, reason = url_ok(match_tuple)
else:
result_ok = path_ok(match_tuple)
print(f" {'✓' if result_ok else '✗'} {match_tuple.link}")
return match_tuple, result_ok, reason
def main():
print("Finding all markdown files in the current directory...")
project_root = (pathlib.Path(__file__).parent / "..").resolve()
markdown_files = project_root.glob("**/*.md")
all_matches = set()
url_regex = re.compile(r"\[([^!][^\]]+)\]\(([^)(]+)\)")
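    # The pattern above targets inline markdown links: for a (hypothetical) line like
    #   "see the [contributing guide](CONTRIBUTING.md) before opening a PR"
    # it yields the (name, link) pair ("contributing guide", "CONTRIBUTING.md").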
for markdown_file in markdown_files:
with open(markdown_file) as handle:
for line in handle.readlines():
matches = url_regex.findall(line)
for name, link in matches:
if "localhost" not in link:
all_matches.add(MatchTuple(source=str(markdown_file), name=name, link=link))
print(f" {len(all_matches)} markdown files found")
print("Checking to make sure we can retrieve each link...")
with Pool(processes=THREADS) as pool:
results = pool.map(link_ok, [match for match in list(all_matches)])
unreachable_results = [
(match_tuple, reason) for match_tuple, success, reason in results if not success
]
if unreachable_results:
print(f"Unreachable links ({len(unreachable_results)}):")
for match_tuple, reason in unreachable_results:
print(" > Source: " + match_tuple.source)
print(" Name: " + match_tuple.name)
print(" Link: " + match_tuple.link)
if reason is not None:
print(" Reason: " + reason)
sys.exit(1)
print("No Unreachable link found.")
if __name__ == "__main__":
main()
| allennlp-master | scripts/check_links.py |
from datetime import datetime as dt
import os
from github import Github
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("allenai/allennlp")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
if (
issue.milestone is None
and issue.assignees
and issue.pull_request is None
and (dt.utcnow() - issue.updated_at).days >= 14
):
assignees = ", ".join([f"@{user.login}" for user in issue.assignees])
print(f"Pinging {assignees} for {issue}")
issue.create_comment(
f"{assignees} this is just a friendly ping to make sure you "
"haven't forgotten about this issue 😜"
)
if __name__ == "__main__":
main()
| allennlp-master | scripts/ping_issue_assignees.py |
#!/usr/bin/env python
import argparse
from typing import Dict
import requests
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("version_type", choices=["stable", "latest", "current"])
return parser.parse_args()
def get_current_version() -> str:
VERSION: Dict[str, str] = {}
with open("allennlp/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
return "v" + VERSION["VERSION"]
def get_latest_version() -> str:
resp = requests.get("https://api.github.com/repos/allenai/allennlp/tags")
return resp.json()[0]["name"]
def get_stable_version() -> str:
resp = requests.get("https://api.github.com/repos/allenai/allennlp/releases/latest")
return resp.json()["tag_name"]
def main() -> None:
opts = parse_args()
if opts.version_type == "stable":
print(get_stable_version())
elif opts.version_type == "latest":
print(get_latest_version())
elif opts.version_type == "current":
print(get_current_version())
else:
raise NotImplementedError
if __name__ == "__main__":
main()
| allennlp-master | scripts/get_version.py |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = ["contributions welcome", "merge when ready", "under development", "help wanted"]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("allenai/allennlp")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
if (
issue.milestone is None
and not issue.assignees
and issue.pull_request is None
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 14
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
print("Closing", issue)
issue.create_comment(
"This issue is being closed due to lack of activity. "
"If you think it still needs to be addressed, please comment on this thread 👇"
)
issue.add_to_labels("stale")
issue.edit(state="closed")
if __name__ == "__main__":
main()
| allennlp-master | scripts/close_stale_issues.py |
#!/usr/bin/env python
"""
Turn docstrings from a single module into a markdown file.
We do this with PydocMarkdown, using custom processors and renderers defined here.
"""
import argparse
from collections import OrderedDict
from dataclasses import dataclass
from enum import Enum
import logging
from multiprocessing import Pool, cpu_count
import os
from pathlib import Path
import re
import sys
from typing import Optional, Tuple, List
from nr.databind.core import Struct
from nr.interface import implements, override
from pydoc_markdown import PydocMarkdown
from pydoc_markdown.contrib.loaders.python import PythonLoader
from pydoc_markdown.contrib.renderers.markdown import MarkdownRenderer
from pydoc_markdown.interfaces import Processor, Renderer
from pydoc_markdown.reflection import Argument, Module, Function, Class, Data
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("py2md")
BASE_MODULE = os.environ.get("BASE_MODULE", "allennlp")
BASE_SOURCE_LINK = os.environ.get(
"BASE_SOURCE_LINK", "https://github.com/allenai/allennlp/blob/master/allennlp/"
)
class DocstringError(Exception):
pass
def emphasize(s: str) -> str:
# Need to escape underscores.
s = s.replace("_", "\\_")
return f"__{s}__"
class Section(Enum):
ARGUMENTS = "ARGUMENTS"
PARAMETERS = "PARAMETERS"
ATTRIBUTES = "ATTRIBUTES"
MEMBERS = "MEMBERS"
RETURNS = "RETURNS"
RAISES = "RAISES"
EXAMPLES = "EXAMPLES"
OTHER = "OTHER"
@classmethod
def from_str(cls, section: str) -> "Section":
section = section.upper()
for member in cls:
if section == member.value:
return member
return cls.OTHER
REQUIRED_PARAM_RE = re.compile(r"^`([^`]+)`(, required\.?)?$")
OPTIONAL_PARAM_RE = re.compile(
r"^`([^`]+)`,?\s+(optional,?\s)?\(\s?(optional,\s)?default\s?=\s?`([^`]+)`\s?\)\.?$"
)
OPTIONAL_PARAM_NO_DEFAULT_RE = re.compile(r"^`([^`]+)`,?\s+optional\.?$")
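# A rough guide to the type descriptions these patterns accept (the authoritative
# examples live in scripts/tests/py2md/py2md_test.py):
#   REQUIRED_PARAM_RE            -> "`int`" or "`int`, required"
#   OPTIONAL_PARAM_RE            -> "`int`, optional (default = `3`)"
#   OPTIONAL_PARAM_NO_DEFAULT_RE -> "`int`, optional"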
@dataclass
class Param:
ident: str
ty: Optional[str] = None
required: bool = False
default: Optional[str] = None
@classmethod
def from_line(cls, line: str) -> Optional["Param"]:
if ":" not in line:
return None
ident, description = line.split(":", 1)
ident = ident.strip()
description = description.strip()
if " " in ident:
return None
maybe_match = REQUIRED_PARAM_RE.match(description)
if maybe_match:
ty = maybe_match.group(1)
return cls(ident=ident, ty=ty, required=True)
maybe_match = OPTIONAL_PARAM_RE.match(description)
if maybe_match:
ty = maybe_match.group(1)
default = maybe_match.group(4)
return cls(ident=ident, ty=ty, required=False, default=default)
maybe_match = OPTIONAL_PARAM_NO_DEFAULT_RE.match(description)
if maybe_match:
ty = maybe_match.group(1)
return cls(ident=ident, ty=ty, required=False)
raise DocstringError(
f"Invalid parameter / attribute description: '{line}'\n"
"Make sure types are enclosed in backticks.\n"
"Required parameters should be documented like: '{ident} : `{type}`'\n"
"Optional parameters should be documented like: '{ident} : `{type}`, optional (default = `{expr}`)'\n"
)
def to_line(self) -> str:
line: str = f"- {emphasize(self.ident)} :"
if self.ty:
line += f" `{self.ty}`"
if not self.required:
line += ", optional"
if self.default:
line += f" (default = `{self.default}`)"
line += " <br>"
return line
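# Example round trip (mirrors a case from scripts/tests/py2md/py2md_test.py):
#   Param.from_line("a : `int`, optional (default = `None`)")
#   -> Param(ident="a", ty="int", required=False, default="None")
#   and .to_line() then renders "- __a__ : `int`, optional (default = `None`) <br>"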
# For now we handle attributes / members in the same way as parameters / arguments.
Attrib = Param
@dataclass
class RetVal:
description: Optional[str] = None
ident: Optional[str] = None
ty: Optional[str] = None
@classmethod
def from_line(cls, line: str) -> "RetVal":
if ": " not in line:
return cls(description=line)
ident, ty = line.split(":", 1)
ident = ident.strip()
ty = ty.strip()
if ty and not ty.startswith("`"):
raise DocstringError(f"Type should be enclosed in backticks: '{line}'")
return cls(ident=ident, ty=ty)
def to_line(self) -> str:
if self.description:
line = f"- {self.description} <br>"
elif self.ident:
line = f"- {emphasize(self.ident)}"
if self.ty:
line += f" : {self.ty} <br>"
else:
line += " <br>"
else:
raise DocstringError("RetVal must have either description or ident")
return line
@dataclass
class ProcessorState:
parameters: "OrderedDict[str, Param]"
current_section: Optional[Section] = None
codeblock_opened: bool = False
consecutive_blank_line_count: int = 0
@implements(Processor)
class AllenNlpDocstringProcessor(Struct):
"""
    Used to turn our docstrings into Markdown.
"""
CROSS_REF_RE = re.compile("(:(class|func|mod):`~?([a-zA-Z0-9_.]+)`)")
UNDERSCORE_HEADER_RE = re.compile(r"(.*)\n-{3,}\n")
MULTI_LINE_LINK_RE = re.compile(r"(\[[^\]]+\])\n\s*(\([^\)]+\))")
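    # Example of the rewrite performed by _transform_cross_references below, assuming the
    # default BASE_MODULE of "allennlp":
    #   :class:`~allennlp.data.vocabulary.Vocabulary`
    #   -> [`Vocabulary`](/api/data/vocabulary#vocabulary)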
@override
def process(self, graph, resolver):
graph.visit(self.process_node)
def process_node(self, node):
if not getattr(node, "docstring", None):
return
lines: List[str] = []
state: ProcessorState = ProcessorState(parameters=OrderedDict())
docstring = node.docstring
# Standardize header syntax to use '#' instead of underscores.
docstring = self.UNDERSCORE_HEADER_RE.sub(r"# \g<1>", docstring)
# It's common to break up markdown links into multiple lines in docstrings, but
# they won't render as links in the doc HTML unless they are all on one line.
docstring = self.MULTI_LINE_LINK_RE.sub(r"\g<1>\g<2>", docstring)
for line in docstring.split("\n"):
# Check if we're starting or ending a codeblock.
if line.startswith("```"):
state.codeblock_opened = not state.codeblock_opened
if not state.codeblock_opened:
# If we're not in a codeblock, we'll do some pre-processing.
if not line.strip():
state.consecutive_blank_line_count += 1
if state.consecutive_blank_line_count >= 2:
state.current_section = None
else:
state.consecutive_blank_line_count = 0
line = self._preprocess_line(line, state)
lines.append(line)
# Now set the docstring to our preprocessed version of it.
node.docstring = "\n".join(lines)
def _preprocess_line(self, line, state: ProcessorState) -> str:
match = re.match(r"#+ (.*)$", line)
if match:
state.current_section = Section.from_str(match.group(1).strip())
line = re.sub(r"#+ (.*)$", r"<strong>\1</strong>\n", line)
else:
if line and not line.startswith(" ") and not line.startswith("!!! "):
if state.current_section in (
Section.ARGUMENTS,
Section.PARAMETERS,
):
param = Param.from_line(line)
if param:
line = param.to_line()
elif state.current_section in (Section.ATTRIBUTES, Section.MEMBERS):
attrib = Attrib.from_line(line)
if attrib:
line = attrib.to_line()
elif state.current_section in (Section.RETURNS, Section.RAISES):
retval = RetVal.from_line(line)
line = retval.to_line()
line = self._transform_cross_references(line)
return line
def _transform_cross_references(self, line: str) -> str:
"""
Replace sphinx style crossreferences with markdown links.
"""
for match, ty, name in self.CROSS_REF_RE.findall(line):
if name.startswith(f"{BASE_MODULE}."):
path = name.split(".")
if ty == "mod":
href = "/api/" + "/".join(path[1:])
else:
href = "/api/" + "/".join(path[1:-1]) + "#" + path[-1].lower()
cross_ref = f"[`{path[-1]}`]({href})"
elif "." not in name:
cross_ref = f"[`{name}`](#{name.lower()})"
else:
cross_ref = f"`{name}`"
line = line.replace(match, cross_ref)
return line
@implements(Processor)
class AllenNlpFilterProcessor(Struct):
"""
Used to filter out nodes that we don't want to document.
"""
PRIVATE_METHODS_TO_KEEP = {"DatasetReader._read", "__call__"}
def process(self, graph, _resolver):
graph.visit(self._process_node)
def _process_node(self, node):
def _check(node):
if node.name.startswith("_"):
if node.name in self.PRIVATE_METHODS_TO_KEEP:
return True
if (
node.parent
and f"{node.parent.name}.{node.name}" in self.PRIVATE_METHODS_TO_KEEP
):
return True
return False
if node.parent and node.parent.name.startswith("_"):
return False
if node.name == "logger" and isinstance(node.parent, Module):
return False
return True
if not _check(node):
node.visible = False
@implements(Renderer)
class AllenNlpRenderer(MarkdownRenderer):
def _format_function_signature(
self,
func: Function,
override_name: str = None,
add_method_bar: bool = True,
include_parent_class: bool = True,
) -> str:
parts = []
for dec in func.decorators:
parts.append("@{}{}\n".format(dec.name, dec.args or ""))
if self.signature_python_help_style and not func.is_method():
parts.append("{} = ".format(func.path()))
if func.is_async:
parts.append("async ")
if self.signature_with_def:
parts.append("def ")
if self.signature_class_prefix and (
func.is_function() and func.parent and func.parent.is_class()
):
parts.append(func.parent.name + ".")
parts.append((override_name or func.name))
signature_args = Argument.format_arglist(func.args)
if signature_args.endswith(","):
signature_args = signature_args[:-1].strip()
if (
len(parts[-1])
+ len(signature_args)
+ (0 if not func.return_ else len(str(func.return_)))
> 60
):
signature_args = ",\n ".join(
filter(lambda s: s.strip() not in ("", ","), (str(arg) for arg in func.args))
)
parts.append("(\n " + signature_args + "\n)")
else:
parts.append("(" + signature_args + ")")
if func.return_:
parts.append(" -> {}".format(func.return_))
result = "".join(parts)
if add_method_bar and func.is_method():
result = "\n".join(" | " + line for line in result.split("\n"))
if include_parent_class:
bases = ", ".join(map(str, func.parent.bases))
if func.parent.metaclass:
bases += ", metaclass=" + str(func.parent.metaclass)
if bases:
class_signature = f"class {func.parent.name}({bases})"
else:
class_signature = f"class {func.parent.name}"
result = f"{class_signature}:\n | ...\n{result}"
return result
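    # For a method, the signature built above is rendered with a " | " gutter underneath a
    # stub of its parent class, e.g. (hypothetical names):
    #   class SomeClass:
    #    | ...
    #    | def some_method(self) -> None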
def _format_data_signature(self, data: Data) -> str:
expr = str(data.expr)
if len(expr) > self.data_expression_maxlength:
expr = expr[: self.data_expression_maxlength] + " ..."
if data.annotation:
signature = f"{data.name}: {data.annotation} = {expr}"
else:
signature = f"{data.name} = {expr}"
if data.parent and data.parent.is_class():
bases = ", ".join(map(str, data.parent.bases))
if data.parent.metaclass:
bases += ", metaclass=" + str(data.parent.metaclass)
if bases:
class_signature = f"class {data.parent.name}({bases})"
else:
class_signature = f"class {data.parent.name}"
return f"{class_signature}:\n | ...\n | {signature}"
else:
return signature
def _format_classdef_signature(self, cls: Class) -> str:
code = ""
if cls.decorators:
for dec in cls.decorators:
code += "@{}{}\n".format(dec.name, dec.args or "")
bases = ", ".join(map(str, cls.bases))
if cls.metaclass:
bases += ", metaclass=" + str(cls.metaclass)
if bases:
code += "class {}({})".format(cls.name, bases)
else:
code += "class {}".format(cls.name)
if self.signature_python_help_style:
code = cls.path() + " = " + code
if self.classdef_render_init_signature_if_needed and (
"__init__" in cls.members and not cls.members["__init__"].visible
):
code += ":\n" + self._format_function_signature(
cls.members["__init__"],
add_method_bar=True,
include_parent_class=False,
)
return code
def _render_module_breadcrumbs(self, fp, mod: Module):
submods = mod.name.split(".")
breadcrumbs = []
for i, submod_name in enumerate(submods):
if i == 0:
title = f"<i>{submod_name}</i>"
elif i == len(submods) - 1:
title = f"<strong>.{submod_name}</strong>"
else:
title = f"<i>.{submod_name}</i>"
breadcrumbs.append(title)
"/".join(submods[1:])
source_link = BASE_SOURCE_LINK + "/".join(submods[1:]) + ".py"
fp.write(
"<div>\n"
' <p class="alignleft">' + "".join(breadcrumbs) + "</p>\n"
f' <p class="alignright"><a class="sourcelink" href="{source_link}">[SOURCE]</a></p>\n'
"</div>\n"
'<div style="clear: both;"></div>\n\n---\n\n'
)
def _render_object(self, fp, level, obj):
if not isinstance(obj, Module) or self.render_module_header:
self._render_header(fp, level, obj)
if isinstance(obj, Module):
self._render_module_breadcrumbs(fp, obj)
self._render_signature_block(fp, obj)
if obj.docstring:
lines = obj.docstring.split("\n")
if self.docstrings_as_blockquote:
lines = ["> " + x for x in lines]
fp.write("\n".join(lines))
fp.write("\n\n")
def py2md(module: str, out: Optional[str] = None) -> bool:
"""
Returns `True` if module successfully processed, otherwise `False`.
"""
logger.debug("Processing %s", module)
pydocmd = PydocMarkdown(
loaders=[PythonLoader(modules=[module])],
processors=[AllenNlpFilterProcessor(), AllenNlpDocstringProcessor()],
renderer=AllenNlpRenderer(
filename=out,
add_method_class_prefix=False,
add_member_class_prefix=False,
data_code_block=True,
signature_with_def=True,
use_fixed_header_levels=False,
render_module_header=False,
descriptive_class_title=False,
),
)
if out:
out_path = Path(out)
os.makedirs(out_path.parent, exist_ok=True)
pydocmd.load_modules()
try:
pydocmd.process()
except DocstringError as err:
logger.exception("Failed to process %s.\n%s", module, err)
return False
pydocmd.render()
return True
def _py2md_wrapper(x: Tuple[str, str]) -> bool:
"""
Used to wrap py2md since we can't pickle a lambda (needed for multiprocessing).
"""
return py2md(x[0], x[1])
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("modules", nargs="+", type=str, help="""The Python modules to parse.""")
parser.add_argument(
"-o",
"--out",
nargs="+",
type=str,
help="""Output files.
If given, must have the same number of items as 'modules'.
If not given, stdout is used.""",
)
return parser.parse_args()
def main():
opts = parse_args()
outputs = opts.out if opts.out else [None] * len(opts.modules)
if len(outputs) != len(opts.modules):
raise ValueError("Number inputs and outputs should be the same.")
n_threads = cpu_count()
errors: int = 0
if len(opts.modules) > n_threads and opts.out:
# If writing to files, can process in parallel.
chunk_size = max([1, int(len(outputs) / n_threads)])
logger.info("Using %d threads", n_threads)
with Pool(n_threads) as p:
for result in p.imap(_py2md_wrapper, zip(opts.modules, outputs), chunk_size):
if not result:
errors += 1
else:
# If writing to stdout, need to process sequentially. Otherwise the output
# could get intertwined.
for module, out in zip(opts.modules, outputs):
result = py2md(module, out)
if not result:
errors += 1
logger.info("Processed %d modules", len(opts.modules))
if errors:
logger.error("Found %d errors", errors)
sys.exit(1)
if __name__ == "__main__":
main()
| allennlp-master | scripts/py2md.py |
from typing import Optional
import pytest
from allennlp.common.testing import AllenNlpTestCase
from scripts.py2md import py2md, Param, DocstringError
class TestPy2md(AllenNlpTestCase):
def test_basic_example(self, capsys):
py2md("scripts.tests.py2md.basic_example")
captured = capsys.readouterr()
with open(
self.PROJECT_ROOT / "scripts" / "tests" / "py2md" / "basic_example_expected_output.md"
) as f:
expected = f.read()
assert captured.out.split("\n") == expected.split("\n")
@pytest.mark.parametrize(
"line_in, line_out",
[
(
"a : `int`, optional (default = `None`)",
"- __a__ : `int`, optional (default = `None`) <br>",
),
(
"foo : `Tuple[int, ...]`, optional (default = `()`)",
"- __foo__ : `Tuple[int, ...]`, optional (default = `()`) <br>",
),
("a : `int`, required", "- __a__ : `int` <br>"),
("a : `int`", "- __a__ : `int` <br>"),
("_a : `int`", "- __\\_a__ : `int` <br>"),
("a_ : `int`", "- __a\\___ : `int` <br>"),
],
)
def test_param_from_and_to_line(line_in: str, line_out: Optional[str]):
param = Param.from_line(line_in)
assert param is not None
assert param.to_line() == line_out
@pytest.mark.parametrize(
"line",
[
"a : `int`, optional (default = None)",
"a : `int`, optional (default = `None)",
"a : `int`, optional (default = None`)",
"a : int",
"a : `int",
"a : int`",
],
)
def test_param_from_bad_line_raises(line: str):
with pytest.raises(DocstringError):
Param.from_line(line)
| allennlp-master | scripts/tests/py2md/py2md_test.py |
"""
This is a docstring.
And this is a multi-line line: [http://example.com]
(https://example.com/blah/blah/blah.html).
"""
from dataclasses import dataclass
SOME_GLOBAL_VAR = "Ahhhh I'm a global var!!"
"""
This is a global var.
"""
def func_with_no_args():
"""
This function has no args.
"""
return None
def func_with_args(a: int, b: int, c: int = 3) -> int:
"""
This function has some args.
# Parameters
a : `int`
A number.
b : `int`
Another number.
c : `int`, optional (default = `3`)
Yet another number.
Notes
-----
These are some notes.
# Returns
`int`
The result of `a + b * c`.
"""
return a + b * c
class SomeClass:
"""
I'm a class!
# Parameters
x : `float`
This attribute is called `x`.
"""
some_class_level_variable = 1
"""
This is how you document a class-level variable.
"""
some_class_level_var_with_type: int = 1
def __init__(self) -> None:
self.x = 1.0
def _private_method(self) -> None:
"""
Private methods should not be included in documentation.
"""
pass
def some_method(self) -> None:
"""
I'm a method!
But I don't do anything.
# Returns
`None`
"""
return None
def method_with_alternative_return_section(self) -> int:
"""
Another method.
# Returns
A completely arbitrary number.
"""
return 3
def method_with_alternative_return_section3(self) -> int:
"""
Another method.
# Returns
number : `int`
A completely arbitrary number.
"""
return 3
class AnotherClassWithReallyLongConstructor:
def __init__(
self,
a_really_long_argument_name: int = 0,
another_long_name: float = 2,
these_variable_names_are_terrible: str = "yea I know",
**kwargs,
) -> None:
self.a = a_really_long_argument_name
self.b = another_long_name
self.c = these_variable_names_are_terrible
self.other = kwargs
@dataclass
class ClassWithDecorator:
x: int
class _PrivateClass:
def public_method_on_private_class(self):
"""
This should not be documented since the class is private.
"""
pass
| allennlp-master | scripts/tests/py2md/basic_example.py |
import pytest
import sqlite3
from unittest.mock import call, Mock
from allennlp.common.testing import AllenNlpTestCase
from scripts.ai2_internal.resume_daemon import (
BeakerStatus,
create_table,
handler,
logger,
resume,
start_autoresume,
)
# Don't spam the log in tests.
logger.removeHandler(handler)
class ResumeDaemonTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.connection = sqlite3.connect(":memory:")
create_table(self.connection)
def test_create_beaker_status_works(self):
status = BeakerStatus("stopped")
assert status.name == "stopped"
def test_create_beaker_status_throws(self):
with pytest.raises(ValueError):
status = BeakerStatus("garbage")
assert status.name == "garbage"
def test_does_nothing_on_empty_db(self):
beaker = Mock()
resume(self.connection, beaker)
assert not beaker.method_calls
def test_does_not_resume_a_running_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.running
resume(self.connection, beaker)
beaker.get_status.assert_called()
assert len(beaker.method_calls) == 1
def test_does_not_resume_a_finished_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.succeeded
resume(self.connection, beaker)
beaker.get_status.assert_called()
assert len(beaker.method_calls) == 1
def test_does_resume_a_preempted_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
beaker.resume.return_value = "foo2"
resume(self.connection, beaker)
beaker.get_status.assert_called()
beaker.resume.assert_called()
assert len(beaker.method_calls) == 2
def test_respects_upper_bound_on_resumes(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
for i in range(10):
beaker.resume.return_value = f"foo{i}"
resume(self.connection, beaker)
calls = [
call.get_status("foo"),
call.resume("foo"),
call.get_status("foo0"),
call.resume("foo0"),
call.get_status("foo1"),
call.resume("foo1"),
call.get_status("foo2"),
call.resume("foo2"),
call.get_status("foo3"),
call.resume("foo3"),
call.get_status("foo4"),
]
beaker.assert_has_calls(calls)
def test_handles_a_realistic_scenario(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
for i in range(10):
beaker.resume.return_value = f"foo{i}"
if i == 2:
beaker.get_status.return_value = BeakerStatus.succeeded
resume(self.connection, beaker)
calls = [
call.get_status("foo"),
call.resume("foo"),
call.get_status("foo0"),
call.resume("foo0"),
call.get_status("foo1"),
]
beaker.assert_has_calls(calls)
| allennlp-master | scripts/tests/ai2_internal/resume_daemon_test.py |
#! /usr/bin/env python3
# Tool to automatically resume preemptible beaker experiments created with run_with_beaker.py.
#
# Examples
# --------
#
# Ensure an experiment will be resumed:
# resume_daemon.py --action=start --experiment-id=$YOUR_EXPERIMENT_ID
#
# Stop resuming an experiment:
# resume_daemon.py --action=stop --experiment-id=$YOUR_EXPERIMENT_ID
#
# Details
# -------
#
# In order to operate, resume_daemon.py does the following:
#
# 1. Modifies the user's crontab.
# 2. Maintains a SQLite DB in ~/.allennlp/resume.db.
# 3. Keeps logs in ~/.allennlp/resume.log.
#
# The reliance on crontab means that resumes will only occur when the running
# system is powered on. Longer term Beaker is planning on adding this
# functionality to their service directly, which will obsolete this tool.
import argparse
import json
import logging
import os
import random
import sqlite3
import subprocess
import time
from enum import Enum
from logging.handlers import RotatingFileHandler
from sqlite3 import Connection
from subprocess import PIPE
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
fmt="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
dot_allennlp_dir = f"{os.environ['HOME']}/.allennlp"
# Special case for users that haven't run AllenNLP locally.
if not os.path.exists(dot_allennlp_dir):
os.mkdir(dot_allennlp_dir)
handler = RotatingFileHandler(
f"{dot_allennlp_dir}/resume.log", maxBytes=1024 * 1024, backupCount=10
)
handler.setFormatter(formatter)
logger.addHandler(handler)
BEAKER_QUERY_INTERVAL_SECONDS = 1.0
# See https://github.com/beaker/client/blob/master/api/task_status.go
class BeakerStatus(Enum):
submitted = "submitted"
provisioning = "provisioning"
initializing = "initializing"
running = "running"
terminating = "terminating"
preempted = "preempted"
succeeded = "succeeded"
skipped = "skipped"
stopped = "stopped"
failed = "failed"
def __str__(self):
return self.name
def is_end_state(self):
if self is BeakerStatus.preempted:
return True
elif self is BeakerStatus.succeeded:
return True
elif self is BeakerStatus.skipped:
return True
elif self is BeakerStatus.stopped:
return True
elif self is BeakerStatus.failed:
return True
else:
return False
class BeakerWrapper:
def get_status(self, experiment_id: str) -> BeakerStatus:
command = ["beaker", "experiment", "inspect", experiment_id]
experiment_json = subprocess.check_output(command)
# Example output from beaker.
# brendanr.local$ beaker experiment inspect ex_g7knlblsjxxk
# [
# {
# "id": "ex_g7knlblsjxxk",
# "owner": {
# "id": "us_a4hw8yvr3xut",
# "name": "ai2",
# "displayName": "AI2"
# },
# "author": {
# "id": "us_hl8x796649u9",
# "name": "brendanr",
# "displayName": "Brendan Roof"
# },
# "workspace": "",
# "user": {
# "id": "",
# "name": "",
# "displayName": ""
# },
# "nodes": [
# {
# "name": "training",
# "task_id": "",
# "taskId": "tk_64wm85lc3f0m",
# "result_id": "",
# "resultId": "ds_du02un92r57b",
# "status": "initializing",
# "child_task_ids": null,
# "childTaskIds": [],
# "parent_task_ids": null,
# "parentTaskIds": []
# }
# ],
# "created": "2019-09-25T02:03:30.820437Z",
# "archived": false
# }
# ]
experiment_data = json.loads(experiment_json)
# Beaker lets there be multiple tasks in a single experiment. Here we
# just try to handle the simple case of single task experiments like
# those created by run_with_beaker.py.
assert len(experiment_data) == 1, "Experiment not created with run_with_beaker.py"
assert (
len(experiment_data[0]["nodes"]) == 1
), "Experiment not created with run_with_beaker.py"
status = BeakerStatus(experiment_data[0]["nodes"][0]["status"])
# Small delay to avoid thrashing Beaker.
time.sleep(BEAKER_QUERY_INTERVAL_SECONDS)
return status
def resume(self, experiment_id: str) -> str:
command = ["beaker", "experiment", "resume", f"--experiment-name={experiment_id}"]
# Small delay to avoid thrashing Beaker.
time.sleep(BEAKER_QUERY_INTERVAL_SECONDS)
return subprocess.check_output(command, universal_newlines=True).strip()
def create_table(connection: Connection) -> None:
cursor = connection.cursor()
create_table_statement = """
CREATE TABLE active_experiments
(experiment_id TEXT PRIMARY KEY, original_id TEXT, max_resumes INTEGER, current_resume INTEGER)
"""
cursor.execute(create_table_statement)
connection.commit()
def start_autoresume(connection: Connection, experiment_id: str, max_resumes: int) -> None:
cursor = connection.cursor()
cursor.execute(
"INSERT INTO active_experiments VALUES (?, ?, ?, ?)",
(experiment_id, experiment_id, max_resumes, 0),
)
connection.commit()
def stop_autoresume(connection: Connection, experiment_id: str) -> None:
cursor = connection.cursor()
cursor.execute("SELECT * FROM active_experiments WHERE experiment_id = ?", (experiment_id,))
result = cursor.fetchall()
assert result, f"Experiment {experiment_id} not found!"
cursor.execute("DELETE FROM active_experiments WHERE experiment_id = ?", (experiment_id,))
connection.commit()
def resume(connection: Connection, beaker: BeakerWrapper) -> None:
logger.info("Checking if resumes are needed.")
cursor = connection.cursor()
cursor.execute("SELECT * FROM active_experiments")
experiments = cursor.fetchall()
for experiment_row in experiments:
experiment_id, original_id, max_resumes, current_resume = experiment_row
status = beaker.get_status(experiment_id)
if status.is_end_state():
stop_autoresume(connection, experiment_id)
if status is BeakerStatus.preempted:
if current_resume >= max_resumes:
logger.info(
f"Experiment {experiment_id} preempted too many times "
f"({max_resumes}). Original experiment: {original_id}"
)
else:
new_experiment_id = beaker.resume(experiment_id)
logger.info(
f"Experiment {experiment_id} preempted "
f"({current_resume}/{max_resumes}). Resuming as: "
f"{new_experiment_id} Original experiment: {original_id}"
)
cursor.execute(
"INSERT INTO active_experiments VALUES (?, ?, ?, ?)",
(new_experiment_id, original_id, max_resumes, current_resume + 1),
)
connection.commit()
else:
logger.info(
f"Experiment {experiment_id} completed with status: "
f"{status}. Original experiment: {original_id}"
)
class Action(Enum):
start = "start"
stop = "stop"
resume = "resume"
def __str__(self):
return self.name
def main(args) -> None:
# Smooth load from potentially many daemons on different machines.
time.sleep(random.randint(0, args.random_delay_seconds))
db_path = f"{dot_allennlp_dir}/resume.db"
connection = sqlite3.connect(db_path)
# Create the DB if needed.
cursor = connection.cursor()
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='active_experiments'"
)
tables = cursor.fetchall()
if not tables:
create_table(connection)
# Modify the crontab if needed.
crontab_l_result = subprocess.run(
["crontab", "-l"], universal_newlines=True, stdout=PIPE, stderr=PIPE
)
if crontab_l_result.returncode == 0:
current_crontab = crontab_l_result.stdout
else:
# `crontab -l` fails when a crontab hasn't been installed previously.
# Sanity check the error message to guard against blowing away the
# crontab in some obscure failure case.
assert "no crontab" in crontab_l_result.stderr, f"crontab failed: {crontab_l_result.stderr}"
current_crontab = ""
full_path = os.path.abspath(__file__)
if full_path not in current_crontab:
# Execute this script every ten minutes. We set the PATH to that used
# to run this install step to make sure that we have access to python3
# and beaker.
cron_line = (
f"*/10 * * * * bash -c 'export PATH={os.environ['PATH']};"
f" python3 {full_path} --action=resume --random-delay-seconds=60'\n"
)
new_crontab = current_crontab + cron_line
subprocess.run(["crontab", "-"], input=new_crontab, encoding="utf-8")
if args.action is Action.start:
assert args.experiment_id
start_autoresume(connection, args.experiment_id, args.max_resumes)
elif args.action is Action.stop:
assert args.experiment_id
stop_autoresume(connection, args.experiment_id)
elif args.action is Action.resume:
beaker = BeakerWrapper()
resume(connection, beaker)
else:
raise Exception(f"Unaccounted for action {args.action}")
connection.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--action", type=Action, choices=list(Action), required=True)
parser.add_argument("--experiment-id", type=str)
parser.add_argument("--max-resumes", type=int, default=10)
parser.add_argument("--random-delay-seconds", type=int, default=0)
args = parser.parse_args()
try:
main(args)
except Exception:
# Ensure traces are logged.
# TODO(brendanr): Is there a better way to do this?
logger.exception("Fatal error")
raise
| allennlp-master | scripts/ai2_internal/resume_daemon.py |
#! /usr/bin/env python
# Script to launch AllenNLP Beaker jobs.
import argparse
import os
import json
import random
import tempfile
import subprocess
import sys
# This has to happen before we import spacy (even indirectly), because for some crazy reason spacy
# thought it was a good idea to set the random seed on import...
random_int = random.randint(0, 2 ** 32)
sys.path.insert(
0, os.path.dirname(os.path.abspath(os.path.join(os.path.join(__file__, os.pardir), os.pardir)))
)
from allennlp.common.params import Params
def main(param_file: str, args: argparse.Namespace):
commit = subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True).strip()
docker_image = f"allennlp/allennlp:{commit}"
overrides = args.overrides
# Reads params and sets environment.
ext_vars = {}
for var in args.env:
key, value = var.split("=")
ext_vars[key] = value
params = Params.from_file(param_file, overrides, ext_vars)
# Write params as json. Otherwise Jsonnet's import feature breaks.
params_dir = tempfile.mkdtemp(prefix="config")
compiled_params_path = os.path.join(params_dir, "config.json")
params.to_file(compiled_params_path)
print(f"Compiled jsonnet config written to {compiled_params_path}.")
flat_params = params.as_flat_dict()
env = {}
for k, v in flat_params.items():
k = str(k).replace(".", "_")
env[k] = str(v)
# If the git repository is dirty, add a random hash.
result = subprocess.run("git diff-index --quiet HEAD --", shell=True)
if result.returncode != 0:
dirty_hash = "%x" % random_int
docker_image += "-" + dirty_hash
if args.image:
image = args.image
print(f"Using the specified image: {image}")
else:
print(f"Building the Docker image ({docker_image})...")
subprocess.run(f"docker build -t {docker_image} .", shell=True, check=True)
print("Create a Beaker image...")
image = subprocess.check_output(
f"beaker image create --quiet {docker_image}", shell=True, universal_newlines=True
).strip()
print(f" Image created: {docker_image}")
config_dataset_id = subprocess.check_output(
f"beaker dataset create --quiet {params_dir}/*", shell=True, universal_newlines=True
).strip()
# Arguments that differ between preemptible and regular machine execution.
if args.preemptible:
allennlp_prefix = ["/stage/allennlp/resumable_train.sh", "/output", "/config/config.json"]
else:
allennlp_prefix = [
"python",
"-m",
"allennlp.run",
"train",
"/config/config.json",
"-s",
"/output",
]
# All other arguments
allennlp_suffix = ["--file-friendly-logging"]
for package_name in args.include_package:
allennlp_suffix.append("--include-package")
allennlp_suffix.append(package_name)
allennlp_command = allennlp_prefix + allennlp_suffix
dataset_mounts = []
for source in args.source + [f"{config_dataset_id}:/config"]:
datasetId, containerPath = source.split(":")
dataset_mounts.append({"datasetId": datasetId, "containerPath": containerPath})
for var in args.env:
key, value = var.split("=")
env[key] = value
requirements = {}
if args.cpu:
requirements["cpu"] = float(args.cpu)
if args.memory:
requirements["memory"] = args.memory
if args.gpu_count:
requirements["gpuCount"] = int(args.gpu_count)
if args.preemptible:
requirements["preemptible"] = True
config_spec = {
"description": args.desc,
"image": image,
"resultPath": "/output",
"args": allennlp_command,
"datasetMounts": dataset_mounts,
"requirements": requirements,
"env": env,
}
config_task = {"spec": config_spec, "name": "training"}
config = {"tasks": [config_task]}
output_path = (
args.spec_output_path
if args.spec_output_path
else tempfile.mkstemp(".yaml", "beaker-config-")[1]
)
with open(output_path, "w") as output:
output.write(json.dumps(config, indent=4))
print(f"Beaker spec written to {output_path}.")
experiment_command = ["beaker", "experiment", "create", "--quiet", "--file", output_path]
if args.name:
experiment_command.append("--name")
experiment_command.append(args.name.replace(" ", "-"))
def resume_command(experiment_id):
resume_daemon_path = os.path.join(os.path.dirname(__file__), "resume_daemon.py")
return [
# Run with python (instead of calling directly) in case the
# executable bit wasn't preserved for some reason.
"python3",
resume_daemon_path,
"--action=start",
f"--max-resumes={args.max_resumes}",
f"--experiment-id={experiment_id}",
]
if args.dry_run:
print("This is a dry run (--dry-run). Launch your job with the following command:")
print(" " + " ".join(experiment_command))
if args.max_resumes > 0:
print("Configure auto-resumes with the following command:")
print(" " + " ".join(resume_command("$YOUR_EXPERIMENT_ID")))
else:
print("Running the experiment:")
print(" " + " ".join(experiment_command))
experiment_id = subprocess.check_output(experiment_command, universal_newlines=True).strip()
print(
f"Experiment {experiment_id} submitted. "
f"See progress at https://beaker.org/ex/{experiment_id}"
)
if args.max_resumes > 0:
print("Configuring auto-resumes:")
print(" " + " ".join(resume_command(experiment_id)))
subprocess.run(resume_command(experiment_id))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("param_file", type=str, help="The model configuration file.")
parser.add_argument("--name", type=str, help="A name for the experiment.")
parser.add_argument(
"--spec_output_path", type=str, help="The destination to write the experiment spec."
)
parser.add_argument(
"--dry-run", action="store_true", help="If specified, an experiment will not be created."
)
parser.add_argument(
"--image", type=str, help="The image to use (if unspecified one will be built)"
)
parser.add_argument("--desc", type=str, help="A description for the experiment.")
parser.add_argument(
"--env",
action="append",
default=[],
help="Set environment variables (e.g. NAME=value or NAME)",
)
parser.add_argument(
"--source",
action="append",
default=[],
help="Bind a remote data source (e.g. source-id:/target/path)",
)
parser.add_argument("--cpu", help="CPUs to reserve for this experiment (e.g., 0.5)")
parser.add_argument(
"--gpu-count", default=1, help="GPUs to use for this experiment (e.g., 1 (default))"
)
parser.add_argument("--memory", help="Memory to reserve for this experiment (e.g., 1GB)")
parser.add_argument(
"--preemptible", action="store_true", help="Allow task to run on preemptible hardware"
)
parser.add_argument(
"--max-resumes",
type=int,
default=0,
help="When running with --preemptible, use a cronjob to automatically resume this many times.",
)
parser.add_argument(
"--include-package",
type=str,
action="append",
default=[],
help="Additional packages to include",
)
parser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help="a JSON structure used to override the experiment configuration",
)
args = parser.parse_args()
if args.max_resumes > 0:
assert args.preemptible, "--max-resumes requires --preemptible!"
main(args.param_file, args)
| allennlp-master | scripts/ai2_internal/run_with_beaker.py |
import argparse
import json
from dotenv import load_dotenv
import plotly
import shutil
import smtplib
import ssl
import sys
import textwrap
from data_measurements import dataset_statistics
from data_measurements.zipf import zipf
from huggingface_hub import create_repo, Repository, hf_api
from os import getenv
from os.path import exists, join as pjoin
from pathlib import Path
import utils
from utils import dataset_utils
logs = utils.prepare_logging(__file__)
def load_or_prepare_widgets(ds_args, show_embeddings=False,
show_perplexities=False, use_cache=False):
"""
Loader specifically for the widgets used in the app.
Args:
ds_args:
show_embeddings:
show_perplexities:
use_cache:
Returns:
"""
dstats = dataset_statistics.DatasetStatisticsCacheClass(**ds_args, use_cache=use_cache)
# Header widget
dstats.load_or_prepare_dset_peek()
# General stats widget
dstats.load_or_prepare_general_stats()
# Labels widget
dstats.load_or_prepare_labels()
# Text lengths widget
dstats.load_or_prepare_text_lengths()
if show_embeddings:
# Embeddings widget
dstats.load_or_prepare_embeddings()
if show_perplexities:
# Text perplexities widget
dstats.load_or_prepare_text_perplexities()
# Text duplicates widget
dstats.load_or_prepare_text_duplicates()
# nPMI widget
dstats.load_or_prepare_npmi()
# Zipf widget
dstats.load_or_prepare_zipf()
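# Illustrative shape of `ds_args` for the loader above (it mirrors the dict assembled in
# pass_args_to_DMT below; the values here are hypothetical):
#   {"dset_name": "imdb", "dset_config": "plain_text", "split_name": "train",
#    "text_field": "text", "label_field": "label", "label_names": [],
#    "dataset_cache_dir": "cache_dir/..."}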
def load_or_prepare(dataset_args, calculation=False, use_cache=False):
# TODO: Catch error exceptions for each measurement, so that an error
# for one measurement doesn't break the calculation of all of them.
do_all = False
dstats = dataset_statistics.DatasetStatisticsCacheClass(**dataset_args,
use_cache=use_cache)
logs.info("Tokenizing dataset.")
dstats.load_or_prepare_tokenized_df()
logs.info("Calculating vocab.")
dstats.load_or_prepare_vocab()
if not calculation:
do_all = True
if do_all or calculation == "general":
logs.info("\n* Calculating general statistics.")
dstats.load_or_prepare_general_stats()
logs.info("Done!")
logs.info(
"Basic text statistics now available at %s." % dstats.general_stats_json_fid)
if do_all or calculation == "duplicates":
logs.info("\n* Calculating text duplicates.")
dstats.load_or_prepare_text_duplicates()
duplicates_fid_dict = dstats.duplicates_files
logs.info("If all went well, then results are in the following files:")
for key, value in duplicates_fid_dict.items():
logs.info("%s: %s" % (key, value))
if do_all or calculation == "lengths":
logs.info("\n* Calculating text lengths.")
dstats.load_or_prepare_text_lengths()
length_fid_dict = dstats.length_obj.get_filenames()
print("If all went well, then results are in the following files:")
for key, value in length_fid_dict.items():
print("%s: %s" % (key, value))
print()
if do_all or calculation == "labels":
logs.info("\n* Calculating label statistics.")
if dstats.label_field not in dstats.dset.features:
logs.warning("No label field found.")
logs.info("No label statistics to calculate.")
else:
dstats.load_or_prepare_labels()
npmi_fid_dict = dstats.label_files
print("If all went well, then results are in the following files:")
for key, value in npmi_fid_dict.items():
print("%s: %s" % (key, value))
print()
if do_all or calculation == "npmi":
print("\n* Preparing nPMI.")
dstats.load_or_prepare_npmi()
npmi_fid_dict = dstats.npmi_files
print("If all went well, then results are in the following files:")
for key, value in npmi_fid_dict.items():
if isinstance(value, dict):
print(key + ":")
for key2, value2 in value.items():
print("\t%s: %s" % (key2, value2))
else:
print("%s: %s" % (key, value))
print()
if do_all or calculation == "zipf":
logs.info("\n* Preparing Zipf.")
dstats.load_or_prepare_zipf()
logs.info("Done!")
zipf_json_fid, zipf_fig_json_fid, zipf_fig_html_fid = zipf.get_zipf_fids(
dstats.dataset_cache_dir)
logs.info("Zipf results now available at %s." % zipf_json_fid)
logs.info(
"Figure saved to %s, with corresponding json at %s."
% (zipf_fig_html_fid, zipf_fig_json_fid)
)
# Don't do this one until someone specifically asks for it -- takes awhile.
if calculation == "embeddings":
logs.info("\n* Preparing text embeddings.")
dstats.load_or_prepare_embeddings()
# Don't do this one until someone specifically asks for it -- takes awhile.
if calculation == "perplexities":
logs.info("\n* Preparing text perplexities.")
dstats.load_or_prepare_text_perplexities()
def pass_args_to_DMT(dset_name, dset_config, split_name, text_field, label_field,
                     label_names, calculation, dataset_cache_dir, prepare_gui=False,
                     use_cache=True):
if not use_cache:
logs.info("Not using any cache; starting afresh")
dataset_args = {
"dset_name": dset_name,
"dset_config": dset_config,
"split_name": split_name,
"text_field": text_field,
"label_field": label_field,
"label_names": label_names,
"dataset_cache_dir": dataset_cache_dir
}
if prepare_gui:
load_or_prepare_widgets(dataset_args, use_cache=use_cache)
else:
load_or_prepare(dataset_args, calculation=calculation, use_cache=use_cache)
def set_defaults(args):
if not args.config:
args.config = "default"
logs.info("Config name not specified. Assuming it's 'default'.")
if not args.split:
args.split = "train"
logs.info("Split name not specified. Assuming it's 'train'.")
if not args.feature:
args.feature = "text"
logs.info("Text column name not given. Assuming it's 'text'.")
if not args.label_field:
args.label_field = "label"
logs.info("Label column name not given. Assuming it's 'label'.")
return args
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(
"""
Example for hate speech18 dataset:
python3 run_data_measurements.py --dataset="hate_speech18" --config="default" --split="train" --feature="text"
Example for IMDB dataset:
python3 run_data_measurements.py --dataset="imdb" --config="plain_text" --split="train" --label_field="label" --feature="text"
"""
),
)
parser.add_argument(
"-d", "--dataset", required=True, help="Name of dataset to prepare"
)
parser.add_argument(
"-c", "--config", required=False, default="", help="Dataset configuration to prepare"
)
parser.add_argument(
"-s", "--split", required=False, default="", type=str,
help="Dataset split to prepare"
)
parser.add_argument(
"-f",
"--feature",
"-t",
"--text-field",
required=False,
nargs="+",
type=str,
default="",
help="Column to prepare (handled as text)",
)
parser.add_argument(
"-w",
"--calculation",
help="""What to calculate (defaults to everything except embeddings and perplexities).\n
Options are:\n
- `general` (for duplicate counts, missing values, length statistics.)\n
- `duplicates` for duplicate counts\n
- `lengths` for text length distribution\n
- `labels` for label distribution\n
- `embeddings` (Warning: Slow.)\n
- `perplexities` (Warning: Slow.)\n
- `npmi` for word associations\n
- `zipf` for zipfian statistics
""",
)
parser.add_argument(
"-l",
"--label_field",
type=str,
required=False,
default="",
help="Field name for label column in dataset (Required if there is a label field that you want information about)",
)
parser.add_argument('-n', '--label_names', nargs='+', default=[])
parser.add_argument(
"--use_cache",
default=False,
required=False,
action="store_true",
help="Whether to use cached files (Optional)",
)
parser.add_argument("--out_dir", default="cache_dir",
help="Where to write out to.")
parser.add_argument(
"--overwrite_previous",
default=False,
required=False,
action="store_true",
help="Whether to overwrite a previous local cache for these same arguments (Optional)",
)
parser.add_argument(
"--email",
default=None,
help="An email that recieves a message about whether the computation was successful. If email is not None, then you must have EMAIL_PASSWORD=<your email password> for the sender email (data.measurements.tool@gmail.com) in a file named .env at the root of this repo.")
parser.add_argument(
"--push_cache_to_hub",
default=False,
required=False,
action="store_true",
help="Whether to push the cache to an organization on the hub. If you are using this option, you must have HUB_CACHE_ORGANIZATION=<the organization you've set up on the hub to store your cache> and HF_TOKEN=<your hf token> on separate lines in a file named .env at the root of this repo.",
)
parser.add_argument("--prepare_GUI_data", default=False, required=False,
action="store_true",
help="Use this to process all of the stats used in the GUI.")
parser.add_argument("--keep_local", default=True, required=False,
action="store_true",
help="Whether to save the data locally.")
orig_args = parser.parse_args()
args = set_defaults(orig_args)
logs.info("Proceeding with the following arguments:")
logs.info(args)
# run_data_measurements.py -d hate_speech18 -c default -s train -f text -w npmi
if args.email is not None:
if Path(".env").is_file():
load_dotenv(".env")
EMAIL_PASSWORD = getenv("EMAIL_PASSWORD")
context = ssl.create_default_context()
port = 465
server = smtplib.SMTP_SSL("smtp.gmail.com", port, context=context)
server.login("data.measurements.tool@gmail.com", EMAIL_PASSWORD)
dataset_cache_name, local_dataset_cache_dir = dataset_utils.get_cache_dir_naming(args.out_dir, args.dataset, args.config, args.split, args.feature)
if not args.use_cache and exists(local_dataset_cache_dir):
if args.overwrite_previous:
shutil.rmtree(local_dataset_cache_dir)
else:
raise OSError("Cached results for this dataset already exist at %s. "
"Delete it or use the --overwrite_previous argument." % local_dataset_cache_dir)
# Initialize the local cache directory
dataset_utils.make_path(local_dataset_cache_dir)
# Initialize the repository
# TODO: print out local or hub cache directory location.
if args.push_cache_to_hub:
repo = dataset_utils.initialize_cache_hub_repo(local_dataset_cache_dir, dataset_cache_name)
# Run the measurements.
try:
pass_args_to_DMT(
dset_name=args.dataset,
dset_config=args.config,
split_name=args.split,
text_field=args.feature,
label_field=args.label_field,
label_names=args.label_names,
calculation=args.calculation,
dataset_cache_dir=local_dataset_cache_dir,
prepare_gui=args.prepare_GUI_data,
use_cache=args.use_cache,
)
if args.push_cache_to_hub:
repo.push_to_hub(commit_message="Added dataset cache.")
computed_message = f"Data measurements have been computed for dataset" \
f" with these arguments: {args}."
logs.info(computed_message)
if args.email is not None:
computed_message += "\nYou can return to the data measurements tool " \
"to view them."
server.sendmail("data.measurements.tool@gmail.com", args.email,
"Subject: Data Measurements Computed!\n\n" + computed_message)
logs.info(computed_message)
except Exception as e:
logs.exception(e)
error_message = f"An error occurred in computing data measurements " \
f"for dataset with arguments: {args}. " \
f"Feel free to make an issue here: " \
f"https://github.com/huggingface/data-measurements-tool/issues"
if args.email is not None:
server.sendmail("data.measurements.tool@gmail.com", args.email,
"Subject: Data Measurements not Computed\n\n" + error_message)
logs.warning("Data measurements not computed. ☹️")
logs.warning(error_message)
return
if not args.keep_local:
# Remove the dataset from local storage - we only want it stored on the hub.
logs.warning("Deleting measurements data locally at %s" % local_dataset_cache_dir)
shutil.rmtree(local_dataset_cache_dir)
else:
logs.info("Measurements made available locally at %s" % local_dataset_cache_dir)
if __name__ == "__main__":
main()
| data-measurements-tool-main | run_data_measurements.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ast
import gradio as gr
from os.path import isdir
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
import utils
from utils import dataset_utils
from utils import gradio_utils as gr_utils
import widgets
logs = utils.prepare_logging(__file__)
# Utility for sidebar description and selection of the dataset
DATASET_NAME_TO_DICT = dataset_utils.get_dataset_info_dicts()
def get_load_prepare_list(dstats):
"""
# Get load_or_prepare functions for the measurements we will display
"""
# Measurement calculation:
# Add any additional modules and their load-prepare function here.
load_prepare_list = [("general stats", dstats.load_or_prepare_general_stats),
("label distribution", dstats.load_or_prepare_labels),
("text_lengths", dstats.load_or_prepare_text_lengths),
("duplicates", dstats.load_or_prepare_text_duplicates),
("npmi", dstats.load_or_prepare_npmi),
("zipf", dstats.load_or_prepare_zipf)]
return load_prepare_list
def get_ui_widgets():
"""Get the widgets that will be displayed in the UI."""
return [widgets.DatasetDescription(DATASET_NAME_TO_DICT),
widgets.GeneralStats(),
widgets.LabelDistribution(),
widgets.TextLengths(),
widgets.Duplicates(),
widgets.Npmi(),
widgets.Zipf()]
def get_widgets():
"""
# A measurement widget requires 2 things:
# - A load or prepare function
# - A display function
# We define these in two separate functions get_load_prepare_list and get_ui_widgets;
# any widget can be added by modifying both functions and the rest of the app logic will work.
# get_load_prepare_list is a function since it requires a DatasetStatisticsCacheClass which will
# not be created until dataset and config values are selected in the ui
"""
return get_load_prepare_list, get_ui_widgets()
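# Illustrative sketch of the widget pattern described in the docstring above
# (hypothetical names, not part of the original app): a new measurement needs one
# entry in get_load_prepare_list and one widget class in get_ui_widgets, e.g.
# ("my_measurement", dstats.load_or_prepare_my_measurement) added to the first and
# widgets.MyMeasurement() added to the second; update_ui and the event wiring in
# create_demo then pick both up without further changes.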
def get_title(dstats):
title_str = f"### Showing: {dstats.dset_name} - {dstats.dset_config} - {dstats.split_name} - {'-'.join(dstats.text_field)}"
logs.info("showing header")
return title_str
def display_initial_UI():
"""Displays the header in the UI"""
# Extract the selected arguments
dataset_args = gr_utils.sidebar_selection(DATASET_NAME_TO_DICT)
return dataset_args
def load_or_prepare_widgets(dstats, load_prepare_list, show_perplexities, live=True, pull_cache_from_hub=False):
"""
Takes the dataset arguments from the GUI and uses them to load a dataset from the Hub or, if
a cache for those arguments is available, to load it from the cache.
Widget data is loaded only when the system is live (deployed for users).
Otherwise, the data is prepared if it doesn't yet exist.
Args:
dstats: The DatasetStatisticsCacheClass object built from the arguments selected in the GUI
load_prepare_list (list): List of (widget_name, widget_load_or_prepare_function)
show_perplexities (Bool): whether perplexities should be loaded and displayed for this dataset
live (Bool): Whether the system is deployed for live use by users.
pull_cache_from_hub (Bool): Whether the cache should be pulled from the hub (vs locally)
Returns:
dstats: the computed dataset statistics (from the dataset_statistics class)
"""
# When we're "live" (tool is being used by users on our servers),
# cache is used and the f'ns are instructed to only try to load cache,
# not to prepare/compute anything anew.
if live:
# Only use what's cached; don't prepare anything
load_only = True
logs.info("Only using cache.")
else:
# Prepare things anew and cache them if we're not live.
load_only = False
logs.info("Making new calculations if cache is not there.")
if pull_cache_from_hub:
dataset_utils.pull_cache_from_hub(dstats.cache_path, dstats.dataset_cache_dir)
# Data common across DMT:
# Includes the dataset text/requested feature column,
# the dataset tokenized, and the vocabulary
dstats.load_or_prepare_text_dataset(load_only=load_only)
# Just a snippet of the dataset
dstats.load_or_prepare_dset_peek(load_only=load_only)
# Tokenized dataset
dstats.load_or_prepare_tokenized_df(load_only=load_only)
# Vocabulary (uses tokenized dataset)
dstats.load_or_prepare_vocab(load_only=load_only)
# Custom widgets
for widget_tuple in load_prepare_list:
widget_name = widget_tuple[0]
widget_fn = widget_tuple[1]
try:
widget_fn(load_only=load_only)
except Exception as e:
logs.warning("Issue with %s." % widget_name)
logs.exception(e)
# TODO: If these are cached, can't we just show them by default?
# It won't take up computation time.
if show_perplexities:
try:
dstats.load_or_prepare_text_perplexities(load_only=load_only)
except Exception as e:
logs.warning("Issue with %s." % "perplexities")
logs.exception(e)
return dstats
def show_column(dstats, display_list, show_perplexities, column_id=""):
"""
Function for displaying the elements in the streamlit app.
Args:
dstats (class): The dataset_statistics.py DatasetStatisticsCacheClass
display_list (list): List of tuples for (widget_name, widget_display_function)
show_perplexities (Bool): Whether perplexities should be loaded and displayed for this dataset
column_id (str): Which column of the dataset the analysis is done on [DEPRECATED for v1]
"""
# start showing stuff
gr_utils.expander_header(dstats, DATASET_NAME_TO_DICT)
for widget_tuple in display_list:
widget_type = widget_tuple[0]
widget_fn = widget_tuple[1]
logs.info("showing %s." % widget_type)
try:
widget_fn(dstats, column_id)
except Exception as e:
logs.warning("Jk jk jk. There was an issue with %s:" % widget_type)
logs.exception(e)
# TODO: Fix how this is a weird outlier.
if show_perplexities:
gr_utils.expander_text_perplexities(dstats, column_id)
logs.info("Have finished displaying the widgets.")
def create_demo(live: bool, pull_cache_from_hub: bool):
with gr.Blocks() as demo:
state = gr.State()
with gr.Row():
with gr.Column(scale=1):
dataset_args = display_initial_UI()
get_load_prepare_list_fn, widget_list = get_widgets()
# # TODO: Make this less of a weird outlier.
# Doesn't do anything right now
show_perplexities = gr.Checkbox(label="Show text perplexities")
with gr.Column(scale=4):
gr.Markdown("# Data Measurements Tool")
title = gr.Markdown()
for widget in widget_list:
widget.render()
def update_ui(dataset: str, config: str, split: str, feature: str):
feature = ast.literal_eval(feature)
label_field, label_names = gr_utils.get_label_names(dataset, config, DATASET_NAME_TO_DICT)
dstats = dmt_cls(dset_name=dataset, dset_config=config, split_name=split, text_field=feature,
label_field=label_field, label_names=label_names, use_cache=True)
load_prepare_list = get_load_prepare_list_fn(dstats)
dstats = load_or_prepare_widgets(dstats, load_prepare_list, show_perplexities=False,
live=live, pull_cache_from_hub=pull_cache_from_hub)
output = {title: get_title(dstats), state: dstats}
for widget in widget_list:
output.update(widget.update(dstats))
return output
def update_dataset(dataset: str):
new_values = gr_utils.update_dataset(dataset, DATASET_NAME_TO_DICT)
config = new_values[0][1]
feature = new_values[1][1]
split = new_values[2][1]
new_dropdown = {
dataset_args["dset_config"]: gr.Dropdown.update(choices=new_values[0][0], value=config),
dataset_args["text_field"]: gr.Dropdown.update(choices=new_values[1][0], value=feature),
dataset_args["split_name"]: gr.Dropdown.update(choices=new_values[2][0], value=split),
}
return new_dropdown
def update_config(dataset: str, config: str):
new_values = gr_utils.update_config(dataset, config, DATASET_NAME_TO_DICT)
feature = new_values[0][1]
split = new_values[1][1]
new_dropdown = {
dataset_args["text_field"]: gr.Dropdown.update(choices=new_values[0][0], value=feature),
dataset_args["split_name"]: gr.Dropdown.update(choices=new_values[1][0], value=split)
}
return new_dropdown
measurements = [comp for output in widget_list for comp in output.output_components]
demo.load(update_ui,
inputs=[dataset_args["dset_name"], dataset_args["dset_config"], dataset_args["split_name"], dataset_args["text_field"]],
outputs=[title, state] + measurements)
for widget in widget_list:
widget.add_events(state)
dataset_args["dset_name"].change(update_dataset,
inputs=[dataset_args["dset_name"]],
outputs=[dataset_args["dset_config"],
dataset_args["split_name"], dataset_args["text_field"],
title, state] + measurements)
dataset_args["dset_config"].change(update_config,
inputs=[dataset_args["dset_name"], dataset_args["dset_config"]],
outputs=[dataset_args["split_name"], dataset_args["text_field"],
title, state] + measurements)
dataset_args["calculate_btn"].click(update_ui,
inputs=[dataset_args["dset_name"], dataset_args["dset_config"],
dataset_args["split_name"], dataset_args["text_field"]],
outputs=[title, state] + measurements)
return demo
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--live", default=False, required=False, action="store_true", help="Flag to specify that this is not running live.")
parser.add_argument(
"--pull_cache_from_hub", default=False, required=False, action="store_true", help="Flag to specify whether to look in the hub for measurements caches. If you are using this option, you must have HUB_CACHE_ORGANIZATION=<the organization you've set up on the hub to store your cache> and HF_TOKEN=<your hf token> on separate lines in a file named .env at the root of this repo.")
arguments = parser.parse_args()
live = arguments.live
pull_cache_from_hub = arguments.pull_cache_from_hub
# Create and initialize the demo
demo = create_demo(live, pull_cache_from_hub)
demo.launch()
if __name__ == "__main__":
main()
| data-measurements-tool-main | app.py |
data-measurements-tool-main | lengths/__init__.py |
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("npmi", module_type="measurement")
launch_gradio_widget(module) | data-measurements-tool-main | npmi/app.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Change print statements to logging?
# from evaluate import logging as logs
import warnings
import datasets
import evaluate
import numpy as np
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
_CITATION = """\
Osman Aka, Ken Burke, Alex Bauerle, Christina Greer, and Margaret Mitchell. \
2021. Measuring Model Biases in the Absence of Ground Truth. \
In Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society \
(AIES '21). Association for Computing Machinery, New York, NY, USA, 327–335. \
https://doi.org/10.1145/3461702.3462557
"""
_DESCRIPTION = """\
Normalized Pointwise Mutual Information (nPMI) is an entropy-based measurement
of association, used here to measure the association between words.
"""
_KWARGS_DESCRIPTION = """\
Args:
references (list of lists): List of tokenized sentences.
vocab_counts (dict or dataframe): Vocab terms and their counts
Returns:
npmi_df: A dataframe with (1) nPMI association scores for each term; \
(2) the difference between them.
"""
# TODO: Is this necessary?
warnings.filterwarnings(action="ignore", category=UserWarning)
# When we divide by 0 in log
np.seterr(divide="ignore")
# treating inf values as NaN as well
pd.set_option("use_inf_as_na", True)
# This can be changed to whatever a person likes;
# it is the number of batches to use when iterating through the vocabulary.
_NUM_BATCHES = 500
PROP = "proportion"
CNT = "count"
class nPMI(evaluate.Measurement):
def _info(self):
return evaluate.MeasurementInfo(
module_type="measurement",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"references": datasets.Sequence(
datasets.Value("string", id="sequence"),
id="references"),
}
)
# TODO: Create docs for this.
# reference_urls=["https://huggingface.co/docs/..."],
)
def _compute(self, references, vocab_counts, subgroup):
if isinstance(vocab_counts, dict):
vocab_counts_df = pd.DataFrame.from_dict(vocab_counts,
orient='index',
columns=[CNT])
elif isinstance(vocab_counts, pd.DataFrame):
vocab_counts_df = vocab_counts
else:
print("Can't support the data structure for the vocab counts. =(")
return
# These are used throughout the rest of the functions
self.references = references
self.vocab_counts_df = vocab_counts_df
self.vocab_counts_df[PROP] = vocab_counts_df[CNT] / sum(
vocab_counts_df[CNT])
# self.mlb_list holds num batches x num_sentences
self.mlb_list = []
# Index of the subgroup word in the sparse vector
subgroup_idx = vocab_counts_df.index.get_loc(subgroup)
print("Calculating co-occurrences...")
df_coo = self.calc_cooccurrences(subgroup, subgroup_idx)
vocab_cooc_df = self.set_idx_cols(df_coo, subgroup)
print("Calculating PMI...")
pmi_df = self.calc_PMI(vocab_cooc_df, subgroup)
print("Calculating nPMI...")
npmi_df = self.calc_nPMI(pmi_df, vocab_cooc_df, subgroup)
npmi_bias = npmi_df.max(axis=0) + abs(npmi_df.min(axis=0))
return {"bias": npmi_bias, "co-occurrences": vocab_cooc_df,
"pmi": pmi_df, "npmi": npmi_df}
def _binarize_words_in_sentence(self):
print("Creating co-occurrence matrix for PMI calculations.")
batches = np.linspace(0, len(self.references), _NUM_BATCHES).astype(int)
i = 0
# Creates list of size (# batches x # sentences)
while i < len(batches) - 1:
# Makes a sparse matrix (shape: # sentences x # words),
# with the occurrence of each word per sentence.
mlb = MultiLabelBinarizer(classes=self.vocab_counts_df.index)
print(
"%s of %s sentence binarize batches." % (
str(i), str(len(batches)))
)
# Returns series: batch size x num_words
mlb_series = mlb.fit_transform(
self.references[batches[i]:batches[i + 1]]
)
i += 1
self.mlb_list.append(mlb_series)
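# Illustrative sketch of one binarized batch (toy values, not part of the original
# module): with a vocabulary index of ["cat", "dog", "the"], the two sentences
# [["the", "cat"], ["the", "dog"]] binarize to the 2 x 3 matrix
# [[1, 0, 1],
#  [0, 1, 1]]
# i.e. one row per sentence and one 0/1 column per vocabulary word.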
def calc_cooccurrences(self, subgroup, subgroup_idx):
initialize = True
coo_df = None
# Big computation here! Should only happen once.
print(
"Approaching big computation! Here, we binarize all words in the sentences, making a sparse matrix of sentences."
)
if not self.mlb_list:
self._binarize_words_in_sentence()
for batch_id in range(len(self.mlb_list)):
print(
"%s of %s co-occurrence count batches"
% (str(batch_id), str(len(self.mlb_list)))
)
# List of all the sentences (list of vocab) in that batch
batch_sentence_row = self.mlb_list[batch_id]
# Dataframe of # sentences in batch x vocabulary size
sent_batch_df = pd.DataFrame(batch_sentence_row)
# Subgroup counts per-sentence for the given batch
subgroup_df = sent_batch_df[subgroup_idx]
subgroup_df.columns = [subgroup]
# Remove the sentences where the count of the subgroup is 0.
# This way we have less computation & resources needs.
subgroup_df = subgroup_df[subgroup_df > 0]
mlb_subgroup_only = sent_batch_df[sent_batch_df[subgroup_idx] > 0]
# Create cooccurrence matrix for the given subgroup and all words.
batch_coo_df = pd.DataFrame(mlb_subgroup_only.T.dot(subgroup_df))
# Creates a batch-sized dataframe of co-occurrence counts.
# Note these could just be summed rather than be batch size.
if initialize:
coo_df = batch_coo_df
else:
coo_df = coo_df.add(batch_coo_df, fill_value=0)
initialize = False
print("Returning co-occurrence matrix")
return pd.DataFrame(coo_df)
def set_idx_cols(self, df_coo, subgroup):
"""
:param df_coo: Co-occurrence counts for subgroup, length is num_words
:return:
"""
count_df = df_coo.set_index(self.vocab_counts_df.index)
count_df.columns = [subgroup + "-count"]
count_df[subgroup + "-count"] = count_df[subgroup + "-count"].astype(
int)
return count_df
def calc_PMI(self, vocab_cooc_df, subgroup):
"""
# PMI(x;y) = h(y) - h(y|x)
# = h(subgroup) - h(subgroup|word)
# = log (p(subgroup|word) / p(subgroup))
# nPMI additionally divides by -log(p(x,y)) = -log(p(x|y)p(y))
"""
# Calculation of p(subgroup)
# TODO: Is this better?
# subgroup_prob = vocab_counts_df.loc[subgroup][PROP]
subgroup_prob = self.vocab_counts_df.loc[subgroup][CNT] / sum(
self.vocab_counts_df[CNT])
# Calculation of p(subgroup|word) = count(subgroup,word) / count(word)
# Because the indices match (the vocab words),
# this division doesn't need to specify the index (I think?!)
p_subgroup_g_word = (
vocab_cooc_df[subgroup + "-count"] / self.vocab_counts_df[
CNT]
)
pmi_df = pd.DataFrame()
pmi_df[subgroup + "-pmi"] = np.log(p_subgroup_g_word / subgroup_prob)
# Note: A potentially faster solution for adding count, npmi,
# can be based on this zip idea:
# df_test['size_kb'], df_test['size_mb'], df_test['size_gb'] =
# zip(*df_test['size'].apply(sizes))
return pmi_df.dropna()
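# Worked toy example of the formula above (hypothetical numbers, not computed by
# this module): if p(subgroup) = 0.10 overall but p(subgroup|word) = 0.40 for a
# given word, then PMI = log(0.40 / 0.10) = log(4) ~= 1.39, meaning the word and
# the subgroup co-occur more often than chance; a negative PMI means the reverse.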
def calc_nPMI(self, pmi_df, vocab_cooc_df, subgroup):
"""
# nPMI additionally divides by -log(p(x,y)) = -log(p(x|y)p(y))
# = -log(p(word|subgroup)p(word))
"""
p_word_g_subgroup = vocab_cooc_df[subgroup + "-count"] / sum(
vocab_cooc_df[subgroup + "-count"]
)
p_word = pmi_df.apply(
lambda x: self.vocab_counts_df.loc[x.name][PROP], axis=1
)
normalize_pmi = -np.log(p_word_g_subgroup * p_word)
npmi_df = pd.DataFrame()
npmi_df[subgroup + "-npmi"] = pmi_df[subgroup + "-pmi"] / normalize_pmi
return npmi_df.dropna()
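# Hedged note on the normalization above (summary, not original documentation):
# dividing PMI by -log(p(word|subgroup) * p(word)) rescales scores to roughly
# [-1, 1]: values near 1 mean the word essentially only occurs alongside the
# subgroup term, values near 0 suggest independence, and values near -1 mean the
# word and the subgroup term almost never co-occur.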
| data-measurements-tool-main | npmi/npmi.py |
import logging
import os
from pathlib import Path
def prepare_logging(fid):
# Create the directory for log files (if it doesn't exist)
Path('./log_files').mkdir(exist_ok=True)
log_fid = Path(fid).stem
logs = logging.getLogger(log_fid)
logs.setLevel(logging.DEBUG)
logs.propagate = False
log_fid = Path(fid).stem
if not logs.handlers:
# Logging info to log file
file_path = ("./log_files/%s.log" % log_fid)
print("Logging output in %s " % file_path)
file = logging.FileHandler(file_path)
fileformat = logging.Formatter("%(asctime)s:%(pathname)s, %(module)s:%(lineno)s\n%(message)s")
file.setLevel(logging.INFO)
file.setFormatter(fileformat)
# Logging debug messages to stream
stream = logging.StreamHandler()
streamformat = logging.Formatter("[data_measurements_tool] {%(pathname)s:%(lineno)d} %(module)s %(levelname)s - %(message)s")
stream.setLevel(logging.DEBUG)
stream.setFormatter(streamformat)
logs.addHandler(file)
logs.addHandler(stream)
return logs | data-measurements-tool-main | utils/__init__.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pandas as pd
import plotly
import pyarrow.feather as feather
import utils
from dataclasses import asdict
from datasets import Dataset, get_dataset_infos, load_dataset, load_from_disk, \
NamedSplit
from dotenv import load_dotenv
from huggingface_hub import Repository, list_datasets
from json2html import *
from os import getenv
from os.path import exists, isdir, join as pjoin
from pathlib import Path
# treating inf values as NaN as well
pd.set_option("use_inf_as_na", True)
## String names used in Hugging Face dataset configs.
HF_FEATURE_FIELD = "features"
HF_LABEL_FIELD = "label"
HF_DESC_FIELD = "description"
CACHE_DIR = "cache_dir"
## String names we are using within this code.
# These are not coming from the stored dataset nor HF config,
# but rather used as identifiers in our dicts and dataframes.
TEXT_FIELD = "text"
PERPLEXITY_FIELD = "perplexity"
TOKENIZED_FIELD = "tokenized_text"
EMBEDDING_FIELD = "embedding"
LENGTH_FIELD = "length"
VOCAB = "vocab"
WORD = "word"
CNT = "count"
PROP = "proportion"
TEXT_NAN_CNT = "text_nan_count"
TXT_LEN = "text lengths"
TOT_WORDS = "total words"
TOT_OPEN_WORDS = "total open words"
_DATASET_LIST = [
"c4",
"squad",
"squad_v2",
"hate_speech18",
"hate_speech_offensive",
"glue",
"super_glue",
"wikitext",
"imdb",
]
_STREAMABLE_DATASET_LIST = [
"c4",
"wikitext",
]
_MAX_ROWS = 200000
logs = utils.prepare_logging(__file__)
def _load_dotenv_for_cache_on_hub():
"""
This function loads and returns the organization name that you've set up on the
hub for storing your data measurements cache on the hub. It also loads the associated
access token. It expects you to have HUB_CACHE_ORGANIZATION=<the organization you've set up on the hub to store your cache>
and HF_TOKEN=<your hf token> on separate lines in a file named .env at the root of this repo.
Returns:
tuple of strings: hub_cache_organization, hf_token
"""
if Path(".env").is_file():
load_dotenv(".env")
hf_token = getenv("HF_TOKEN")
hub_cache_organization = getenv("HUB_CACHE_ORGANIZATION")
return hub_cache_organization, hf_token
def get_cache_dir_naming(out_dir, dataset, config, split, feature):
feature_text = hyphenated(feature)
dataset_cache_name = f"{dataset}_{config}_{split}_{feature_text}"
local_dataset_cache_dir = out_dir + "/" + dataset_cache_name
return dataset_cache_name, local_dataset_cache_dir
def initialize_cache_hub_repo(local_cache_dir, dataset_cache_name):
"""
This function tries to initialize a dataset cache on the huggingface hub. The
function expects you to have HUB_CACHE_ORGANIZATION=<the organization you've set up on the hub to store your cache>
and HF_TOKEN=<your hf token> on separate lines in a file named .env at the root of this repo.
Args:
local_cache_dir (string):
The path to the local dataset cache.
dataset_cache_name (string):
The name of the dataset repo on the huggingface hub that you want.
"""
hub_cache_organization, hf_token = _load_dotenv_for_cache_on_hub()
clone_source = pjoin(hub_cache_organization, dataset_cache_name)
repo = Repository(local_dir=local_cache_dir,
clone_from=clone_source,
repo_type="dataset", use_auth_token=hf_token)
repo.lfs_track(["*.feather"])
return repo
def pull_cache_from_hub(cache_path, dataset_cache_dir):
"""
This function tries to pull a datasets cache from the huggingface hub if a
cache for the dataset does not already exist locally. The function expects you
to have you HUB_CACHE_ORGANIZATION=<the organization you've set up on the hub to store your cache>
and HF_TOKEN=<your hf token> on separate lines in a file named .env at the root of this repo.
Args:
cache_path (string):
The path to the local dataset cache that you want.
dataset_cache_dir (string):
The name of the dataset repo on the huggingface hub.
"""
hub_cache_organization, hf_token = _load_dotenv_for_cache_on_hub()
clone_source = pjoin(hub_cache_organization, dataset_cache_dir)
if isdir(cache_path):
logs.warning("Already a local cache for the dataset, so not pulling from the hub.")
else:
# Here, dataset_info.id is of the form: <hub cache organization>/<dataset cache dir>
if dataset_cache_dir in [
dataset_info.id.split("/")[-1] for dataset_info in
list_datasets(author=hub_cache_organization,
use_auth_token=hf_token)]:
Repository(local_dir=cache_path,
clone_from=clone_source,
repo_type="dataset", use_auth_token=hf_token)
logs.info("Pulled cache from hub!")
else:
logs.warning("Asking to pull cache from hub but cannot find cached repo on the hub.")
def load_truncated_dataset(
dataset_name,
config_name,
split_name,
num_rows=_MAX_ROWS,
use_cache=True,
cache_dir=CACHE_DIR,
use_streaming=True,
save=True,
):
"""
This function loads the first `num_rows` items of a dataset for a
given `config_name` and `split_name`.
If `use_cache` and `cache_name` exists, the truncated dataset is loaded from
`cache_name`.
Otherwise, a new truncated dataset is created and immediately saved
to `cache_name`.
When the dataset is streamable, we iterate through the first
`num_rows` examples in streaming mode, write them to a jsonl file,
then create a new dataset from the json.
This is the most direct way to make a Dataset from an IterableDataset
as of datasets version 1.6.1.
Otherwise, we download the full dataset and select the first
`num_rows` items
Args:
dataset_name (string):
dataset id in the dataset library
config_name (string):
dataset configuration
split_name (string):
split name
num_rows (int) [optional]:
number of rows to truncate the dataset to
cache_dir (string):
name of the cache directory
use_cache (bool):
whether to load from the cache if it exists
use_streaming (bool):
whether to use streaming when the dataset supports it
save (bool):
whether to save the dataset locally
Returns:
Dataset: the (truncated if specified) dataset as a Dataset object
"""
logs.info("Loading or preparing dataset saved in %s " % cache_dir)
if use_cache and exists(cache_dir):
dataset = load_from_disk(cache_dir)
else:
if use_streaming and dataset_name in _STREAMABLE_DATASET_LIST:
iterable_dataset = load_dataset(
dataset_name,
name=config_name,
split=split_name,
streaming=True,
).take(num_rows)
rows = list(iterable_dataset)
f = open("temp.jsonl", "w", encoding="utf-8")
for row in rows:
_ = f.write(json.dumps(row) + "\n")
f.close()
dataset = Dataset.from_json(
"temp.jsonl", features=iterable_dataset.features, split=NamedSplit(split_name)
)
else:
full_dataset = load_dataset(
dataset_name,
name=config_name,
split=split_name,
)
if len(full_dataset) >= num_rows:
dataset = full_dataset.select(range(num_rows))
# Make the directory name clear that it's not the full dataset.
cache_dir = pjoin(cache_dir, ("_%s" % num_rows))
else:
dataset = full_dataset
if save:
dataset.save_to_disk(cache_dir)
return dataset
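# Illustrative usage sketch (hypothetical values, not part of the original module):
# dset = load_truncated_dataset("imdb", "plain_text", "train", num_rows=1000,
#                               cache_dir="cache_dir/imdb_plain_text_train")
# returns a datasets.Dataset with at most 1000 rows, saved under cache_dir when
# save=True and reloaded from there on later calls when use_cache=True.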
def hyphenated(features):
"""When multiple features are asked for, hyphenate them together when they're used for filenames or titles"""
return '-'.join(features)
def get_typed_features(features, ftype="string", parents=None):
"""
Recursively get a list of all features of a certain dtype
:param features:
:param ftype:
:param parents:
:return: a list of tuples > e.g. ('A', 'B', 'C') for feature example['A']['B']['C']
"""
if parents is None:
parents = []
typed_features = []
for name, feat in features.items():
if isinstance(feat, dict):
if feat.get("dtype", None) == ftype or feat.get("feature", {}).get(
("dtype", None) == ftype
):
typed_features += [tuple(parents + [name])]
elif "feature" in feat:
if feat["feature"].get("dtype", None) == ftype:
typed_features += [tuple(parents + [name])]
elif isinstance(feat["feature"], dict):
typed_features += get_typed_features(
feat["feature"], ftype, parents + [name]
)
else:
for k, v in feat.items():
if isinstance(v, dict):
typed_features += get_typed_features(
v, ftype, parents + [name, k]
)
elif name == "dtype" and feat == ftype:
typed_features += [tuple(parents)]
return typed_features
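# Illustrative example of the recursion above (toy feature dict, not from a real
# dataset config): given
# features = {"question": {"dtype": "string"},
#             "answers": {"feature": {"text": {"dtype": "string"}}}}
# get_typed_features(features, "string") returns [("question",), ("answers", "text")],
# one tuple per nested string feature.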
def get_label_features(features, parents=None):
"""
Recursively get a list of all features that are ClassLabels
:param features:
:param parents:
:return: pairs of tuples as above and the list of class names
"""
if parents is None:
parents = []
label_features = []
for name, feat in features.items():
if isinstance(feat, dict):
if "names" in feat:
label_features += [(tuple(parents + [name]), feat["names"])]
elif "feature" in feat:
if "names" in feat:
label_features += [
(tuple(parents + [name]), feat["feature"]["names"])
]
elif isinstance(feat["feature"], dict):
label_features += get_label_features(
feat["feature"], parents + [name]
)
else:
for k, v in feat.items():
if isinstance(v, dict):
label_features += get_label_features(v, parents + [name, k])
elif name == "names":
label_features += [(tuple(parents), feat)]
return label_features
# get the info we need for the app sidebar in dict format
def dictionarize_info(dset_info):
info_dict = asdict(dset_info)
res = {
"config_name": info_dict["config_name"],
"splits": {
spl: spl_info["num_examples"]
for spl, spl_info in info_dict["splits"].items()
},
"features": {
"string": get_typed_features(info_dict["features"], "string"),
"int32": get_typed_features(info_dict["features"], "int32"),
"float32": get_typed_features(info_dict["features"], "float32"),
"label": get_label_features(info_dict["features"]),
},
"description": dset_info.description,
}
return res
def get_dataset_info_dicts(dataset_id=None):
"""
Creates a dict from dataset configs.
Uses the datasets lib's get_dataset_infos
:return: Dictionary mapping dataset names to their configurations
"""
if dataset_id is not None:
ds_name_to_conf_dict = {
dataset_id: {
config_name: dictionarize_info(config_info)
for config_name, config_info in get_dataset_infos(dataset_id).items()
}
}
else:
ds_name_to_conf_dict = {
ds_id: {
config_name: dictionarize_info(config_info)
for config_name, config_info in get_dataset_infos(ds_id).items()
}
for ds_id in _DATASET_LIST
}
return ds_name_to_conf_dict
# get all instances of a specific field in a dataset
def extract_field(examples, field_path, new_field_name=None):
if new_field_name is None:
new_field_name = "_".join(field_path)
field_list = []
# TODO: Breaks the CLI if this isn't checked.
if isinstance(field_path, str):
field_path = [field_path]
item_list = examples[field_path[0]]
for field_name in field_path[1:]:
item_list = [
next_item
for item in item_list
for next_item in (
item[field_name]
if isinstance(item[field_name], list)
else [item[field_name]]
)
]
field_list += [
field
for item in item_list
for field in (item if isinstance(item, list) else [item])
]
return {new_field_name: field_list}
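# Illustrative example (toy batch, not from a real dataset): for
# examples = {"answers": [{"text": ["a", "b"]}, {"text": ["c"]}]}
# extract_field(examples, ["answers", "text"], "text") returns
# {"text": ["a", "b", "c"]}, flattening the nested field into a single list.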
def make_path(path):
os.makedirs(path, exist_ok=True)
def counter_dict_to_df(dict_input, key_as_column=False):
df_output = pd.DataFrame(dict_input, index=[0]).T
if key_as_column:
df_output.reset_index(inplace=True)
df_output.columns = ["instance", "count"]
else:
df_output.columns = ["count"]
return df_output.sort_values(by="count", ascending=False)
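# Illustrative example (toy counts, not from a real dataset):
# counter_dict_to_df({"the": 10, "cat": 2}, key_as_column=True) returns a dataframe
# with columns ["instance", "count"], sorted by count in descending order
# ("the" 10, then "cat" 2).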
def write_plotly(fig, fid):
write_json(plotly.io.to_json(fig), fid)
def read_plotly(fid):
fig = plotly.io.from_json(json.load(open(fid, encoding="utf-8")))
return fig
def write_json_as_html(input_json, html_fid):
html_dict = json2html.convert(json=input_json)
with open(html_fid, "w+") as f:
f.write(html_dict)
def df_to_write_html(input_df, html_fid):
"""Writes a dataframe to an HTML file"""
input_df.to_html(html_fid)
def read_df(df_fid):
return pd.DataFrame.from_dict(read_json(df_fid), orient="index")
def write_df(df, df_fid):
"""In order to preserve the index of our dataframes, we can't
use the compressed pandas dataframe file format .feather.
There's a preference for json amongst HF devs, so we use that here."""
df_dict = df.to_dict('index')
write_json(df_dict, df_fid)
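# Illustrative round trip (hypothetical path), relying on the orient="index"
# convention shared by write_df and read_df above:
# write_df(vocab_counts_df, "cache_dir/vocab_counts.json")
# vocab_counts_df = read_df("cache_dir/vocab_counts.json")
# The json is keyed by the dataframe's index, which is why the index survives the
# round trip (unlike the .feather format mentioned in the docstring).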
def write_json(json_dict, json_fid):
with open(json_fid, "w", encoding="utf-8") as f:
json.dump(json_dict, f)
def read_json(json_fid):
json_dict = json.load(open(json_fid, encoding="utf-8"))
return json_dict | data-measurements-tool-main | utils/dataset_utils.py |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import gradio as gr
import numpy as np
import pandas as pd
from matplotlib.figure import Figure
import seaborn as sns
import statistics
import streamlit as st
import utils
import utils.dataset_utils as ds_utils
from st_aggrid import AgGrid, GridOptionsBuilder
from utils.dataset_utils import HF_DESC_FIELD, HF_FEATURE_FIELD, HF_LABEL_FIELD
logs = utils.prepare_logging(__file__)
st.set_option('deprecation.showPyplotGlobalUse', False)
# Note: Make sure to consider colorblind-friendly colors for your images! Ex:
# ["#332288", "#117733", "#882255", "#AA4499", "#CC6677", "#44AA99", "#DDCC77",
# "#88CCEE"]
pd.options.display.float_format = "{:,.3f}".format # '{:20,.2f}'.format
def subheader():
gr.Markdown("""This demo showcases the
[dataset metrics as we develop them](https://huggingface.co/blog/data-measurements-tool).
Right now this has:
- dynamic loading of datasets in the lib
- fetching config and info without downloading the dataset
- propose the list of candidate text and label features to select.
""")
def get_label_names(dataset_name: str, config_name: str, ds_name_to_dict):
label_field, label_names = (
ds_name_to_dict[dataset_name][config_name][HF_FEATURE_FIELD][
HF_LABEL_FIELD][0]
if len(
ds_name_to_dict[dataset_name][config_name][HF_FEATURE_FIELD][
HF_LABEL_FIELD]
) > 0
else ((), [])
)
return label_field, label_names
def update_dataset(dataset_name: str, ds_name_to_dict):
# choose a config to analyze
ds_configs = ds_name_to_dict[dataset_name]
# special handling for the largest-by-far dataset, C4
if dataset_name == "c4":
config_names = ['en', 'en.noblocklist', 'realnewslike']
else:
config_names = list(ds_configs.keys())
config_name = config_names[0]
ds_config = ds_configs[config_name]
text_features = ds_config[HF_FEATURE_FIELD]["string"]
text_features = [('text',)] if dataset_name == "c4" else [tp for tp in text_features if tp[0] != "id"]
feature = str(text_features[0])
text_features = [str(f) for f in text_features]
avail_splits = list(ds_config["splits"].keys())
split = avail_splits[0]
return [(config_names, config_name), (text_features, feature), (avail_splits, split)]
def update_config(dataset_name: str, config_name: str, ds_name_to_dict):
ds_config = ds_name_to_dict[dataset_name][config_name]
text_features = ds_config[HF_FEATURE_FIELD]["string"]
text_features = [('text',)] if dataset_name == "c4" else [tp for tp in text_features if tp[0] != "id"]
feature = str(text_features[0])
text_features = [str(f) for f in text_features]
avail_splits = list(ds_config["splits"].keys())
split = avail_splits[0]
return [(text_features, feature), (avail_splits, split)]
def sidebar_selection(ds_name_to_dict, column_id=""):
ds_names = list(ds_name_to_dict.keys())
with gr.Accordion(f"Choose dataset and field {column_id}", open=True):
subheader()
# choose a dataset to analyze
ds_name = gr.Dropdown(
label=f"Choose dataset to explore{column_id}:",
choices=ds_names,
value="hate_speech18",
)
# choose a config to analyze
ds_configs = ds_name_to_dict[ds_name.value]
# special handling for the largest-by-far dataset, C4
if ds_name == "c4":
config_names = ['en', 'en.noblocklist', 'realnewslike']
else:
config_names = list(ds_configs.keys())
config_name = gr.Dropdown(
label=f"Choose configuration{column_id}:",
choices=config_names,
value=config_names[0],
)
# choose a subset of num_examples
ds_config = ds_configs[config_name.value]
text_features = ds_config[HF_FEATURE_FIELD]["string"]
# TODO @yacine: Explain what this is doing and why eg tp[0] could = "id"
text = f"Which text feature from the {column_id} dataset would you like to analyze?"
choices = [('text',)] if ds_name == "c4" else [tp for tp in text_features if tp[0] != "id"]
text_field = gr.Dropdown(
label=text,
choices=[str(f) for f in choices],
value=str(choices[0])
)
# Choose a split and dataset size
avail_splits = list(ds_config["splits"].keys())
# 12.Nov note: Removing "test" because those should not be examined
# without discussion of pros and cons, which we haven't done yet.
if "test" in avail_splits:
avail_splits.remove("test")
split = gr.Dropdown(
label=f"Which split from the{column_id} dataset would you like to analyze?",
choices=avail_splits,
value=avail_splits[0],
)
label_field, label_names = get_label_names(ds_name.value, config_name.value, ds_name_to_dict)
calculate_btn = gr.Button(value="Calculate", variant="primary")
return {
"dset_name": ds_name,
"dset_config": config_name,
"split_name": split,
"text_field": text_field,
"label_field": label_field,
"label_names": label_names,
"calculate_btn": calculate_btn
}
def expander_header(dstats, ds_name_to_dict, column_id=""):
with st.expander(f"Dataset Description{column_id}"):
st.markdown(
ds_name_to_dict[dstats.dset_name][dstats.dset_config][HF_DESC_FIELD]
)
st.dataframe(dstats.dset_peek)
def expander_general_stats(dstats, column_id=""):
with gr.Accordion(f"General Text Statistics{column_id}"):
st.caption(
"Use this widget to check whether the terms you see most "
"represented in the dataset make sense for the goals of the dataset."
)
st.markdown("There are {0} total words".format(str(dstats.total_words)))
st.markdown(
"There are {0} words after removing closed "
"class words".format(str(dstats.total_open_words))
)
st.markdown(
"The most common "
"[open class words](https://dictionary.apa.org/open-class-words) "
"and their counts are: "
)
st.dataframe(dstats.sorted_top_vocab_df)
st.markdown(
"There are {0} missing values in the dataset.".format(
str(dstats.text_nan_count)
)
)
if dstats.dups_frac > 0:
st.markdown(
"The dataset is {0}% duplicates. "
"For more information about the duplicates, "
"click the 'Duplicates' tab below.".format(
str(round(dstats.dups_frac * 100, 2)))
)
else:
st.markdown("There are 0 duplicate items in the dataset. ")
def expander_label_distribution(dstats, column_id=""):
with st.expander(f"Label Distribution{column_id}", expanded=False):
st.caption(
"Use this widget to see how balanced the labels in your dataset are."
)
if dstats.fig_labels:
st.plotly_chart(dstats.fig_labels, use_container_width=True)
else:
st.markdown("No labels were found in the dataset")
def expander_text_lengths(dstats, column_id=""):
_TEXT_LENGTH_CAPTION = (
"Use this widget to identify outliers, particularly suspiciously long "
"outliers."
)
with st.expander(f"Text Lengths{column_id}", expanded=False):
st.caption(_TEXT_LENGTH_CAPTION)
st.markdown(
"Below, you can see how the lengths of the text instances in your "
"dataset are distributed."
)
st.markdown(
"Any unexpected peaks or valleys in the distribution may help to "
"identify instances you want to remove or augment."
)
st.markdown(
"### Here is the count of different text lengths in "
"your dataset:"
)
# When matplotlib first creates this, it's a Figure.
# Once it's saved, then read back in,
# it's an ndarray that must be displayed using st.image
# (I know, lame).
if isinstance(dstats.length_obj.fig_lengths, Figure):
st.pyplot(dstats.length_obj.fig_lengths, use_container_width=True)
else:
try:
st.image(dstats.length_obj.fig_lengths)
except Exception as e:
logs.exception("Hit exception for lengths figure:")
logs.exception(e)
st.markdown(
"The average length of text instances is **"
+ str(round(dstats.length_obj.avg_length, 2))
+ " words**, with a standard deviation of **"
+ str(round(dstats.length_obj.std_length, 2))
+ "**."
)
if dstats.length_obj.lengths_df is not None:
start_id_show_lengths = st.selectbox(
"Show examples of length:",
np.sort(dstats.length_obj.lengths_df["length"].unique())[::-1].tolist(),
key=f"select_show_length_{column_id}",
)
st.table(
dstats.length_obj.lengths_df[
dstats.length_obj.lengths_df["length"] == start_id_show_lengths
].set_index("length")
)
def expander_text_duplicates(dstats, column_id=""):
with st.expander(f"Text Duplicates{column_id}", expanded=False):
st.caption(
"Use this widget to identify text strings that appear more than "
"once."
)
st.markdown(
"A model's training and testing may be negatively affected by "
"unwarranted duplicates "
"([Lee et al., 2021](https://arxiv.org/abs/2107.06499))."
)
st.markdown("------")
st.write(
"### Here is the list of all the duplicated items and their counts "
"in the dataset."
)
if not dstats.duplicates_results:
st.write("There are no duplicates in this dataset! 🥳")
else:
st.write("The fraction of the data that is a duplicate is:")
st.write(str(round(dstats.dups_frac, 4)))
# TODO: Check if this is slow when the size is large --
# Should we store as dataframes?
# Dataframes allow this to be interactive.
st.dataframe(ds_utils.counter_dict_to_df(dstats.dups_dict))
def expander_text_perplexities(dstats, column_id=""):
with st.expander(f"Text Perplexities{column_id}", expanded=False):
st.caption(
"Use this widget to identify text perplexities from GPT-2."
)
st.markdown(
"""
Outlier perplexities, especially very high values, could highlight
an issue with an example. Smaller variations should be interpreted
with more care, as they indicate how similar to the GPT-2 training
corpus the examples are rather than being reflective of general
linguistic properties.
For more information on GPT-2,
see its [model card](https://hf.co/gpt2).
"""
)
st.markdown("------")
st.write(
"### Here is the list of the examples in the dataset, sorted by "
"GPT-2 perplexity:"
)
if dstats.perplexities_df is None or dstats.perplexities_df.empty:
st.write(
"Perplexities have not been computed yet for this dataset, or "
"this dataset is too large for the UI (> 1,000,000 examples).")
else:
st.dataframe(dstats.perplexities_df.reset_index(drop=True))
def expander_npmi_description(min_vocab):
_NPMI_CAPTION = (
"Use this widget to identify problematic biases and stereotypes in "
"your data."
)
_NPMI_CAPTION1 = """
nPMI scores for a word help to identify potentially
problematic associations, ranked by how close the association is."""
_NPMI_CAPTION2 = """
nPMI bias scores for paired words help to identify how word
associations are skewed between the two selected words
([Aka et al., 2021](https://arxiv.org/abs/2103.03417)).
"""
st.caption(_NPMI_CAPTION)
st.markdown(_NPMI_CAPTION1)
st.markdown(_NPMI_CAPTION2)
st.markdown(" ")
st.markdown(
"You can select from gender and sexual orientation "
"identity terms that appear in the dataset at least %s "
"times." % min_vocab
)
st.markdown(
"The resulting ranked words are those that co-occur with both "
"identity terms. "
)
st.markdown(
"The more *positive* the score, the more associated the word is with "
"the first identity term. "
"The more *negative* the score, the more associated the word is with "
"the second identity term."
)
def expander_zipf(dstats, column_id=""):
z = dstats.z
zipf_fig = dstats.zipf_fig
with st.expander(
f"Vocabulary Distribution{column_id}: Zipf's Law Fit", expanded=False
):
try:
_ZIPF_CAPTION = """This shows how close the observed language is to an ideal
natural language distribution following [Zipf's law](https://en.wikipedia.org/wiki/Zipf%27s_law),
calculated by minimizing the [Kolmogorov-Smirnov (KS) statistic](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test)."""
powerlaw_eq = r"""p(x) \propto x^{- \alpha}"""
zipf_summary = (
"The optimal alpha based on this dataset is: **"
+ str(round(z.alpha, 2))
+ "**, with a KS distance of: **"
+ str(round(z.ks_distance, 2))
)
zipf_summary += (
"**. This was fit with a minimum rank value of: **"
+ str(int(z.xmin))
+ "**, which is the optimal rank *beyond which* the scaling regime of the power law fits best."
)
alpha_warning = "Your alpha value is a bit on the high side, which means that the distribution over words in this dataset is a bit unnatural. This could be due to non-language items throughout the dataset."
xmin_warning = "The minimum rank for this fit is a bit on the high side, which means that the frequencies of your most common words aren't distributed as would be expected by Zipf's law."
fit_results_table = pd.DataFrame.from_dict(
{
r"Alpha:": [str("%.2f" % z.alpha)],
"KS distance:": [str("%.2f" % z.ks_distance)],
"Min rank:": [str("%s" % int(z.xmin))],
},
columns=["Results"],
orient="index",
)
fit_results_table.index.name = column_id
st.caption(
"Use this widget for the counts of different words in your dataset, measuring the difference between the observed count and the expected count under Zipf's law."
)
st.markdown(_ZIPF_CAPTION)
st.write(
"""
A Zipfian distribution follows the power law: $p(x) \propto x^{-α}$
with an ideal α value of 1."""
)
st.markdown(
"In general, an alpha greater than 2 or a minimum rank greater than 10 (take with a grain of salt) means that your distribution is relativaly _unnatural_ for natural language. This can be a sign of mixed artefacts in the dataset, such as HTML markup."
)
st.markdown(
"Below, you can see the counts of each word in your dataset vs. the expected number of counts following a Zipfian distribution."
)
st.markdown("-----")
st.write("### Here is your dataset's Zipf results:")
st.dataframe(fit_results_table)
st.write(zipf_summary)
# TODO: Nice UI version of the content in the comments.
# st.markdown("\nThe KS test p-value is < %.2f" % z.ks_test.pvalue)
# if z.ks_test.pvalue < 0.01:
# st.markdown(
# "\n Great news! Your data fits a powerlaw with a minimum KS " "distance of %.4f" % z.distance)
# else:
# st.markdown("\n Sadly, your data does not fit a powerlaw. =(")
# st.markdown("Checking the goodness of fit of our observed distribution")
# st.markdown("to the hypothesized power law distribution")
# st.markdown("using a Kolmogorov–Smirnov (KS) test.")
st.plotly_chart(zipf_fig, use_container_width=True)
if z.alpha > 2:
st.markdown(alpha_warning)
if z.xmin > 5:
st.markdown(xmin_warning)
except:
st.write("Under construction!")
def npmi_widget(dstats, column_id=""):
"""
Part of the UI, but providing for interaction.
:param column_id:
:param dstats:
:return:
"""
min_vocab = dstats.min_vocab_count
npmi_stats = dstats.npmi_obj
available_terms = npmi_stats.avail_identity_terms
with st.expander(f"Word Association{column_id}: nPMI", expanded=False):
if npmi_stats and len(available_terms) > 0:
expander_npmi_description(min_vocab)
st.markdown("-----")
term1 = st.selectbox(
f"What is the first term you want to select?{column_id}",
available_terms,
)
term2 = st.selectbox(
f"What is the second term you want to select?{column_id}",
reversed(available_terms),
)
try:
joint_npmi_df = npmi_stats.get_display(term1, term2)
npmi_show(joint_npmi_df)
except Exception as e:
logs.exception(e)
st.markdown(
"**WARNING!** The nPMI for these terms has not been"
" pre-computed, please re-run caching."
)
else:
st.markdown("No words found co-occurring with both of the selected identity"
" terms.")
def npmi_show(paired_results):
if paired_results.empty:
st.markdown(
"No words that co-occur enough times for results! Or there's a 🐛."
" Or we're still computing this one. 🤷")
else:
logs.debug("Results to be shown in streamlit are")
logs.debug(paired_results)
s = pd.DataFrame(
paired_results.sort_values(paired_results.columns[0], ascending=True))
s.index.name = "word"
bias_col = s.filter(like="bias").columns
#count_cols = s.filter(like="count").columns
# Keep the dataframe from being crazy big.
if s.shape[0] > 10000:
bias_thres = max(abs(s[s.columns[0]].iloc[5000]),
abs(s[s.columns[0]].iloc[-5000]))
logs.info(f"filtering with bias threshold: {bias_thres}")
s_filtered = s[s[s.columns[0]].abs() > bias_thres]
else:
s_filtered = s
cm = sns.palplot(sns.diverging_palette(270, 36, s=99, l=48, n=16))
out_df = (
s_filtered.style.background_gradient(subset=bias_col, cmap=cm)
.format(formatter="{:,.3f}")
.set_properties(**{"align": "center", "width": "100em"})
.set_caption("nPMI scores between the selected identity terms and the words they both co-occur with")
)
#set_properties(subset=count_cols, **{"width": "10em", "text-align": "center"}).
# .format(subset=count_cols, formatter=int).
#.format(subset=bias_col, formatter="{:,.3f}")
st.write("### Here is your dataset's bias results:")
st.dataframe(out_df)
| data-measurements-tool-main | utils/gradio_utils.py |
data-measurements-tool-main | data_measurements/__init__.py |
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import seaborn as sns
import statistics
import utils
import utils.dataset_utils as ds_utils
from data_measurements.tokenize import Tokenize
from data_measurements.labels import labels
from data_measurements.perplexity import perplexity
from data_measurements.lengths import lengths
from data_measurements.text_duplicates import text_duplicates as td
from data_measurements.npmi import npmi
from data_measurements.zipf import zipf
from datasets import load_from_disk
from nltk.corpus import stopwords
from os import mkdir, getenv
from os.path import exists, isdir
from os.path import join as pjoin
from pathlib import Path
from sklearn.feature_extraction.text import CountVectorizer
from utils.dataset_utils import (CNT, LENGTH_FIELD,
TEXT_FIELD, PERPLEXITY_FIELD, PROP,
TEXT_NAN_CNT, TOKENIZED_FIELD, TOT_OPEN_WORDS,
TOT_WORDS, VOCAB, WORD)
logs = utils.prepare_logging(__file__)
# TODO: Read this in depending on chosen language / expand beyond english
nltk.download("stopwords", quiet=True)
_CLOSED_CLASS = (
stopwords.words("english")
+ ["t", "n", "ll", "d", "s"]
+ ["wasn", "weren", "won", "aren", "wouldn", "shouldn", "didn", "don",
"hasn", "ain", "couldn", "doesn", "hadn", "haven", "isn", "mightn",
"mustn", "needn", "shan", "would", "could", "dont"]
+ [str(i) for i in range(0, 99)]
)
IDENTITY_TERMS = [
"man",
"woman",
"non-binary",
"gay",
"lesbian",
"queer",
"trans",
"straight",
"cis",
"she",
"her",
"hers",
"he",
"him",
"his",
"they",
"them",
"their",
"theirs",
"himself",
"herself",
]
# treating inf values as NaN as well
pd.set_option("use_inf_as_na", True)
MIN_VOCAB_COUNT = 10
_NUM_VOCAB_BATCHES = 2000
_TOP_N = 100
class DatasetStatisticsCacheClass:
def __init__(
self,
dset_name,
dset_config,
split_name,
text_field,
label_field,
label_names,
cache_dir="cache_dir",
dataset_cache_dir=None,
use_cache=False,
save=True,
):
### What are we analyzing?
# name of the Hugging Face dataset
self.dset_name = dset_name
# name of the dataset config
self.dset_config = dset_config
# name of the split to analyze
self.split_name = split_name
# which text/feature fields are we analysing?
self.text_field = text_field
## Label variables
# which label fields are we analysing?
self.label_field = label_field
# what are the names of the classes?
self.label_names = label_names
# save label pie chart in the class so it doesn't ge re-computed
self.fig_labels = None
## Hugging Face dataset objects
self.dset = None # original dataset
# HF dataset with all of the self.text_field instances in self.dset
self.text_dset = None
self.dset_peek = None
# HF dataset with text embeddings in the same order as self.text_dset
self.embeddings_dset = None
# HF dataset with all of the self.label_field instances in self.dset
# TODO: Not being used anymore; make sure & remove.
self.label_dset = None
self.length_obj = None
## Data frames
# Tokenized text
self.tokenized_df = None
# Data Frame version of self.label_dset
# TODO: Not being used anymore. Make sure and remove
self.label_df = None
# where are they being cached?
self.label_files = {}
# label pie chart used in the UI
self.fig_labels = None
# results
self.label_results = None
## Caching
if not dataset_cache_dir:
_, self.dataset_cache_dir = ds_utils.get_cache_dir_naming(cache_dir,
dset_name,
dset_config,
split_name,
text_field)
else:
self.dataset_cache_dir = dataset_cache_dir
# Use stored data if there; otherwise calculate afresh
self.use_cache = use_cache
# Save newly calculated results.
self.save = save
self.dset_peek = None
# Tokenized text
self.tokenized_df = None
## Zipf
# Save zipf fig so it doesn't need to be recreated.
self.zipf_fig = None
# Zipf object
self.z = None
## Vocabulary
# Vocabulary with word counts in the dataset
self.vocab_counts_df = None
# Vocabulary filtered to remove stopwords
self.vocab_counts_filtered_df = None
self.sorted_top_vocab_df = None
# Text Duplicates
self.duplicates_results = None
self.duplicates_files = {}
self.dups_frac = 0
self.dups_dict = {}
## Perplexity
self.perplexities_df = None
## Lengths
self.avg_length = None
self.std_length = None
self.length_stats_dict = None
self.length_df = None
self.fig_tok_length = None
self.num_uniq_lengths = 0
## "General" stats
self.general_stats_dict = {}
self.total_words = 0
self.total_open_words = 0
# Number of NaN values (NOT empty strings)
self.text_nan_count = 0
# nPMI
self.npmi_obj = None
# The minimum amount of times a word should occur to be included in
# word-count-based calculations (currently just relevant to nPMI)
self.min_vocab_count = MIN_VOCAB_COUNT
self.hf_dset_cache_dir = pjoin(self.dataset_cache_dir, "base_dset")
self.tokenized_df_fid = pjoin(self.dataset_cache_dir, "tokenized_df.json")
self.text_dset_fid = pjoin(self.dataset_cache_dir, "text_dset")
self.dset_peek_json_fid = pjoin(self.dataset_cache_dir, "dset_peek.json")
## Length cache files
self.length_df_fid = pjoin(self.dataset_cache_dir, "length_df.json")
self.length_stats_json_fid = pjoin(self.dataset_cache_dir, "length_stats.json")
self.vocab_counts_df_fid = pjoin(self.dataset_cache_dir,
"vocab_counts.json")
self.dup_counts_df_fid = pjoin(self.dataset_cache_dir, "dup_counts_df.json")
self.fig_tok_length_fid = pjoin(self.dataset_cache_dir, "fig_tok_length.png")
## General text stats
self.general_stats_json_fid = pjoin(self.dataset_cache_dir,
"general_stats_dict.json")
# Needed for UI
self.sorted_top_vocab_df_fid = pjoin(
self.dataset_cache_dir, "sorted_top_vocab.json"
)
# Set the HuggingFace dataset object with the given arguments.
self.dset = self._get_dataset()
self.text_dset = None
# Defines self.text_dset, a HF Dataset with just the TEXT_FIELD instances in self.dset extracted
self.load_or_prepare_text_dataset()
def _get_dataset(self):
"""
Gets the HuggingFace Dataset object.
        First tries to load it from the given cache directory, if specified;
        otherwise it is loaded anew and, if self.save is set, saved to that
        cache directory.
"""
dset = ds_utils.load_truncated_dataset(self.dset_name, self.dset_config,
self.split_name,
cache_dir=self.hf_dset_cache_dir,
save=self.save)
return dset
def load_or_prepare_text_dataset(self, load_only=False):
"""
Prepares the HF dataset text/feature based on given config, split, etc.
Args:
load_only: Whether only a cached dataset can be used.
"""
logs.info("Doing text dset.")
if self.use_cache and exists(self.text_dset_fid):
# load extracted text
self.text_dset = load_from_disk(self.text_dset_fid)
logs.info("Loaded dataset from disk")
logs.info(self.text_dset)
# ...Or load it from the server and store it anew
elif not load_only:
# Defines self.text_dset
self.prepare_text_dset()
if self.save:
# save extracted text instances
logs.info("Saving dataset to disk")
self.text_dset.save_to_disk(self.text_dset_fid)
def prepare_text_dset(self):
logs.info("Working with dataset:")
logs.info(self.dset)
# Extract all text instances from the user-specified self.text_field,
# which is a dataset-specific text/feature field;
# create a new feature called TEXT_FIELD, which is a constant shared
# across DMT logic.
self.text_dset = self.dset.map(
lambda examples: ds_utils.extract_field(
examples, self.text_field, TEXT_FIELD
),
batched=True,
remove_columns=list(self.dset.features),
)
def load_or_prepare_general_stats(self, load_only=False):
"""
Content for expander_general_stats widget.
Provides statistics for total words, total open words,
the sorted top vocab, the NaN count, and the duplicate count.
        Args:
            load_only: Whether only a cached result can be used.
"""
# General statistics
# For the general statistics, text duplicates are not saved in their
# own files, but rather just the text duplicate fraction is saved in the
# "general" file. We therefore set save=False for
# the text duplicate files in this case.
# Similarly, we don't get the full list of duplicates
# in general stats, so set list_duplicates to False
self.load_or_prepare_text_duplicates(load_only=load_only, save=False,
list_duplicates=False)
logs.info("Duplicates results:")
logs.info(self.duplicates_results)
self.general_stats_dict.update(self.duplicates_results)
# TODO: Tighten the rest of this similar to text_duplicates.
if (
self.use_cache
and exists(self.general_stats_json_fid)
and exists(self.sorted_top_vocab_df_fid)
):
logs.info("Loading cached general stats")
self.load_general_stats()
elif not load_only:
logs.info("Preparing general stats")
self.prepare_general_stats()
if self.save:
ds_utils.write_df(self.sorted_top_vocab_df,
self.sorted_top_vocab_df_fid)
ds_utils.write_json(self.general_stats_dict,
self.general_stats_json_fid)
def load_or_prepare_text_lengths(self, load_only=False):
"""
The text length widget relies on this function, which provides
a figure of the text lengths, some text length statistics, and
a text length dataframe to peruse.
Args:
load_only (Bool): Whether we can compute anew, or just need to try to grab cache.
Returns:
"""
# We work with the already tokenized dataset
self.load_or_prepare_tokenized_df()
self.length_obj = lengths.DMTHelper(self, load_only=load_only, save=self.save)
self.length_obj.run_DMT_processing()
## Labels functions
def load_or_prepare_labels(self, load_only=False):
"""Uses a generic Labels class, with attributes specific to this
project as input.
Computes results for each label column,
or else uses what's available in the cache.
Currently supports Datasets with just one label column.
"""
label_obj = labels.DMTHelper(self, load_only=load_only, save=self.save)
self.label_files = label_obj.get_label_filenames()
if self.use_cache and exists(self.label_files["figure json"]) and exists(self.label_files["statistics"]):
self.fig_labels = ds_utils.read_plotly(self.label_files["figure json"])
self.label_results = ds_utils.read_json(self.label_files["statistics"])
elif not load_only:
label_obj.run_DMT_processing()
self.fig_labels = label_obj.fig_labels
self.label_results = label_obj.label_results
# Get vocab with word counts
def load_or_prepare_vocab(self, load_only=False):
"""
Calculates the vocabulary count from the tokenized text.
The resulting dataframes may be used in nPMI calculations, zipf, etc.
"""
if self.use_cache and exists(self.vocab_counts_df_fid):
logs.info("Reading vocab from cache")
self.load_vocab()
self.vocab_counts_filtered_df = filter_vocab(self.vocab_counts_df)
elif not load_only:
if self.tokenized_df is None:
# Building the vocabulary starts with tokenizing.
self.load_or_prepare_tokenized_df(load_only=False)
logs.info("Calculating vocab afresh")
word_count_df = count_vocab_frequencies(self.tokenized_df)
logs.info("Making dfs with proportion.")
self.vocab_counts_df = calc_p_word(word_count_df)
self.vocab_counts_filtered_df = filter_vocab(self.vocab_counts_df)
if self.save:
logs.info("Writing out.")
ds_utils.write_df(self.vocab_counts_df, self.vocab_counts_df_fid)
logs.info("unfiltered vocab")
logs.info(self.vocab_counts_df)
logs.info("filtered vocab")
logs.info(self.vocab_counts_filtered_df)
def load_vocab(self):
self.vocab_counts_df = ds_utils.read_df(self.vocab_counts_df_fid)
def load_or_prepare_text_duplicates(self, load_only=False, save=True, list_duplicates=True):
"""Uses a text duplicates library, which
returns strings with their counts, fraction of data that is duplicated,
or else uses what's available in the cache.
"""
dups_obj = td.DMTHelper(self, load_only=load_only, save=save)
dups_obj.run_DMT_processing(list_duplicates=list_duplicates)
self.duplicates_results = dups_obj.duplicates_results
self.dups_frac = self.duplicates_results[td.DUPS_FRAC]
if list_duplicates and td.DUPS_DICT in self.duplicates_results:
self.dups_dict = self.duplicates_results[td.DUPS_DICT]
self.duplicates_files = dups_obj.get_duplicates_filenames()
def load_or_prepare_text_perplexities(self, load_only=False):
perplex_obj = perplexity.DMTHelper(self, load_only=load_only)
perplex_obj.run_DMT_processing()
self.perplexities_df = perplex_obj.df
def load_general_stats(self):
self.general_stats_dict = json.load(
open(self.general_stats_json_fid, encoding="utf-8")
)
self.sorted_top_vocab_df = ds_utils.read_df(self.sorted_top_vocab_df_fid)
self.text_nan_count = self.general_stats_dict[TEXT_NAN_CNT]
self.dups_frac = self.general_stats_dict[td.DUPS_FRAC]
self.total_words = self.general_stats_dict[TOT_WORDS]
self.total_open_words = self.general_stats_dict[TOT_OPEN_WORDS]
def prepare_general_stats(self):
if self.tokenized_df is None:
logs.warning("Tokenized dataset not yet loaded; doing so.")
self.load_or_prepare_tokenized_df()
if self.vocab_counts_df is None:
logs.warning("Vocab not yet loaded; doing so.")
self.load_or_prepare_vocab()
self.sorted_top_vocab_df = self.vocab_counts_filtered_df.sort_values(
"count", ascending=False
).head(_TOP_N)
self.total_words = len(self.vocab_counts_df)
self.total_open_words = len(self.vocab_counts_filtered_df)
self.text_nan_count = int(self.tokenized_df.isnull().sum().sum())
self.load_or_prepare_text_duplicates()
self.general_stats_dict = {
TOT_WORDS: self.total_words,
TOT_OPEN_WORDS: self.total_open_words,
TEXT_NAN_CNT: self.text_nan_count,
td.DUPS_FRAC: self.dups_frac
}
def load_or_prepare_dataset(self, load_only=False):
"""
Prepares the HF dataset text/feature based on given config, split, etc.
Args:
load_only: Whether only a cached dataset can be used.
"""
logs.info("Doing text dset.")
if self.use_cache and exists(self.text_dset_fid):
# load extracted text
self.text_dset = load_from_disk(self.text_dset_fid)
logs.warning("Loaded dataset from disk")
logs.warning(self.text_dset)
# ...Or load it from the server and store it anew
elif not load_only:
self.prepare_text_dset()
if self.save:
# save extracted text instances
logs.warning("Saving dataset to disk")
self.text_dset.save_to_disk(self.text_dset_fid)
# TODO: Are we not using this anymore?
def load_or_prepare_dset_peek(self, load_only=False):
if self.use_cache and exists(self.dset_peek_json_fid):
with open(self.dset_peek_json_fid, "r") as f:
self.dset_peek = json.load(f)["dset peek"]
elif not load_only:
self.dset_peek = self.dset[:100]
if self.save:
ds_utils.write_json({"dset peek": self.dset_peek},
self.dset_peek_json_fid)
def load_or_prepare_tokenized_df(self, load_only=False):
if self.use_cache and exists(self.tokenized_df_fid):
self.tokenized_df = ds_utils.read_df(self.tokenized_df_fid)
elif not load_only:
# tokenize all text instances
self.tokenized_df = Tokenize(self.text_dset, feature=TEXT_FIELD,
tok_feature=TOKENIZED_FIELD).get_df()
logs.info("tokenized df is")
logs.info(self.tokenized_df)
if self.save:
logs.warning("Saving tokenized dataset to disk")
# save tokenized text
ds_utils.write_df(self.tokenized_df, self.tokenized_df_fid)
def load_or_prepare_npmi(self, load_only=False):
npmi_obj = npmi.DMTHelper(self, IDENTITY_TERMS, load_only=load_only, use_cache=self.use_cache, save=self.save)
npmi_obj.run_DMT_processing()
self.npmi_obj = npmi_obj
self.npmi_results = npmi_obj.results_dict
self.npmi_files = npmi_obj.get_filenames()
def load_or_prepare_zipf(self, load_only=False):
zipf_json_fid, zipf_fig_json_fid, zipf_fig_html_fid = zipf.get_zipf_fids(
self.dataset_cache_dir)
if self.use_cache and exists(zipf_json_fid):
# Zipf statistics
# Read Zipf statistics: Alpha, p-value, etc.
with open(zipf_json_fid, "r") as f:
zipf_dict = json.load(f)
self.z = zipf.Zipf(self.vocab_counts_df)
self.z.load(zipf_dict)
# Zipf figure
if exists(zipf_fig_json_fid):
self.zipf_fig = ds_utils.read_plotly(zipf_fig_json_fid)
elif not load_only:
self.zipf_fig = zipf.make_zipf_fig(self.z)
if self.save:
                    ds_utils.write_plotly(self.zipf_fig, zipf_fig_json_fid)
elif not load_only:
self.prepare_zipf()
if self.save:
zipf_dict = self.z.get_zipf_dict()
ds_utils.write_json(zipf_dict, zipf_json_fid)
ds_utils.write_plotly(self.zipf_fig, zipf_fig_json_fid)
self.zipf_fig.write_html(zipf_fig_html_fid)
def prepare_zipf(self):
# Calculate zipf from scratch
# TODO: Does z even need to be self?
self.z = zipf.Zipf(self.vocab_counts_df)
self.z.calc_fit()
self.zipf_fig = zipf.make_zipf_fig(self.z)
def dummy(doc):
return doc
def count_vocab_frequencies(tokenized_df):
"""
    Based on an input pandas DataFrame with a tokenized text column
    (TOKENIZED_FIELD, a list of tokens per instance), this function counts
    the occurrences of all words.
    :return: [num_words x 1] DataFrame, with the rows corresponding to the
    different vocabulary words and a single column holding each word's count.
"""
cvec = CountVectorizer(
tokenizer=dummy,
preprocessor=dummy,
)
# We do this to calculate per-word statistics
# Fast calculation of single word counts
logs.info(
"Fitting dummy tokenization to make matrix using the previous tokenization"
)
cvec.fit(tokenized_df[TOKENIZED_FIELD])
document_matrix = cvec.transform(tokenized_df[TOKENIZED_FIELD])
batches = np.linspace(0, tokenized_df.shape[0], _NUM_VOCAB_BATCHES).astype(
int)
i = 0
tf = []
while i < len(batches) - 1:
if i % 100 == 0:
logs.info("%s of %s vocab batches" % (str(i), str(len(batches))))
batch_result = np.sum(
document_matrix[batches[i]: batches[i + 1]].toarray(), axis=0
)
tf.append(batch_result)
i += 1
word_count_df = pd.DataFrame(
[np.sum(tf, axis=0)], columns=cvec.get_feature_names_out()
).transpose()
# Now organize everything into the dataframes
word_count_df.columns = [CNT]
word_count_df.index.name = WORD
return word_count_df
def calc_p_word(word_count_df):
# p(word)
word_count_df[PROP] = word_count_df[CNT] / float(sum(word_count_df[CNT]))
vocab_counts_df = pd.DataFrame(
word_count_df.sort_values(by=CNT, ascending=False))
vocab_counts_df[VOCAB] = vocab_counts_df.index
return vocab_counts_df
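# Illustrative comment only: for toy counts {"a": 2, "b": 2}, calc_p_word adds
# a proportion column of 2 / 4 = 0.5 for each word and returns the rows sorted
# by count in descending order.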
def filter_vocab(vocab_counts_df):
# TODO: Add warnings (which words are missing) to log file?
filtered_vocab_counts_df = vocab_counts_df.drop(_CLOSED_CLASS,
errors="ignore")
filtered_count = filtered_vocab_counts_df[CNT]
filtered_count_denom = float(sum(filtered_vocab_counts_df[CNT]))
filtered_vocab_counts_df[PROP] = filtered_count / filtered_count_denom
return filtered_vocab_counts_df | data-measurements-tool-main | data_measurements/dataset_statistics.py |
import pandas as pd
import utils
from sklearn.feature_extraction.text import CountVectorizer
logs = utils.prepare_logging(__file__)
TEXT = "text"
TOKENIZED_TEXT = "tokenized_text"
class Tokenize:
def __init__(self, text_dset, feature=TEXT, tok_feature=TOKENIZED_TEXT,
lowercase=True):
self.text_dset = text_dset
self.feature = feature
self.tok_feature = tok_feature
self.lowercase = lowercase
# Pattern for tokenization
self.cvec = CountVectorizer(token_pattern="(?u)\\b\\w+\\b",
lowercase=lowercase)
self.tokenized_dset = self.do_tokenization()
def do_tokenization(self):
"""
Tokenizes a Hugging Face dataset in the self.feature field.
:return: Hugging Face Dataset with tokenized text in self.tok_feature.
"""
sent_tokenizer = self.cvec.build_tokenizer()
def tokenize_batch(examples):
if self.lowercase:
tok_sent = {
self.tok_feature: [tuple(sent_tokenizer(text.lower())) for
text in examples[self.feature]]}
else:
tok_sent = {
self.tok_feature: [tuple(sent_tokenizer(text)) for text in
examples[self.feature]]}
return tok_sent
tokenized_dset = self.text_dset.map(
tokenize_batch,
batched=True
)
logs.info("Tokenized the dataset.")
return tokenized_dset
def get(self):
return self.tokenized_dset
def get_df(self):
return pd.DataFrame(self.tokenized_dset)
| data-measurements-tool-main | data_measurements/tokenize.py |
data-measurements-tool-main | data_measurements/lengths/__init__.py |
|
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statistics
import utils
from matplotlib.figure import Figure
from os.path import exists
from os.path import join as pjoin
from PIL import Image
from utils import dataset_utils as ds_utils
TEXT_FIELD = "text"
TOKENIZED_FIELD = "tokenized_text"
LENGTH_FIELD = "length"
UNIQ = "num_instance_lengths"
AVG = "average_instance_length"
STD = "standard_dev_instance_length"
logs = utils.prepare_logging(__file__)
def make_fig_lengths(lengths_df):
    # seaborn draws onto the matplotlib Axes created via plt.subplots below,
    # so the plt and sns calls compose onto the same figure.
logs.info("Creating lengths figure.")
plt.switch_backend('Agg')
fig_tok_lengths, axs = plt.subplots(figsize=(15, 6), dpi=150)
plt.xlabel("Number of tokens")
plt.title("Binned counts of text lengths, with kernel density estimate and ticks for each instance.")
sns.histplot(data=lengths_df, kde=True, ax=axs, x=LENGTH_FIELD, legend=False)
sns.rugplot(data=lengths_df, ax=axs)
return fig_tok_lengths
class DMTHelper:
def __init__(self, dstats, load_only=False, save=True):
self.tokenized_df = dstats.tokenized_df
# Whether to only use cache
self.load_only = load_only
# Whether to try using cache first.
# Must be true when self.load_only = True; this function assures that.
self.use_cache = dstats.use_cache
self.cache_dir = dstats.dataset_cache_dir
self.save = save
# Lengths class object
self.lengths_obj = None
# Content shared in the DMT:
# The figure, the table, and the sufficient statistics (measurements)
self.fig_lengths = None
self.lengths_df = None
self.avg_length = None
self.std_length = None
self.uniq_counts = None
# Dict for the measurements, used in caching
self.length_stats_dict = {}
# Filenames, used in caching
self.lengths_dir = "lengths"
length_meas_json = "length_measurements.json"
lengths_fig_png = "lengths_fig.png"
lengths_df_json = "lengths_table.json"
self.length_stats_json_fid = pjoin(self.cache_dir, self.lengths_dir, length_meas_json)
self.lengths_fig_png_fid = pjoin(self.cache_dir, self.lengths_dir, lengths_fig_png)
self.lengths_df_json_fid = pjoin(self.cache_dir, self.lengths_dir, lengths_df_json)
def run_DMT_processing(self):
"""
Gets data structures for the figure, table, and measurements.
"""
# First look to see what we can load from cache.
if self.use_cache:
logs.info("Trying to load from cache...")
# Defines self.lengths_df, self.length_stats_dict, self.fig_lengths
# This is the table, the dict of measurements, and the figure
self.load_lengths_cache()
# Sets the measurements as attributes of the DMT object
self.set_attributes()
# If we do not have measurements loaded from cache...
if not self.length_stats_dict and not self.load_only:
logs.info("Preparing length results")
# Compute length statistics. Uses the Lengths class.
self.lengths_obj = self._prepare_lengths()
# Dict of measurements
self.length_stats_dict = self.lengths_obj.length_stats_dict
# Table of text and lengths
self.lengths_df = self.lengths_obj.lengths_df
# Sets the measurements in the length_stats_dict
self.set_attributes()
# Makes the figure
self.fig_lengths = make_fig_lengths(self.lengths_df)
# Finish
if self.save:
logs.info("Saving results.")
self._write_lengths_cache()
if exists(self.lengths_fig_png_fid):
# As soon as we have a figure, we redefine it as an image.
# This is a hack to handle a UI display error (TODO: file bug)
self.fig_lengths = Image.open(self.lengths_fig_png_fid)
def set_attributes(self):
if self.length_stats_dict:
self.avg_length = self.length_stats_dict[AVG]
self.std_length = self.length_stats_dict[STD]
self.uniq_counts = self.length_stats_dict[UNIQ]
else:
logs.info("No lengths stats found. =(")
def load_lengths_cache(self):
# Dataframe with <sentence, length> exists. Load it.
if exists(self.lengths_df_json_fid):
self.lengths_df = ds_utils.read_df(self.lengths_df_json_fid)
# Image exists. Load it.
if exists(self.lengths_fig_png_fid):
self.fig_lengths = Image.open(self.lengths_fig_png_fid) # mpimg.imread(self.lengths_fig_png_fid)
# Measurements exist. Load them.
if exists(self.length_stats_json_fid):
# Loads the length measurements
self.length_stats_dict = ds_utils.read_json(self.length_stats_json_fid)
def _write_lengths_cache(self):
# Writes the data structures using the corresponding filetypes.
ds_utils.make_path(pjoin(self.cache_dir, self.lengths_dir))
if self.length_stats_dict != {}:
ds_utils.write_json(self.length_stats_dict, self.length_stats_json_fid)
if isinstance(self.fig_lengths, Figure):
self.fig_lengths.savefig(self.lengths_fig_png_fid)
if isinstance(self.lengths_df, pd.DataFrame):
ds_utils.write_df(self.lengths_df, self.lengths_df_json_fid)
def _prepare_lengths(self):
"""Loads a Lengths object and computes length statistics"""
# Length object for the dataset
lengths_obj = Lengths(dataset=self.tokenized_df)
lengths_obj.prepare_lengths()
return lengths_obj
def get_filenames(self):
lengths_fid_dict = {"statistics": self.length_stats_json_fid,
"figure png": self.lengths_fig_png_fid,
"table": self.lengths_df_json_fid}
return lengths_fid_dict
class Lengths:
"""Generic class for text length processing.
Uses DataFrames for faster processing.
    Given a dataframe with tokenized words in a column called TOKENIZED_FIELD,
    and the text instances in a column called TEXT_FIELD, compute statistics.
"""
def __init__(self, dataset):
self.dset_df = dataset
# Dict of measurements
self.length_stats_dict = {}
# Measurements
self.avg_length = None
self.std_length = None
self.num_uniq_lengths = None
# Table of lengths and sentences
self.lengths_df = None
def prepare_lengths(self):
self.lengths_df = pd.DataFrame(self.dset_df[TEXT_FIELD])
self.lengths_df[LENGTH_FIELD] = self.dset_df[TOKENIZED_FIELD].apply(len)
lengths_array = self.lengths_df[LENGTH_FIELD]
self.avg_length = statistics.mean(lengths_array)
self.std_length = statistics.stdev(lengths_array)
self.num_uniq_lengths = len(lengths_array.unique())
self.length_stats_dict = {
"average_instance_length": self.avg_length,
"standard_dev_instance_length": self.std_length,
"num_instance_lengths": self.num_uniq_lengths,
}
| data-measurements-tool-main | data_measurements/lengths/lengths.py |
data-measurements-tool-main | data_measurements/text_duplicates/__init__.py |
|
import evaluate
import logging
import os
import pandas as pd
import plotly.express as px
import utils
import utils.dataset_utils as ds_utils
from collections import Counter
from os.path import exists, isdir
from os.path import join as pjoin
TEXT = "text"
# These are string constants defined in the evaluate library.
# They may need to be updated if the evaluate library changes these strings
DUPS_FRAC = "duplicate_fraction"
# Evaluate calls the dictionary a "list"
DUPS_DICT = "duplicates_dict"
# This isn't in the evaluate measurement, but TODO to add that...
# DUPS_SUM = "duplicate_sum"
logs = utils.prepare_logging(__file__)
class DMTHelper:
"""Helper class for the Data Measurements Tool.
    This allows us to keep all variables and functions related to text
    duplicates in one file.
    Does caching and uses the evaluate library for computation.
"""
def __init__(self, dstats, load_only, save):
        # Input Hugging Face Dataset text instances.
        if dstats.text_dset is None:
            dstats.load_or_prepare_text_dataset()
        self.dset = dstats.text_dset[TEXT]
self.use_cache = dstats.use_cache
# Note: This is None as it can be called different times with different
# settings, and so we want fresh results each time. With the evaluate
# integration, results are different depending on whether
# list_duplicates is set.
self.duplicates_results = None
self.cache_dir = dstats.dataset_cache_dir
self.save = save
self.load_only = load_only
# Filenames
self.dups_dir = "text_duplicates"
dups_json = "text_duplicates.json"
dups_html = "text_duplicates.html"
self.dups_result_json_fid = pjoin(self.cache_dir, self.dups_dir, dups_json)
self.dups_result_html_fid = pjoin(self.cache_dir, self.dups_dir, dups_html)
def run_DMT_processing(self, list_duplicates=True):
"""Calls functions to do the main work.
DMT uses the full duplicates list in a widget,
so it is set to default True.
"""
# First look to see what we can load from cache.
if self.use_cache:
self.duplicates_results = self._load_duplicates_cache()
if self.duplicates_results:
logs.info("Loaded cached text duplicate results.")
if not self.duplicates_results and not self.load_only:
self.duplicates_results = self._prepare_duplicates(list_duplicates=list_duplicates)
logs.info("Prepared duplicates.")
if self.save:
self._write_duplicates_cache()
def _prepare_duplicates(self, list_duplicates=True):
"""Wraps the evaluate library."""
duplicates = evaluate.load("text_duplicates")
results = duplicates.compute(data=self.dset, list_duplicates=list_duplicates)
return results
def _load_duplicates_cache(self):
"""Loads previously computed results from cache."""
results = {}
if exists(self.dups_result_json_fid):
results = ds_utils.read_json(self.dups_result_json_fid)
return results
def _write_duplicates_cache(self):
"""Writes newly computed results to cache."""
ds_utils.make_path(pjoin(self.cache_dir, self.dups_dir))
if self.duplicates_results:
ds_utils.write_json(self.duplicates_results, self.dups_result_json_fid)
# TODO: Use df_to_html rather than write_json_as_html;
# this will make it possible to order the results.
# But they must first be turned into a dataframe.
ds_utils.write_json_as_html(self.duplicates_results, self.dups_result_html_fid)
def get_duplicates_filenames(self):
dups_fid_dict = {"statistics": self.dups_result_json_fid, "html":self.dups_result_html_fid}
return dups_fid_dict
| data-measurements-tool-main | data_measurements/text_duplicates/text_duplicates.py |
data-measurements-tool-main | data_measurements/embeddings/__init__.py |
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from os.path import exists
from os.path import join as pjoin
import plotly.graph_objects as go
import torch
import transformers
from datasets import load_from_disk
from plotly.io import read_json
from tqdm import tqdm
from utils.dataset_utils import EMBEDDING_FIELD
def sentence_mean_pooling(model_output, attention_mask):
"""Mean pooling of token embeddings for a sentence."""
token_embeddings = model_output[
0
] # First element of model_output contains all token embeddings
input_mask_expanded = (
attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
)
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
input_mask_expanded.sum(1), min=1e-9
)
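# Shape sketch for sentence_mean_pooling (illustrative comment, hypothetical sizes):
#     token_embeddings: (batch=2, seq_len=4, hidden=768)
#     attention_mask:   (2, 4), with 1s for real tokens and 0s for padding
#     returned tensor:  (2, 768), the mask-weighted mean over the seq_len axis.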
class Embeddings:
def __init__(
self,
dstats=None,
text_dset=None,
text_field_name="text",
cache_path="",
use_cache=False,
):
"""Item embeddings and clustering"""
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
self.model_name = "sentence-transformers/all-mpnet-base-v2"
self.tokenizer = transformers.AutoTokenizer.from_pretrained(self.model_name)
self.model = transformers.AutoModel.from_pretrained(self.model_name).to(
self.device
)
self.text_dset = text_dset if dstats is None else dstats.text_dset
self.text_field_name = (
text_field_name if dstats is None else dstats.our_text_field
)
self.cache_path = cache_path if dstats is None else dstats.cache_path
self.embeddings_dset_fid = pjoin(self.cache_path, "embeddings_dset")
self.embeddings_dset = None
self.node_list_fid = pjoin(self.cache_path, "node_list.th")
self.node_list = None
self.nid_map = None
self.fig_tree_fid = pjoin(self.cache_path, "node_figure.json")
self.fig_tree = None
self.cached_clusters = {}
self.use_cache = use_cache
def compute_sentence_embeddings(self, sentences):
"""
Takes a list of sentences and computes their embeddings
using self.tokenizer and self.model (with output dimension D)
followed by mean pooling of the token representations and normalization
Args:
sentences ([string]): list of N input sentences
Returns:
torch.Tensor: sentence embeddings, dimension NxD
"""
batch = self.tokenizer(
sentences, padding=True, truncation=True, return_tensors="pt"
)
batch = {k: v.to(self.device) for k, v in batch.items()}
with torch.no_grad():
model_output = self.model(**batch)
sentence_embeds = sentence_mean_pooling(
model_output, batch["attention_mask"]
)
sentence_embeds /= sentence_embeds.norm(dim=-1, keepdim=True)
return sentence_embeds
def make_embeddings(self):
"""
Batch computes the embeddings of the Dataset self.text_dset,
using the field self.text_field_name as input.
Returns:
Dataset: HF dataset object with a single EMBEDDING_FIELD field
corresponding to the embeddings (list of floats)
"""
def batch_embed_sentences(sentences):
return {
EMBEDDING_FIELD: [
embed.tolist()
for embed in self.compute_sentence_embeddings(
sentences[self.text_field_name]
)
]
}
self.embeddings_dset = self.text_dset.map(
batch_embed_sentences,
batched=True,
batch_size=32,
remove_columns=[self.text_field_name],
)
return self.embeddings_dset
def make_text_embeddings(self):
"""Load embeddings dataset from cache or compute it."""
if self.use_cache and exists(self.embeddings_dset_fid):
self.embeddings_dset = load_from_disk(self.embeddings_dset_fid)
else:
self.embeddings_dset = self.make_embeddings()
self.embeddings_dset.save_to_disk(self.embeddings_dset_fid)
def make_hierarchical_clustering(
self,
batch_size=1000,
approx_neighbors=1000,
min_cluster_size=10,
):
if self.use_cache and exists(self.node_list_fid):
self.node_list, self.nid_map = torch.load(self.node_list_fid)
else:
self.make_text_embeddings()
embeddings = torch.Tensor(self.embeddings_dset[EMBEDDING_FIELD])
self.node_list = fast_cluster(
embeddings, batch_size, approx_neighbors, min_cluster_size
)
self.nid_map = dict(
[(node["nid"], nid) for nid, node in enumerate(self.node_list)]
)
torch.save((self.node_list, self.nid_map), self.node_list_fid)
if self.use_cache and exists(self.fig_tree_fid):
self.fig_tree = read_json(self.fig_tree_fid)
else:
self.fig_tree = make_tree_plot(
self.node_list, self.nid_map, self.text_dset, self.text_field_name
)
self.fig_tree.write_json(self.fig_tree_fid)
def find_cluster_beam(self, sentence, beam_size=20):
"""
This function finds the `beam_size` leaf clusters that are closest to the
proposed sentence and returns the full path from the root to the cluster
along with the dot product between the sentence embedding and the
cluster centroid
Args:
sentence (string): input sentence for which to find clusters
beam_size (int): this is a beam size algorithm to explore the tree
Returns:
[([int], float)]: list of (path_from_root, score) sorted by score
"""
embed = self.compute_sentence_embeddings([sentence])[0].to("cpu")
active_paths = [([0], torch.dot(embed, self.node_list[0]["centroid"]).item())]
finished_paths = []
children_ids_list = [
[
self.nid_map[nid]
for nid in self.node_list[path[-1]]["children_ids"]
if nid in self.nid_map
]
for path, score in active_paths
]
while len(active_paths) > 0:
next_ids = sorted(
[
(
beam_id,
nid,
torch.dot(embed, self.node_list[nid]["centroid"]).item(),
)
for beam_id, children_ids in enumerate(children_ids_list)
for nid in children_ids
],
key=lambda x: x[2],
reverse=True,
)[:beam_size]
paths = [
(active_paths[beam_id][0] + [next_id], score)
for beam_id, next_id, score in next_ids
]
active_paths = []
for path, score in paths:
if (
len(
[
nid
for nid in self.node_list[path[-1]]["children_ids"]
if nid in self.nid_map
]
)
> 0
):
active_paths += [(path, score)]
else:
finished_paths += [(path, score)]
children_ids_list = [
[
self.nid_map[nid]
for nid in self.node_list[path[-1]]["children_ids"]
if nid in self.nid_map
]
for path, score in active_paths
]
return sorted(
finished_paths,
key=lambda x: x[-1],
reverse=True,
)[:beam_size]
def prepare_merges(embeddings, batch_size=1000, approx_neighbors=1000, low_thres=0.5):
"""
Prepares an initial list of merges for hierarchical
clustering. First compute the `approx_neighbors` nearest neighbors,
then propose a merge for any two points that are closer than `low_thres`
Note that if a point has more than `approx_neighbors` neighbors
closer than `low_thres`, this approach will miss some of those merges
Args:
embeddings (toch.Tensor): Tensor of sentence embeddings - dimension NxD
batch_size (int): compute nearest neighbors of `batch_size` points at a time
approx_neighbors (int): only keep `approx_neighbors` nearest neighbors of a point
low_thres (float): only return merges where the dot product is greater than `low_thres`
Returns:
torch.LongTensor: proposed merges ([i, j] with i>j) - dimension: Mx2
torch.Tensor: merge scores - dimension M
"""
top_idx_pre = torch.cat(
[torch.LongTensor(range(embeddings.shape[0]))[:, None]] * batch_size, dim=1
)
top_val_all = torch.Tensor(0, approx_neighbors)
top_idx_all = torch.LongTensor(0, approx_neighbors)
n_batches = math.ceil(len(embeddings) / batch_size)
for b in tqdm(range(n_batches)):
# TODO: batch across second dimension
cos_scores = torch.mm(
embeddings[b * batch_size : (b + 1) * batch_size], embeddings.t()
)
for i in range(cos_scores.shape[0]):
cos_scores[i, (b * batch_size) + i :] = -1
top_val_large, top_idx_large = cos_scores.topk(
k=approx_neighbors, dim=-1, largest=True
)
top_val_all = torch.cat([top_val_all, top_val_large], dim=0)
top_idx_all = torch.cat([top_idx_all, top_idx_large], dim=0)
max_neighbor_dist = top_val_large[:, -1].max().item()
if max_neighbor_dist > low_thres:
print(
f"WARNING: with the current set of neireast neighbor, the farthest is {max_neighbor_dist}"
)
all_merges = torch.cat(
[
top_idx_pre[top_val_all > low_thres][:, None],
top_idx_all[top_val_all > low_thres][:, None],
],
dim=1,
)
all_merge_scores = top_val_all[top_val_all > low_thres]
return (all_merges, all_merge_scores)
def merge_nodes(nodes, current_thres, previous_thres, all_merges, all_merge_scores):
"""
Merge all nodes if the max dot product between any of their descendants
is greater than current_thres.
Args:
nodes ([dict]): list of dicts representing the current set of nodes
current_thres (float): merge all nodes closer than current_thres
previous_thres (float): nodes closer than previous_thres are already merged
all_merges (torch.LongTensor): proposed merges ([i, j] with i>j) - dimension: Mx2
all_merge_scores (torch.Tensor): merge scores - dimension M
Returns:
[dict]: extended list with the newly created internal nodes
"""
merge_ids = (all_merge_scores <= previous_thres) * (
all_merge_scores > current_thres
)
if merge_ids.sum().item() > 0:
merges = all_merges[merge_ids]
for a, b in merges.tolist():
node_a = nodes[a]
while node_a["parent_id"] != -1:
node_a = nodes[node_a["parent_id"]]
node_b = nodes[b]
while node_b["parent_id"] != -1:
node_b = nodes[node_b["parent_id"]]
if node_a["nid"] == node_b["nid"]:
continue
else:
# merge if threshold allows
if (node_a["depth"] + node_b["depth"]) > 0 and min(
node_a["merge_threshold"], node_b["merge_threshold"]
) == current_thres:
merge_to = None
merge_from = None
if node_a["nid"] < node_b["nid"]:
merge_from = node_a
merge_to = node_b
if node_a["nid"] > node_b["nid"]:
merge_from = node_b
merge_to = node_a
merge_to["depth"] = max(merge_to["depth"], merge_from["depth"])
merge_to["weight"] += merge_from["weight"]
merge_to["children_ids"] += (
merge_from["children_ids"]
if merge_from["depth"] > 0
else [merge_from["nid"]]
)
for cid in merge_from["children_ids"]:
nodes[cid]["parent_id"] = merge_to["nid"]
merge_from["parent_id"] = merge_to["nid"]
# else new node
else:
new_nid = len(nodes)
new_node = {
"nid": new_nid,
"parent_id": -1,
"depth": max(node_a["depth"], node_b["depth"]) + 1,
"weight": node_a["weight"] + node_b["weight"],
"children": [],
"children_ids": [node_a["nid"], node_b["nid"]],
"example_ids": [],
"merge_threshold": current_thres,
}
node_a["parent_id"] = new_nid
node_b["parent_id"] = new_nid
nodes += [new_node]
return nodes
def finalize_node(node, nodes, min_cluster_size):
"""Post-process nodes to sort children by descending weight,
get full list of leaves in the sub-tree, and direct links
    to the children nodes, then recurses to all children.
Nodes with fewer than `min_cluster_size` descendants are collapsed
into a single leaf.
"""
node["children"] = sorted(
[
finalize_node(nodes[cid], nodes, min_cluster_size)
for cid in node["children_ids"]
],
key=lambda x: x["weight"],
reverse=True,
)
if node["depth"] > 0:
node["example_ids"] = [
eid for child in node["children"] for eid in child["example_ids"]
]
node["children"] = [
child for child in node["children"] if child["weight"] >= min_cluster_size
]
assert node["weight"] == len(node["example_ids"]), print(node)
return node
def fast_cluster(
embeddings,
batch_size=1000,
approx_neighbors=1000,
min_cluster_size=10,
low_thres=0.5,
):
"""
Computes an approximate hierarchical clustering based on example
    embeddings. The join criterion is single-linkage (min) clustering, i.e. two
    clusters are joined if any pair of their descendants are closer than a
    threshold. The approximation comes from the fact that only the
    `approx_neighbors` nearest neighbors of an example are considered for merges.
"""
batch_size = min(embeddings.shape[0], batch_size)
all_merges, all_merge_scores = prepare_merges(
embeddings, batch_size, approx_neighbors, low_thres
)
# prepare leaves
nodes = [
{
"nid": nid,
"parent_id": -1,
"depth": 0,
"weight": 1,
"children": [],
"children_ids": [],
"example_ids": [nid],
"merge_threshold": 1.0,
}
for nid in range(embeddings.shape[0])
]
# one level per threshold range
for i in range(10):
p_thres = 1 - i * 0.05
c_thres = 0.95 - i * 0.05
nodes = merge_nodes(nodes, c_thres, p_thres, all_merges, all_merge_scores)
# make root
root_children = [
node
for node in nodes
if node["parent_id"] == -1 and node["weight"] >= min_cluster_size
]
root = {
"nid": len(nodes),
"parent_id": -1,
"depth": max([node["depth"] for node in root_children]) + 1,
"weight": sum([node["weight"] for node in root_children]),
"children": [],
"children_ids": [node["nid"] for node in root_children],
"example_ids": [],
"merge_threshold": -1.0,
}
nodes += [root]
for node in root_children:
node["parent_id"] = root["nid"]
# finalize tree
tree = finalize_node(root, nodes, min_cluster_size)
node_list = []
def rec_map_nodes(node, node_list):
node_list += [node]
for child in node["children"]:
rec_map_nodes(child, node_list)
rec_map_nodes(tree, node_list)
# get centroids and distances
for node in node_list:
node_embeds = embeddings[node["example_ids"]]
node["centroid"] = node_embeds.sum(dim=0)
node["centroid"] /= node["centroid"].norm()
node["centroid_dot_prods"] = torch.mv(node_embeds, node["centroid"])
node["sorted_examples_centroid"] = sorted(
[
(eid, edp.item())
for eid, edp in zip(node["example_ids"], node["centroid_dot_prods"])
],
key=lambda x: x[1],
reverse=True,
)
return node_list
def make_tree_plot(node_list, nid_map, text_dset, text_field_name):
"""
Makes a graphical representation of the tree encoded
in node-list. The hover label for each node shows the number
of descendants and the 5 examples that are closest to the centroid
"""
for nid, node in enumerate(node_list):
        # get the (up to 5) examples closest to the node centroid
node_examples = {}
for sid, score in node["sorted_examples_centroid"]:
node_examples[text_dset[sid][text_field_name]] = score
if len(node_examples) >= 5:
break
node["label"] = node.get(
"label",
f"{nid:2d} - {node['weight']:5d} items <br>"
+ "<br>".join(
[
f" {score:.2f} > {txt[:64]}" + ("..." if len(txt) >= 63 else "")
for txt, score in node_examples.items()
]
),
)
# make plot nodes
labels = [node["label"] for node in node_list]
root = node_list[0]
root["X"] = 0
root["Y"] = 0
def rec_make_coordinates(node):
total_weight = 0
add_weight = len(node["example_ids"]) - sum(
[child["weight"] for child in node["children"]]
)
for child in node["children"]:
child["X"] = node["X"] + total_weight
child["Y"] = node["Y"] - 1
total_weight += child["weight"] + add_weight / len(node["children"])
rec_make_coordinates(child)
rec_make_coordinates(root)
E = [] # list of edges
Xn = []
Yn = []
Xe = []
Ye = []
for nid, node in enumerate(node_list):
Xn += [node["X"]]
Yn += [node["Y"]]
for child in node["children"]:
E += [(nid, nid_map[child["nid"]])]
Xe += [node["X"], child["X"], None]
Ye += [node["Y"], child["Y"], None]
# make figure
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=Xe,
y=Ye,
mode="lines",
line=dict(color="rgb(210,210,210)", width=1),
hoverinfo="none",
)
)
fig.add_trace(
go.Scatter(
x=Xn,
y=Yn,
mode="markers",
name="nodes",
marker=dict(
symbol="circle-dot",
size=18,
color="#6175c1",
line=dict(color="rgb(50,50,50)", width=1)
# '#DB4551',
),
text=labels,
hoverinfo="text",
opacity=0.8,
)
)
return fig
| data-measurements-tool-main | data_measurements/embeddings/embeddings.py |
data-measurements-tool-main | data_measurements/npmi/__init__.py |
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import sys
import utils
import utils.dataset_utils as ds_utils
import warnings
from collections import defaultdict
from os.path import exists
from os.path import join as pjoin
from sklearn.preprocessing import MultiLabelBinarizer
from utils.dataset_utils import (CNT, TOKENIZED_FIELD)
# Might be nice to print to log instead? Happens when we drop closed class.
warnings.filterwarnings(action="ignore", category=UserWarning)
# When we divide by 0 in log
np.seterr(divide="ignore")
# treating inf values as NaN as well
pd.set_option("use_inf_as_na", True)
logs = utils.prepare_logging(__file__)
# TODO: Should be possible for a user to specify this.
NUM_BATCHES = 500
# For the associations of an identity term
SING = "associations"
# For the difference between the associations of identity terms
DIFF = "biases"
# Used in the figures we show in DMT
DMT = "combined"
def pair_terms(id_terms):
"""Creates alphabetically ordered paired terms based on the given terms."""
pairs = []
for i in range(len(id_terms)):
term1 = id_terms[i]
for j in range(i + 1, len(id_terms)):
term2 = id_terms[j]
# Use one ordering for a pair.
pair = tuple(sorted([term1, term2]))
pairs += [pair]
return pairs
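# Example of the expected pairing behavior (illustrative comment only):
#     pair_terms(["she", "he", "they"])
#     # -> [("he", "she"), ("she", "they"), ("he", "they")]
# Each pair is sorted alphabetically; the list follows the input generation order.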
class DMTHelper:
"""Helper class for the Data Measurements Tool.
    This allows us to keep all variables and functions related to nPMI
    word associations in one file.
"""
def __init__(self, dstats, identity_terms, load_only=False, use_cache=False,
save=True):
# The data measurements tool settings (dataset, config, etc.)
self.dstats = dstats
# Whether we can use caching (when live, no).
self.load_only = load_only
# Whether to first try using cache before calculating
self.use_cache = use_cache
# Whether to save results
self.save = save
# Tokenized dataset
tokenized_df = dstats.tokenized_df
self.tokenized_sentence_df = tokenized_df[TOKENIZED_FIELD]
# Dataframe of shape #vocab x 1 (count)
self.vocab_counts_df = dstats.vocab_counts_df
# Cutoff for the number of times something must occur to be included
self.min_count = dstats.min_vocab_count
self.cache_path = pjoin(dstats.dataset_cache_dir, SING)
self.avail_terms_json_fid = pjoin(self.cache_path,
"identity_terms.json")
# TODO: Users ideally can type in whatever words they want.
# This is the full list of terms.
self.identity_terms = identity_terms
logs.info("Using term list:")
logs.info(self.identity_terms)
        # The subset of identity_terms that occur at least MIN_VOCAB_COUNT times
self.avail_identity_terms = []
# TODO: Let users specify
self.open_class_only = True
# Single-word associations
self.assoc_results_dict = defaultdict(dict)
# Paired term association bias
self.bias_results_dict = defaultdict(dict)
# Dataframes used in displays.
self.bias_dfs_dict = defaultdict(dict)
# Results of the single word associations and their paired bias values.
# Formatted as:
# {(s1,s2)): {pd.DataFrame({s1-s2:diffs, s1:assoc, s2:assoc})}}
self.results_dict = defaultdict(lambda: defaultdict(dict))
# Filenames for cache, based on the results
self.filenames_dict = defaultdict(dict)
def run_DMT_processing(self):
# The identity terms that can be used
self.load_or_prepare_avail_identity_terms()
# Association measurements & pair-wise differences for identity terms.
self.load_or_prepare_dmt_results()
def load_or_prepare_avail_identity_terms(self):
"""
Figures out what identity terms the user can select, based on whether
they occur more than self.min_vocab_count times
Provides identity terms -- uniquely and in pairs -- occurring at least
self.min_vocab_count times.
"""
# If we're trying to use the cache of available terms
if self.use_cache:
self.avail_identity_terms = self._load_identity_cache()
if self.avail_identity_terms:
logs.info(
"Loaded identity terms occuring >%s times" % self.min_count)
# Figure out the identity terms if we're not just loading from cache
if not self.load_only:
if not self.avail_identity_terms:
self.avail_identity_terms = self._prepare_identity_terms()
# Finish
if self.save:
self._write_term_cache()
def _load_identity_cache(self):
if exists(self.avail_terms_json_fid):
avail_identity_terms = ds_utils.read_json(self.avail_terms_json_fid)
return avail_identity_terms
return []
def _prepare_identity_terms(self):
"""Uses DataFrame magic to return those terms that appear
greater than min_vocab times."""
# Mask to get the identity terms
true_false = [term in self.vocab_counts_df.index for term in
self.identity_terms]
# List of identity terms
word_list_tmp = [x for x, y in zip(self.identity_terms, true_false) if
y]
# Whether said identity terms have a count > min_count
true_false_counts = [
self.vocab_counts_df.loc[word, CNT] >= self.min_count for word in
word_list_tmp]
# List of identity terms with a count higher than min_count
avail_identity_terms = [word for word, y in
zip(word_list_tmp, true_false_counts) if y]
logs.debug("Identity terms that occur > %s times are:" % self.min_count)
logs.debug(avail_identity_terms)
return avail_identity_terms
def load_or_prepare_dmt_results(self):
# Initialize with no results (reset).
self.results_dict = {}
# Filenames for caching and saving
self._make_fids()
# If we're trying to use the cache of already computed results
if self.use_cache:
# Loads the association results and dataframes used in the display.
logs.debug("Trying to load...")
self.results_dict = self._load_dmt_cache()
# Compute results if we can
if not self.load_only:
# If there isn't a solution using cache
if not self.results_dict:
# Does the actual computations
self.prepare_results()
# Finish
if self.save:
# Writes the paired & singleton dataframe out.
self._write_dmt_cache()
def _load_dmt_cache(self):
"""
Loads dataframe with paired differences and individual item scores.
"""
results_dict = defaultdict(lambda: defaultdict(dict))
pairs = pair_terms(self.avail_identity_terms)
for pair in pairs:
combined_fid = self.filenames_dict[DMT][pair]
if exists(combined_fid):
results_dict[pair] = ds_utils.read_df(combined_fid)
return results_dict
def prepare_results(self):
assoc_obj = nPMI(self.dstats.vocab_counts_df,
self.tokenized_sentence_df,
self.avail_identity_terms)
self.assoc_results_dict = assoc_obj.assoc_results_dict
self.results_dict = assoc_obj.bias_results_dict
def _prepare_dmt_dfs(self, measure="npmi"):
"""
Create the main dataframe that is used in the DMT, which lists
the npmi scores for each paired identity term and the difference between
them. The difference between them is the "bias".
"""
# Paired identity terms, associations and differences, in one dataframe.
bias_dfs_dict = defaultdict(dict)
logs.debug("bias results dict is")
logs.debug(self.bias_results_dict)
for pair in sorted(self.bias_results_dict):
combined_df = pd.DataFrame()
            # Paired identity terms, values are the difference between them.
combined_df[pair] = pd.DataFrame(self.bias_results_dict[pair])
s1 = pair[0]
s2 = pair[1]
# Single identity term 1, values
combined_df[s1] = pd.DataFrame(self.assoc_results_dict[s1][measure])
# Single identity term 2, values
combined_df[s2] = pd.DataFrame(self.assoc_results_dict[s2][measure])
# Full dataframe with scores per-term,
# as well as the difference between.
bias_dfs_dict[pair] = combined_df
# {pair: {pd.DataFrame({(s1,s2)):diffs, s1:assocs, s2:assocs})}}
logs.debug("combined df is")
logs.debug(bias_dfs_dict)
return bias_dfs_dict
def _write_term_cache(self):
ds_utils.make_path(self.cache_path)
if self.avail_identity_terms:
ds_utils.write_json(self.avail_identity_terms,
self.avail_terms_json_fid)
def _write_dmt_cache(self, measure="npmi"):
ds_utils.make_path(pjoin(self.cache_path, measure))
for pair, bias_df in self.results_dict.items():
logs.debug("Results for pair is:")
logs.debug(bias_df)
fid = self.filenames_dict[DMT][pair]
logs.debug("Writing to %s" % fid)
ds_utils.write_df(bias_df, fid)
def _make_fids(self, measure="npmi"):
"""
Utility function to create filename/path strings for the different
result caches. This include single identity term results as well
as the difference between them. Also includes the datastructure used in
the DMT, which is a dataframe that has:
(term1, term2) difference, term1 (scores), term2 (scores)
"""
self.filenames_dict = {SING: {}, DIFF: {}, DMT: {}}
# When we have the available identity terms,
# we can make cache filenames for them.
for id_term in self.avail_identity_terms:
filename = SING + "-" + id_term + ".json"
json_fid = pjoin(self.cache_path, measure, filename)
self.filenames_dict[SING][id_term] = json_fid
paired_terms = pair_terms(self.avail_identity_terms)
for id_term_tuple in paired_terms:
# The paired association results (bias) are stored with these files.
id_term_str = '-'.join(id_term_tuple)
filename = DIFF + "-" + id_term_str + ".json"
json_fid = pjoin(self.cache_path, measure, filename)
self.filenames_dict[DIFF][id_term_tuple] = json_fid
# The display dataframes in the DMT are stored with these files.
filename = DMT + "-" + id_term_str + ".json"
json_fid = pjoin(self.cache_path, measure, filename)
self.filenames_dict[DMT][id_term_tuple] = json_fid
def get_display(self, s1, s2):
pair = tuple(sorted([s1, s2]))
display_df = self.results_dict[pair]
logs.debug(self.results_dict)
display_df.columns = ["bias", s1, s2]
return display_df
def get_filenames(self):
filenames = {"available terms": self.avail_terms_json_fid,
"results": self.filenames_dict}
return filenames
class nPMI:
"""
Uses the vocabulary dataframe and tokenized sentences to calculate
co-occurrence statistics, PMI, and nPMI
"""
def __init__(self, vocab_counts_df, tokenized_sentence_df, given_id_terms):
logs.debug("Initiating assoc class.")
self.vocab_counts_df = vocab_counts_df
# TODO: Change this logic so just the vocabulary is given.
self.vocabulary = list(vocab_counts_df.index)
self.vocab_counts = pd.DataFrame([0] * len(self.vocabulary))
logs.debug("vocabulary is is")
logs.debug(self.vocab_counts_df)
self.tokenized_sentence_df = tokenized_sentence_df
logs.debug("tokenized sentences are")
logs.debug(self.tokenized_sentence_df)
self.given_id_terms = given_id_terms
logs.info("identity terms are")
logs.info(self.given_id_terms)
# Terms we calculate the difference between
self.paired_terms = pair_terms(given_id_terms)
# Matrix of # sentences x vocabulary size
self.word_cnts_per_sentence = self.count_words_per_sentence()
logs.info("Calculating results...")
# Formatted as {subgroup:{"count":{...},"npmi":{...}}}
self.assoc_results_dict = self.calc_measures()
# Dictionary keyed by pair tuples. Each value is a dataframe with
# vocab terms as the index, and columns of paired difference and
# individual scores for the two identity terms.
self.bias_results_dict = self.calc_bias(self.assoc_results_dict)
def count_words_per_sentence(self):
# Counts the number of each vocabulary item per-sentence in batches.
logs.info("Creating co-occurrence matrix for nPMI calculations.")
word_cnts_per_sentence = []
logs.info(self.tokenized_sentence_df)
batches = np.linspace(0, self.tokenized_sentence_df.shape[0],
NUM_BATCHES).astype(int)
# Creates matrix of size # batches x # sentences
for batch_num in range(len(batches) - 1):
# Makes matrix shape: batch size (# sentences) x # words,
# with the occurrence of each word per sentence.
# vocab_counts_df.index is the vocabulary.
mlb = MultiLabelBinarizer(classes=self.vocabulary)
if batch_num % 100 == 0:
logs.debug(
"%s of %s sentence binarize batches." % (
str(batch_num), str(len(batches)))
)
# Per-sentence word counts
sentence_batch = self.tokenized_sentence_df[
batches[batch_num]:batches[batch_num + 1]]
mlb_series = mlb.fit_transform(sentence_batch)
word_cnts_per_sentence.append(mlb_series)
return word_cnts_per_sentence
def calc_measures(self):
id_results = {}
for subgroup in self.given_id_terms:
logs.info("Calculating for %s " % subgroup)
# Index of the identity term in the vocabulary
subgroup_idx = self.vocabulary.index(subgroup)
print("idx is %s" % subgroup_idx)
logs.debug("Calculating co-occurrences...")
vocab_cooc_df = self.calc_cooccurrences(subgroup, subgroup_idx)
logs.debug("Calculating PMI...")
pmi_df = self.calc_PMI(vocab_cooc_df, subgroup)
logs.debug("PMI dataframe is:")
logs.debug(pmi_df)
logs.debug("Calculating nPMI...")
npmi_df = self.calc_nPMI(pmi_df, vocab_cooc_df, subgroup)
logs.debug("npmi df is")
logs.debug(npmi_df)
# Create a data structure for the identity term associations
id_results[subgroup] = {"count": vocab_cooc_df,
"pmi": pmi_df,
"npmi": npmi_df}
logs.debug("results_dict is:")
        logs.debug(id_results)
return id_results
def calc_cooccurrences(self, subgroup, subgroup_idx):
initialize = True
coo_df = None
# Big computation here! Should only happen once.
logs.debug(
"Approaching big computation! Here, we binarize all words in the "
"sentences, making a sparse matrix of sentences."
)
for batch_id in range(len(self.word_cnts_per_sentence)):
# Every 100 batches, print out the progress.
if not batch_id % 100:
logs.debug(
"%s of %s co-occurrence count batches"
% (str(batch_id), str(len(self.word_cnts_per_sentence)))
)
# List of all the sentences (list of vocab) in that batch
batch_sentence_row = self.word_cnts_per_sentence[batch_id]
# Dataframe of # sentences in batch x vocabulary size
sent_batch_df = pd.DataFrame(batch_sentence_row)
# Subgroup counts per-sentence for the given batch
subgroup_df = sent_batch_df[subgroup_idx]
subgroup_df.columns = [subgroup]
# Remove the sentences where the count of the subgroup is 0.
# This way we have less computation & resources needs.
subgroup_df = subgroup_df[subgroup_df > 0]
mlb_subgroup_only = sent_batch_df[sent_batch_df[subgroup_idx] > 0]
# Create cooccurrence matrix for the given subgroup and all words.
batch_coo_df = pd.DataFrame(mlb_subgroup_only.T.dot(subgroup_df))
# Creates a batch-sized dataframe of co-occurrence counts.
# Note these could just be summed rather than be batch size.
if initialize:
coo_df = batch_coo_df
else:
coo_df = coo_df.add(batch_coo_df, fill_value=0)
initialize = False
logs.debug("Made co-occurrence matrix")
logs.debug(coo_df)
count_df = coo_df.set_index(self.vocab_counts_df.index)
count_df.columns = ["count"]
count_df["count"] = count_df["count"].astype(int)
return count_df
def calc_PMI(self, vocab_cooc_df, subgroup):
"""A
# PMI(x;y) = h(y) - h(y|x)
# = h(subgroup) - h(subgroup|word)az
# = log (p(subgroup|word) / p(subgroup))
# nPMI additionally divides by -log(p(x,y)) = -log(p(x|y)p(y))
"""
print("vocab cooc df")
print(vocab_cooc_df)
print("vocab counts")
print(self.vocab_counts_df["count"])
# Calculation of p(subgroup)
subgroup_prob = self.vocab_counts_df.loc[subgroup]["proportion"]
# Calculation of p(subgroup|word) = count(subgroup,word) / count(word)
        # Because the indices match (the vocab words), this element-wise
        # division automatically aligns on the vocabulary index.
vocab_cooc_df.columns = ["cooc"]
p_subgroup_g_word = (
vocab_cooc_df["cooc"] / self.vocab_counts_df["count"])
logs.info("p_subgroup_g_word is")
logs.info(p_subgroup_g_word)
pmi_df = pd.DataFrame()
pmi_df[subgroup] = np.log(p_subgroup_g_word / subgroup_prob).dropna()
# Note: A potentially faster solution for adding count, npmi,
# can be based on this zip idea:
# df_test['size_kb'], df_test['size_mb'], df_test['size_gb'] =
# zip(*df_test['size'].apply(sizes))
return pmi_df
def calc_nPMI(self, pmi_df, vocab_cooc_df, subgroup):
"""
# nPMI additionally divides by -log(p(x,y)) = -log(p(x|y)p(y))
# = -log(p(word|subgroup)p(word))
"""
p_word_g_subgroup = vocab_cooc_df["cooc"] / sum(vocab_cooc_df["cooc"])
logs.debug("p_word_g_subgroup")
logs.debug(p_word_g_subgroup)
p_word = pmi_df.apply(
lambda x: self.vocab_counts_df.loc[x.name]["proportion"], axis=1
)
logs.debug("p word is")
logs.debug(p_word)
normalize_pmi = -np.log(p_word_g_subgroup * p_word)
npmi_df = pd.DataFrame()
npmi_df[subgroup] = pmi_df[subgroup] / normalize_pmi
return npmi_df.dropna()
def calc_bias(self, measurements_dict, measure="npmi"):
"""Uses the subgroup dictionaries to compute the differences across pairs.
Uses dictionaries rather than dataframes due to the fact that dicts seem
to be preferred amongst evaluate users so far.
:return: Dict of (id_term1, id_term2):{term1:diff, term2:diff ...}"""
paired_results_dict = {}
for pair in self.paired_terms:
paired_results = pd.DataFrame()
s1 = pair[0]
s2 = pair[1]
s1_results = measurements_dict[s1][measure]
s2_results = measurements_dict[s2][measure]
# !!! This is the final result of all the work !!!
word_diffs = s1_results[s1] - s2_results[s2]
paired_results[("%s - %s" % (s1, s2))] = word_diffs
paired_results[s1] = s1_results
paired_results[s2] = s2_results
paired_results_dict[pair] = paired_results.dropna()
logs.debug("Paired bias results from the main nPMI class are ")
logs.debug(paired_results_dict)
return paired_results_dict
def _write_debug_msg(self, batch_id, subgroup_df=None,
subgroup_sentences=None, msg_type="batching"):
if msg_type == "batching":
if not batch_id % 100:
logs.debug(
"%s of %s co-occurrence count batches"
% (str(batch_id), str(len(self.word_cnts_per_sentence)))
)
elif msg_type == "transpose":
if not batch_id % 100:
logs.debug("Removing 0 counts, subgroup_df is")
logs.debug(subgroup_df)
logs.debug("subgroup_sentences is")
logs.debug(subgroup_sentences)
logs.debug(
"Now we do the transpose approach for co-occurrences")
| data-measurements-tool-main | data_measurements/npmi/npmi.py |
import evaluate
import logging
import os
import pandas as pd
import plotly.express as px
import utils
import utils.dataset_utils as ds_utils
from collections import Counter
from os.path import exists, isdir
from os.path import join as pjoin
LABEL_FIELD = "labels"
LABEL_NAMES = "label_names"
LABEL_LIST = "label_list"
LABEL_MEASUREMENT = "label_measurement"
# Specific to the evaluate library
EVAL_LABEL_MEASURE = "label_distribution"
EVAL_LABEL_ID = "labels"
EVAL_LABEL_FRAC = "fractions"
# TODO: This should ideally be in what's returned from the evaluate library
EVAL_LABEL_SUM = "sums"
logs = utils.prepare_logging(__file__)
def map_labels(label_field, ds_name_to_dict, ds_name, config_name):
try:
label_field, label_names = (
ds_name_to_dict[ds_name][config_name]["features"][label_field][0]
if len(
ds_name_to_dict[ds_name][config_name]["features"][label_field]) > 0
else ((), [])
)
except KeyError as e:
logs.exception(e)
logs.warning("Not returning a label-name mapping")
return []
return label_names
def make_label_results_dict(label_measurement, label_names):
label_dict = {LABEL_MEASUREMENT: label_measurement,
LABEL_NAMES: label_names}
return label_dict
def make_label_fig(label_results, chart_type="pie"):
try:
label_names = label_results[LABEL_NAMES]
label_measurement = label_results[LABEL_MEASUREMENT]
label_sums = label_measurement[EVAL_LABEL_SUM]
        if chart_type == "bar":
            # Use a plotly bar chart so the figure can be serialized with
            # write_plotly / write_html downstream, like the pie chart.
            fig_labels = px.bar(
                x=label_measurement[EVAL_LABEL_MEASURE][EVAL_LABEL_ID],
                y=label_measurement[EVAL_LABEL_MEASURE][EVAL_LABEL_FRAC])
else:
if chart_type != "pie":
logs.info("Oops! Don't have that chart-type implemented.")
logs.info("Making the default pie chart")
# IMDB - unsupervised has a labels column where all values are -1,
# which breaks the assumption that
# the number of label_names == the number of label_sums.
# This handles that case, assuming it will happen in other datasets.
if len(label_names) != len(label_sums):
logs.warning("Can't make a figure with the given label names: "
"We don't have the right amount of label types "
"to apply them to!")
return False
fig_labels = px.pie(names=label_names, values=label_sums)
except KeyError:
logs.info("Input label data missing required key(s).")
logs.info("We require %s, %s" % (LABEL_NAMES, LABEL_MEASUREMENT))
logs.info("We found: %s" % ",".join(label_results.keys()))
return False
return fig_labels
def extract_label_names(label_field, ds_name, config_name):
ds_name_to_dict = ds_utils.get_dataset_info_dicts(ds_name)
label_names = map_labels(label_field, ds_name_to_dict, ds_name, config_name)
return label_names
class DMTHelper:
"""Helper class for the Data Measurements Tool.
This allows us to keep all variables and functions related to labels
in one file.
"""
def __init__(self, dstats, load_only, save):
logs.info("Initializing labels.")
# -- Data Measurements Tool variables
self.label_results = dstats.label_results
self.fig_labels = dstats.fig_labels
self.use_cache = dstats.use_cache
self.cache_dir = dstats.dataset_cache_dir
self.load_only = load_only
self.save = save
# -- Hugging Face Dataset variables
self.label_field = dstats.label_field
# Input HuggingFace dataset
self.dset = dstats.dset
self.dset_name = dstats.dset_name
self.dset_config = dstats.dset_config
self.label_names = dstats.label_names
# -- Filenames
self.label_dir = "labels"
label_json = "labels.json"
label_fig_json = "labels_fig.json"
label_fig_html = "labels_fig.html"
self.labels_json_fid = pjoin(self.cache_dir, self.label_dir,
label_json)
self.labels_fig_json_fid = pjoin(self.cache_dir, self.label_dir,
label_fig_json)
self.labels_fig_html_fid = pjoin(self.cache_dir, self.label_dir,
label_fig_html)
def run_DMT_processing(self):
"""
Loads or prepares the Labels measurements and figure as specified by
the DMT options.
"""
# First look to see what we can load from cache.
if self.use_cache:
logs.info("Trying to load labels.")
self.fig_labels, self.label_results = self._load_label_cache()
if self.fig_labels:
logs.info("Loaded cached label figure.")
if self.label_results:
logs.info("Loaded cached label results.")
# If we can prepare the results afresh...
if not self.load_only:
# If we didn't load them already, compute label statistics.
if not self.label_results:
logs.info("Preparing labels.")
self.label_results = self._prepare_labels()
# If we didn't load it already, create figure.
if not self.fig_labels:
logs.info("Creating label figure.")
self.fig_labels = \
make_label_fig(self.label_results)
# Finish
if self.save:
self._write_label_cache()
def _load_label_cache(self):
fig_labels = {}
label_results = {}
# Measurements exist. Load them.
if exists(self.labels_json_fid):
# Loads the label list, names, and results
label_results = ds_utils.read_json(self.labels_json_fid)
# Image exists. Load it.
if exists(self.labels_fig_json_fid):
fig_labels = ds_utils.read_plotly(self.labels_fig_json_fid)
return fig_labels, label_results
def _prepare_labels(self):
"""Loads a Labels object and computes label statistics"""
# Label object for the dataset
label_obj = Labels(dataset=self.dset,
dataset_name=self.dset_name,
config_name=self.dset_config)
# TODO: Handle the case where there are multiple label columns.
# The logic throughout the code assumes only one.
if type(self.label_field) == tuple:
label_field = self.label_field[0]
elif type(self.label_field) == str:
label_field = self.label_field
else:
logs.warning("Unexpected format %s for label column name(s). "
"Not computing label statistics." %
type(self.label_field))
return {}
label_results = label_obj.prepare_labels(label_field, self.label_names)
return label_results
def _write_label_cache(self):
ds_utils.make_path(pjoin(self.cache_dir, self.label_dir))
if self.label_results:
ds_utils.write_json(self.label_results, self.labels_json_fid)
if self.fig_labels:
ds_utils.write_plotly(self.fig_labels, self.labels_fig_json_fid)
self.fig_labels.write_html(self.labels_fig_html_fid)
def get_label_filenames(self):
label_fid_dict = {"statistics": self.labels_json_fid,
"figure json": self.labels_fig_json_fid,
"figure html": self.labels_fig_html_fid}
return label_fid_dict
class Labels:
"""Generic class for label processing.
Uses the Dataset to extract the label column and compute label measurements.
"""
def __init__(self, dataset, dataset_name=None, config_name=None):
# Input HuggingFace Dataset.
self.dset = dataset
# These are used to extract label names, when the label names
# are stored in the Dataset object but not in the "label" column
# we are working with, which may instead just be ints corresponding to
# the names
self.ds_name = dataset_name
self.config_name = config_name
# For measurement data and additional metadata.
self.label_results_dict = {}
def prepare_labels(self, label_field, label_names=[]):
""" Uses the evaluate library to return the label distribution. """
logs.info("Inside main label calculation function.")
logs.debug("Looking for label field called '%s'" % label_field)
        # The input Dataset object.
        # If the label field is not found, log a warning and return nothing.
if label_field in self.dset.features:
label_list = self.dset[label_field]
else:
logs.warning("No label column found -- nothing to do. Returning.")
logs.debug(self.dset.features)
return {}
# Get the evaluate library's measurement for label distro.
label_distribution = evaluate.load(EVAL_LABEL_MEASURE)
# Measure the label distro.
label_measurement = label_distribution.compute(data=label_list)
# TODO: Incorporate this summation into what the evaluate library returns.
label_sum_dict = Counter(label_list)
label_sums = [label_sum_dict[key] for key in sorted(label_sum_dict)]
label_measurement["sums"] = label_sums
if not label_names:
# Have to extract the label names from the Dataset object when the
# actual dataset columns are just ints representing the label names.
label_names = extract_label_names(label_field, self.ds_name,
self.config_name)
label_results = make_label_results_dict(label_measurement, label_names)
return label_results
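
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the DMT pipeline): the label measurement
# flow above, on a toy label list with made-up label names. It assumes the
# `evaluate` "label_distribution" measurement behaves as it is used in
# Labels.prepare_labels; running it downloads the measurement script.
if __name__ == "__main__":
    toy_labels = [0, 0, 1, 1, 1, 2]
    toy_measurement = evaluate.load(EVAL_LABEL_MEASURE).compute(data=toy_labels)
    # Add per-label sums, mirroring Labels.prepare_labels above.
    toy_sums = Counter(toy_labels)
    toy_measurement[EVAL_LABEL_SUM] = [toy_sums[key] for key in sorted(toy_sums)]
    print(make_label_results_dict(toy_measurement, ["neg", "neutral", "pos"]))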
| data-measurements-tool-main | data_measurements/labels/labels.py |
data-measurements-tool-main | data_measurements/labels/__init__.py |
|
import logging
import pandas as pd
from datasets import load_metric
from os.path import exists
from os.path import join as pjoin
import utils
from utils import dataset_utils as ds_utils
logs = utils.prepare_logging(__file__)
TOK_MODEL = "gpt2"
PERPLEXITY = load_metric("perplexity")
PERPLEXITY_FIELD = "perplexity"
class DMTHelper:
def __init__(self, dstats, load_only=False):
self.dstats = dstats
self.load_only = load_only
self.results_dict = {}
# Where in the Dataset object to find the text for the calculation
self.text_field = ds_utils.OUR_TEXT_FIELD
# Results in dataframe form
self.df = None
# Cache file
self.perplexities_df_fid = pjoin(self.dstats.dataset_cache_dir,
"perplexities_df.json")
def run_DMT_processing(self):
if self.dstats.use_cache and exists(self.perplexities_df_fid):
self.df = ds_utils.read_df(self.perplexities_df_fid)
elif not self.load_only:
self.prepare_text_perplexities()
if self.dstats.save:
ds_utils.write_df(self.df, self.perplexities_df_fid)
def prepare_text_perplexities(self):
texts = self.dstats.text_dset[self.text_field]
eval_results = PERPLEXITY.compute(input_texts=texts, model_id=TOK_MODEL)
# TODO: What other stuff might be useful to grab?
self.results_dict = {PERPLEXITY_FIELD: eval_results["perplexities"],
self.text_field: self.dstats.text_dset[self.text_field]}
self.df = pd.DataFrame(self.results_dict).sort_values(
by=PERPLEXITY_FIELD, ascending=False)
def get_df(self):
return self.df
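
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the DMT pipeline): the perplexity call
# wrapped by DMTHelper.prepare_text_perplexities above, on two made-up
# sentences. It mirrors the PERPLEXITY.compute(...) usage in this module;
# running it downloads the gpt2 model, so treat it as a usage example
# rather than a test.
if __name__ == "__main__":
    toy_texts = ["The cat sat on the mat.",
                 "Colorless green ideas sleep furiously."]
    toy_results = PERPLEXITY.compute(input_texts=toy_texts, model_id=TOK_MODEL)
    toy_df = pd.DataFrame(
        {PERPLEXITY_FIELD: toy_results["perplexities"], "text": toy_texts}
    ).sort_values(by=PERPLEXITY_FIELD, ascending=False)
    print(toy_df)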
| data-measurements-tool-main | data_measurements/perplexity/perplexity.py |
data-measurements-tool-main | data_measurements/perplexity/__init__.py |
|
data-measurements-tool-main | data_measurements/zipf/__init__.py |
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import numpy as np
import os
import pandas as pd
import plotly.graph_objects as go
import powerlaw
from os.path import join as pjoin
import utils
from scipy.stats import ks_2samp
from scipy.stats import zipf as zipf_lib
# treating inf values as NaN as well
pd.set_option("use_inf_as_na", True)
logs = utils.prepare_logging(__file__)
class Zipf:
def __init__(self, vocab_counts_df, count_str="count",
proportion_str="prop"):
self.vocab_counts_df = vocab_counts_df
# Strings used in the input dictionary
self.cnt_str = count_str
self.prop_str = proportion_str
self.alpha = None
self.xmin = None
self.xmax = None
self.p = None
self.ks_distance = None
self.observed_counts = None
self.word_counts_unique = None
self.word_ranks_unique = None
if self.vocab_counts_df is not None:
self.observed_counts = self.vocab_counts_df[self.cnt_str].values
self.word_counts_unique = list(set(self.observed_counts))
self.word_ranks_unique = list(
np.arange(1, len(self.word_counts_unique) + 1))
self.zipf_dict = {"xmin": None, "xmax": None, "alpha": None,
"ks_distance": None, "p-value": None,
"word_ranks_unique": self.word_ranks_unique,
"word_counts_unique": self.word_counts_unique}
self.fit = None
self.predicted_counts = None
def load(self, zipf_dict):
self.zipf_dict = zipf_dict
self.xmin = zipf_dict["xmin"]
self.xmax = zipf_dict["xmax"]
self.alpha = zipf_dict["alpha"]
self.ks_distance = zipf_dict["ks_distance"]
self.p = zipf_dict["p-value"]
self.word_ranks_unique = zipf_dict["word_ranks_unique"]
self.word_counts_unique = zipf_dict["word_counts_unique"]
def get_zipf_dict(self):
zipf_dict = {"xmin": int(self.xmin), "xmax": int(self.xmax),
"alpha": float(self.alpha),
"ks_distance": float(self.ks_distance),
"p-value": float(self.ks_test.pvalue),
"word_counts_unique": [int(count) for count in
self.word_counts_unique],
"word_ranks_unique": [int(rank) for rank in
self.word_ranks_unique]}
return zipf_dict
def calc_fit(self):
"""
Uses the powerlaw package to fit the observed frequencies
to a zipfian distribution.
        We use the KS-distance to fit, as that seems more appropriate than MLE.
"""
logs.info("Fitting based on input vocab counts.")
self._make_rank_column()
# Note another method for determining alpha might be defined by
# (Newman, 2005): alpha = 1 + n * sum(ln( xi / xmin )) ^ -1
self.fit = powerlaw.Fit(self.observed_counts, fit_method="KS",
discrete=True)
# This should probably be a pmf (not pdf); using discrete=True above.
# original_data=False uses only the fitted data (within xmin and xmax).
# pdf_bin_edges: The portion of the data within the bin.
# observed_pdf: The probability density function (normalized histogram)
# of the data.
pdf_bin_edges, observed_pdf = self.fit.pdf(original_data=False)
# See the 'Distribution' class described here for info:
# https://pythonhosted.org/powerlaw/#powerlaw.Fit.pdf
theoretical_distro = self.fit.power_law
# The probability density function (normalized histogram) of the
# theoretical distribution.
predicted_pdf = theoretical_distro.pdf()
self._set_fit_vars(observed_pdf, predicted_pdf, theoretical_distro)
def _set_fit_vars(self, observed_pdf, predicted_pdf, theoretical_distro):
# !!!! CRITICAL VALUE FOR ZIPF !!!!
self.alpha = theoretical_distro.alpha
# Exclusive xmin: The optimal xmin *beyond which* the scaling regime of
# the power law fits best.
self.xmin = int(theoretical_distro.xmin)
self.xmax = theoretical_distro.xmax
# Can be None if there isn't an xmax returned;
# this handles that.
self._set_xmax()
self.ks_distance = theoretical_distro.KS()
self.ks_test = ks_2samp(observed_pdf, predicted_pdf)
self.p = self.ks_test[1]
logs.info("KS test:")
logs.info(self.ks_test)
self.predicted_counts = self._calc_zipf_counts()
def _make_rank_column(self):
# TODO: These proportions may have already been calculated.
prop_denom = float(sum(self.vocab_counts_df[self.cnt_str]))
count_prop = self.vocab_counts_df[self.cnt_str] / prop_denom
self.vocab_counts_df[self.prop_str] = count_prop
rank_column = self.vocab_counts_df[self.cnt_str].rank(
method="dense", numeric_only=True, ascending=False
)
self.vocab_counts_df["rank"] = rank_column.astype("int64")
def _calc_zipf_counts(self):
"""
The fit is based on an optimal xmin (minimum rank)
Let's use this to make count estimates for the zipf fit,
by multiplying the fitted pmf value by the sum of counts above xmin.
:return: array of count values following the fitted pmf.
"""
logs.info("Getting predicted counts.")
if not self.alpha:
logs.warning("Have not yet fit -- need the alpha value.")
logs.warning("Fitting now...")
self.calc_fit()
logs.info(self.word_counts_unique)
logs.info(self.xmin)
logs.info(self.xmax)
# The subset of words that fit
word_counts_fit_unique = self.word_counts_unique[
self.xmin + 1: self.xmax]
pmf_mass = float(sum(word_counts_fit_unique))
zipf_counts = np.array(
[self._estimate_count(rank, pmf_mass) for rank in
self.word_ranks_unique]
)
return zipf_counts
def _estimate_count(self, rank, pmf_mass):
return int(round(zipf_lib.pmf(rank, self.alpha) * pmf_mass))
def _set_xmax(self):
"""
xmax is usually None, so we add some handling to set it as the
maximum rank in the dataset.
        :return: None; sets self.xmax in place.
        """
        if self.xmax is not None:
            self.xmax = int(self.xmax)
elif self.word_counts_unique:
self.xmax = int(len(self.word_counts_unique))
elif self.word_ranks_unique:
self.xmax = int(len(self.word_ranks_unique))
# TODO: This might fit better in its own file handling class?
def get_zipf_fids(cache_path):
zipf_cache_dir = pjoin(cache_path, "zipf")
os.makedirs(zipf_cache_dir, exist_ok=True)
# Zipf cache files
zipf_fid = pjoin(zipf_cache_dir, "zipf_basic_stats.json")
zipf_fig_fid = pjoin(zipf_cache_dir, "zipf_fig.json")
zipf_fig_html_fid = pjoin(zipf_cache_dir, "zipf_fig.html")
return zipf_fid, zipf_fig_fid, zipf_fig_html_fid
def make_unique_rank_word_list(z):
"""
Function to help with the figure, creating strings for the hovertext.
"""
ranked_words = {}
word_counts = z.word_counts_unique
word_ranks = z.word_ranks_unique
for count, rank in zip(word_counts, word_ranks):
        z.vocab_counts_df.loc[z.vocab_counts_df[z.cnt_str] == count, "rank"] = rank
ranked_words[rank] = ",".join(
z.vocab_counts_df[
z.vocab_counts_df[z.cnt_str] == count].index.astype(str)
) # Use the hovertext kw argument for hover text
ranked_words_list = [wrds for rank, wrds in
sorted(ranked_words.items())]
return ranked_words_list
def make_zipf_fig(z):
xmin = z.xmin
word_ranks_unique = z.word_ranks_unique
observed_counts = z.observed_counts
    zipf_counts = z.predicted_counts
ranked_words_list = make_unique_rank_word_list(z)
layout = go.Layout(xaxis=dict(range=[0, 100]))
fig = go.Figure(
data=[
go.Bar(
x=word_ranks_unique,
y=observed_counts,
hovertext=ranked_words_list,
name="Word Rank Frequency",
)
],
layout=layout,
)
fig.add_trace(
go.Scatter(
x=word_ranks_unique[xmin: len(word_ranks_unique)],
y=zipf_counts[xmin: len(word_ranks_unique)],
hovertext=ranked_words_list[xmin: len(word_ranks_unique)],
line=go.scatter.Line(color="crimson", width=3),
name="Zipf Predicted Frequency",
)
)
# Customize aspect
# fig.update_traces(marker_color='limegreen',
# marker_line_width=1.5, opacity=0.6)
fig.update_layout(
title_text="Word Counts, Observed and Predicted by Zipf")
fig.update_layout(xaxis_title="Word Rank")
fig.update_layout(yaxis_title="Frequency")
fig.update_layout(
legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.10))
return fig
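
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the DMT pipeline): the fitting idea used
# in Zipf.calc_fit, on synthetic counts drawn from scipy's zipf distribution.
# The sample size and the a=1.3 parameter are arbitrary choices for the
# example; the fitted alpha / xmin values will vary with them.
if __name__ == "__main__":
    synthetic_counts = zipf_lib.rvs(1.3, size=2000, random_state=0)
    demo_fit = powerlaw.Fit(synthetic_counts, fit_method="KS", discrete=True)
    demo_alpha = demo_fit.power_law.alpha
    demo_xmin = int(demo_fit.power_law.xmin)
    demo_ks = demo_fit.power_law.KS()
    print("alpha=%.2f, xmin=%s, KS distance=%.3f"
          % (demo_alpha, demo_xmin, demo_ks))
    # Estimate the count at rank 1 from the fitted pmf, scaled by the total
    # mass, mirroring Zipf._estimate_count above.
    demo_mass = float(synthetic_counts.sum())
    print("predicted count at rank 1:",
          int(round(zipf_lib.pmf(1, demo_alpha) * demo_mass)))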
| data-measurements-tool-main | data_measurements/zipf/zipf.py |
import gradio as gr
from widgets.widget_base import Widget
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
import utils
import utils.dataset_utils as ds_utils
logs = utils.prepare_logging(__file__)
class Duplicates(Widget):
def __init__(self):
duplicates_text = f"""
Use this widget to identify text strings that appear more than once.
A model's training and testing may be negatively affected by unwarranted duplicates ([Lee et al., 2021](https://arxiv.org/abs/2107.06499))
------
### Here is the list of all the duplicated items and their counts in the dataset.
"""
self.duplicates_intro = gr.Markdown(render=False, value=duplicates_text)
self.duplicates_df = gr.DataFrame(render=False)
self.duplicates_text = gr.Markdown(render=False)
def render(self):
with gr.TabItem(f"Duplicates"):
self.duplicates_intro.render()
self.duplicates_text.render()
self.duplicates_df.render()
def update(self, dstats: dmt_cls):
output = {}
if not dstats.duplicates_results:
output[self.duplicates_df] = gr.DataFrame.update(visible=False)
output[self.duplicates_text] = gr.Markdown.update(visible=True,
value="There are no duplicates in this dataset! 🥳")
else:
dupes_df_tmp = ds_utils.counter_dict_to_df(dstats.dups_dict, key_as_column=True)
dupes_df_tmp.columns = ["instance", "count"]
# Nice to have the counts show up first, because the instances
# can be quite long (and run off the page)
dupes_df = dupes_df_tmp[["count", "instance"]]
output[self.duplicates_df] = gr.DataFrame.update(visible=True, value=dupes_df)
duplicates_text = f"The fraction of data that is duplicate is {str(round(dstats.dups_frac, 4))}"
output[self.duplicates_text] = gr.Markdown.update(value=duplicates_text, visible=True)
return output
@property
def output_components(self):
return [
self.duplicates_text,
self.duplicates_df,
]
def add_events(self, state: gr.State):
pass | data-measurements-tool-main | widgets/duplicates.py |
import gradio as gr
import pandas as pd
from widgets.widget_base import Widget
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
import utils
logs = utils.prepare_logging(__file__)
class GeneralStats(Widget):
def __init__(self):
self.general_stats = gr.Markdown(render=False)
self.general_stats_top_vocab = gr.DataFrame(render=False)
self.general_stats_missing = gr.Markdown(render=False)
self.general_stats_duplicates = gr.Markdown(render=False)
def render(self):
with gr.TabItem(f"General Text Statistics"):
self.general_stats.render()
self.general_stats_missing.render()
self.general_stats_duplicates.render()
self.general_stats_top_vocab.render()
def update(self, dstats: dmt_cls):
general_stats_text = f"""
Use this widget to check whether the terms you see most represented in the dataset make sense for the goals of the dataset.
There are {str(dstats.total_words)} total words.
        There are {dstats.total_open_words} words left after removing closed class words.
The most common [open class words](https://dictionary.apa.org/open-class-words) and their counts are:
"""
top_vocab = pd.DataFrame(dstats.sorted_top_vocab_df).round(4)
missing_text = (
f"There are {dstats.text_nan_count} missing values in the dataset"
)
if dstats.dups_frac > 0:
dupes_text = f"The dataset is {round(dstats.dups_frac * 100, 2)}% duplicates, For more information about the duplicates, click the 'Duplicates' tab."
else:
dupes_text = "There are 0 duplicate items in the dataset"
return {
self.general_stats: general_stats_text,
self.general_stats_top_vocab: top_vocab,
self.general_stats_missing: missing_text,
self.general_stats_duplicates: dupes_text,
}
@property
def output_components(self):
return [
self.general_stats,
self.general_stats_top_vocab,
self.general_stats_missing,
self.general_stats_duplicates,
]
def add_events(self, state: gr.State):
pass
| data-measurements-tool-main | widgets/general_stats.py |
import gradio as gr
from widgets.widget_base import Widget
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
import utils
logs = utils.prepare_logging(__file__)
class TextLengths(Widget):
def __init__(self):
self.text_length_distribution_plot = gr.Image(render=False)
self.text_length_explainer = gr.Markdown(render=False)
self.text_length_drop_down = gr.Dropdown(render=False)
self.text_length_df = gr.DataFrame(render=False)
def update_text_length_df(self, length, dstats):
return dstats.length_obj.lengths_df[
dstats.length_obj.lengths_df["length"] == length
].set_index("length")
def render(self):
with gr.TabItem("Text Lengths"):
gr.Markdown(
"Use this widget to identify outliers, particularly suspiciously long outliers."
)
gr.Markdown(
"Below, you can see how the lengths of the text instances in your "
"dataset are distributed."
)
gr.Markdown(
"Any unexpected peaks or valleys in the distribution may help to "
"identify instances you want to remove or augment."
)
gr.Markdown(
"### Here is the count of different text lengths in " "your dataset:"
)
            # When matplotlib first creates this, it's a Figure.
            # Once it's saved and read back in, it's an ndarray that must be
            # displayed with the gr.Image component.
self.text_length_distribution_plot.render()
self.text_length_explainer.render()
self.text_length_drop_down.render()
self.text_length_df.render()
def update(self, dstats: dmt_cls):
explainer_text = (
"The average length of text instances is **"
+ str(round(dstats.length_obj.avg_length, 2))
+ " words**, with a standard deviation of **"
+ str(round(dstats.length_obj.std_length, 2))
+ "**."
)
# TODO: Add text on choosing the length you want to the dropdown.
output = {
self.text_length_distribution_plot: dstats.length_obj.fig_lengths,
self.text_length_explainer: explainer_text,
}
if dstats.length_obj.lengths_df is not None:
import numpy as np
choices = np.sort(dstats.length_obj.lengths_df["length"].unique())[
::-1
].tolist()
output[self.text_length_drop_down] = gr.Dropdown.update(
choices=choices, value=choices[0]
)
output[self.text_length_df] = self.update_text_length_df(choices[0], dstats)
else:
output[self.text_length_df] = gr.update(visible=False)
output[self.text_length_drop_down] = gr.update(visible=False)
return output
@property
def output_components(self):
return [
self.text_length_distribution_plot,
self.text_length_explainer,
self.text_length_drop_down,
self.text_length_df,
]
def add_events(self, state: gr.State):
self.text_length_drop_down.change(
self.update_text_length_df,
inputs=[self.text_length_drop_down, state],
outputs=[self.text_length_df],
)
| data-measurements-tool-main | widgets/text_lengths.py |
from widgets.widget_base import Widget
from widgets.dataset_description import DatasetDescription
from widgets.general_stats import GeneralStats
from widgets.label_distribution import LabelDistribution
from widgets.npmi import Npmi
from widgets.text_lengths import TextLengths
from widgets.zipf import Zipf
from widgets.duplicates import Duplicates | data-measurements-tool-main | widgets/__init__.py |
from abc import ABC, abstractmethod
import gradio as gr
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
class Widget(ABC):
@abstractmethod
def render(self):
pass
@abstractmethod
def update(self, dstats: dmt_cls):
pass
@property
@abstractmethod
def output_components(self):
pass
@abstractmethod
def add_events(self, state: gr.State):
pass
| data-measurements-tool-main | widgets/widget_base.py |
import gradio as gr
import pandas as pd
from widgets.widget_base import Widget
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
from utils.dataset_utils import HF_DESC_FIELD
import utils
logs = utils.prepare_logging(__file__)
class DatasetDescription(Widget):
def __init__(self, dataset_name_to_dict):
self.dataset_name_to_dict = dataset_name_to_dict
self.description_markdown = gr.Markdown(render=False)
self.description_df = gr.DataFrame(render=False, wrap=True)
def render(self):
with gr.TabItem("Dataset Description",):
self.description_markdown.render()
self.description_df.render()
def update(self, dstats: dmt_cls):
return {
self.description_markdown: self.dataset_name_to_dict[dstats.dset_name][
dstats.dset_config
][HF_DESC_FIELD],
self.description_df: pd.DataFrame(dstats.dset_peek),
}
def add_events(self, state: gr.State):
pass
@property
def output_components(self):
return [self.description_markdown, self.description_df]
| data-measurements-tool-main | widgets/dataset_description.py |
import gradio as gr
import pandas as pd
from widgets.widget_base import Widget
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
import utils
logs = utils.prepare_logging(__file__)
class Npmi(Widget):
def __init__(self):
self.npmi_first_word = gr.Dropdown(
render=False, label="What is the first word you want to select?"
)
self.npmi_second_word = gr.Dropdown(
render=False, label="What is the second word you want to select?"
)
self.npmi_error_text = gr.Markdown(render=False)
self.npmi_df = gr.HTML(render=False)
self.sort = gr.Dropdown(label="Sort By Column", render=False)
self.npmi_empty_text = gr.Markdown(render=False)
self.npmi_description = gr.Markdown(render=False)
@property
def output_components(self):
return [
self.npmi_first_word,
self.npmi_second_word,
self.sort,
self.npmi_error_text,
self.npmi_df,
self.npmi_description,
self.npmi_empty_text,
]
def render(self):
with gr.TabItem("Word Association: nPMI"):
self.npmi_description.render()
self.npmi_first_word.render()
self.npmi_second_word.render()
self.sort.render()
self.npmi_df.render()
self.npmi_empty_text.render()
self.npmi_error_text.render()
def update(self, dstats: dmt_cls):
min_vocab = dstats.min_vocab_count
npmi_stats = dstats.npmi_obj
available_terms = npmi_stats.avail_identity_terms
output = {comp: gr.update(visible=False) for comp in self.output_components}
if npmi_stats and len(available_terms) > 0:
output[self.npmi_description] = gr.Markdown.update(
value=self.expander_npmi_description(min_vocab), visible=True
)
output[self.npmi_first_word] = gr.Dropdown.update(
choices=available_terms, value=available_terms[0], visible=True
)
output[self.npmi_second_word] = gr.Dropdown.update(
choices=available_terms[::-1], value=available_terms[-1], visible=True
)
output[self.sort] = gr.Dropdown.update(choices=['bias', available_terms[0], available_terms[-1]],
value='bias')
output.update(
self.npmi_show(available_terms[0], available_terms[-1], 'bias', dstats)
)
else:
output[self.npmi_error_text] = gr.Markdown.update(
visible=True,
value="No words found co-occurring with both of the selected identity terms.",
)
return output
def npmi_show(self, term1, term2, sort_col, dstats):
npmi_stats = dstats.npmi_obj
paired_results = npmi_stats.get_display(term1, term2)
output = {}
if paired_results.empty:
output[self.npmi_empty_text] = gr.Markdown.update(
value="""No words that co-occur enough times for results! Or there's a 🐛.
Or we're still computing this one. 🤷""",
visible=True,
)
output[self.npmi_df] = gr.DataFrame.update(visible=False)
else:
output[self.npmi_empty_text] = gr.Markdown.update(visible=False)
logs.debug("Results to be shown in streamlit are")
logs.debug(paired_results)
s = pd.DataFrame(
paired_results.sort_values(sort_col, ascending=False)
)
s.index.name = "word"
s = s.reset_index().round(4)
bias_col = [col for col in s.columns if col != "word"]
            # Keep the dataframe from being crazy big.
            if s.shape[0] > 10000:
                # Filter on the first score column to keep the table manageable.
                bias_thres = max(abs(s[bias_col[0]].iloc[5000]),
                                 abs(s[bias_col[0]].iloc[-5000]))
                logs.info(f"filtering with bias threshold: {bias_thres}")
                s_filtered = s[s[bias_col[0]].abs() > bias_thres]
else:
s_filtered = s
out_df = (
s_filtered.style.background_gradient(subset=bias_col)
.format(formatter="{:,.3f}", subset=bias_col)
.set_properties(**{"text-align": "center", "width": "100em"})
.set_caption(
"nPMI scores between the selected identity terms and the words they both co-occur with"
)
)
output[self.npmi_df] = out_df.to_html()
return output
@staticmethod
def expander_npmi_description(min_vocab):
return f"""
Use this widget to identify problematic biases and stereotypes in
your data.
nPMI scores for a word help to identify potentially
problematic associations, ranked by how close the association is.
nPMI bias scores for paired words help to identify how word
        associations are skewed between the two selected identity terms
([Aka et al., 2021](https://arxiv.org/abs/2103.03417)).
You can select from gender and sexual orientation
identity terms that appear in the dataset at least {min_vocab} times.
The resulting ranked words are those that co-occur with both identity terms.
The more *positive* the score, the more associated the word is with
the first identity term.
The more *negative* the score, the more associated the word is with
the second identity term.
-----
"""
def update_sort_and_npmi(self, first_word, second_word, sort_col, dstats):
output = {self.sort: gr.Dropdown.update(choices=['bias', first_word, second_word],
value='bias')}
new_df = self.npmi_show(first_word, second_word, sort_col, dstats)
output.update(new_df)
return output
def add_events(self, state: gr.State):
self.npmi_first_word.change(
self.update_sort_and_npmi,
inputs=[self.npmi_first_word, self.npmi_second_word, self.sort, state],
outputs=[self.npmi_df, self.npmi_empty_text, self.sort],
)
self.npmi_second_word.change(
self.update_sort_and_npmi,
inputs=[self.npmi_first_word, self.npmi_second_word, self.sort, state],
outputs=[self.npmi_df, self.npmi_empty_text, self.sort],
)
self.sort.change(
self.npmi_show,
inputs=[self.npmi_first_word, self.npmi_second_word, self.sort, state],
outputs=[self.npmi_df, self.npmi_empty_text],
)
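
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the widget): how a styled HTML table like
# the one built in npmi_show can be produced from a toy dataframe with
# made-up scores. It assumes pandas >= 1.3 (for Styler.to_html) and that
# matplotlib is installed (required by background_gradient).
if __name__ == "__main__":
    toy = pd.DataFrame({"word": ["doctor", "nurse"], "bias": [0.21, -0.35]})
    toy_html = (
        toy.style.background_gradient(subset=["bias"])
        .format(formatter="{:,.3f}", subset=["bias"])
        .set_caption("Toy nPMI bias scores")
        .to_html()
    )
    print(toy_html[:300], "...")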
| data-measurements-tool-main | widgets/npmi.py |
import gradio as gr
import pandas as pd
from widgets.widget_base import Widget
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
import utils
logs = utils.prepare_logging(__file__)
class Zipf(Widget):
def __init__(self):
self.zipf_table = gr.DataFrame(render=False)
self.alpha_warning = gr.Markdown(
value="Your alpha value is a bit on the high side, which means that the distribution over words in this dataset is a bit unnatural. This could be due to non-language items throughout the dataset.",
render=False,
visible=False,
)
self.xmin_warning = gr.Markdown(
value="The minimum rank for this fit is a bit on the high side, which means that the frequencies of your most common words aren't distributed as would be expected by Zipf's law.",
render=False,
visible=False,
)
self.zipf_summary = gr.Markdown(render=False)
self.zipf_plot = gr.Plot(render=False)
def render(self):
with gr.TabItem("Vocabulary Distribution: Zipf's Law Fit"):
gr.Markdown(
"Use this widget for the counts of different words in your dataset, measuring the difference between the observed count and the expected count under Zipf's law."
)
gr.Markdown(
"""This shows how close the observed language is to an ideal
natural language distribution following [Zipf's law](https://en.wikipedia.org/wiki/Zipf%27s_law),
calculated by minimizing the [Kolmogorov-Smirnov (KS) statistic](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test)."""
)
gr.Markdown(
"""
A Zipfian distribution follows the power law: $p(x) \propto x^{-α}$ with an ideal α value of 1.
                In general, an alpha greater than 2 or a minimum rank greater than 10 (take with a grain of salt) means that your distribution is relatively _unnatural_ for natural language. This can be a sign of mixed artefacts in the dataset, such as HTML markup.
Below, you can see the counts of each word in your dataset vs. the expected number of counts following a Zipfian distribution.
-----
### Here is your dataset's Zipf results:
"""
)
self.zipf_table.render()
self.zipf_summary.render()
self.zipf_plot.render()
self.alpha_warning.render()
self.xmin_warning.render()
def update(self, dstats: dmt_cls):
z = dstats.z
zipf_fig = dstats.zipf_fig
zipf_summary = (
"The optimal alpha based on this dataset is: **"
+ str(round(z.alpha, 2))
+ "**, with a KS distance of: **"
+ str(round(z.ks_distance, 2))
)
zipf_summary += (
"**. This was fit with a minimum rank value of: **"
+ str(int(z.xmin))
+ "**, which is the optimal rank *beyond which* the scaling regime of the power law fits best."
)
fit_results_table = pd.DataFrame.from_dict(
{
r"Alpha:": [str("%.2f" % z.alpha)],
"KS distance:": [str("%.2f" % z.ks_distance)],
"Min rank:": [str("%s" % int(z.xmin))],
},
columns=["Results"],
orient="index",
)
fit_results_table.index.name = ""
output = {
self.zipf_table: fit_results_table,
self.zipf_summary: zipf_summary,
self.zipf_plot: zipf_fig,
self.alpha_warning: gr.Markdown.update(visible=False),
self.xmin_warning: gr.Markdown.update(visible=False),
}
if z.alpha > 2:
output[self.alpha_warning] = gr.Markdown.update(visible=True)
if z.xmin > 5:
output[self.xmin_warning] = gr.Markdown.update(visible=True)
return output
@property
def output_components(self):
return [
self.zipf_table,
self.zipf_plot,
self.zipf_summary,
self.alpha_warning,
self.xmin_warning,
]
def add_events(self, state: gr.State):
pass
| data-measurements-tool-main | widgets/zipf.py |
import gradio as gr
from widgets.widget_base import Widget
from data_measurements.dataset_statistics import DatasetStatisticsCacheClass as dmt_cls
import utils
logs = utils.prepare_logging(__file__)
class LabelDistribution(Widget):
def __init__(self):
self.label_dist_plot = gr.Plot(render=False, visible=False)
self.label_dist_no_label_text = gr.Markdown(
value="No labels were found in the dataset", render=False, visible=False
)
self.label_dist_accordion = gr.Accordion(render=False, label="", open=False)
def render(self):
with gr.TabItem(label="Label Distribution"):
gr.Markdown(
"Use this widget to see how balanced the labels in your dataset are."
)
self.label_dist_plot.render()
self.label_dist_no_label_text.render()
def update(self, dstats: dmt_cls):
logs.info(f"FIGS labels: {bool(dstats.fig_labels)}")
if dstats.fig_labels:
output = {
self.label_dist_plot: gr.Plot.update(
value=dstats.fig_labels, visible=True
),
self.label_dist_no_label_text: gr.Markdown.update(visible=False),
}
else:
output = {
self.label_dist_plot: gr.Plot.update(visible=False),
self.label_dist_no_label_text: gr.Markdown.update(visible=True),
}
return output
@property
def output_components(self):
return [self.label_dist_plot, self.label_dist_no_label_text]
def add_events(self, state: gr.State):
pass
| data-measurements-tool-main | widgets/label_distribution.py |
import argparse
import os
import re
import nbformat
import shutil
import yaml
from pathlib import Path
re_framework_test = re.compile(r"^{#if\s+fw\s+===\s+'([^']+)'}\s*$")
re_framework_else = re.compile(r"^{:else}\s*$")
re_framework_end = re.compile(r"^{/if}\s*$")
re_html_line = re.compile(r"^<[^>]*/>\s*$")
re_html_tag = re.compile(r"<([^/>]*)>\s*$")
re_python_code = re.compile(r"^```(?:py|python|py no\-format|python no\-format)\s*$")
re_output_code = re.compile(r"^```(?:py|python)\s+out\s*$")
re_end_code = re.compile(r"^```\s*$")
frameworks = {"pt": "PyTorch", "tf": "TensorFlow"}
PATH_TO_COURSE = Path("chapters/")
# Languages to exclude from the notebook generation because their notebooks
# were created manually
LANGS_TO_EXCLUDE = ["fr"]
def read_and_split_frameworks(fname):
"""
    Read the MDX in fname and create two versions (one per framework) if necessary.
"""
with open(fname, "r") as f:
content = f.readlines()
contents = {"pt": [], "tf": []}
differences = False
current_content = []
line_idx = 0
for line in content:
if re_framework_test.search(line) is not None:
differences = True
framework = re_framework_test.search(line).groups()[0]
for key in contents:
contents[key].extend(current_content)
current_content = []
elif re_framework_else.search(line) is not None:
contents[framework].extend(current_content)
current_content = []
framework = "pt" if framework == "tf" else "tf"
elif re_framework_end.search(line) is not None:
contents[framework].extend(current_content)
current_content = []
else:
current_content.append(line)
if len(current_content) > 0:
for key in contents:
contents[key].extend(current_content)
if differences:
return {k: "".join(content) for k, content in contents.items()}
else:
return "".join(content)
def extract_cells(content):
"""
Extract the code/output cells from content.
"""
cells = []
current_cell = None
is_output = False
for line in content.split("\n"):
if re_python_code.search(line) is not None:
is_output = False
current_cell = []
elif re_output_code.search(line) is not None:
is_output = True
current_cell = []
elif re_end_code.search(line) is not None and current_cell is not None:
cell = "\n".join(current_cell)
if is_output:
if not isinstance(cells[-1], tuple):
cells[-1] = (cells[-1], cell)
else:
cells.append(cell)
current_cell = None
current_md = []
elif current_cell is not None:
current_cell.append(line)
return cells
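
# Illustrative sketch (not used by the build): what the two parsers above
# expect as input. The MDX snippet is made up; it shows the
# {#if fw === 'pt'} / {:else} / {/if} framework markers and a ```py code
# block, and how they are split into per-framework code cells.
def _demo_parse_mdx(tmp_path="demo_section.mdx"):
    demo_mdx = (
        "Some shared text.\n"
        "{#if fw === 'pt'}\n"
        "```py\n"
        "import torch\n"
        "```\n"
        "{:else}\n"
        "```py\n"
        "import tensorflow as tf\n"
        "```\n"
        "{/if}\n"
    )
    with open(tmp_path, "w") as f:
        f.write(demo_mdx)
    sections = read_and_split_frameworks(tmp_path)
    os.remove(tmp_path)
    # Returns {"pt": ["import torch"], "tf": ["import tensorflow as tf"]}
    return {fw: extract_cells(content) for fw, content in sections.items()}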
def convert_to_nb_cell(cell):
"""
Convert some cell (either just code or tuple (code, output)) to a proper notebook cell.
"""
nb_cell = {"cell_type": "code", "execution_count": None, "metadata": {}}
if isinstance(cell, tuple):
nb_cell["source"] = cell[0]
nb_cell["outputs"] = [
nbformat.notebooknode.NotebookNode(
{
"data": {"text/plain": cell[1]},
"execution_count": None,
"metadata": {},
"output_type": "execute_result",
}
)
]
else:
nb_cell["source"] = cell
nb_cell["outputs"] = []
return nbformat.notebooknode.NotebookNode(nb_cell)
def nb_cell(source, code=True):
if not code:
return nbformat.notebooknode.NotebookNode({"cell_type": "markdown", "source": source, "metadata": {}})
return nbformat.notebooknode.NotebookNode(
{"cell_type": "code", "metadata": {}, "source": source, "execution_count": None, "outputs": []}
)
def build_notebook(fname, title, output_dir="."):
"""
Build the notebook for fname with a given title in output_dir.
"""
sections = read_and_split_frameworks(fname)
sections_with_accelerate = [
"chapter3/4", # "A full training",
"chapter7/2_pt", # "Token classification (PyTorch)",
"chapter7/3_pt", # "Fine-tuning a masked language model (PyTorch)"
"chapter7/4_pt", # "Translation (PyTorch)"
"chapter7/5_pt", # "Summarization (PyTorch)",
"chapter7/6_pt", # "Training a causal language model from scratch (PyTorch)"
"chapter7/7_pt", # "Question answering (PyTorch)"
]
sections_with_hf_hub = [
"chapter4/3_pt", # "Sharing pretrained models (PyTorch)"
"chapter4/3_tf", # "Sharing pretrained models (TensorFlow)"
"chapter5/5", # "Creating your own dataset"
"chapter7/2_pt", # "Token classification (PyTorch)"
"chapter7/2_tf", # "Token classification (TensorFlow)"
"chapter6/2", # "Training a new tokenizer from an old one"
"chapter7/3_pt", # "Fine-tuning a masked language model (PyTorch)"
"chapter7/3_tf", # "Fine-tuning a masked language model (TensorFlow)"
"chapter7/4_pt", # "Translation (PyTorch)"
"chapter7/4_tf", # "Translation (TensorFlow)"
"chapter7/5_pt", # "Summarization (PyTorch)"
"chapter7/5_tf", # "Summarization (TensorFlow)"
"chapter7/6_pt", # "Training a causal language model from scratch (PyTorch)"
"chapter7/6_tf", # "Training a causal language model from scratch (TensorFlow)"
"chapter7/7_pt", # "Question answering (PyTorch)"
"chapter7/7_tf", # "Question answering (TensorFlow)"
"chapter8/2", # "What to do when you get an error"
]
sections_with_faiss = [
"chapter5/6_pt", # "Semantic search with FAISS (PyTorch)"
"chapter5/6_tf", # "Semantic search with FAISS (TensorFlow)"
]
sections_with_gradio = [
"chapter9/2", # "Building your first demo"
"chapter9/3", # "Understanding the Interface class"
"chapter9/4", # "Sharing demos with others"
"chapter9/5", # "Integrations with the Hugging Face Hub"
"chapter9/6", # "Advanced Interface features"
"chapter9/7", # "Introduction to Blocks"
]
stem = Path(fname).stem
if not isinstance(sections, dict):
contents = [sections]
titles = [title]
fnames = [f"section{stem}.ipynb"]
section_names = [f"{Path(fname).parent.stem}/{stem}"]
else:
contents = []
titles = []
fnames = []
section_names = []
for key, section in sections.items():
contents.append(section)
titles.append(f"{title} ({frameworks[key]})")
fnames.append(f"section{stem}_{key}.ipynb")
section_names.append(f"{Path(fname).parent.stem}/{stem}_{key}")
for title, content, fname, section_name in zip(titles, contents, fnames, section_names):
cells = extract_cells(content)
if len(cells) == 0:
continue
nb_cells = [
nb_cell(f"# {title}", code=False),
nb_cell("Install the Transformers, Datasets, and Evaluate libraries to run this notebook.", code=False),
]
# Install cell
installs = ["!pip install datasets evaluate transformers[sentencepiece]"]
if section_name in sections_with_accelerate:
installs.append("!pip install accelerate")
installs.append("# To run the training on TPU, you will need to uncomment the following line:")
installs.append(
"# !pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl"
)
if section_name in sections_with_hf_hub:
installs.append("!apt install git-lfs")
if section_name in sections_with_faiss:
installs.append("!pip install faiss-gpu")
if section_name in sections_with_gradio:
installs.append("!pip install gradio")
nb_cells.append(nb_cell("\n".join(installs)))
if section_name in sections_with_hf_hub:
nb_cells.extend(
[
nb_cell(
"You will need to setup git, adapt your email and name in the following cell.", code=False
),
nb_cell(
'!git config --global user.email "you@example.com"\n!git config --global user.name "Your Name"'
),
nb_cell(
"You will also need to be logged in to the Hugging Face Hub. Execute the following and enter your credentials.",
code=False,
),
nb_cell("from huggingface_hub import notebook_login\n\nnotebook_login()"),
]
)
nb_cells += [convert_to_nb_cell(cell) for cell in cells]
metadata = {"colab": {"name": title, "provenance": []}}
nb_dict = {"cells": nb_cells, "metadata": metadata, "nbformat": 4, "nbformat_minor": 4}
notebook = nbformat.notebooknode.NotebookNode(nb_dict)
os.makedirs(output_dir, exist_ok=True)
nbformat.write(notebook, os.path.join(output_dir, fname), version=4)
def get_titles(language):
"""
    Parse the _toctree.yml file to get the mapping from filename to title.
"""
table = yaml.safe_load(open(os.path.join(f"chapters/{language}", "_toctree.yml"), "r"))
result = {}
for entry in table:
for section in entry["sections"]:
section_title = section["title"]
if "local_fw" in section:
section_names = section["local_fw"]
result[section_names["pt"]] = section_title
result[section_names["tf"]] = section_title
else:
section_name = section["local"]
result[section_name] = section_title
return {k: v for k, v in result.items() if "quiz" not in v}
def create_notebooks(language, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for folder in os.listdir(output_dir):
if folder.startswith("chapter"):
shutil.rmtree(os.path.join(output_dir, folder))
titles = get_titles(language)
for fname, title in titles.items():
build_notebook(
os.path.join(f"chapters/{language}", f"{fname}.mdx"),
title,
os.path.join(output_dir, Path(fname).parent),
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, help="Where to output the notebooks")
args = parser.parse_args()
languages = [f.stem for f in PATH_TO_COURSE.iterdir() if f.is_dir()]
for language in languages:
if language in LANGS_TO_EXCLUDE:
continue
language_output_dir = f"{args.output_dir}/{language}"
create_notebooks(language, language_output_dir)
# Remove empty notebook folders
if not any(Path(language_output_dir).iterdir()):
shutil.rmtree(language_output_dir)
| course-main | utils/generate_notebooks.py |
import re
import argparse
from pathlib import Path
PATTERN_TIMESTAMP = re.compile(
"^[0-9][0-9]:[0-9][0-9]:[0-9][0-9],[0-9][0-9][0-9] --> [0-9][0-9]:[0-9][0-9]:[0-9][0-9],[0-9][0-9][0-9]"
)
PATTERN_NUM = re.compile("\\d+")
def convert(input_file, output_file):
"""
Convert bilingual caption file to monolingual caption. Supported caption file type is SRT.
"""
line_count = 0
with open(input_file) as file:
with open(output_file, "w") as output_file:
for line in file:
if line_count == 0:
line_count += 1
output_file.write(line)
elif PATTERN_TIMESTAMP.match(line):
line_count += 1
output_file.write(line)
elif line == "\n":
line_count = 0
output_file.write(line)
else:
if line_count == 2:
output_file.write(line)
line_count += 1
output_file.close()
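
# Illustrative sketch (not part of the CLI): convert() keeps only the first
# caption line after each timestamp and drops the rest. The toy SRT block
# below is made up to show that behaviour end to end.
def _demo_convert(tmp_in="demo_in.srt", tmp_out="demo_out.srt"):
    import os

    bilingual = (
        "1\n"
        "00:00:00,000 --> 00:00:02,000\n"
        "First caption line (kept)\n"
        "Second caption line (dropped)\n"
        "\n"
    )
    with open(tmp_in, "w") as f:
        f.write(bilingual)
    convert(tmp_in, tmp_out)
    with open(tmp_out) as f:
        converted = f.read()
    os.remove(tmp_in)
    os.remove(tmp_out)
    return converted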
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_language_folder", type=str, help="Folder with input bilingual SRT files to be converted"
)
parser.add_argument(
"--output_language_folder",
type=str,
default="tmp-subtitles",
help="Folder to store converted monolingual SRT files",
)
args = parser.parse_args()
output_path = Path(args.output_language_folder)
output_path.mkdir(parents=True, exist_ok=True)
    input_files = list(Path(args.input_language_folder).glob("*.srt"))
    for input_file in input_files:
        convert(input_file, output_path / input_file.name)
    print(f"Successfully converted {len(input_files)} files to {args.output_language_folder} folder")
| course-main | utils/convert_bilingual_monolingual.py |
import argparse
import black
import os
import re
from pathlib import Path
def blackify(filename, check_only=False):
# Read the content of the file
with open(filename, "r", encoding="utf-8") as f:
content = f.read()
lines = content.split("\n")
# Split the content into code samples in py or python blocks.
code_samples = []
line_index = 0
while line_index < len(lines):
line = lines[line_index]
if line.strip() in ["```py", "```python"]:
line_index += 1
start_index = line_index
while line_index < len(lines) and lines[line_index].strip() != "```":
line_index += 1
code = "\n".join(lines[start_index:line_index])
# Deal with ! instructions
code = re.sub(r"^!", r"## !", code, flags=re.MULTILINE)
code_samples.append({"start_index": start_index, "end_index": line_index - 1, "code": code})
line_index += 1
else:
line_index += 1
# Let's blackify the code! We put everything in one big text to go faster.
delimiter = "\n\n### New cell ###\n"
full_code = delimiter.join([sample["code"] for sample in code_samples])
formatted_code = full_code.replace("\t", " ")
formatted_code = black.format_str(formatted_code, mode=black.FileMode({black.TargetVersion.PY37}, line_length=90))
# Black adds last new lines we don't want, so we strip individual code samples.
cells = formatted_code.split(delimiter)
cells = [cell.strip() for cell in cells]
formatted_code = delimiter.join(cells)
if check_only:
return full_code == formatted_code
elif full_code == formatted_code:
# Nothing to do, all is good
return
formatted_code = re.sub(r"^## !", r"!", formatted_code, flags=re.MULTILINE)
print(f"Formatting {filename}")
# Re-build the content with formatted code
new_lines = []
start_index = 0
for sample, code in zip(code_samples, formatted_code.split(delimiter)):
new_lines.extend(lines[start_index : sample["start_index"]])
new_lines.append(code)
start_index = sample["end_index"] + 1
new_lines.extend(lines[start_index:])
with open(filename, "w", encoding="utf-8") as f:
f.write("\n".join(new_lines))
def format_all_files(check_only=False):
failures = []
for filename in Path("chapters").glob("**/*.mdx"):
try:
same = blackify(filename, check_only=check_only)
if check_only and not same:
failures.append(filename)
except Exception:
print(f"Failed to format {filename}.")
raise
if check_only and len(failures) > 0:
raise ValueError(f"{len(failures)} files need to be formatted, run `make style`.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--check_only",
action="store_true",
help="Just check files are properly formatted.",
)
args = parser.parse_args()
format_all_files(check_only=args.check_only)
| course-main | utils/code_formatter.py |
import pandas as pd
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import SRTFormatter
from youtubesearchpython import Playlist
from pathlib import Path
import argparse
COURSE_VIDEOS_PLAYLIST = "https://youtube.com/playlist?list=PLo2EIpI_JMQvWfQndUesu0nPBAtZ9gP1o"
TASK_VIDEOS_PLAYLIST = "https://youtube.com/playlist?list=PLo2EIpI_JMQtyEr-sLJSy5_SnLCb4vtQf"
# These videos are not part of the course, but are part of the task playlist
TASK_VIDEOS_TO_SKIP = ["tjAIM7BOYhw", "WdAeKSOpxhw", "KWwzcmG98Ds", "TksaY_FDgnk", "leNG9fN9FQU", "dKE8SIt9C-w"]
def generate_subtitles(language: str, youtube_language_code: str = None, is_task_playlist: bool = False):
metadata = []
formatter = SRTFormatter()
path = Path(f"subtitles/{language}")
path.mkdir(parents=True, exist_ok=True)
if is_task_playlist:
playlist_videos = Playlist.getVideos(TASK_VIDEOS_PLAYLIST)
else:
playlist_videos = Playlist.getVideos(COURSE_VIDEOS_PLAYLIST)
for idx, video in enumerate(playlist_videos["videos"]):
video_id = video["id"]
title = video["title"]
title_formatted = title.lower().replace(" ", "-").replace(":", "").replace("?", "")
id_str = f"{idx}".zfill(2)
if is_task_playlist:
srt_filename = f"{path}/tasks_{id_str}_{title_formatted}.srt"
else:
srt_filename = f"{path}/{id_str}_{title_formatted}.srt"
# Skip course events
if "Event Day" in title:
continue
# Skip task videos that don't belong to the course
if video_id in TASK_VIDEOS_TO_SKIP:
continue
# Get transcript
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
english_transcript = transcript_list.find_transcript(language_codes=["en", "en-US"])
languages = pd.DataFrame(english_transcript.translation_languages)["language_code"].tolist()
# Map mismatched language codes
if language not in languages:
if youtube_language_code is None:
raise ValueError(
f"Language code {language} not found in YouTube's list of supported language: {languages}. Please provide a value for `youtube_language_code` and try again."
)
language_code = youtube_language_code
else:
language_code = language
try:
translated_transcript = english_transcript.translate(language_code)
translated_transcript = translated_transcript.fetch()
srt_formatted = formatter.format_transcript(translated_transcript)
with open(srt_filename, "w", encoding="utf-8") as f:
f.write(srt_formatted)
        except Exception:
print(f"Problem generating transcript for {title} with ID {video_id} at {video['link']}.")
with open(srt_filename, "w", encoding="utf-8") as f:
f.write("No transcript found for this video!")
metadata.append({"id": video_id, "title": title, "link": video["link"], "srt_filename": srt_filename})
df = pd.DataFrame(metadata)
if is_task_playlist:
df.to_csv(f"{path}/metadata_tasks.csv", index=False)
else:
df.to_csv(f"{path}/metadata.csv", index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--language", type=str, help="Language to generate subtitles for")
parser.add_argument("--youtube_language_code", type=str, help="YouTube language code")
args = parser.parse_args()
generate_subtitles(args.language, args.youtube_language_code, is_task_playlist=False)
generate_subtitles(args.language, args.youtube_language_code, is_task_playlist=True)
print(f"All done! Subtitles stored at subtitles/{args.language}")
| course-main | utils/generate_subtitles.py |
import argparse
import os
import yaml
from pathlib import Path
PATH_TO_COURSE = Path("chapters/")
def load_sections(language: str):
toc = yaml.safe_load(open(os.path.join(PATH_TO_COURSE / language, "_toctree.yml"), "r"))
sections = []
for chapter in toc:
for section in chapter["sections"]:
sections.append(section["local"])
return set(sections)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--language", type=str, help="Translation language to validate")
args = parser.parse_args()
english_sections = load_sections("en")
translation_sections = load_sections(args.language)
missing_sections = sorted(english_sections.difference(translation_sections))
if len(missing_sections) > 0:
print("Completed sesions:\n")
for section in sorted(translation_sections):
print(section)
print("\nMissing sections:\n")
for section in missing_sections:
print(section)
else:
print("✅ No missing sections - translation complete!")
| course-main | utils/validate_translation.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors, the AllenNLP library authors.
# All rights reserved.
"""
Script to close stale issue. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
Copied from https://github.com/huggingface/transformers
"""
from datetime import datetime as dt
import os
from github import Github
# ^ PyGithub - https://pygithub.readthedocs.io/en/stable/introduction.html
LABELS_TO_EXEMPT_IN_LOWERCASE = [label.lower() for label in [
"P0",
"P1",
"P2"
]]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/datasets-server")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
now = dt.utcnow()
if (
(now - issue.created_at).days < 30
or any(label.name.lower() in LABELS_TO_EXEMPT_IN_LOWERCASE for label in issue.get_labels())
):
continue
comments = sorted(list(issue.get_comments()), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (now - issue.updated_at).days > 7
):
# close issue since it has been 7 days of inactivity since bot mention
issue.edit(state="closed")
elif (
(now - issue.updated_at).days > 23
):
#add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/datasets-server/blob/main/CONTRIBUTING.md) "
"are likely to be ignored."
)
if __name__ == "__main__":
main()
| datasets-server-main | tools/stale.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from typing import Optional
import pytest
from mongoengine import Document
from mongoengine.fields import StringField
from pymongo.errors import ServerSelectionTimeoutError
from libcommon.resources import (
CacheMongoResource,
MetricsMongoResource,
MongoConnectionFailure,
MongoResource,
QueueMongoResource,
)
def test_database_resource(queue_mongo_host: str) -> None:
database_1 = "datasets_server_1"
database_2 = "datasets_server_2"
host = queue_mongo_host
mongoengine_alias = "datasets_server_mongo_alias"
server_selection_timeout_ms = 5_000
resource_1 = MongoResource(
database=database_1,
host=host,
mongoengine_alias=mongoengine_alias,
server_selection_timeout_ms=server_selection_timeout_ms,
)
assert resource_1.is_available()
with pytest.raises(MongoConnectionFailure):
MongoResource(
database=database_2,
host=host,
mongoengine_alias=mongoengine_alias,
server_selection_timeout_ms=server_selection_timeout_ms,
)
resource_1.release()
resource_2 = MongoResource(
database=database_2,
host=host,
mongoengine_alias=mongoengine_alias,
server_selection_timeout_ms=server_selection_timeout_ms,
)
assert resource_2.is_available()
resource_2.release()
@pytest.mark.parametrize(
"host,mongoengine_alias,server_selection_timeout_ms,raises",
[
(None, "test_timeout_error", 5_000, False),
("mongodb://doesnotexist:123", "test_host_error", 5_000, True),
],
)
def test_database_resource_errors(
queue_mongo_host: str,
host: Optional[str],
mongoengine_alias: str,
server_selection_timeout_ms: int,
raises: bool,
) -> None:
if not host:
host = queue_mongo_host
database = "datasets_server_test"
resource = MongoResource(
database=database,
host=host,
mongoengine_alias=mongoengine_alias,
server_selection_timeout_ms=server_selection_timeout_ms,
)
# ^ this does not raise any issue, as it "only" registers the connection
class User(Document):
name = StringField()
meta = {"db_alias": mongoengine_alias}
if raises:
assert not resource.is_available()
with pytest.raises(ServerSelectionTimeoutError):
len(User.objects()) # type: ignore
else:
assert resource.is_available()
assert len(User.objects()) == 0 # type: ignore
# clean
User.drop_collection() # type: ignore
resource.release()
def test_cache_database(cache_mongo_host: str) -> None:
resource = CacheMongoResource(database="test_cache_database", host=cache_mongo_host)
class User(Document):
name = StringField()
meta = {"db_alias": resource.mongoengine_alias}
assert len(User.objects()) == 0 # type: ignore
# clean
User.drop_collection() # type: ignore
assert resource.is_available()
resource.release()
def test_queue_database(queue_mongo_host: str) -> None:
resource = QueueMongoResource(database="test_queue_database", host=queue_mongo_host)
class User(Document):
name = StringField()
meta = {"db_alias": resource.mongoengine_alias}
assert len(User.objects()) == 0 # type: ignore
# clean
User.drop_collection() # type: ignore
assert resource.is_available()
resource.release()
def test_metrics_database(metrics_mongo_host: str) -> None:
resource = MetricsMongoResource(database="test_metrics_database", host=metrics_mongo_host)
class User(Document):
name = StringField()
meta = {"db_alias": resource.mongoengine_alias}
assert len(User.objects()) == 0 # type: ignore
# clean
User.drop_collection() # type: ignore
assert resource.is_available()
resource.release()
| datasets-server-main | libs/libcommon/tests/test_resources.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import pytest
from libcommon.utils import inputs_to_string, is_image_url
@pytest.mark.parametrize(
"dataset,revision,config,split,prefix,expected",
[
("dataset", None, None, None, None, "dataset"),
("dataset", "revision", None, None, None, "dataset,revision"),
("dataset", "revision", "config", None, None, "dataset,revision,config"),
("dataset", "revision", None, "split", None, "dataset,revision"),
("dataset", "revision", "config", "split", None, "dataset,revision,config,split"),
("dataset", None, "config", "split", None, "dataset,config,split"),
("dataset", None, None, None, "prefix", "prefix,dataset"),
("dataset", "revision", "config", "split", "prefix", "prefix,dataset,revision,config,split"),
],
)
def test_inputs_to_string(dataset: str, revision: str, config: str, split: str, prefix: str, expected: str) -> None:
result = inputs_to_string(dataset=dataset, revision=revision, config=config, split=split, prefix=prefix)
assert result == expected
@pytest.mark.parametrize(
"text,expected",
[
("Some text", False),
("http://test", False),
("http://test/file.png", True),
("https://test/file.jpg", True),
],
)
def test_is_image_url(text: str, expected: bool) -> None:
assert is_image_url(text=text) == expected
| datasets-server-main | libs/libcommon/tests/test_utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Iterator
from pathlib import Path
from environs import Env
from pytest import fixture
from libcommon.queue import _clean_queue_database
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import _clean_cache_database
from libcommon.storage import StrPath, init_cached_assets_dir
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.datasets"]
@fixture(scope="session")
def env() -> Env:
return Env(expand_vars=True)
@fixture(scope="session")
def cache_mongo_host(env: Env) -> str:
try:
url = env.str(name="CACHE_MONGO_URL")
if type(url) is not str:
raise ValueError("CACHE_MONGO_URL is not set")
return url
except Exception as e:
raise ValueError("CACHE_MONGO_URL is not set") from e
@fixture(scope="session")
def queue_mongo_host(env: Env) -> str:
try:
url = env.str(name="QUEUE_MONGO_URL")
if type(url) is not str:
raise ValueError("QUEUE_MONGO_URL is not set")
return url
except Exception as e:
raise ValueError("QUEUE_MONGO_URL is not set") from e
@fixture(scope="session")
def metrics_mongo_host(env: Env) -> str:
try:
url = env.str(name="METRICS_MONGO_URL")
if type(url) is not str:
raise ValueError("METRICS_MONGO_URL is not set")
return url
except Exception as e:
raise ValueError("METRICS_MONGO_URL is not set") from e
@fixture
def cached_assets_directory(tmp_path: Path) -> StrPath:
cached_assets_directory = tmp_path / "cached-assets"
return init_cached_assets_dir(cached_assets_directory)
@fixture
def queue_mongo_resource(queue_mongo_host: str) -> Iterator[QueueMongoResource]:
database = "datasets_server_queue_test"
host = queue_mongo_host
if "test" not in database:
raise ValueError("Test must be launched on a test mongo database")
with QueueMongoResource(database=database, host=host, server_selection_timeout_ms=3_000) as queue_mongo_resource:
if not queue_mongo_resource.is_available():
raise RuntimeError("Mongo resource is not available")
yield queue_mongo_resource
_clean_queue_database()
queue_mongo_resource.release()
@fixture
def cache_mongo_resource(cache_mongo_host: str) -> Iterator[CacheMongoResource]:
database = "datasets_server_cache_test"
host = cache_mongo_host
if "test" not in database:
raise ValueError("Test must be launched on a test mongo database")
with CacheMongoResource(database=database, host=host) as cache_mongo_resource:
yield cache_mongo_resource
_clean_cache_database()
cache_mongo_resource.release()
| datasets-server-main | libs/libcommon/tests/conftest.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from collections.abc import Mapping
from datetime import datetime
from http import HTTPStatus
from time import process_time
from typing import Any, Optional, TypedDict
import pytest
from pymongo.errors import DocumentTooLarge
from libcommon.resources import CacheMongoResource
from libcommon.simple_cache import (
CachedArtifactError,
CachedResponseDocument,
CacheEntryDoesNotExistError,
CacheReportsPage,
CacheReportsWithContentPage,
CacheTotalMetricDocument,
InvalidCursor,
InvalidLimit,
delete_dataset_responses,
delete_response,
fetch_names,
get_best_response,
get_cache_reports,
get_cache_reports_with_content,
get_contents_page,
get_dataset_responses_without_content_for_kind,
get_outdated_split_full_names_for_step,
get_response,
get_response_with_details,
get_response_without_content,
get_responses_count_by_kind_status_and_error_code,
get_valid_datasets,
has_any_successful_response,
upsert_response,
)
from .utils import CONFIG_NAME_1, CONTENT_ERROR, DATASET_NAME
@pytest.fixture(autouse=True)
def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
return cache_mongo_resource
def test_insert_null_values() -> None:
kind = "test_kind"
dataset_a = "test_dataset_a"
dataset_b = "test_dataset_b"
dataset_c = "test_dataset_c"
config = None
split = None
content = {"some": "content"}
http_status = HTTPStatus.OK
CachedResponseDocument.objects(kind=kind, dataset=dataset_a, config=config, split=split).upsert_one(
content=content,
http_status=http_status,
)
assert CachedResponseDocument.objects.count() == 1
cached_response = CachedResponseDocument.objects.get()
assert cached_response is not None
assert cached_response.config is None
assert "config" not in cached_response.to_json()
cached_response.validate()
CachedResponseDocument(
kind=kind, dataset=dataset_b, config=config, split=split, content=content, http_status=http_status
).save()
assert CachedResponseDocument.objects.count() == 2
cached_response = CachedResponseDocument.objects(dataset=dataset_b).get()
assert cached_response is not None
assert cached_response.config is None
assert "config" not in cached_response.to_json()
coll = CachedResponseDocument._get_collection()
coll.insert_one(
{
"kind": kind,
"dataset": dataset_c,
"config": None,
"split": None,
"content": content,
"http_status": http_status,
}
)
assert CachedResponseDocument.objects.count() == 3
cached_response = CachedResponseDocument.objects(dataset=dataset_c).get()
assert cached_response is not None
assert cached_response.config is None
assert "config" not in cached_response.to_json()
def assert_metric(http_status: HTTPStatus, error_code: Optional[str], kind: str, total: int) -> None:
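    """Check that the CacheTotalMetricDocument counter for (kind, http_status, error_code) equals the expected total."""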
metric = CacheTotalMetricDocument.objects(http_status=http_status, error_code=error_code, kind=kind).first()
assert metric is not None
assert metric.total == total
@pytest.mark.parametrize(
"config,split",
[
(None, None),
("test_config", None),
("test_config", "test_split"),
],
)
def test_upsert_response(config: Optional[str], split: Optional[str]) -> None:
kind = "test_kind"
dataset = "test_dataset"
content = {"some": "content"}
assert CacheTotalMetricDocument.objects().count() == 0
upsert_response(kind=kind, dataset=dataset, config=config, split=split, content=content, http_status=HTTPStatus.OK)
cached_response = get_response(kind=kind, dataset=dataset, config=config, split=split)
assert cached_response == {
"http_status": HTTPStatus.OK,
"content": content,
"error_code": None,
"job_runner_version": None,
"dataset_git_revision": None,
"progress": None,
}
cached_response_without_content = get_response_without_content(
kind=kind, dataset=dataset, config=config, split=split
)
assert cached_response_without_content == {
"http_status": HTTPStatus.OK,
"error_code": None,
"job_runner_version": None,
"dataset_git_revision": None,
"progress": None,
}
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind, total=1)
# ensure it's idempotent
upsert_response(kind=kind, dataset=dataset, config=config, split=split, content=content, http_status=HTTPStatus.OK)
cached_response2 = get_response(kind=kind, dataset=dataset, config=config, split=split)
assert cached_response2 == cached_response
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind, total=1)
another_config = "another_config"
upsert_response(
kind=kind, dataset=dataset, config=another_config, split=split, content=content, http_status=HTTPStatus.OK
)
get_response(kind=kind, dataset=dataset, config=config, split=split)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind, total=2)
delete_dataset_responses(dataset=dataset)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind, total=0)
with pytest.raises(CacheEntryDoesNotExistError):
get_response(kind=kind, dataset=dataset, config=config, split=split)
error_code = "error_code"
job_runner_version = 0
dataset_git_revision = "123456"
upsert_response(
kind=kind,
dataset=dataset,
config=config,
split=split,
content=content,
http_status=HTTPStatus.BAD_REQUEST,
error_code=error_code,
job_runner_version=job_runner_version,
dataset_git_revision=dataset_git_revision,
)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind, total=0)
assert_metric(http_status=HTTPStatus.BAD_REQUEST, error_code=error_code, kind=kind, total=1)
cached_response3 = get_response(kind=kind, dataset=dataset, config=config, split=split)
assert cached_response3 == {
"http_status": HTTPStatus.BAD_REQUEST,
"content": content,
"error_code": error_code,
"job_runner_version": job_runner_version,
"dataset_git_revision": dataset_git_revision,
"progress": None,
}
def test_delete_response() -> None:
kind = "test_kind"
dataset_a = "test_dataset_a"
dataset_b = "test_dataset_b"
config = None
split = "test_split"
upsert_response(kind=kind, dataset=dataset_a, config=config, split=split, content={}, http_status=HTTPStatus.OK)
upsert_response(kind=kind, dataset=dataset_b, config=config, split=split, content={}, http_status=HTTPStatus.OK)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind, total=2)
get_response(kind=kind, dataset=dataset_a, config=config, split=split)
get_response(kind=kind, dataset=dataset_b, config=config, split=split)
delete_response(kind=kind, dataset=dataset_a, config=config, split=split)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind, total=1)
with pytest.raises(CacheEntryDoesNotExistError):
get_response(kind=kind, dataset=dataset_a, config=config, split=split)
get_response(kind=kind, dataset=dataset_b, config=config, split=split)
def test_delete_dataset_responses() -> None:
kind_a = "test_kind_a"
kind_b = "test_kind_b"
dataset_a = "test_dataset_a"
dataset_b = "test_dataset_b"
config = "test_config"
split = "test_split"
upsert_response(kind=kind_a, dataset=dataset_a, content={}, http_status=HTTPStatus.OK)
upsert_response(kind=kind_b, dataset=dataset_a, config=config, split=split, content={}, http_status=HTTPStatus.OK)
upsert_response(kind=kind_a, dataset=dataset_b, content={}, http_status=HTTPStatus.OK)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind_a, total=2)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind_b, total=1)
get_response(kind=kind_a, dataset=dataset_a)
get_response(kind=kind_b, dataset=dataset_a, config=config, split=split)
get_response(kind=kind_a, dataset=dataset_b)
delete_dataset_responses(dataset=dataset_a)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind_a, total=1)
assert_metric(http_status=HTTPStatus.OK, error_code=None, kind=kind_b, total=0)
with pytest.raises(CacheEntryDoesNotExistError):
get_response(kind=kind_a, dataset=dataset_a)
with pytest.raises(CacheEntryDoesNotExistError):
get_response(kind=kind_b, dataset=dataset_a, config=config, split=split)
get_response(kind=kind_a, dataset=dataset_b)
def test_big_row() -> None:
# https://github.com/huggingface/datasets-server/issues/197
kind = "test_kind"
dataset = "test_dataset"
config = "test_config"
split = "test_split"
big_content = {"big": "a" * 100_000_000}
with pytest.raises(DocumentTooLarge):
upsert_response(
kind=kind, dataset=dataset, config=config, split=split, content=big_content, http_status=HTTPStatus.OK
)
def test_get_valid_dataset_names_empty() -> None:
assert not get_valid_datasets(kind="test_kind")
def test_get_valid_dataset_names_two_valid_datasets() -> None:
kind = "test_kind"
dataset_a = "test_dataset_a"
dataset_b = "test_dataset_b"
upsert_response(kind=kind, dataset=dataset_a, content={}, http_status=HTTPStatus.OK)
upsert_response(kind=kind, dataset=dataset_b, content={}, http_status=HTTPStatus.OK)
assert get_valid_datasets(kind=kind) == {dataset_a, dataset_b}
def test_get_valid_dataset_names_filtered_by_kind() -> None:
kind_a = "test_kind_a"
kind_b = "test_kind_b"
dataset_a = "test_dataset_a"
dataset_b = "test_dataset_b"
upsert_response(kind=kind_a, dataset=dataset_a, content={}, http_status=HTTPStatus.OK)
upsert_response(kind=kind_b, dataset=dataset_b, content={}, http_status=HTTPStatus.OK)
assert get_valid_datasets(kind=kind_a) == {dataset_a}
assert get_valid_datasets(kind=kind_b) == {dataset_b}
def test_get_valid_dataset_names_at_least_one_valid_response() -> None:
kind = "test_kind"
dataset = "test_dataset"
config_a = "test_config_a"
config_b = "test_config_b"
upsert_response(kind=kind, dataset=dataset, config=config_a, content={}, http_status=HTTPStatus.OK)
upsert_response(
kind=kind, dataset=dataset, config=config_b, content={}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR
)
assert get_valid_datasets(kind=kind) == {dataset}
def test_get_valid_dataset_names_only_invalid_responses() -> None:
kind = "test_kind"
dataset = "test_dataset"
config_a = "test_config_a"
config_b = "test_config_b"
upsert_response(
kind=kind, dataset=dataset, config=config_a, content={}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR
)
upsert_response(
kind=kind, dataset=dataset, config=config_b, content={}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR
)
assert not get_valid_datasets(kind=kind)
def test_has_any_successful_response_empty() -> None:
assert not has_any_successful_response(dataset="dataset", kinds=[])
def test_has_any_successful_response_two_valid_datasets() -> None:
kind = "test_kind"
other_kind = "other_kind"
dataset_a = "test_dataset_a"
dataset_b = "test_dataset_b"
upsert_response(kind=kind, dataset=dataset_a, content={}, http_status=HTTPStatus.OK)
upsert_response(kind=kind, dataset=dataset_b, content={}, http_status=HTTPStatus.OK)
assert has_any_successful_response(dataset=dataset_a, kinds=[kind])
assert has_any_successful_response(dataset=dataset_b, kinds=[kind])
assert not has_any_successful_response(dataset=dataset_b, kinds=[other_kind])
assert has_any_successful_response(dataset=dataset_b, kinds=[kind, other_kind])
def test_has_any_successful_response_two_valid_kinds() -> None:
kind_a = "test_kind_a"
kind_b = "test_kind_b"
dataset = "test_dataset"
upsert_response(kind=kind_a, dataset=dataset, content={}, http_status=HTTPStatus.OK)
upsert_response(kind=kind_b, dataset=dataset, content={}, http_status=HTTPStatus.OK)
assert has_any_successful_response(dataset=dataset, kinds=[kind_a, kind_b])
def test_has_any_successful_response_at_least_one_valid_response() -> None:
kind_a = "test_kind_a"
kind_b = "test_kind_b"
dataset = "test_dataset"
config = "test_config"
upsert_response(kind=kind_a, dataset=dataset, config=config, content={}, http_status=HTTPStatus.OK)
upsert_response(
kind=kind_b, dataset=dataset, config=config, content={}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR
)
assert has_any_successful_response(dataset=dataset, config=config, kinds=[kind_a, kind_b])
def test_has_any_successful_response_only_invalid_responses() -> None:
kind = "test_kind"
dataset = "test_dataset"
config_a = "test_config_a"
config_b = "test_config_b"
upsert_response(
kind=kind, dataset=dataset, config=config_a, content={}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR
)
upsert_response(
kind=kind, dataset=dataset, config=config_b, content={}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR
)
assert not has_any_successful_response(dataset=dataset, kinds=[kind])
def test_get_contents_page() -> None:
kind = "test_kind"
assert get_contents_page(kind=kind, limit=2) == {"contents": [], "cursor": None}
dataset_a = "test_dataset_a"
content_a = {"key": "a"}
expected_content_a = {"key": "a", "dataset": dataset_a}
upsert_response(
kind=kind,
dataset=dataset_a,
content=content_a,
http_status=HTTPStatus.OK,
)
content_b = {"key": "b"}
upsert_response(
kind=kind,
dataset="test_dataset_b",
content=content_b,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
dataset_c = "test_dataset_c"
content_c = {"key": "c"}
expected_content_c = {"key": "c", "dataset": dataset_c}
upsert_response(
kind=kind,
dataset=dataset_c,
content=content_c,
http_status=HTTPStatus.OK,
)
content_d = {"key": "d"}
upsert_response(
kind="another_kind",
dataset="test_dataset_d",
content=content_d,
http_status=HTTPStatus.OK,
)
dataset_e = "test_dataset_e"
content_e = {"key": "e"}
expected_content_e = {"key": "e", "dataset": dataset_e}
upsert_response(
kind=kind,
dataset=dataset_e,
content=content_e,
http_status=HTTPStatus.OK,
)
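    # only successful responses of the requested kind are paginated: a and c come back on the first page, e on the second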
response = get_contents_page(kind=kind, limit=2)
assert response["contents"] == [expected_content_a, expected_content_c]
assert response["cursor"] is not None
next_cursor = response["cursor"]
response = get_contents_page(kind=kind, limit=2, cursor=next_cursor)
assert response["contents"] == [expected_content_e]
assert response["cursor"] is None
with pytest.raises(InvalidCursor):
get_cache_reports(kind=kind, cursor="not an objectid", limit=2)
with pytest.raises(InvalidLimit):
get_cache_reports(kind=kind, cursor=next_cursor, limit=-1)
with pytest.raises(InvalidLimit):
get_cache_reports(kind=kind, cursor=next_cursor, limit=0)
def test_count_by_status_and_error_code() -> None:
assert not get_responses_count_by_kind_status_and_error_code()
upsert_response(
kind="test_kind",
dataset="test_dataset",
content={"key": "value"},
http_status=HTTPStatus.OK,
)
assert get_responses_count_by_kind_status_and_error_code() == [
{"kind": "test_kind", "http_status": 200, "error_code": None, "count": 1}
]
upsert_response(
kind="test_kind2",
dataset="test_dataset",
config="test_config",
split="test_split",
content={
"key": "value",
},
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
error_code="error_code",
)
metrics = get_responses_count_by_kind_status_and_error_code()
assert len(metrics) == 2
assert {"kind": "test_kind", "http_status": 200, "error_code": None, "count": 1} in metrics
assert {"kind": "test_kind2", "http_status": 500, "error_code": "error_code", "count": 1} in metrics
def test_get_cache_reports() -> None:
kind = "test_kind"
kind_2 = "test_kind_2"
expected_cache_reports: CacheReportsPage = {"cache_reports": [], "next_cursor": ""}
assert get_cache_reports(kind=kind, cursor="", limit=2) == expected_cache_reports
expected_cache_reports_with_content: CacheReportsWithContentPage = {
"cache_reports_with_content": [],
"next_cursor": "",
}
assert get_cache_reports_with_content(kind=kind, cursor="", limit=2) == expected_cache_reports_with_content
dataset_a = "test_dataset_a"
content_a = {"key": "a"}
http_status_a = HTTPStatus.OK
updated_at_a = datetime(2020, 1, 1, 0, 0, 0)
upsert_response(
kind=kind,
dataset=dataset_a,
content=content_a,
http_status=http_status_a,
updated_at=updated_at_a,
)
dataset_b = "test_dataset_b"
config_b = "test_config_b"
content_b = {"key": "b"}
http_status_b = HTTPStatus.INTERNAL_SERVER_ERROR
error_code_b = "error_code_b"
details_b = {
"error": "error b",
}
job_runner_version_b = 0
dataset_git_revision_b = "123456"
updated_at_b = datetime(2020, 1, 1, 0, 0, 1)
upsert_response(
kind=kind,
dataset=dataset_b,
config=config_b,
content=content_b,
details=details_b,
http_status=http_status_b,
error_code=error_code_b,
job_runner_version=job_runner_version_b,
dataset_git_revision=dataset_git_revision_b,
updated_at=updated_at_b,
)
dataset_c = "test_dataset_c"
config_c = "test_config_c"
split_c = "test_split_c"
content_c = {"key": "c"}
http_status_c = HTTPStatus.INTERNAL_SERVER_ERROR
error_code_c = "error_code_c"
details_c = {
"error": "error c",
}
updated_at_c = datetime(2020, 1, 1, 0, 0, 2)
upsert_response(
kind=kind,
dataset=dataset_c,
config=config_c,
split=split_c,
content=content_c,
details=details_c,
http_status=http_status_c,
error_code=error_code_c,
updated_at=updated_at_c,
)
upsert_response(
kind=kind_2,
dataset=dataset_c,
content=content_c,
details=details_c,
http_status=http_status_c,
error_code=error_code_c,
updated_at=updated_at_c,
)
upsert_response(
kind=kind_2,
dataset=dataset_c,
config=config_c,
split=split_c,
content=content_c,
details=details_c,
http_status=http_status_c,
error_code=error_code_c,
updated_at=updated_at_c,
)
response = get_cache_reports(kind=kind, cursor="", limit=2)
assert response["cache_reports"] == [
{
"kind": kind,
"dataset": dataset_a,
"config": None,
"split": None,
"http_status": http_status_a.value,
"error_code": None,
"details": {},
"updated_at": updated_at_a,
"job_runner_version": None,
"dataset_git_revision": None,
"progress": None,
},
{
"kind": kind,
"dataset": dataset_b,
"config": config_b,
"split": None,
"http_status": http_status_b.value,
"error_code": error_code_b,
"details": details_b,
"updated_at": updated_at_b,
"job_runner_version": job_runner_version_b,
"dataset_git_revision": dataset_git_revision_b,
"progress": None,
},
]
assert response["next_cursor"] != ""
next_cursor = response["next_cursor"]
response = get_cache_reports(kind=kind, cursor=next_cursor, limit=2)
assert response == {
"cache_reports": [
{
"kind": kind,
"dataset": dataset_c,
"config": config_c,
"split": split_c,
"http_status": http_status_c.value,
"error_code": error_code_c,
"details": details_c,
"updated_at": updated_at_c,
"job_runner_version": None,
"dataset_git_revision": None,
"progress": None,
},
],
"next_cursor": "",
}
response_with_content = get_cache_reports_with_content(kind=kind, cursor="", limit=2)
# redact the response to make it simpler to compare with the expected
assert response_with_content["cache_reports_with_content"] == [
{
"kind": kind,
"dataset": dataset_a,
"config": None,
"split": None,
"http_status": http_status_a.value,
"error_code": None,
"content": content_a,
"job_runner_version": None,
"dataset_git_revision": None,
"details": {},
"updated_at": updated_at_a,
"progress": None,
},
{
"kind": kind,
"dataset": dataset_b,
"config": config_b,
"split": None,
"http_status": http_status_b.value,
"error_code": error_code_b,
"content": content_b,
"job_runner_version": job_runner_version_b,
"dataset_git_revision": dataset_git_revision_b,
"details": details_b,
"updated_at": updated_at_b,
"progress": None,
},
]
assert response_with_content["next_cursor"] != ""
next_cursor = response_with_content["next_cursor"]
response_with_content = get_cache_reports_with_content(kind=kind, cursor=next_cursor, limit=2)
assert response_with_content == {
"cache_reports_with_content": [
{
"kind": kind,
"dataset": dataset_c,
"config": config_c,
"split": split_c,
"http_status": http_status_c.value,
"error_code": error_code_c,
"content": content_c,
"job_runner_version": None,
"dataset_git_revision": None,
"details": details_c,
"updated_at": updated_at_c,
"progress": None,
},
],
"next_cursor": "",
}
with pytest.raises(InvalidCursor):
get_cache_reports(kind=kind, cursor="not an objectid", limit=2)
with pytest.raises(InvalidLimit):
get_cache_reports(kind=kind, cursor=next_cursor, limit=-1)
with pytest.raises(InvalidLimit):
get_cache_reports(kind=kind, cursor=next_cursor, limit=0)
result_a = get_dataset_responses_without_content_for_kind(kind=kind, dataset=dataset_a)
assert len(result_a) == 1
assert result_a[0]["http_status"] == HTTPStatus.OK.value
assert result_a[0]["error_code"] is None
assert result_a[0]["details"] == {}
assert not get_dataset_responses_without_content_for_kind(kind=kind_2, dataset=dataset_a)
result_c = get_dataset_responses_without_content_for_kind(kind=kind_2, dataset=dataset_c)
assert len(result_c) == 2
for result in result_c:
assert result["http_status"] == http_status_c.value
assert result["error_code"] == error_code_c
assert result["details"] == details_c
assert result["updated_at"] == updated_at_c
@pytest.mark.parametrize("num_entries", [1, 10, 100, 1_000])
def test_stress_get_cache_reports(num_entries: int) -> None:
MAX_SECONDS = 0.1
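    # each page of up to 100 cache reports must be built in less than MAX_SECONDS of CPU time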
kind = "test_kind"
content = {"key": "value"}
http_status = HTTPStatus.OK
splits = [f"split{i}" for i in range(num_entries)]
for split in splits:
upsert_response(
kind=kind,
dataset="dataset",
config="config",
split=split,
content=content,
http_status=http_status,
)
next_cursor = ""
is_first: bool = True
while next_cursor != "" or is_first:
start = process_time()
is_first = False
response = get_cache_reports(kind=kind, cursor=next_cursor, limit=100)
next_cursor = response["next_cursor"]
assert process_time() - start < MAX_SECONDS
def test_get_outdated_split_full_names_for_step() -> None:
kind = "kind"
current_version = 2
minor_version = 1
    upsert_response(
        kind=kind,
        dataset="dataset_with_current_version",
        content={},
        http_status=HTTPStatus.OK,
        job_runner_version=current_version,
    )
    result = get_outdated_split_full_names_for_step(kind=kind, current_version=current_version)
    assert not result
upsert_response(
kind=kind,
dataset="dataset_with_minor_version",
content={},
http_status=HTTPStatus.OK,
job_runner_version=minor_version,
)
result = get_outdated_split_full_names_for_step(kind=kind, current_version=current_version)
assert result
assert len(result) == 1
class EntrySpec(TypedDict):
kind: str
dataset: str
config: Optional[str]
http_status: HTTPStatus
progress: Optional[float]
@pytest.mark.parametrize(
"selected_entries,kinds,dataset,config,best_entry",
[
# Best means:
# - the first success response with progress=1.0 is returned
(["ok1"], ["kind1"], "dataset", None, "ok1"),
(["ok_config1"], ["kind1"], "dataset", "config", "ok_config1"),
(["ok1", "ok2"], ["kind1", "kind2"], "dataset", None, "ok1"),
(["ok1", "ok2"], ["kind2", "kind1"], "dataset", None, "ok2"),
(["partial1", "ok2"], ["kind1", "kind2"], "dataset", None, "ok2"),
(["error1", "ok2"], ["kind1", "kind2"], "dataset", None, "ok2"),
# - if no success response with progress=1.0 is found, the success response with the highest progress is
# returned
(["partial1", "partial2"], ["kind1", "kind2"], "dataset", None, "partial2"),
(["partial1", "error2"], ["kind1", "kind2"], "dataset", None, "partial1"),
# - if no success response is found, the first error response is returned
(["error1", "error2"], ["kind1", "kind2"], "dataset", None, "error1"),
(["error1", "error2"], ["kind2", "kind1"], "dataset", None, "error2"),
# - if no response is found, an error response is returned
([], ["kind1"], "dataset", None, "cache_miss"),
(["ok_config1"], ["kind1"], "dataset", None, "cache_miss"),
(["ok1"], ["kind1"], "dataset", "config", "cache_miss"),
],
)
def test_get_best_response(
selected_entries: list[str], kinds: list[str], dataset: str, config: Optional[str], best_entry: str
) -> None:
# arrange
entries: dict[str, EntrySpec] = {
"ok1": {
"kind": "kind1",
"dataset": "dataset",
"config": None,
"http_status": HTTPStatus.OK,
"progress": 1.0,
},
"ok2": {
"kind": "kind2",
"dataset": "dataset",
"config": None,
"http_status": HTTPStatus.OK,
"progress": 1.0,
},
"partial1": {
"kind": "kind1",
"dataset": "dataset",
"config": None,
"http_status": HTTPStatus.OK,
"progress": 0,
},
"partial2": {
"kind": "kind2",
"dataset": "dataset",
"config": None,
"http_status": HTTPStatus.OK,
"progress": 0.5,
},
"ok_config1": {
"kind": "kind1",
"dataset": "dataset",
"config": "config",
"http_status": HTTPStatus.OK,
"progress": 1.0,
},
"error1": {
"kind": "kind1",
"dataset": "dataset",
"config": None,
"http_status": HTTPStatus.INTERNAL_SERVER_ERROR,
"progress": 1.0,
},
"error2": {
"kind": "kind2",
"dataset": "dataset",
"config": None,
"http_status": HTTPStatus.NOT_FOUND,
"progress": 1.0,
},
"cache_miss": {
"kind": "kind1",
"dataset": "dataset",
"config": None,
"http_status": HTTPStatus.NOT_FOUND,
"progress": None,
},
}
for entry in selected_entries:
upsert_response(
kind=entries[entry]["kind"],
dataset=entries[entry]["dataset"],
config=entries[entry]["config"],
http_status=entries[entry]["http_status"],
progress=entries[entry]["progress"],
content={"error": "some_error"} if (entries[entry]["http_status"] >= HTTPStatus.BAD_REQUEST.value) else {},
)
# act
best_response = get_best_response(kinds, dataset, config)
# assert
assert best_response.kind == entries[best_entry]["kind"]
assert ("error" in best_response.response["content"]) is (
entries[best_entry]["http_status"] >= HTTPStatus.BAD_REQUEST.value
)
assert best_response.response["http_status"] == entries[best_entry]["http_status"].value
assert best_response.response["progress"] == entries[best_entry]["progress"]
def test_cached_artifact_error() -> None:
dataset = "dataset"
config = "config"
split = "split"
kind = "cache_kind"
error_code = "ErrorCode"
error_message = "error message"
cause_exception = "CauseException"
cause_message = "cause message"
cause_traceback = ["traceback1", "traceback2"]
details = {
"error": error_message,
"cause_exception": cause_exception,
"cause_message": cause_message,
"cause_traceback": cause_traceback,
}
content = {"error": error_message}
job_runner_version = 1
dataset_git_revision = "dataset_git_revision"
progress = 1.0
upsert_response(
kind=kind,
dataset=dataset,
config=config,
split=split,
content=content,
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
error_code=error_code,
details=details,
job_runner_version=job_runner_version,
dataset_git_revision=dataset_git_revision,
progress=progress,
)
response = get_response_with_details(kind=kind, dataset=dataset, config=config, split=split)
error = CachedArtifactError(
message="Previous step error",
kind=kind,
dataset=dataset,
config=config,
split=split,
cache_entry_with_details=response,
)
assert error.cache_entry_with_details["content"] == content
assert error.cache_entry_with_details["http_status"] == HTTPStatus.INTERNAL_SERVER_ERROR
assert error.cache_entry_with_details["error_code"] == error_code
assert error.enhanced_details == {
"error": error_message,
"cause_exception": cause_exception,
"cause_message": cause_message,
"cause_traceback": cause_traceback,
"copied_from_artifact": {
"kind": kind,
"dataset": dataset,
"config": config,
"split": split,
},
}
class ResponseSpec(TypedDict):
content: Mapping[str, Any]
http_status: HTTPStatus
CACHE_KIND_A = "cache_kind_a"
CACHE_KIND_B = "cache_kind_b"
NAMES = ["name_1", "name_2", "name_3"]
NAME_FIELD = "name"
NAMES_FIELD = "names"
NAMES_RESPONSE_OK = ResponseSpec(
content={NAMES_FIELD: [{NAME_FIELD: name} for name in NAMES]}, http_status=HTTPStatus.OK
)
RESPONSE_ERROR = ResponseSpec(content=CONTENT_ERROR, http_status=HTTPStatus.INTERNAL_SERVER_ERROR)
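# names are read from a successful cached response among the given kinds; if none succeeded, the result is empty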
@pytest.mark.parametrize(
"cache_kinds,response_spec_by_kind,expected_names",
[
([], {}, []),
([CACHE_KIND_A], {}, []),
([CACHE_KIND_A], {CACHE_KIND_A: RESPONSE_ERROR}, []),
([CACHE_KIND_A], {CACHE_KIND_A: NAMES_RESPONSE_OK}, NAMES),
([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK}, NAMES),
([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK, CACHE_KIND_B: RESPONSE_ERROR}, NAMES),
([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK, CACHE_KIND_B: NAMES_RESPONSE_OK}, NAMES),
([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: RESPONSE_ERROR, CACHE_KIND_B: RESPONSE_ERROR}, []),
],
)
def test_fetch_names(
cache_kinds: list[str],
response_spec_by_kind: Mapping[str, Mapping[str, Any]],
expected_names: list[str],
) -> None:
for kind, response_spec in response_spec_by_kind.items():
upsert_response(
kind=kind,
dataset=DATASET_NAME,
config=CONFIG_NAME_1,
split=None,
content=response_spec["content"],
http_status=response_spec["http_status"],
)
assert (
fetch_names(
dataset=DATASET_NAME,
config=CONFIG_NAME_1,
cache_kinds=cache_kinds,
names_field=NAMES_FIELD,
name_field=NAME_FIELD,
)
== expected_names
)
| datasets-server-main | libs/libcommon/tests/test_simple_cache.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import pytest
from libcommon.config import ProcessingGraphConfig
from libcommon.processing_graph import (
ProcessingGraph,
ProcessingGraphSpecification,
ProcessingStep,
)
def assert_lists_are_equal(a: list[ProcessingStep], b: list[str]) -> None:
assert sorted(processing_step.name for processing_step in a) == sorted(b)
def assert_step(
graph: ProcessingGraph,
processing_step_name: str,
children: list[str],
parents: list[str],
ancestors: list[str],
) -> None:
assert_lists_are_equal(graph.get_children(processing_step_name), children)
assert_lists_are_equal(graph.get_parents(processing_step_name), parents)
assert_lists_are_equal(graph.get_ancestors(processing_step_name), ancestors)
def test_graph() -> None:
a = "step_a"
b = "step_b"
c = "step_c"
d = "step_d"
e = "step_e"
f = "step_f"
specification: ProcessingGraphSpecification = {
a: {"input_type": "dataset", "job_runner_version": 1},
b: {"input_type": "dataset", "job_runner_version": 1},
c: {"input_type": "dataset", "triggered_by": a, "job_runner_version": 1},
d: {"input_type": "dataset", "triggered_by": [a, c], "job_runner_version": 1},
e: {"input_type": "dataset", "triggered_by": [c], "job_runner_version": 1},
f: {"input_type": "dataset", "triggered_by": [a, b], "job_runner_version": 1},
}
graph = ProcessingGraph(ProcessingGraphConfig(specification).specification)
assert_step(graph, a, children=[c, d, f], parents=[], ancestors=[])
assert_step(graph, b, children=[f], parents=[], ancestors=[])
assert_step(graph, c, children=[d, e], parents=[a], ancestors=[a])
assert_step(graph, d, children=[], parents=[a, c], ancestors=[a, c])
assert_step(graph, e, children=[], parents=[c], ancestors=[a, c])
assert_step(graph, f, children=[], parents=[a, b], ancestors=[a, b])
@pytest.fixture(scope="module")
def graph() -> ProcessingGraph:
config = ProcessingGraphConfig()
return ProcessingGraph(config.specification)
@pytest.mark.parametrize(
"processing_step_name,children,parents,ancestors",
[
(
"dataset-config-names",
[
"config-split-names-from-streaming",
"config-parquet-and-info",
"dataset-opt-in-out-urls-count",
"dataset-split-names",
"dataset-parquet",
"dataset-info",
"dataset-size",
"dataset-is-valid",
],
[],
[],
),
(
"config-parquet-and-info",
[
"config-parquet",
"config-info",
"config-size",
"split-duckdb-index",
],
["dataset-config-names"],
["dataset-config-names"],
),
(
"config-split-names-from-info",
[
"config-opt-in-out-urls-count",
"split-first-rows-from-streaming",
"dataset-split-names",
"split-duckdb-index",
"split-descriptive-statistics",
"config-is-valid",
],
["config-info"],
["dataset-config-names", "config-parquet-and-info", "config-info"],
),
(
"config-split-names-from-streaming",
[
"split-first-rows-from-streaming",
"dataset-split-names",
"config-opt-in-out-urls-count",
"split-duckdb-index",
"split-descriptive-statistics",
"config-is-valid",
],
["dataset-config-names"],
["dataset-config-names"],
),
(
"dataset-split-names",
[],
[
"dataset-config-names",
"config-split-names-from-info",
"config-split-names-from-streaming",
],
[
"dataset-config-names",
"config-parquet-and-info",
"config-info",
"config-split-names-from-info",
"config-split-names-from-streaming",
],
),
(
"split-first-rows-from-parquet",
["split-is-valid", "split-image-url-columns"],
["config-parquet-metadata"],
["config-parquet", "dataset-config-names", "config-parquet-and-info", "config-parquet-metadata"],
),
(
"split-first-rows-from-streaming",
["split-is-valid", "split-image-url-columns"],
[
"config-split-names-from-streaming",
"config-split-names-from-info",
],
[
"dataset-config-names",
"config-split-names-from-streaming",
"config-split-names-from-info",
"config-parquet-and-info",
"config-info",
],
),
(
"config-parquet",
["config-parquet-metadata", "dataset-parquet"],
["config-parquet-and-info"],
["dataset-config-names", "config-parquet-and-info"],
),
(
"config-parquet-metadata",
["split-first-rows-from-parquet"],
["config-parquet"],
["dataset-config-names", "config-parquet-and-info", "config-parquet"],
),
(
"dataset-parquet",
[],
["dataset-config-names", "config-parquet"],
["dataset-config-names", "config-parquet-and-info", "config-parquet"],
),
(
"config-info",
["dataset-info", "config-split-names-from-info"],
["config-parquet-and-info"],
["dataset-config-names", "config-parquet-and-info"],
),
(
"dataset-info",
[],
["dataset-config-names", "config-info"],
["dataset-config-names", "config-parquet-and-info", "config-info"],
),
(
"config-size",
["split-is-valid", "dataset-size"],
["config-parquet-and-info"],
["dataset-config-names", "config-parquet-and-info"],
),
(
"dataset-size",
["dataset-hub-cache"],
["dataset-config-names", "config-size"],
["dataset-config-names", "config-parquet-and-info", "config-size"],
),
(
"dataset-is-valid",
["dataset-hub-cache"],
[
"config-is-valid",
"dataset-config-names",
],
[
"dataset-config-names",
"config-parquet-and-info",
"config-info",
"config-parquet",
"config-size",
"config-split-names-from-info",
"config-parquet-metadata",
"config-split-names-from-streaming",
"split-first-rows-from-parquet",
"split-first-rows-from-streaming",
"config-is-valid",
"split-is-valid",
"split-duckdb-index",
],
),
(
"split-image-url-columns",
["split-opt-in-out-urls-scan"],
["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
[
"dataset-config-names",
"config-split-names-from-streaming",
"config-split-names-from-info",
"config-info",
"config-parquet-and-info",
"config-parquet-metadata",
"split-first-rows-from-streaming",
"config-parquet",
"split-first-rows-from-parquet",
],
),
(
"split-opt-in-out-urls-scan",
["split-opt-in-out-urls-count"],
["split-image-url-columns"],
[
"dataset-config-names",
"config-split-names-from-streaming",
"config-split-names-from-info",
"config-info",
"config-parquet-and-info",
"config-parquet-metadata",
"split-first-rows-from-streaming",
"config-parquet",
"split-first-rows-from-parquet",
"split-image-url-columns",
],
),
(
"split-opt-in-out-urls-count",
["config-opt-in-out-urls-count"],
["split-opt-in-out-urls-scan"],
[
"dataset-config-names",
"config-split-names-from-streaming",
"split-first-rows-from-streaming",
"config-split-names-from-info",
"config-info",
"config-parquet-and-info",
"config-parquet-metadata",
"split-opt-in-out-urls-scan",
"config-parquet",
"split-first-rows-from-parquet",
"split-image-url-columns",
],
),
(
"config-opt-in-out-urls-count",
["dataset-opt-in-out-urls-count"],
["split-opt-in-out-urls-count", "config-split-names-from-info", "config-split-names-from-streaming"],
[
"dataset-config-names",
"config-split-names-from-streaming",
"split-first-rows-from-streaming",
"config-split-names-from-info",
"config-info",
"config-parquet-and-info",
"config-parquet-metadata",
"split-opt-in-out-urls-count",
"split-opt-in-out-urls-scan",
"config-parquet",
"split-first-rows-from-parquet",
"split-image-url-columns",
],
),
(
"dataset-opt-in-out-urls-count",
[],
["config-opt-in-out-urls-count", "dataset-config-names"],
[
"dataset-config-names",
"config-split-names-from-streaming",
"split-first-rows-from-streaming",
"config-split-names-from-info",
"config-info",
"config-parquet-and-info",
"config-parquet-metadata",
"config-opt-in-out-urls-count",
"split-opt-in-out-urls-count",
"split-opt-in-out-urls-scan",
"config-parquet",
"split-first-rows-from-parquet",
"split-image-url-columns",
],
),
(
"split-duckdb-index",
["split-is-valid"],
["config-split-names-from-info", "config-split-names-from-streaming", "config-parquet-and-info"],
[
"config-split-names-from-info",
"config-split-names-from-streaming",
"config-parquet-and-info",
"config-info",
"dataset-config-names",
],
),
(
"split-descriptive-statistics",
[],
["config-split-names-from-info", "config-split-names-from-streaming"],
[
"dataset-config-names",
"config-parquet-and-info",
"config-info",
"config-split-names-from-info",
"config-split-names-from-streaming",
],
),
(
"dataset-hub-cache",
[],
["dataset-is-valid", "dataset-size"],
[
"config-info",
"config-is-valid",
"config-parquet",
"config-parquet-and-info",
"config-parquet-metadata",
"config-size",
"config-split-names-from-info",
"config-split-names-from-streaming",
"dataset-config-names",
"dataset-is-valid",
"dataset-size",
"split-duckdb-index",
"split-first-rows-from-parquet",
"split-first-rows-from-streaming",
"split-is-valid",
],
),
],
)
def test_default_graph_steps(
graph: ProcessingGraph, processing_step_name: str, children: list[str], parents: list[str], ancestors: list[str]
) -> None:
assert_step(graph, processing_step_name, children=children, parents=parents, ancestors=ancestors)
def test_default_graph_first_steps(graph: ProcessingGraph) -> None:
roots = ["dataset-config-names"]
assert_lists_are_equal(graph.get_first_processing_steps(), roots)
def test_default_graph_enables_preview(graph: ProcessingGraph) -> None:
enables_preview = ["split-first-rows-from-streaming", "split-first-rows-from-parquet"]
assert_lists_are_equal(graph.get_processing_steps_enables_preview(), enables_preview)
def test_default_graph_enables_viewer(graph: ProcessingGraph) -> None:
enables_viewer = ["config-size"]
assert_lists_are_equal(graph.get_processing_steps_enables_viewer(), enables_viewer)
def test_default_graph_enables_search(graph: ProcessingGraph) -> None:
enables_search = ["split-duckdb-index"]
assert_lists_are_equal(graph.get_processing_steps_enables_search(), enables_search)
def test_default_graph_provide_dataset_config_names(graph: ProcessingGraph) -> None:
assert_lists_are_equal(graph.get_dataset_config_names_processing_steps(), ["dataset-config-names"])
def test_default_graph_provide_config_split_names(graph: ProcessingGraph) -> None:
assert_lists_are_equal(
graph.get_config_split_names_processing_steps(),
["config-split-names-from-streaming", "config-split-names-from-info"],
)
| datasets-server-main | libs/libcommon/tests/test_processing_graph.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
| datasets-server-main | libs/libcommon/tests/__init__.py |
import os
import time
from dataclasses import dataclass
from http import HTTPStatus
from pathlib import Path
from typing import Optional
import pytest
from libcommon.prometheus import (
ASSETS_DISK_USAGE,
QUEUE_JOBS_TOTAL,
RESPONSES_IN_CACHE_TOTAL,
Prometheus,
StepProfiler,
update_assets_disk_usage,
update_queue_jobs_total,
update_responses_in_cache_total,
)
from libcommon.queue import JobTotalMetricDocument
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CacheTotalMetricDocument
def parse_metrics(content: str) -> dict[str, float]:
# examples:
# starlette_requests_total{method="GET",path_template="/metrics"} 1.0
# method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
return {
parts[0]: float(parts[1])
for line in content.split("\n")
if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
}
def test_prometheus() -> None:
is_multiprocess = "PROMETHEUS_MULTIPROC_DIR" in os.environ
prometheus = Prometheus()
registry = prometheus.getRegistry()
assert registry is not None
content = prometheus.getLatestContent()
metrics = parse_metrics(content)
name = "process_start_time_seconds"
if not is_multiprocess:
assert name in metrics, metrics
assert metrics[name] > 0, metrics[name]
else:
assert name not in metrics, metrics
def create_key(suffix: str, labels: dict[str, str], le: Optional[str] = None) -> str:
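    # assemble the full Prometheus sample key, e.g. method_steps_processing_time_seconds_count{context="...",method="...",step="..."}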
items = list(labels.items())
if le:
items.append(("le", le))
labels_string = ",".join([f'{key}="{value}"' for key, value in sorted(items)])
return f"method_steps_processing_time_seconds_{suffix}{{{labels_string}}}"
def check_histogram_metric(
metrics: dict[str, float], method: str, step: str, context: str, events: int, duration: float
) -> None:
labels = {"context": context, "method": method, "step": step}
assert metrics[create_key("count", labels)] == events, metrics
assert metrics[create_key("bucket", labels, le="+Inf")] == events, metrics
assert metrics[create_key("bucket", labels, le="1.0")] == events, metrics
assert metrics[create_key("bucket", labels, le="0.05")] == 0, metrics
assert metrics[create_key("sum", labels)] >= duration, metrics
assert metrics[create_key("sum", labels)] <= duration * 1.1, metrics
def test_step_profiler() -> None:
duration = 0.1
method = "test_step_profiler"
step_all = "all"
context = "None"
with StepProfiler(method=method, step=step_all):
time.sleep(duration)
metrics = parse_metrics(Prometheus().getLatestContent())
check_histogram_metric(metrics=metrics, method=method, step=step_all, context=context, events=1, duration=duration)
def test_nested_step_profiler() -> None:
method = "test_nested_step_profiler"
step_all = "all"
context = "None"
step_1 = "step_1"
duration_1a = 0.1
duration_1b = 0.3
context_1 = "None"
step_2 = "step_2"
duration_2 = 0.5
context_2 = "endpoint: /splits"
with StepProfiler(method=method, step=step_all):
with StepProfiler(method, step_1):
time.sleep(duration_1a)
with StepProfiler(method, step_1, context_1):
time.sleep(duration_1b)
with StepProfiler(method, step_2, context_2):
time.sleep(duration_2)
metrics = parse_metrics(Prometheus().getLatestContent())
check_histogram_metric(
metrics=metrics,
method=method,
step=step_all,
context=context,
events=1,
duration=duration_1a + duration_1b + duration_2,
)
check_histogram_metric(
metrics=metrics, method=method, step=step_1, context=context_1, events=2, duration=duration_1a + duration_1b
)
check_histogram_metric(
metrics=metrics, method=method, step=step_2, context=context_2, events=1, duration=duration_2
)
@dataclass
class Metrics:
metrics: dict[str, float]
def forge_metric_key(self, name: str, content: dict[str, str]) -> str:
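        # in multiprocess mode, the exported samples are expected to carry the pid of the current process as an extra label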
local_content: dict[str, str] = dict(content)
if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
local_content["pid"] = str(os.getpid())
inner = ",".join([f'{key}="{value}"' for key, value in sorted(local_content.items())])
return f"{name}{{{inner}}}"
def get_metrics() -> Metrics:
prometheus = Prometheus()
registry = prometheus.getRegistry()
assert registry is not None
content = prometheus.getLatestContent()
lines = content.split("\n")
metrics = {" ".join(line.split(" ")[:-1]): float(line.split(" ")[-1]) for line in lines if line and line[0] != "#"}
return Metrics(metrics=metrics)
def test_cache_metrics(cache_mongo_resource: CacheMongoResource) -> None:
RESPONSES_IN_CACHE_TOTAL.clear()
cache_metric = {
"kind": "dummy",
"http_status": HTTPStatus.OK,
"error_code": None,
"total": 1,
}
collection = CacheTotalMetricDocument._get_collection()
collection.insert_one(cache_metric)
metrics = get_metrics()
assert (
metrics.forge_metric_key(
name="responses_in_cache_total",
content={"error_code": "None", "http_status": "200", "kind": "dummy"},
)
not in metrics.metrics
)
update_responses_in_cache_total()
metrics = get_metrics()
assert (
metrics.forge_metric_key(
name="responses_in_cache_total",
content={"error_code": "None", "http_status": "200", "kind": "dummy"},
)
in metrics.metrics
)
def test_queue_metrics(queue_mongo_resource: QueueMongoResource) -> None:
QUEUE_JOBS_TOTAL.clear()
job_metric = {
"job_type": "dummy",
"status": "waiting",
"total": 1,
}
collection = JobTotalMetricDocument._get_collection()
collection.insert_one(job_metric)
metrics = get_metrics()
assert (
metrics.forge_metric_key(
name="queue_jobs_total",
content={"queue": "dummy", "status": "waiting"},
)
not in metrics.metrics
)
update_queue_jobs_total()
metrics = get_metrics()
assert (
metrics.forge_metric_key(
name="queue_jobs_total",
content={"queue": "dummy", "status": "waiting"},
)
in metrics.metrics
)
@pytest.mark.parametrize("usage_type", ["total", "used", "free", "percent"])
def test_assets_metrics(usage_type: str, tmp_path: Path) -> None:
ASSETS_DISK_USAGE.clear()
metrics = get_metrics()
name = metrics.forge_metric_key(name="assets_disk_usage", content={"type": usage_type})
assert name not in metrics.metrics
update_assets_disk_usage(directory=tmp_path)
metrics = get_metrics()
name = metrics.forge_metric_key(name="assets_disk_usage", content={"type": usage_type})
assert name in metrics.metrics
assert metrics.metrics[name] >= 0
if usage_type == "percent":
assert metrics.metrics[name] <= 100
def test_process_metrics() -> None:
metrics = get_metrics()
name = "process_start_time_seconds"
if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
assert name not in metrics.metrics
else:
assert name in metrics.metrics
assert metrics.metrics[name] > 0
| datasets-server-main | libs/libcommon/tests/test_prometheus.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from http import HTTPStatus
from typing import Optional
import pytest
from libcommon.queue import Queue
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import (
delete_response,
get_cache_entries_df,
upsert_response,
)
from libcommon.state import (
ArtifactState,
CacheState,
ConfigState,
DatasetState,
SplitState,
)
from .utils import (
CACHE_KIND,
CONFIG_NAME_1,
CONFIG_NAMES,
CONFIG_NAMES_CONTENT,
DATASET_NAME,
JOB_RUNNER_VERSION,
PROCESSING_GRAPH,
REVISION_NAME,
SPLIT_NAME_1,
SPLIT_NAMES,
SPLIT_NAMES_CONTENT,
)
@pytest.fixture(autouse=True)
def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
return queue_mongo_resource
@pytest.fixture(autouse=True)
def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
return cache_mongo_resource
@pytest.mark.parametrize(
"dataset,config,split,cache_kind",
[
(DATASET_NAME, None, None, CACHE_KIND),
(DATASET_NAME, CONFIG_NAME_1, None, CACHE_KIND),
(DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME_1, CACHE_KIND),
],
)
def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional[str], cache_kind: str) -> None:
assert not CacheState(
dataset=dataset,
config=config,
split=split,
cache_kind=cache_kind,
cache_entries_df=get_cache_entries_df(dataset=dataset),
job_runner_version=JOB_RUNNER_VERSION,
).exists
upsert_response(
kind=cache_kind, dataset=dataset, config=config, split=split, content={}, http_status=HTTPStatus.OK
)
assert CacheState(
dataset=dataset,
config=config,
split=split,
cache_kind=cache_kind,
cache_entries_df=get_cache_entries_df(dataset=dataset),
job_runner_version=JOB_RUNNER_VERSION,
).exists
delete_response(kind=cache_kind, dataset=dataset, config=config, split=split)
assert not CacheState(
dataset=dataset,
config=config,
split=split,
cache_kind=cache_kind,
cache_entries_df=get_cache_entries_df(dataset=dataset),
job_runner_version=JOB_RUNNER_VERSION,
).exists
@pytest.mark.parametrize(
"dataset,config,split,cache_kind",
[
(DATASET_NAME, None, None, CACHE_KIND),
(DATASET_NAME, CONFIG_NAME_1, None, CACHE_KIND),
(DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME_1, CACHE_KIND),
],
)
def test_cache_state_is_success(dataset: str, config: Optional[str], split: Optional[str], cache_kind: str) -> None:
assert not CacheState(
dataset=dataset,
config=config,
split=split,
cache_kind=cache_kind,
cache_entries_df=get_cache_entries_df(dataset=dataset),
job_runner_version=JOB_RUNNER_VERSION,
).is_success
upsert_response(
kind=cache_kind, dataset=dataset, config=config, split=split, content={}, http_status=HTTPStatus.OK
)
assert CacheState(
dataset=dataset,
config=config,
split=split,
cache_kind=cache_kind,
cache_entries_df=get_cache_entries_df(dataset=dataset),
job_runner_version=JOB_RUNNER_VERSION,
).is_success
upsert_response(
kind=cache_kind,
dataset=dataset,
config=config,
split=split,
content={},
http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
assert not CacheState(
dataset=dataset,
config=config,
split=split,
cache_kind=cache_kind,
cache_entries_df=get_cache_entries_df(dataset=dataset),
job_runner_version=JOB_RUNNER_VERSION,
).is_success
delete_response(kind=cache_kind, dataset=dataset, config=config, split=split)
assert not CacheState(
dataset=dataset,
config=config,
split=split,
cache_kind=cache_kind,
cache_entries_df=get_cache_entries_df(dataset=dataset),
job_runner_version=JOB_RUNNER_VERSION,
).is_success
def test_artifact_state() -> None:
dataset = DATASET_NAME
revision = REVISION_NAME
config = None
split = None
processing_step_name = "dataset-a"
processing_step = PROCESSING_GRAPH.get_processing_step(processing_step_name)
artifact_state = ArtifactState(
dataset=dataset,
revision=revision,
config=config,
split=split,
processing_step=processing_step,
pending_jobs_df=Queue().get_pending_jobs_df(dataset=dataset),
cache_entries_df=get_cache_entries_df(dataset=dataset),
)
assert artifact_state.id == f"{processing_step_name},{dataset},{revision}"
assert not artifact_state.cache_state.exists
assert not artifact_state.cache_state.is_success
assert not artifact_state.job_state.is_in_process
def test_split_state() -> None:
dataset = DATASET_NAME
revision = REVISION_NAME
config = CONFIG_NAME_1
split = SPLIT_NAME_1
expected_split_processing_step_name = "split-c"
split_state = SplitState(
dataset=dataset,
revision=revision,
config=config,
split=split,
processing_graph=PROCESSING_GRAPH,
pending_jobs_df=Queue()._get_df(jobs=[]),
cache_entries_df=get_cache_entries_df(dataset=dataset),
)
assert split_state.dataset == dataset
assert split_state.revision == revision
assert split_state.config == config
assert split_state.split == split
assert len(split_state.artifact_state_by_step) == 1
assert expected_split_processing_step_name in split_state.artifact_state_by_step
artifact_state = split_state.artifact_state_by_step[expected_split_processing_step_name]
assert artifact_state.id == f"{expected_split_processing_step_name},{dataset},{revision},{config},{split}"
assert not artifact_state.cache_state.exists
assert not artifact_state.cache_state.is_success
assert not artifact_state.job_state.is_in_process
def test_config_state_as_dict() -> None:
dataset = DATASET_NAME
revision = REVISION_NAME
config = CONFIG_NAME_1
expected_config_processing_step_name = "config-b"
processing_step = PROCESSING_GRAPH.get_processing_step(expected_config_processing_step_name)
upsert_response(
kind=processing_step.cache_kind,
dataset=dataset,
config=config,
split=None,
content=SPLIT_NAMES_CONTENT,
http_status=HTTPStatus.OK,
)
config_state = ConfigState(
dataset=dataset,
revision=revision,
config=config,
processing_graph=PROCESSING_GRAPH,
pending_jobs_df=Queue()._get_df(jobs=[]),
cache_entries_df=get_cache_entries_df(dataset=dataset),
)
assert config_state.dataset == dataset
assert config_state.revision == revision
assert config_state.config == config
assert len(config_state.artifact_state_by_step) == 1
assert expected_config_processing_step_name in config_state.artifact_state_by_step
artifact_state = config_state.artifact_state_by_step[expected_config_processing_step_name]
assert artifact_state.id == f"{expected_config_processing_step_name},{dataset},{revision},{config}"
assert artifact_state.cache_state.exists # <- in the cache
assert artifact_state.cache_state.is_success # <- is a success
assert not artifact_state.job_state.is_in_process
assert config_state.split_names == SPLIT_NAMES
assert len(config_state.split_states) == len(SPLIT_NAMES)
assert config_state.split_states[0].split == SPLIT_NAMES[0]
assert config_state.split_states[1].split == SPLIT_NAMES[1]
def test_dataset_state_as_dict() -> None:
dataset = DATASET_NAME
revision = REVISION_NAME
expected_dataset_processing_step_name = "dataset-a"
dataset_step = PROCESSING_GRAPH.get_processing_step(expected_dataset_processing_step_name)
expected_config_processing_step_name = "config-b"
config_step = PROCESSING_GRAPH.get_processing_step(expected_config_processing_step_name)
upsert_response(
kind=dataset_step.cache_kind,
dataset=dataset,
config=None,
split=None,
content=CONFIG_NAMES_CONTENT,
http_status=HTTPStatus.OK,
)
upsert_response(
kind=config_step.cache_kind,
dataset=dataset,
config=CONFIG_NAME_1,
split=None,
content=SPLIT_NAMES_CONTENT,
http_status=HTTPStatus.OK,
)
dataset_state = DatasetState(
dataset=dataset,
revision=revision,
processing_graph=PROCESSING_GRAPH,
pending_jobs_df=Queue()._get_df(jobs=[]),
cache_entries_df=get_cache_entries_df(dataset=dataset),
)
assert dataset_state.dataset == dataset
assert dataset_state.revision == revision
assert len(dataset_state.artifact_state_by_step) == 1
assert expected_dataset_processing_step_name in dataset_state.artifact_state_by_step
artifact_state = dataset_state.artifact_state_by_step[expected_dataset_processing_step_name]
assert artifact_state.id == f"{expected_dataset_processing_step_name},{dataset},{revision}"
assert artifact_state.cache_state.exists # <- in the cache
assert artifact_state.cache_state.is_success # <- is a success
assert not artifact_state.job_state.is_in_process
assert dataset_state.config_names == CONFIG_NAMES
assert len(dataset_state.config_states) == len(CONFIG_NAMES)
assert dataset_state.config_states[0].config == CONFIG_NAMES[0]
assert dataset_state.config_states[1].config == CONFIG_NAMES[1]
| datasets-server-main | libs/libcommon/tests/test_state.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from http import HTTPStatus
import pytest
from libcommon.orchestrator import AfterJobPlan, DatasetOrchestrator
from libcommon.processing_graph import Artifact, ProcessingGraph
from libcommon.queue import JobDocument, Queue
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import CachedResponseDocument, upsert_response_params
from libcommon.utils import JobOutput, JobResult, Priority, Status
from .utils import (
ARTIFACT_CA_1,
ARTIFACT_CA_2,
ARTIFACT_DA,
ARTIFACT_DB,
ARTIFACT_DC,
ARTIFACT_DD,
ARTIFACT_DE,
ARTIFACT_DG,
ARTIFACT_DH,
CONFIG_NAMES_CONTENT,
DATASET_NAME,
DIFFICULTY,
JOB_RUNNER_VERSION,
PROCESSING_GRAPH_FAN_IN_OUT,
PROCESSING_GRAPH_GENEALOGY,
PROCESSING_GRAPH_ONE_STEP,
PROCESSING_GRAPH_PARALLEL,
REVISION_NAME,
STEP_CB,
STEP_DA,
STEP_DC,
STEP_DD,
artifact_id_to_job_info,
)
CACHE_MAX_DAYS = 90
@pytest.fixture(autouse=True)
def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
return queue_mongo_resource
@pytest.fixture(autouse=True)
def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
return cache_mongo_resource
@pytest.mark.parametrize(
"processing_graph,artifacts_to_create",
[
(PROCESSING_GRAPH_ONE_STEP, []),
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DC]),
(PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_CA_1, ARTIFACT_CA_2]),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DG, ARTIFACT_DH]),
],
)
def test_after_job_plan(
processing_graph: ProcessingGraph,
artifacts_to_create: list[str],
) -> None:
job_info = artifact_id_to_job_info(ARTIFACT_DA)
    # populate the cache (needed to get the config names in the PROCESSING_GRAPH_FAN_IN_OUT case)
upsert_response_params(
# inputs
kind=STEP_DA,
job_params=job_info["params"],
job_runner_version=JOB_RUNNER_VERSION,
# output
content=CONFIG_NAMES_CONTENT,
http_status=HTTPStatus.OK,
error_code=None,
details=None,
progress=1.0,
)
after_job_plan = AfterJobPlan(
processing_graph=processing_graph,
job_info=job_info,
)
if len(artifacts_to_create):
assert after_job_plan.as_response() == [f"CreateJobs,{len(artifacts_to_create)}"]
else:
assert after_job_plan.as_response() == []
after_job_plan.run()
pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
assert len(pending_jobs_df) == len(artifacts_to_create)
artifact_ids = [
Artifact.get_id(
dataset=row["dataset"],
revision=row["revision"],
config=row["config"],
split=row["split"],
processing_step_name=row["type"],
)
for _, row in pending_jobs_df.iterrows()
]
assert set(artifact_ids) == set(artifacts_to_create)
def test_after_job_plan_delete() -> None:
job_info = artifact_id_to_job_info(ARTIFACT_DA)
# create two jobs for DG, and none for DH
# one job should be deleted for DG, and one should be created for DH
Queue().create_jobs([artifact_id_to_job_info(ARTIFACT_DG)] * 2)
after_job_plan = AfterJobPlan(
processing_graph=PROCESSING_GRAPH_PARALLEL,
job_info=job_info,
)
assert after_job_plan.as_response() == ["CreateJobs,1", "DeleteJobs,1"]
after_job_plan.run()
pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
assert len(pending_jobs_df) == 2
artifact_ids = [
Artifact.get_id(
dataset=row["dataset"],
revision=row["revision"],
config=row["config"],
split=row["split"],
processing_step_name=row["type"],
)
for _, row in pending_jobs_df.iterrows()
]
assert artifact_ids == [ARTIFACT_DG, ARTIFACT_DH]
@pytest.mark.parametrize(
"processing_graph,artifacts_to_create",
[
(PROCESSING_GRAPH_ONE_STEP, []),
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DC]),
(PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_CA_1, ARTIFACT_CA_2]),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DG, ARTIFACT_DH]),
],
)
def test_finish_job(
processing_graph: ProcessingGraph,
artifacts_to_create: list[str],
) -> None:
Queue().add_job(
dataset=DATASET_NAME,
revision=REVISION_NAME,
config=None,
split=None,
job_type=STEP_DA,
priority=Priority.NORMAL,
difficulty=DIFFICULTY,
)
job_info = Queue().start_job()
job_result = JobResult(
job_info=job_info,
job_runner_version=JOB_RUNNER_VERSION,
is_success=True,
output=JobOutput(
content=CONFIG_NAMES_CONTENT,
http_status=HTTPStatus.OK,
error_code=None,
details=None,
progress=1.0,
),
)
dataset_orchestrator = DatasetOrchestrator(dataset=DATASET_NAME, processing_graph=processing_graph)
dataset_orchestrator.finish_job(job_result=job_result)
assert JobDocument.objects(dataset=DATASET_NAME).count() == 1 + len(artifacts_to_create)
done_job = JobDocument.objects(dataset=DATASET_NAME, status=Status.SUCCESS)
assert done_job.count() == 1
waiting_jobs = JobDocument.objects(dataset=DATASET_NAME, status=Status.WAITING)
assert waiting_jobs.count() == len(artifacts_to_create)
assert {job.type for job in waiting_jobs} == {Artifact.parse_id(artifact)[4] for artifact in artifacts_to_create}
assert CachedResponseDocument.objects(dataset=DATASET_NAME).count() == 1
cached_response = CachedResponseDocument.objects(dataset=DATASET_NAME).first()
assert cached_response
assert cached_response.content == CONFIG_NAMES_CONTENT
assert cached_response.http_status == HTTPStatus.OK
assert cached_response.error_code is None
assert cached_response.details == {}
assert cached_response.progress == 1.0
assert cached_response.job_runner_version == JOB_RUNNER_VERSION
assert cached_response.dataset_git_revision == REVISION_NAME
@pytest.mark.parametrize(
"processing_graph,first_artifacts",
[
(PROCESSING_GRAPH_ONE_STEP, [ARTIFACT_DA]),
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB]),
(PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DA]),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA]),
],
)
def test_set_revision(
processing_graph: ProcessingGraph,
first_artifacts: list[str],
) -> None:
dataset_orchestrator = DatasetOrchestrator(dataset=DATASET_NAME, processing_graph=processing_graph)
dataset_orchestrator.set_revision(
revision=REVISION_NAME, priority=Priority.NORMAL, error_codes_to_retry=[], cache_max_days=CACHE_MAX_DAYS
)
pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
assert len(pending_jobs_df) == len(first_artifacts)
artifact_ids = [
Artifact.get_id(
dataset=row["dataset"],
revision=row["revision"],
config=row["config"],
split=row["split"],
processing_step_name=row["type"],
)
for _, row in pending_jobs_df.iterrows()
]
assert set(artifact_ids) == set(first_artifacts)
@pytest.mark.parametrize(
"processing_graph,first_artifacts",
[
(PROCESSING_GRAPH_ONE_STEP, [ARTIFACT_DA]),
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB]),
(PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DA]),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA]),
],
)
def test_set_revision_handle_existing_jobs(
processing_graph: ProcessingGraph,
first_artifacts: list[str],
) -> None:
# create two pending jobs for DA
Queue().create_jobs([artifact_id_to_job_info(ARTIFACT_DA)] * 2)
dataset_orchestrator = DatasetOrchestrator(dataset=DATASET_NAME, processing_graph=processing_graph)
dataset_orchestrator.set_revision(
revision=REVISION_NAME, priority=Priority.NORMAL, error_codes_to_retry=[], cache_max_days=CACHE_MAX_DAYS
)
pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
assert len(pending_jobs_df) == len(first_artifacts)
artifact_ids = [
Artifact.get_id(
dataset=row["dataset"],
revision=row["revision"],
config=row["config"],
split=row["split"],
processing_step_name=row["type"],
)
for _, row in pending_jobs_df.iterrows()
]
assert set(artifact_ids) == set(first_artifacts)
@pytest.mark.parametrize(
"processing_graph,pending_artifacts,processing_step_names,expected_has_pending_ancestor_jobs",
[
(PROCESSING_GRAPH_ONE_STEP, [ARTIFACT_DA], [STEP_DA], True),
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB], [STEP_DA], True),
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB], [STEP_DD], True),
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DD], [STEP_DC], False),
(PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DA], [STEP_CB], True),
(PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DE], [STEP_CB], False),
],
)
def test_has_pending_ancestor_jobs(
processing_graph: ProcessingGraph,
pending_artifacts: list[str],
processing_step_names: list[str],
expected_has_pending_ancestor_jobs: bool,
) -> None:
Queue().create_jobs([artifact_id_to_job_info(artifact) for artifact in pending_artifacts])
dataset_orchestrator = DatasetOrchestrator(dataset=DATASET_NAME, processing_graph=processing_graph)
assert dataset_orchestrator.has_pending_ancestor_jobs(processing_step_names) == expected_has_pending_ancestor_jobs
| datasets-server-main | libs/libcommon/tests/test_orchestrator.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from pathlib import Path
from typing import Optional
import pytest
from libcommon.constants import ASSETS_CACHE_APPNAME
from libcommon.storage import StrPath, init_assets_dir, init_dir, remove_dir
@pytest.mark.parametrize(
"has_directory,is_directory_string,has_appname",
[
(False, False, False),
(False, False, True),
(False, True, False),
(False, True, True),
(True, False, False),
(True, False, True),
(True, True, False),
(True, True, True),
],
)
def test_init_dir(
tmp_path_factory: pytest.TempPathFactory, has_directory: bool, is_directory_string: bool, has_appname: bool
) -> None:
subdirectory = "subdirectory"
tmp_path = tmp_path_factory.mktemp("test") / subdirectory
appname = "appname" if has_appname else None
directory: Optional[StrPath]
if has_directory:
directory = str(tmp_path) if is_directory_string else tmp_path
result = init_dir(directory=directory, appname=appname)
assert result == directory
assert subdirectory in str(result), result
if appname is not None:
assert appname not in str(result), result
else:
directory = None
result = init_dir(directory=directory, appname=appname)
assert result != directory, result
assert subdirectory not in str(result), result
assert type(result) is str, result
if appname:
assert appname in str(result), result
    assert Path(result).exists()
    assert Path(result).is_dir()
@pytest.mark.parametrize(
"has_directory,is_directory_string",
[
(False, False),
(False, False),
(False, True),
(False, True),
(True, False),
(True, False),
(True, True),
(True, True),
],
)
def test_init_assets_dir(
tmp_path_factory: pytest.TempPathFactory, has_directory: bool, is_directory_string: bool
) -> None:
subdirectory = "subdirectory"
tmp_path = tmp_path_factory.mktemp("test") / subdirectory
directory: Optional[StrPath]
if has_directory:
directory = str(tmp_path) if is_directory_string else tmp_path
result = init_assets_dir(directory=directory)
assert result == directory
assert subdirectory in str(result), result
assert ASSETS_CACHE_APPNAME not in str(result), result
else:
directory = None
result = init_assets_dir(directory=directory)
assert result != directory, result
assert subdirectory not in str(result), result
assert type(result) is str, result
assert ASSETS_CACHE_APPNAME in str(result), result
    assert Path(result).exists()
    assert Path(result).is_dir()
@pytest.mark.parametrize(
"exists,is_string",
[
(False, False),
(True, False),
(False, True),
(True, True),
],
)
def test_remove_dir(tmp_path_factory: pytest.TempPathFactory, exists: bool, is_string: bool) -> None:
subdirectory = "subdirectory"
tmp_path = tmp_path_factory.mktemp("test") / subdirectory
tmp_file = tmp_path / "file.txt"
if exists:
tmp_path.mkdir(parents=True, exist_ok=True)
tmp_file.touch()
assert tmp_path.exists() is exists
assert tmp_path.is_dir() is exists
assert tmp_file.exists() is exists
assert tmp_file.is_file() is exists
directory: StrPath = str(tmp_path) if is_string else tmp_path
remove_dir(directory)
assert not tmp_path.exists()
assert not tmp_path.is_dir()
assert not tmp_file.exists()
assert not tmp_file.is_file()
| datasets-server-main | libs/libcommon/tests/test_storage.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import json
import os
import random
import time
from datetime import datetime, timedelta
from multiprocessing import Pool
from pathlib import Path
from typing import Optional
from unittest.mock import patch
import pytest
import pytz
from libcommon.constants import QUEUE_TTL_SECONDS
from libcommon.queue import (
EmptyQueueError,
JobDocument,
JobTotalMetricDocument,
Lock,
Queue,
lock,
)
from libcommon.resources import QueueMongoResource
from libcommon.utils import Priority, Status, get_datetime
from .utils import assert_metric
def get_old_datetime() -> datetime:
    # Beware: the TTL index is set to 10 minutes, so it deletes finished jobs 10 minutes after they finish.
    # We therefore have to use a datetime that is less than 10 minutes old (here, half of QUEUE_TTL_SECONDS).
return get_datetime() - timedelta(seconds=(QUEUE_TTL_SECONDS / 2))
@pytest.fixture(autouse=True)
def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
return queue_mongo_resource
def test_add_job() -> None:
test_type = "test_type"
test_dataset = "test_dataset"
test_revision = "test_revision"
test_difficulty = 50
# get the queue
queue = Queue()
assert JobTotalMetricDocument.objects().count() == 0
# add a job
job1 = queue.add_job(job_type=test_type, dataset=test_dataset, revision=test_revision, difficulty=test_difficulty)
assert_metric(job_type=test_type, status=Status.WAITING, total=1)
# a second call adds a second waiting job
job2 = queue.add_job(job_type=test_type, dataset=test_dataset, revision=test_revision, difficulty=test_difficulty)
assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision)
assert_metric(job_type=test_type, status=Status.WAITING, total=2)
    # get and start a job: the second one should have been picked
job_info = queue.start_job()
assert job2.reload().status == Status.STARTED
assert job_info["type"] == test_type
assert job_info["params"]["dataset"] == test_dataset
assert job_info["params"]["revision"] == test_revision
assert job_info["params"]["config"] is None
assert job_info["params"]["split"] is None
assert_metric(job_type=test_type, status=Status.WAITING, total=1)
assert_metric(job_type=test_type, status=Status.STARTED, total=1)
# and the first job should have been cancelled
assert job1.reload().status == Status.CANCELLED
assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision)
# adding the job while the first one has not finished yet adds another waiting job
# (there are no limits to the number of waiting jobs)
job3 = queue.add_job(job_type=test_type, dataset=test_dataset, revision=test_revision, difficulty=test_difficulty)
assert job3.status == Status.WAITING
assert_metric(job_type=test_type, status=Status.WAITING, total=2)
assert_metric(job_type=test_type, status=Status.STARTED, total=1)
with pytest.raises(EmptyQueueError):
# but: it's not possible to start two jobs with the same arguments
queue.start_job()
# finish the first job
queue.finish_job(job_id=job_info["job_id"], is_success=True)
# the queue is not empty
assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision)
assert_metric(job_type=test_type, status=Status.WAITING, total=2)
assert_metric(job_type=test_type, status=Status.STARTED, total=0)
assert_metric(job_type=test_type, status=Status.SUCCESS, total=1)
# process the third job
job_info = queue.start_job()
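    # forge another job id with the same length and format by flipping its first character
    # between "0" and "1", so it is valid-looking but cannot be the id of the job just started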
other_job_id = ("1" if job_info["job_id"][0] == "0" else "0") + job_info["job_id"][1:]
assert_metric(job_type=test_type, status=Status.WAITING, total=1)
assert_metric(job_type=test_type, status=Status.STARTED, total=1)
assert_metric(job_type=test_type, status=Status.SUCCESS, total=1)
# trying to finish another job fails silently (with a log)
queue.finish_job(job_id=other_job_id, is_success=True)
assert_metric(job_type=test_type, status=Status.WAITING, total=1)
assert_metric(job_type=test_type, status=Status.STARTED, total=1)
assert_metric(job_type=test_type, status=Status.SUCCESS, total=1)
# finish it
queue.finish_job(job_id=job_info["job_id"], is_success=True)
assert_metric(job_type=test_type, status=Status.WAITING, total=1)
assert_metric(job_type=test_type, status=Status.STARTED, total=0)
assert_metric(job_type=test_type, status=Status.SUCCESS, total=2)
# the queue is empty
assert not queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision)
with pytest.raises(EmptyQueueError):
# an error is raised if we try to start a job
queue.start_job()
@pytest.mark.parametrize(
"jobs_ids,job_ids_to_cancel,expected_canceled_number",
[
(["a", "b"], ["a", "b"], 2),
(["a", "b"], ["a"], 1),
(["a"], ["a", "b"], 1),
],
)
def test_cancel_jobs_by_job_id(
jobs_ids: list[str], job_ids_to_cancel: list[str], expected_canceled_number: int
) -> None:
test_type = "test_type"
test_difficulty = 50
queue = Queue()
    # we cannot really set job_id, so we create jobs and read their job ids back, using the dataset name as a proxy
real_job_ids_to_cancel = []
waiting_jobs = 0
for job_id in list(set(jobs_ids + job_ids_to_cancel)):
job = queue.add_job(job_type=test_type, dataset=job_id, revision="test_revision", difficulty=test_difficulty)
waiting_jobs += 1
assert_metric(job_type=test_type, status=Status.WAITING, total=waiting_jobs)
if job_id in job_ids_to_cancel:
real_job_id = job.info()["job_id"]
real_job_ids_to_cancel.append(real_job_id)
if job_id not in jobs_ids:
            # delete the job to simulate that it never existed (we just wanted a valid job_id)
job.delete()
queue.start_job()
assert_metric(job_type=test_type, status=Status.WAITING, total=1)
assert_metric(job_type=test_type, status=Status.STARTED, total=1)
canceled_number = queue.cancel_jobs_by_job_id(job_ids=real_job_ids_to_cancel)
assert canceled_number == expected_canceled_number
assert_metric(job_type=test_type, status=Status.CANCELLED, total=expected_canceled_number)
def test_cancel_jobs_by_job_id_wrong_format() -> None:
queue = Queue()
assert queue.cancel_jobs_by_job_id(job_ids=["not_a_valid_job_id"]) == 0
assert JobTotalMetricDocument.objects().count() == 0
def check_job(queue: Queue, expected_dataset: str, expected_split: str, expected_priority: Priority) -> None:
job_info = queue.start_job()
assert job_info["params"]["dataset"] == expected_dataset
assert job_info["params"]["split"] == expected_split
assert job_info["priority"] == expected_priority
def test_priority_logic_creation_order() -> None:
test_type = "test_type"
test_revision = "test_revision"
test_difficulty = 50
queue = Queue()
queue.add_job(
job_type=test_type,
dataset="dataset1",
revision=test_revision,
config="config",
split="split1",
difficulty=test_difficulty,
)
queue.add_job(
job_type=test_type,
dataset="dataset1",
revision=test_revision,
config="config",
split="split2",
difficulty=test_difficulty,
)
check_job(queue=queue, expected_dataset="dataset1", expected_split="split1", expected_priority=Priority.LOW)
check_job(queue=queue, expected_dataset="dataset1", expected_split="split2", expected_priority=Priority.LOW)
with pytest.raises(EmptyQueueError):
queue.start_job()
def test_priority_logic_started_jobs_per_dataset_order() -> None:
test_type = "test_type"
test_revision = "test_revision"
test_difficulty = 50
queue = Queue()
queue.add_job(
job_type=test_type,
dataset="dataset1",
revision=test_revision,
config="config",
split="split1",
difficulty=test_difficulty,
)
queue.add_job(
job_type=test_type,
dataset="dataset1",
revision=test_revision,
config="config",
split="split2",
difficulty=test_difficulty,
)
queue.add_job(
job_type=test_type,
dataset="dataset2",
revision=test_revision,
config="config",
split="split1",
difficulty=test_difficulty,
)
check_job(queue=queue, expected_dataset="dataset1", expected_split="split1", expected_priority=Priority.LOW)
check_job(queue=queue, expected_dataset="dataset2", expected_split="split1", expected_priority=Priority.LOW)
    # ^ picked first, even though it was created later, because its dataset is different and has no started job yet
check_job(queue=queue, expected_dataset="dataset1", expected_split="split2", expected_priority=Priority.LOW)
with pytest.raises(EmptyQueueError):
queue.start_job()
def test_priority_logic_started_jobs_per_namespace_order() -> None:
test_type = "test_type"
test_revision = "test_revision"
test_difficulty = 50
queue = Queue()
queue.add_job(
job_type=test_type,
dataset="org1/dataset1",
revision=test_revision,
config="config",
split="split1",
difficulty=test_difficulty,
)
queue.add_job(
job_type=test_type,
dataset="org1/dataset2",
revision=test_revision,
config="config",
split="split1",
difficulty=test_difficulty,
)
queue.add_job(
job_type=test_type,
dataset="org2/dataset2",
revision=test_revision,
config="config",
split="split1",
difficulty=test_difficulty,
)
queue.add_job(
job_type=test_type,
dataset="no_org_dataset3",
revision=test_revision,
config="config",
split="split1",
difficulty=test_difficulty,
)
check_job(queue=queue, expected_dataset="org1/dataset1", expected_split="split1", expected_priority=Priority.LOW)
check_job(queue=queue, expected_dataset="org2/dataset2", expected_split="split1", expected_priority=Priority.LOW)
    # ^ picked first, even though it was created later, because its namespace is different and has no started job yet
check_job(queue=queue, expected_dataset="no_org_dataset3", expected_split="split1", expected_priority=Priority.LOW)
check_job(queue=queue, expected_dataset="org1/dataset2", expected_split="split1", expected_priority=Priority.LOW)
with pytest.raises(EmptyQueueError):
queue.start_job()
def test_priority_logic_priority_order() -> None:
test_type = "test_type"
test_revision = "test_revision"
test_difficulty = 50
queue = Queue()
queue.add_job(
job_type=test_type,
dataset="dataset1",
revision=test_revision,
config="config",
split="split1",
difficulty=test_difficulty,
)
queue.add_job(
job_type=test_type,
dataset="dataset2",
revision=test_revision,
config="config",
split="split1",
priority=Priority.NORMAL,
difficulty=test_difficulty,
)
check_job(queue=queue, expected_dataset="dataset2", expected_split="split1", expected_priority=Priority.NORMAL)
    # ^ picked first, even though it was created later, because its priority is higher
check_job(queue=queue, expected_dataset="dataset1", expected_split="split1", expected_priority=Priority.LOW)
with pytest.raises(EmptyQueueError):
queue.start_job()
@pytest.mark.parametrize(
"job_types_blocked,job_types_only,should_raise",
[
(None, None, False),
(None, ["test_type"], False),
(["other_type"], None, False),
(["other_type"], ["test_type"], False),
(None, ["other_type"], True),
(["test_type"], None, True),
(["test_type"], ["test_type"], True),
(["other_type", "test_type"], None, True),
(["other_type"], ["other_type"], True),
(["other_type", "test_type"], ["other_type", "test_type"], True),
],
)
def test_job_types_only(
job_types_blocked: Optional[list[str]], job_types_only: Optional[list[str]], should_raise: bool
) -> None:
job_type = "test_type"
test_dataset = "test_dataset"
test_revision = "test_revision"
test_difficulty = 50
queue = Queue()
queue.add_job(
job_type=job_type,
dataset=test_dataset,
revision=test_revision,
config=None,
split=None,
difficulty=test_difficulty,
)
assert queue.is_job_in_process(
job_type=job_type, dataset=test_dataset, revision=test_revision, config=None, split=None
)
if should_raise:
with pytest.raises(EmptyQueueError):
queue.start_job(job_types_blocked=job_types_blocked, job_types_only=job_types_only)
else:
job_info = queue.start_job(job_types_blocked=job_types_blocked, job_types_only=job_types_only)
assert job_info["params"]["dataset"] == test_dataset
@pytest.mark.parametrize(
"difficulty_min,difficulty_max,should_raise",
[
(None, None, False),
(None, 60, False),
(40, None, False),
(40, 60, False),
(50, 50, False),
(None, 40, True),
(60, None, True),
(60, 60, True),
(40, 40, True),
(55, 60, True),
(40, 45, True),
],
)
def test_difficulty(difficulty_min: Optional[int], difficulty_max: Optional[int], should_raise: bool) -> None:
job_type = "test_type"
test_dataset = "test_dataset"
test_revision = "test_revision"
test_difficulty = 50
queue = Queue()
queue.add_job(
job_type=job_type,
dataset=test_dataset,
revision=test_revision,
config=None,
split=None,
difficulty=test_difficulty,
)
assert queue.is_job_in_process(
job_type=job_type, dataset=test_dataset, revision=test_revision, config=None, split=None
)
if should_raise:
with pytest.raises(EmptyQueueError):
queue.start_job(difficulty_max=difficulty_max, difficulty_min=difficulty_min)
else:
job_info = queue.start_job(difficulty_max=difficulty_max, difficulty_min=difficulty_min)
assert job_info["params"]["dataset"] == test_dataset
def test_count_by_status() -> None:
test_type = "test_type"
test_other_type = "test_other_type"
test_dataset = "test_dataset"
test_revision = "test_revision"
test_difficulty = 50
queue = Queue()
expected_empty = {"waiting": 0, "started": 0, "success": 0, "error": 0, "cancelled": 0}
expected_one_waiting = {"waiting": 1, "started": 0, "success": 0, "error": 0, "cancelled": 0}
assert queue.get_jobs_count_by_status(job_type=test_type) == expected_empty
assert queue.get_jobs_count_by_status(job_type=test_other_type) == expected_empty
queue.add_job(job_type=test_type, dataset=test_dataset, revision=test_revision, difficulty=test_difficulty)
assert queue.get_jobs_count_by_status(job_type=test_type) == expected_one_waiting
assert queue.get_jobs_count_by_status(job_type=test_other_type) == expected_empty
queue.add_job(job_type=test_other_type, dataset=test_dataset, revision=test_revision, difficulty=test_difficulty)
assert queue.get_jobs_count_by_status(job_type=test_type) == expected_one_waiting
assert queue.get_jobs_count_by_status(job_type=test_other_type) == expected_one_waiting
def test_get_dataset_pending_jobs_for_type() -> None:
queue = Queue()
test_type = "test_type"
test_difficulty = 50
test_another_type = "test_another_type"
test_dataset = "test_dataset"
test_another_dataset = "test_another_dataset"
test_revision = "test_revision"
test_configs_waiting = ["test_config_waiting_1", "test_config_waiting_2"]
test_configs_started = ["test_config_started_1", "test_config_started_2"]
test_configs_finished = ["test_config_finished_1", "test_config_finished_2"]
for config in test_configs_finished:
for dataset in [test_dataset, test_another_dataset]:
for job_type in [test_type, test_another_type]:
queue.add_job(
job_type=job_type,
dataset=dataset,
revision=test_revision,
config=config,
split=None,
difficulty=test_difficulty,
)
job_info = queue.start_job()
queue.finish_job(job_info["job_id"], is_success=True)
for config in test_configs_started:
for dataset in [test_dataset, test_another_dataset]:
for job_type in [test_type, test_another_type]:
queue.add_job(
job_type=job_type,
dataset=dataset,
revision=test_revision,
config=config,
split=None,
difficulty=test_difficulty,
)
job_info = queue.start_job()
for config in test_configs_waiting:
for dataset in [test_dataset, test_another_dataset]:
for job_type in [test_type, test_another_type]:
queue.add_job(
job_type=job_type,
dataset=dataset,
revision=test_revision,
config=config,
split=None,
difficulty=test_difficulty,
)
result = queue.get_dataset_pending_jobs_for_type(dataset=test_dataset, job_type=test_type)
assert len(result) == len(test_configs_waiting) + len(test_configs_started)
for r in result:
assert r["dataset"] == test_dataset
assert r["type"] == test_type
assert r["status"] in [Status.WAITING.value, Status.STARTED.value]
def test_queue_heartbeat() -> None:
job_type = "test_type"
test_difficulty = 50
queue = Queue()
job = queue.add_job(
job_type=job_type,
dataset="dataset1",
revision="revision",
config="config",
split="split1",
difficulty=test_difficulty,
)
queue.start_job(job_types_only=[job_type])
assert job.last_heartbeat is None
queue.heartbeat(job.pk)
job.reload()
assert job.last_heartbeat is not None
last_heartbeat_datetime = pytz.UTC.localize(job.last_heartbeat)
assert last_heartbeat_datetime >= get_datetime() - timedelta(seconds=1)
def test_queue_get_zombies() -> None:
job_type = "test_type"
test_difficulty = 50
queue = Queue()
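    # get_datetime is patched to return an old datetime, so the first started job looks stale
    # (no recent heartbeat) and is reported as a zombie, while the recently started one is not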
with patch("libcommon.queue.get_datetime", get_old_datetime):
zombie = queue.add_job(
job_type=job_type,
dataset="dataset1",
revision="revision",
config="config",
split="split1",
difficulty=test_difficulty,
)
queue.start_job(job_types_only=[job_type])
queue.add_job(
job_type=job_type,
dataset="dataset1",
revision="revision",
config="config",
split="split2",
difficulty=test_difficulty,
)
queue.start_job(job_types_only=[job_type])
assert queue.get_zombies(max_seconds_without_heartbeat=10) == [zombie.info()]
assert queue.get_zombies(max_seconds_without_heartbeat=-1) == []
assert queue.get_zombies(max_seconds_without_heartbeat=0) == []
assert queue.get_zombies(max_seconds_without_heartbeat=9999999) == []
def test_has_ttl_index_on_finished_at_field() -> None:
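    # MongoDB TTL indexes delete documents automatically once `expireAfterSeconds` have elapsed
    # after the datetime stored in the indexed field, here `finished_at`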
ttl_index_names = [
name
for name, value in JobDocument._get_collection().index_information().items()
if "expireAfterSeconds" in value and "key" in value and value["key"] == [("finished_at", 1)]
]
assert len(ttl_index_names) == 1
ttl_index_name = ttl_index_names[0]
assert ttl_index_name == "finished_at_1"
assert JobDocument._get_collection().index_information()[ttl_index_name]["expireAfterSeconds"] == QUEUE_TTL_SECONDS
def random_sleep() -> None:
MAX_SLEEP_MS = 40
time.sleep(MAX_SLEEP_MS / 1000 * random.random())
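# increment() is deliberately non-atomic (read, sleep, then write back), so concurrent
# unlocked calls would lose updates; the lock tests below rely on this to detect a broken lock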
def increment(tmp_file: Path) -> None:
random_sleep()
with open(tmp_file, "r") as f:
current = int(f.read() or 0)
random_sleep()
with open(tmp_file, "w") as f:
f.write(str(current + 1))
random_sleep()
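# the `sleeps` list below is passed to `lock` and is assumed (from its use here) to be the
# schedule of waits between successive attempts to acquire the lock while it is held elsewhere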
def locked_increment(tmp_file: Path) -> None:
sleeps = [0.05, 0.05, 0.05, 1, 1, 1, 1, 1, 1, 5, 5, 5, 5]
with lock(key="test_lock", owner=str(os.getpid()), sleeps=sleeps):
increment(tmp_file)
def test_lock(tmp_path_factory: pytest.TempPathFactory, queue_mongo_resource: QueueMongoResource) -> None:
tmp_file = Path(tmp_path_factory.mktemp("test_lock") / "tmp.txt")
tmp_file.touch()
max_parallel_jobs = 4
num_jobs = 42
with Pool(max_parallel_jobs, initializer=queue_mongo_resource.allocate) as pool:
pool.map(locked_increment, [tmp_file] * num_jobs)
expected = num_jobs
with open(tmp_file, "r") as f:
assert int(f.read()) == expected
Lock.objects(key="test_lock").delete()
def git_branch_locked_increment(tmp_file: Path) -> None:
sleeps = [0.05, 0.05, 0.05, 1, 1, 1, 1, 1, 1, 5, 5, 5, 5]
dataset = "dataset"
branch = "refs/convert/parquet"
with lock.git_branch(dataset=dataset, branch=branch, owner=str(os.getpid()), sleeps=sleeps):
increment(tmp_file)
def test_lock_git_branch(tmp_path_factory: pytest.TempPathFactory, queue_mongo_resource: QueueMongoResource) -> None:
tmp_file = Path(tmp_path_factory.mktemp("test_lock") / "tmp.txt")
tmp_file.touch()
max_parallel_jobs = 5
num_jobs = 43
with Pool(max_parallel_jobs, initializer=queue_mongo_resource.allocate) as pool:
pool.map(git_branch_locked_increment, [tmp_file] * num_jobs)
expected = num_jobs
with open(tmp_file, "r") as f:
assert int(f.read()) == expected
assert Lock.objects().count() == 1
assert Lock.objects().get().key == json.dumps({"dataset": "dataset", "branch": "refs/convert/parquet"})
assert Lock.objects().get().owner is None
Lock.objects().delete()
| datasets-server-main | libs/libcommon/tests/test_queue.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from datetime import datetime
from http import HTTPStatus
from typing import Any, Optional
from libcommon.orchestrator import DatasetBackfillPlan
from libcommon.processing_graph import Artifact, ProcessingGraph
from libcommon.queue import JobTotalMetricDocument, Queue
from libcommon.simple_cache import upsert_response
from libcommon.utils import JobInfo, Priority
DATASET_NAME = "dataset"
REVISION_NAME = "revision"
CONFIG_NAME_1 = "config1"
CONFIG_NAME_2 = "config2"
CONFIG_NAMES = [CONFIG_NAME_1, CONFIG_NAME_2]
CONFIG_NAMES_CONTENT = {"config_names": [{"config": config_name} for config_name in CONFIG_NAMES]}
SPLIT_NAME_1 = "split1"
SPLIT_NAME_2 = "split2"
SPLIT_NAMES = [SPLIT_NAME_1, SPLIT_NAME_2]
SPLIT_NAMES_CONTENT = {
"splits": [{"dataset": DATASET_NAME, "config": CONFIG_NAME_1, "split": split_name} for split_name in SPLIT_NAMES]
}
CACHE_MAX_DAYS = 90
CACHE_KIND = "cache_kind"
CONTENT_ERROR = {"error": "error"}
JOB_TYPE = "job_type"
DIFFICULTY = 50
STEP_DATASET_A = "dataset-a"
STEP_CONFIG_B = "config-b"
STEP_SPLIT_C = "split-c"
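# a minimal three-level graph: dataset-a triggers config-b, which triggers split-c
# (mirroring the dataset -> config -> split hierarchy)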
PROCESSING_GRAPH = ProcessingGraph(
processing_graph_specification={
STEP_DATASET_A: {"input_type": "dataset", "provides_dataset_config_names": True},
STEP_CONFIG_B: {"input_type": "config", "provides_config_split_names": True, "triggered_by": STEP_DATASET_A},
STEP_SPLIT_C: {"input_type": "split", "triggered_by": STEP_CONFIG_B},
}
)
OTHER_REVISION_NAME = f"other_{REVISION_NAME}"
STEP_DA = "dataset-a"
STEP_DB = "dataset-b"
STEP_DC = "dataset-c"
STEP_DD = "dataset-d"
STEP_DE = "dataset-e"
STEP_DF = "dataset-f"
STEP_DG = "dataset-g"
STEP_DH = "dataset-h"
STEP_DI = "dataset-i"
ARTIFACT_DA = f"{STEP_DA},{DATASET_NAME},{REVISION_NAME}"
ARTIFACT_DA_OTHER_REVISION = f"{STEP_DA},{DATASET_NAME},{OTHER_REVISION_NAME}"
ARTIFACT_DB = f"{STEP_DB},{DATASET_NAME},{REVISION_NAME}"
ARTIFACT_DC = f"{STEP_DC},{DATASET_NAME},{REVISION_NAME}"
ARTIFACT_DD = f"{STEP_DD},{DATASET_NAME},{REVISION_NAME}"
ARTIFACT_DE = f"{STEP_DE},{DATASET_NAME},{REVISION_NAME}"
ARTIFACT_DF = f"{STEP_DF},{DATASET_NAME},{REVISION_NAME}"
ARTIFACT_DG = f"{STEP_DG},{DATASET_NAME},{REVISION_NAME}"
ARTIFACT_DH = f"{STEP_DH},{DATASET_NAME},{REVISION_NAME}"
ARTIFACT_DI = f"{STEP_DI},{DATASET_NAME},{REVISION_NAME}"
STEP_CA = "config-a"
STEP_CB = "config-b"
ARTIFACT_CA_1 = f"{STEP_CA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1}"
ARTIFACT_CA_2 = f"{STEP_CA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2}"
ARTIFACT_CB_1 = f"{STEP_CB},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1}"
ARTIFACT_CB_2 = f"{STEP_CB},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2}"
STEP_SA = "split-a"
ARTIFACT_SA_1_1 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1},{SPLIT_NAME_1}"
ARTIFACT_SA_1_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1},{SPLIT_NAME_2}"
ARTIFACT_SA_2_1 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SPLIT_NAME_1}"
ARTIFACT_SA_2_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SPLIT_NAME_2}"
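# Note: these artifact ids follow the "step,dataset,revision[,config[,split]]" format and are
# converted back and forth with Artifact.get_id() / Artifact.parse_id() in the tests below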
# Graph to test only one step
#
# +-------+
# | DA |
# +-------+
#
PROCESSING_GRAPH_ONE_STEP = ProcessingGraph(
processing_graph_specification={
STEP_DA: {"input_type": "dataset"},
}
)
# Graph to test siblings, children, grand-children, multiple parents
#
# +-------+ +-------+
# | DA | | DB |
# +-------+ +-------+
# | |
# | +----+
# | | |
# +-------+ |
# | DC | |
# +-------+ |
# | |
# | +----+
# | |
# +-------+
# | DD |
# +-------+
#
PROCESSING_GRAPH_GENEALOGY = ProcessingGraph(
processing_graph_specification={
STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
STEP_DB: {"input_type": "dataset"}, # sibling
STEP_DC: {"input_type": "dataset", "triggered_by": [STEP_DA, STEP_DB]}, # child
STEP_DD: {"input_type": "dataset", "triggered_by": [STEP_DB, STEP_DC]}, # grandchild
}
)
# Graph to test fan-in, fan-out
#
# +-------+
# | DA |
# +-------+
# |
# ⩚
# +-------+
# | CA |
# +-------+
# | ⩛
# | +-----+
# ⩚ |
# +-------+ +-------+
# | SA | | DE |
# +-------+ +-------+
# ⩛ ⩛
# | +-----+
# | |
# +-------+ +-------+
# | CB | | DF |
# +-------+ +-------+
#
PROCESSING_GRAPH_FAN_IN_OUT = ProcessingGraph(
processing_graph_specification={
STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
STEP_CA: {
"input_type": "config",
"triggered_by": STEP_DA,
"provides_config_split_names": True,
}, # fan-out (D->C)
STEP_SA: {"input_type": "split", "triggered_by": STEP_CA}, # fan-out (C -> S)
# is fan-out (D -> S) possible? (we need the list of split names anyway)
STEP_DE: {"input_type": "dataset", "triggered_by": STEP_CA}, # fan-in (C -> D)
STEP_CB: {"input_type": "config", "triggered_by": STEP_SA}, # fan-in (S -> C)
STEP_DF: {"input_type": "dataset", "triggered_by": STEP_SA}, # fan-in (S -> D)
}
)
# Graph to test parallel steps (i.e. two steps that compute the same thing, and abort if the other one already exists)
#
# +-------+
# | DA |
# +-------+
# |
# +---------+
# | |
# +-------+ +-------+
# | DG | | DH |
# +-------+ +-------+
# | |
# +---------+
# |
# +-------+
# | DI |
# +-------+
#
PROCESSING_GRAPH_PARALLEL = ProcessingGraph(
processing_graph_specification={
STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
STEP_DG: {"input_type": "dataset", "triggered_by": STEP_DA},
STEP_DH: {"input_type": "dataset", "triggered_by": STEP_DA},
STEP_DI: {"input_type": "dataset", "triggered_by": [STEP_DG, STEP_DH]},
}
)
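# The graphs above are queried by step name in the tests, e.g. (a usage sketch based on the
# calls already made elsewhere in this test suite):
#   step = PROCESSING_GRAPH_PARALLEL.get_processing_step(STEP_DI)
#   # step.cache_kind is then used as the `kind` argument of upsert_response()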
JOB_RUNNER_VERSION = 1
def get_dataset_backfill_plan(
processing_graph: ProcessingGraph,
dataset: str = DATASET_NAME,
revision: str = REVISION_NAME,
error_codes_to_retry: Optional[list[str]] = None,
cache_max_days: Optional[int] = None,
) -> DatasetBackfillPlan:
return DatasetBackfillPlan(
dataset=dataset,
revision=revision,
processing_graph=processing_graph,
error_codes_to_retry=error_codes_to_retry,
cache_max_days=CACHE_MAX_DAYS if cache_max_days is None else cache_max_days,
)
def assert_equality(value: Any, expected: Any, context: Optional[str] = None) -> None:
report = {"expected": expected, "got": value}
if context is not None:
report["additional"] = context
assert value == expected, report
def assert_dataset_backfill_plan(
dataset_backfill_plan: DatasetBackfillPlan,
cache_status: dict[str, list[str]],
queue_status: dict[str, list[str]],
tasks: list[str],
config_names: Optional[list[str]] = None,
split_names_in_first_config: Optional[list[str]] = None,
) -> None:
if config_names is not None:
assert_equality(dataset_backfill_plan.dataset_state.config_names, config_names, context="config_names")
assert_equality(
len(dataset_backfill_plan.dataset_state.config_states), len(config_names), context="config_states"
)
if len(config_names) and split_names_in_first_config is not None:
assert_equality(
dataset_backfill_plan.dataset_state.config_states[0].split_names,
split_names_in_first_config,
context="split_names",
)
computed_cache_status = dataset_backfill_plan.cache_status.as_response()
for key, value in cache_status.items():
assert_equality(computed_cache_status[key], sorted(value), key)
assert_equality(
dataset_backfill_plan.get_queue_status().as_response(),
{key: sorted(value) for key, value in queue_status.items()},
context="queue_status",
)
assert_equality(dataset_backfill_plan.as_response(), sorted(tasks), context="tasks")
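# put_cache simulates the output of a job by writing its cache entry directly:
# a successful entry by default, or a failed one when `error_code` is given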
def put_cache(
step: str,
dataset: str,
revision: str,
config: Optional[str] = None,
split: Optional[str] = None,
error_code: Optional[str] = None,
use_old_job_runner_version: Optional[bool] = False,
updated_at: Optional[datetime] = None,
) -> None:
if not config:
if not step.startswith("dataset-"):
raise ValueError("Unexpected artifact: should start with dataset-")
content = CONFIG_NAMES_CONTENT
config = None
split = None
elif not split:
if not step.startswith("config-"):
raise ValueError("Unexpected artifact: should start with config-")
content = SPLIT_NAMES_CONTENT
split = None
else:
if not step.startswith("split-"):
raise ValueError("Unexpected artifact: should start with split-")
content = {}
if error_code:
http_status = HTTPStatus.INTERNAL_SERVER_ERROR
content = {}
else:
http_status = HTTPStatus.OK
upsert_response(
kind=step,
dataset=dataset,
config=config,
split=split,
content=content,
http_status=http_status,
job_runner_version=JOB_RUNNER_VERSION - 1 if use_old_job_runner_version else JOB_RUNNER_VERSION,
dataset_git_revision=revision,
error_code=error_code,
updated_at=updated_at,
)
def process_next_job() -> None:
job_info = Queue().start_job()
put_cache(
step=job_info["type"],
dataset=job_info["params"]["dataset"],
revision=job_info["params"]["revision"],
config=job_info["params"]["config"],
split=job_info["params"]["split"],
)
Queue().finish_job(job_id=job_info["job_id"], is_success=True)
def process_all_jobs() -> None:
runs = 100
try:
while runs > 0:
runs -= 1
process_next_job()
except Exception:
return
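# compute_all runs backfill plans and processes the created jobs repeatedly until the plan
# has no remaining tasks (bounded by max_runs to avoid looping forever)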
def compute_all(
processing_graph: ProcessingGraph,
dataset: str = DATASET_NAME,
revision: str = REVISION_NAME,
error_codes_to_retry: Optional[list[str]] = None,
) -> None:
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph, dataset, revision, error_codes_to_retry)
max_runs = 100
while len(dataset_backfill_plan.tasks) > 0 and max_runs >= 0:
if max_runs == 0:
raise ValueError("Too many runs")
max_runs -= 1
dataset_backfill_plan.run()
for task in dataset_backfill_plan.tasks:
            # str.partition never returns None: an empty separator means the comma was missing
            task_type, sep, num = task.id.partition(",")
            if not sep:
                raise ValueError(f"Unexpected task id {task.id}: should contain a comma")
if task_type == "CreateJobs":
process_all_jobs()
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph, dataset, revision, error_codes_to_retry)
def artifact_id_to_job_info(artifact_id: str) -> JobInfo:
dataset, revision, config, split, processing_step_name = Artifact.parse_id(artifact_id)
return JobInfo(
job_id="job_id",
params={
"dataset": dataset,
"config": config,
"split": split,
"revision": revision,
},
type=processing_step_name,
priority=Priority.NORMAL,
difficulty=DIFFICULTY,
)
def assert_metric(job_type: str, status: str, total: int) -> None:
metric = JobTotalMetricDocument.objects(job_type=job_type, status=status).first()
assert metric is not None
assert metric.total == total
| datasets-server-main | libs/libcommon/tests/utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import pytest
from libcommon.dataset import get_dataset_git_revision
from libcommon.exceptions import DatasetInfoHubRequestError
@pytest.mark.real_dataset
def test_get_dataset_git_revision() -> None:
dataset = "glue"
hf_endpoint = "https://huggingface.co"
hf_token = None
get_dataset_git_revision(dataset, hf_endpoint, hf_token)
@pytest.mark.real_dataset
def test_get_dataset_git_revision_timeout() -> None:
dataset = "glue"
hf_endpoint = "https://huggingface.co"
hf_token = None
with pytest.raises(DatasetInfoHubRequestError):
get_dataset_git_revision(dataset, hf_endpoint, hf_token, hf_timeout_seconds=0.01)
| datasets-server-main | libs/libcommon/tests/test_dataset.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
from libcommon.config import LogConfig
def test_log_config() -> None:
log_config = LogConfig()
assert log_config.level == 20
| datasets-server-main | libs/libcommon/tests/test_config.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from datetime import datetime
from typing import Optional
import pytest
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Queue
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.utils import Priority, Status, get_datetime
from .utils import (
ARTIFACT_CA_1,
ARTIFACT_CA_2,
ARTIFACT_CB_1,
ARTIFACT_CB_2,
ARTIFACT_DA,
ARTIFACT_DA_OTHER_REVISION,
ARTIFACT_DB,
ARTIFACT_DC,
ARTIFACT_DD,
ARTIFACT_DE,
ARTIFACT_DF,
ARTIFACT_DG,
ARTIFACT_DH,
ARTIFACT_DI,
ARTIFACT_SA_1_1,
ARTIFACT_SA_1_2,
ARTIFACT_SA_2_1,
ARTIFACT_SA_2_2,
CONFIG_NAME_1,
CONFIG_NAMES,
DATASET_NAME,
DIFFICULTY,
OTHER_REVISION_NAME,
PROCESSING_GRAPH_FAN_IN_OUT,
PROCESSING_GRAPH_GENEALOGY,
PROCESSING_GRAPH_ONE_STEP,
PROCESSING_GRAPH_PARALLEL,
REVISION_NAME,
SPLIT_NAME_1,
SPLIT_NAMES,
STEP_CA,
STEP_DA,
STEP_DD,
STEP_DI,
STEP_SA,
assert_dataset_backfill_plan,
compute_all,
get_dataset_backfill_plan,
process_all_jobs,
process_next_job,
put_cache,
)
@pytest.fixture(autouse=True)
def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
return queue_mongo_resource
@pytest.fixture(autouse=True)
def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
return cache_mongo_resource
@pytest.mark.parametrize(
"processing_graph,cache_is_empty",
[
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD]),
(PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF]),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA, ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI]),
],
)
def test_initial_state(
processing_graph: ProcessingGraph,
cache_is_empty: list[str],
) -> None:
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": cache_is_empty,
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(cache_is_empty)}"],
)
@pytest.mark.parametrize(
"processing_graph,cache_is_empty",
[
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD]),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[ARTIFACT_CA_1, ARTIFACT_CA_2, ARTIFACT_CB_1, ARTIFACT_CB_2, ARTIFACT_DE, ARTIFACT_DF],
),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI]),
],
)
def test_da_is_computed(
processing_graph: ProcessingGraph,
cache_is_empty: list[str],
) -> None:
put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=CONFIG_NAMES,
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": cache_is_empty,
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [ARTIFACT_DA],
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(cache_is_empty)}"],
)
@pytest.mark.parametrize(
"processing_graph,cache_is_empty",
[
(
PROCESSING_GRAPH_FAN_IN_OUT,
[ARTIFACT_CA_2, ARTIFACT_CB_1, ARTIFACT_CB_2, ARTIFACT_DE, ARTIFACT_DF, ARTIFACT_SA_1_1, ARTIFACT_SA_1_2],
),
],
)
def test_ca_1_is_computed(
processing_graph: ProcessingGraph,
cache_is_empty: list[str],
) -> None:
put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME)
put_cache(step=STEP_CA, dataset=DATASET_NAME, revision=REVISION_NAME, config=CONFIG_NAME_1)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=CONFIG_NAMES,
split_names_in_first_config=SPLIT_NAMES,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": cache_is_empty,
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [ARTIFACT_CA_1, ARTIFACT_DA],
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(cache_is_empty)}"],
)
@pytest.mark.parametrize(
"processing_graph,new_1,in_process_2,new_2",
[
(
PROCESSING_GRAPH_GENEALOGY,
[ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD],
[ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD],
[],
),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF],
[ARTIFACT_DE, ARTIFACT_DF],
[ARTIFACT_CA_1, ARTIFACT_CA_2, ARTIFACT_CB_1, ARTIFACT_CB_2],
),
(
PROCESSING_GRAPH_PARALLEL,
[ARTIFACT_DA, ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI],
[ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI],
[],
),
],
)
def test_plan_one_job_creation_and_termination(
processing_graph: ProcessingGraph, new_1: list[str], in_process_2: list[str], new_2: list[str]
) -> None:
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": new_1,
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(new_1)}"],
)
dataset_backfill_plan.run()
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": new_1,
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
queue_status={"in_process": new_1},
tasks=[],
)
process_next_job()
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=CONFIG_NAMES,
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": sorted(in_process_2 + new_2),
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [ARTIFACT_DA],
},
queue_status={"in_process": in_process_2},
tasks=[f"CreateJobs,{len(new_2)}"] if new_2 else [],
)
@pytest.mark.parametrize(
"processing_graph,to_backfill",
[
(
PROCESSING_GRAPH_GENEALOGY,
[{ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD}, set()],
),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[
{ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF},
{ARTIFACT_CA_1, ARTIFACT_CA_2, ARTIFACT_CB_1, ARTIFACT_CB_2},
{ARTIFACT_SA_1_1, ARTIFACT_SA_1_2, ARTIFACT_SA_2_1, ARTIFACT_SA_2_2, ARTIFACT_DE},
{ARTIFACT_CB_1, ARTIFACT_CB_2, ARTIFACT_DF},
set(),
],
),
(PROCESSING_GRAPH_PARALLEL, [{ARTIFACT_DA, ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI}, set()]),
],
)
def test_plan_all_job_creation_and_termination(processing_graph: ProcessingGraph, to_backfill: list[set[str]]) -> None:
previous_artifacts: set[str] = set()
for artifacts_to_backfill in to_backfill:
is_empty = sorted(artifacts_to_backfill - previous_artifacts)
is_outdated_by_parent = sorted(artifacts_to_backfill.intersection(previous_artifacts))
in_process = sorted(is_empty + is_outdated_by_parent)
up_to_date = sorted(previous_artifacts - artifacts_to_backfill)
previous_artifacts = artifacts_to_backfill.union(previous_artifacts)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": is_outdated_by_parent,
"cache_is_empty": is_empty,
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": up_to_date,
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(in_process)}"] if in_process else [],
)
dataset_backfill_plan.run()
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": is_outdated_by_parent,
"cache_is_empty": is_empty,
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": up_to_date,
},
queue_status={"in_process": in_process},
tasks=[],
)
process_all_jobs()
@pytest.mark.parametrize(
"processing_graph,up_to_date",
[
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD]),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[
ARTIFACT_CA_1,
ARTIFACT_CA_2,
ARTIFACT_CB_1,
ARTIFACT_CB_2,
ARTIFACT_DA,
ARTIFACT_DE,
ARTIFACT_DF,
ARTIFACT_SA_1_1,
ARTIFACT_SA_1_2,
ARTIFACT_SA_2_1,
ARTIFACT_SA_2_2,
],
),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA, ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI]),
],
)
def test_plan_compute_all(processing_graph: ProcessingGraph, up_to_date: list[str]) -> None:
compute_all(processing_graph=processing_graph)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": up_to_date,
},
queue_status={"in_process": []},
tasks=[],
)
@pytest.mark.parametrize(
"processing_graph,up_to_date,is_outdated_by_parent",
[
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB, ARTIFACT_DD], [ARTIFACT_DC]),
(PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DE, ARTIFACT_DF], []),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DI], [ARTIFACT_DG, ARTIFACT_DH]),
],
)
def test_plan_retry_error_and_outdated_by_parent(
processing_graph: ProcessingGraph, up_to_date: list[str], is_outdated_by_parent: list[str]
) -> None:
error_code = "ERROR_CODE_TO_RETRY"
error_codes_to_retry = [error_code]
compute_all(processing_graph=processing_graph, error_codes_to_retry=error_codes_to_retry)
put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME, error_code=error_code)
    # In the case of PROCESSING_GRAPH_FAN_IN_OUT, the config names do not exist anymore:
    # the cache entries (and the jobs, if any; none here) should be deleted.
    # They are still here, haunting the database.
    # TODO: not supported yet
dataset_backfill_plan = get_dataset_backfill_plan(
processing_graph=processing_graph, error_codes_to_retry=error_codes_to_retry
)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": is_outdated_by_parent,
"cache_is_empty": [],
"cache_is_error_to_retry": [ARTIFACT_DA],
"cache_is_job_runner_obsolete": [],
"up_to_date": up_to_date,
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(is_outdated_by_parent) + 1}"],
)
@pytest.mark.parametrize(
"days_ago,is_old",
[(10, False), (30, True)],
)
def test_plan_old(days_ago: int, is_old: bool) -> None:
compute_all(processing_graph=PROCESSING_GRAPH_ONE_STEP)
CACHE_MAX_DAYS = 20
put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME, updated_at=get_datetime(days_ago))
dataset_backfill_plan = get_dataset_backfill_plan(
processing_graph=PROCESSING_GRAPH_ONE_STEP, cache_max_days=CACHE_MAX_DAYS
)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [ARTIFACT_DA] if is_old else [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [] if is_old else [ARTIFACT_DA],
},
queue_status={"in_process": []},
tasks=["CreateJobs,1"] if is_old else [],
)
@pytest.mark.parametrize(
"processing_graph,up_to_date,is_outdated_by_parent",
[
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DD], [ARTIFACT_DC]),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[
ARTIFACT_CB_1,
ARTIFACT_CB_2,
ARTIFACT_DA,
ARTIFACT_DE,
ARTIFACT_DF,
ARTIFACT_SA_1_1,
ARTIFACT_SA_1_2,
ARTIFACT_SA_2_1,
ARTIFACT_SA_2_2,
],
[ARTIFACT_CA_1, ARTIFACT_CA_2],
),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA, ARTIFACT_DI], [ARTIFACT_DG, ARTIFACT_DH]),
],
)
def test_plan_outdated_by_parent(
processing_graph: ProcessingGraph, up_to_date: list[str], is_outdated_by_parent: list[str]
) -> None:
compute_all(processing_graph=processing_graph)
put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": is_outdated_by_parent,
"cache_is_empty": [],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": up_to_date,
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(is_outdated_by_parent)}"],
)
@pytest.mark.parametrize(
"processing_graph,up_to_date,is_outdated_by_parent",
[
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB, ARTIFACT_DD], [ARTIFACT_DC]),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[
ARTIFACT_CB_1,
ARTIFACT_CB_2,
ARTIFACT_DE,
ARTIFACT_DF,
ARTIFACT_SA_1_1,
ARTIFACT_SA_1_2,
ARTIFACT_SA_2_1,
ARTIFACT_SA_2_2,
],
[ARTIFACT_CA_1, ARTIFACT_CA_2],
),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DI], [ARTIFACT_DG, ARTIFACT_DH]),
],
)
def test_plan_job_runner_version_and_outdated_by_parent(
processing_graph: ProcessingGraph, up_to_date: list[str], is_outdated_by_parent: list[str]
) -> None:
compute_all(processing_graph=processing_graph)
put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME, use_old_job_runner_version=True)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": is_outdated_by_parent,
"cache_is_empty": [],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [ARTIFACT_DA],
"up_to_date": up_to_date,
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(is_outdated_by_parent) + 1}"],
)
@pytest.mark.parametrize(
"processing_graph,up_to_date,is_outdated_by_parent",
[
(PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB, ARTIFACT_DD], [ARTIFACT_DC]),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[
ARTIFACT_CB_1,
ARTIFACT_CB_2,
ARTIFACT_DE,
ARTIFACT_DF,
ARTIFACT_SA_1_1,
ARTIFACT_SA_1_2,
ARTIFACT_SA_2_1,
ARTIFACT_SA_2_2,
],
[ARTIFACT_CA_1, ARTIFACT_CA_2],
),
(PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DI], [ARTIFACT_DG, ARTIFACT_DH]),
],
)
def test_plan_git_revision_and_outdated_by_parent(
processing_graph: ProcessingGraph, up_to_date: list[str], is_outdated_by_parent: list[str]
) -> None:
compute_all(processing_graph=processing_graph)
put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=OTHER_REVISION_NAME)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [ARTIFACT_DA],
"cache_is_old": [],
"cache_is_outdated_by_parent": is_outdated_by_parent,
"cache_is_empty": [],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": up_to_date,
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(is_outdated_by_parent) + 1}"],
)
@pytest.mark.parametrize(
"processing_graph,up_to_date,is_outdated_by_parent",
[
(
PROCESSING_GRAPH_FAN_IN_OUT,
[
ARTIFACT_CA_1,
ARTIFACT_CA_2,
ARTIFACT_CB_2,
ARTIFACT_DA,
ARTIFACT_DE,
ARTIFACT_SA_1_1,
ARTIFACT_SA_1_2,
ARTIFACT_SA_2_1,
ARTIFACT_SA_2_2,
],
[
ARTIFACT_CB_1,
ARTIFACT_DF,
],
),
],
)
def test_plan_fan_in_updated(
processing_graph: ProcessingGraph, up_to_date: list[str], is_outdated_by_parent: list[str]
) -> None:
compute_all(processing_graph=processing_graph)
put_cache(step=STEP_SA, dataset=DATASET_NAME, revision=REVISION_NAME, config=CONFIG_NAME_1, split=SPLIT_NAME_1)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": is_outdated_by_parent,
"cache_is_empty": [],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": up_to_date,
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(is_outdated_by_parent)}"],
)
@pytest.mark.parametrize(
"processing_graph,initial,up_to_date,is_empty,unknown",
[
(
PROCESSING_GRAPH_GENEALOGY,
[ARTIFACT_DA, ARTIFACT_DD],
[ARTIFACT_DA, ARTIFACT_DD],
[ARTIFACT_DB, ARTIFACT_DC],
[],
),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[ARTIFACT_CA_1],
[],
[ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF],
[
ARTIFACT_CA_1,
ARTIFACT_CA_2,
ARTIFACT_CB_1,
ARTIFACT_CB_2,
ARTIFACT_SA_1_1,
ARTIFACT_SA_1_2,
ARTIFACT_SA_2_1,
ARTIFACT_SA_2_2,
],
),
(
PROCESSING_GRAPH_FAN_IN_OUT,
[ARTIFACT_SA_1_1],
[],
[ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF],
[
ARTIFACT_CA_1,
ARTIFACT_CA_2,
ARTIFACT_CB_1,
ARTIFACT_CB_2,
ARTIFACT_SA_1_1,
ARTIFACT_SA_1_2,
ARTIFACT_SA_2_1,
ARTIFACT_SA_2_2,
],
),
(
PROCESSING_GRAPH_PARALLEL,
[ARTIFACT_DA, ARTIFACT_DI],
[ARTIFACT_DA, ARTIFACT_DI],
[ARTIFACT_DG, ARTIFACT_DH],
[],
),
],
)
def test_plan_incoherent_state(
processing_graph: ProcessingGraph,
initial: list[str],
up_to_date: list[str],
is_empty: list[str],
unknown: list[str],
) -> None:
for artifact in initial:
if artifact == ARTIFACT_SA_1_1:
put_cache(
step=STEP_SA, dataset=DATASET_NAME, revision=REVISION_NAME, config=CONFIG_NAME_1, split=SPLIT_NAME_1
)
elif artifact == ARTIFACT_CA_1:
put_cache(step=STEP_CA, dataset=DATASET_NAME, revision=REVISION_NAME, config=CONFIG_NAME_1)
elif artifact == ARTIFACT_DA:
put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME)
elif artifact == ARTIFACT_DD:
put_cache(step=STEP_DD, dataset=DATASET_NAME, revision=REVISION_NAME)
elif artifact == ARTIFACT_DI:
put_cache(step=STEP_DI, dataset=DATASET_NAME, revision=REVISION_NAME)
else:
raise NotImplementedError()
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": is_empty,
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": up_to_date,
},
queue_status={"in_process": []},
tasks=[f"CreateJobs,{len(is_empty)}"],
)
compute_all(processing_graph=processing_graph)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": sorted(up_to_date + is_empty + unknown),
},
queue_status={"in_process": []},
tasks=[],
)
JobSpec = tuple[Priority, Status, Optional[datetime]]
OLD = datetime.strptime("20000101", "%Y%m%d")
NEW = datetime.strptime("20000102", "%Y%m%d")
LOW_WAITING_OLD = (Priority.LOW, Status.WAITING, OLD)
LOW_WAITING_NEW = (Priority.LOW, Status.WAITING, NEW)
LOW_STARTED_OLD = (Priority.LOW, Status.STARTED, OLD)
LOW_STARTED_NEW = (Priority.LOW, Status.STARTED, NEW)
NORMAL_WAITING_OLD = (Priority.NORMAL, Status.WAITING, OLD)
NORMAL_WAITING_NEW = (Priority.NORMAL, Status.WAITING, NEW)
NORMAL_STARTED_OLD = (Priority.NORMAL, Status.STARTED, OLD)
NORMAL_STARTED_NEW = (Priority.NORMAL, Status.STARTED, NEW)
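# Each JobSpec above is a (priority, status, created_at) triple; OLD and NEW are two creation
# dates one day apart. The cases below describe, for a given set of pre-existing duplicate
# jobs, which jobs the backfill is expected to create, delete and keep.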
@pytest.mark.parametrize(
"existing_jobs,expected_create_job,expected_delete_jobs,expected_jobs_after_backfill",
[
([], True, False, [(Priority.LOW, Status.WAITING, None)]),
(
[
LOW_WAITING_OLD,
LOW_WAITING_NEW,
LOW_STARTED_OLD,
LOW_STARTED_NEW,
NORMAL_WAITING_OLD,
NORMAL_WAITING_NEW,
NORMAL_STARTED_OLD,
NORMAL_STARTED_NEW,
],
False,
True,
[NORMAL_STARTED_OLD],
),
(
[
LOW_WAITING_OLD,
LOW_WAITING_NEW,
LOW_STARTED_OLD,
LOW_STARTED_NEW,
NORMAL_WAITING_OLD,
NORMAL_WAITING_NEW,
NORMAL_STARTED_NEW,
],
False,
True,
[NORMAL_STARTED_NEW],
),
(
[
LOW_WAITING_OLD,
LOW_WAITING_NEW,
LOW_STARTED_OLD,
LOW_STARTED_NEW,
NORMAL_WAITING_OLD,
NORMAL_WAITING_NEW,
],
False,
True,
[LOW_STARTED_OLD],
),
(
[LOW_WAITING_OLD, LOW_WAITING_NEW, LOW_STARTED_NEW, NORMAL_WAITING_OLD, NORMAL_WAITING_NEW],
False,
True,
[LOW_STARTED_NEW],
),
(
[LOW_WAITING_OLD, LOW_WAITING_NEW, NORMAL_WAITING_OLD, NORMAL_WAITING_NEW],
False,
True,
[NORMAL_WAITING_OLD],
),
([LOW_WAITING_OLD, LOW_WAITING_NEW, NORMAL_WAITING_NEW], False, True, [NORMAL_WAITING_NEW]),
([LOW_WAITING_OLD, LOW_WAITING_NEW], False, True, [LOW_WAITING_OLD]),
([LOW_WAITING_NEW], False, False, [LOW_WAITING_NEW]),
([LOW_WAITING_NEW] * 5, False, True, [LOW_WAITING_NEW]),
],
)
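# As encoded by the cases above: when several duplicate jobs exist for the same unicity_id,
# the backfill keeps a single one, preferring started over waiting jobs, then NORMAL over LOW
# priority, then the oldest created_at among otherwise equal jobs.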
def test_delete_jobs(
existing_jobs: list[JobSpec],
expected_create_job: bool,
expected_delete_jobs: bool,
expected_jobs_after_backfill: list[JobSpec],
) -> None:
processing_graph = PROCESSING_GRAPH_ONE_STEP
queue = Queue()
for job_spec in existing_jobs:
(priority, status, created_at) = job_spec
job = queue.add_job(
job_type=STEP_DA, dataset="dataset", revision="revision", priority=priority, difficulty=DIFFICULTY
)
if created_at is not None:
job.created_at = created_at
job.save()
if status is Status.STARTED:
job.status = Status.STARTED
job.started_at = datetime.now()
job.save()
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
expected_in_process = [ARTIFACT_DA] if existing_jobs else []
if expected_create_job:
if expected_delete_jobs:
raise NotImplementedError()
expected_tasks = ["CreateJobs,1"]
elif expected_delete_jobs:
expected_tasks = [f"DeleteJobs,{len(existing_jobs) - 1}"]
else:
expected_tasks = []
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [ARTIFACT_DA],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
queue_status={"in_process": expected_in_process},
tasks=expected_tasks,
)
dataset_backfill_plan.run()
job_dicts = queue.get_dataset_pending_jobs_for_type(dataset=DATASET_NAME, job_type=STEP_DA)
assert len(job_dicts) == len(expected_jobs_after_backfill)
for job_dict, expected_job_spec in zip(job_dicts, expected_jobs_after_backfill):
(priority, status, created_at) = expected_job_spec
assert job_dict["priority"] == priority.value
assert job_dict["status"] == status.value
if created_at is not None:
assert job_dict["created_at"] == created_at
def test_multiple_revisions() -> None:
processing_graph = PROCESSING_GRAPH_ONE_STEP
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph, revision=REVISION_NAME)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [ARTIFACT_DA],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
queue_status={"in_process": []},
tasks=["CreateJobs,1"],
)
# create the job for the first revision
dataset_backfill_plan.run()
# the job is in process, no other job is created for the same revision
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph, revision=REVISION_NAME)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [ARTIFACT_DA],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
queue_status={"in_process": [ARTIFACT_DA]},
tasks=[],
)
# create the job for the second revision: the first job is deleted
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph, revision=OTHER_REVISION_NAME)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [ARTIFACT_DA_OTHER_REVISION],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
queue_status={"in_process": []},
tasks=["DeleteJobs,1", "CreateJobs,1"],
)
dataset_backfill_plan.run()
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph, revision=OTHER_REVISION_NAME)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
config_names=[],
split_names_in_first_config=[],
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [ARTIFACT_DA_OTHER_REVISION],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
queue_status={"in_process": [ARTIFACT_DA_OTHER_REVISION]},
tasks=[],
)
pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
assert len(pending_jobs_df) == 1
assert not (pending_jobs_df["revision"] == REVISION_NAME).any()
assert (pending_jobs_df["revision"] == OTHER_REVISION_NAME).all()
| datasets-server-main | libs/libcommon/tests/test_backfill.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from http import HTTPStatus
import pytest
from libcommon.config import ProcessingGraphConfig
from libcommon.constants import PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Queue
from libcommon.resources import CacheMongoResource, QueueMongoResource
from libcommon.simple_cache import upsert_response
from .utils import (
CONFIG_NAMES,
CONFIG_NAMES_CONTENT,
REVISION_NAME,
assert_dataset_backfill_plan,
get_dataset_backfill_plan,
)
PROCESSING_GRAPH = ProcessingGraph(processing_graph_specification=ProcessingGraphConfig().specification)
@pytest.fixture(autouse=True)
def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
return queue_mongo_resource
@pytest.fixture(autouse=True)
def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
return cache_mongo_resource
def test_plan_job_creation_and_termination() -> None:
# we launch all the backfill tasks
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=PROCESSING_GRAPH)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
# The config names are not yet known
config_names=[],
# The split names are not yet known
split_names_in_first_config=[],
# All the dataset-level cache entries are empty
# No config-level and split-level cache entries is listed, because the config names and splits
# names are not yet known.
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [
"dataset-config-names,dataset,revision",
"dataset-hub-cache,dataset,revision",
"dataset-info,dataset,revision",
"dataset-is-valid,dataset,revision",
"dataset-opt-in-out-urls-count,dataset,revision",
"dataset-parquet,dataset,revision",
"dataset-size,dataset,revision",
"dataset-split-names,dataset,revision",
],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
# The queue is empty, so no step is in process.
queue_status={"in_process": []},
# The root dataset-level steps, as well as the "fan-in" steps, are ready to be backfilled.
tasks=["CreateJobs,8"],
)
dataset_backfill_plan.run()
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=PROCESSING_GRAPH)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
# The config names are not yet known
config_names=[],
# The split names are not yet known
split_names_in_first_config=[],
# the cache has not changed
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [
"dataset-config-names,dataset,revision",
"dataset-hub-cache,dataset,revision",
"dataset-info,dataset,revision",
"dataset-is-valid,dataset,revision",
"dataset-opt-in-out-urls-count,dataset,revision",
"dataset-parquet,dataset,revision",
"dataset-size,dataset,revision",
"dataset-split-names,dataset,revision",
],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": [],
},
# the jobs have been created and are in process
queue_status={
"in_process": [
"dataset-config-names,dataset,revision",
"dataset-hub-cache,dataset,revision",
"dataset-info,dataset,revision",
"dataset-is-valid,dataset,revision",
"dataset-opt-in-out-urls-count,dataset,revision",
"dataset-parquet,dataset,revision",
"dataset-size,dataset,revision",
"dataset-split-names,dataset,revision",
]
},
# thus: no new task
tasks=[],
)
# we simulate the job for "dataset-config-names,dataset,revision" has finished
job_info = Queue().start_job(job_types_only=["dataset-config-names"])
upsert_response(
kind=job_info["type"],
dataset=job_info["params"]["dataset"],
config=job_info["params"]["config"],
split=job_info["params"]["split"],
content=CONFIG_NAMES_CONTENT,
http_status=HTTPStatus.OK,
job_runner_version=PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION,
dataset_git_revision=REVISION_NAME,
)
Queue().finish_job(job_id=job_info["job_id"], is_success=True)
dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=PROCESSING_GRAPH)
assert_dataset_backfill_plan(
dataset_backfill_plan=dataset_backfill_plan,
# The config names are now known
config_names=CONFIG_NAMES,
# The split names are not yet known
split_names_in_first_config=[],
# The "dataset-config-names" step is up-to-date
# Config-level artifacts are empty and ready to be filled (even if some of their parents are still missing)
# The split-level artifacts are still missing, because the splits names are not yet known, for any config.
cache_status={
"cache_has_different_git_revision": [],
"cache_is_old": [],
"cache_is_outdated_by_parent": [],
"cache_is_empty": [
"config-split-names-from-info,dataset,revision,config1",
"config-split-names-from-info,dataset,revision,config2",
"config-split-names-from-streaming,dataset,revision,config1",
"config-split-names-from-streaming,dataset,revision,config2",
"config-info,dataset,revision,config1",
"config-info,dataset,revision,config2",
"config-opt-in-out-urls-count,dataset,revision,config1",
"config-opt-in-out-urls-count,dataset,revision,config2",
"config-parquet,dataset,revision,config1",
"config-parquet,dataset,revision,config2",
"config-parquet-and-info,dataset,revision,config1",
"config-parquet-and-info,dataset,revision,config2",
"config-parquet-metadata,dataset,revision,config1",
"config-parquet-metadata,dataset,revision,config2",
"config-size,dataset,revision,config1",
"config-size,dataset,revision,config2",
"config-is-valid,dataset,revision,config1",
"config-is-valid,dataset,revision,config2",
"dataset-hub-cache,dataset,revision",
"dataset-info,dataset,revision",
"dataset-is-valid,dataset,revision",
"dataset-opt-in-out-urls-count,dataset,revision",
"dataset-parquet,dataset,revision",
"dataset-size,dataset,revision",
"dataset-split-names,dataset,revision",
],
"cache_is_error_to_retry": [],
"cache_is_job_runner_obsolete": [],
"up_to_date": ["dataset-config-names,dataset,revision"],
},
# the job "dataset-config-names,dataset,revision" is no more in process
queue_status={
"in_process": [
"dataset-hub-cache,dataset,revision",
"dataset-info,dataset,revision",
"dataset-is-valid,dataset,revision",
"dataset-opt-in-out-urls-count,dataset,revision",
"dataset-parquet,dataset,revision",
"dataset-size,dataset,revision",
"dataset-split-names,dataset,revision",
]
},
tasks=["CreateJobs,18"],
)
| datasets-server-main | libs/libcommon/tests/test_backfill_on_real_graph.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import datetime
from collections.abc import Mapping
from pathlib import Path
from typing import Any, Optional
import numpy as np
import pandas as pd
import pytest
from datasets import (
Array2D,
Array3D,
Array4D,
Array5D,
Audio,
ClassLabel,
Dataset,
Features,
Image,
Sequence,
Translation,
TranslationVariableLanguages,
Value,
)
from datasets.features.features import FeatureType
def value(content: Any, dtype: Any) -> Dataset:
return Dataset.from_pandas(pd.DataFrame({"col": [content]}, dtype=dtype))
def other(content: Any, feature_type: Optional[FeatureType] = None) -> Dataset:
if feature_type:
features = Features({"col": feature_type})
return Dataset.from_dict({"col": [content]}, features=features)
else:
return Dataset.from_dict({"col": [content]})
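# Both helpers above build a one-row dataset with a single "col" column. For example
# (illustrative only): value(-7, pd.Int8Dtype()) yields features {"col": Value("int8")},
# while other([{"a": 0}]) lets `datasets` infer the nested feature type.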
@pytest.fixture(scope="session")
def datasets() -> Mapping[str, Dataset]:
sampling_rate = 16_000
return {
# Value feature
"null": value(None, None),
"bool": value(False, pd.BooleanDtype()),
"int8": value(-7, pd.Int8Dtype()),
"int16": value(-7, pd.Int16Dtype()),
"int32": value(-7, pd.Int32Dtype()),
"int64": value(-7, pd.Int64Dtype()),
"uint8": value(7, pd.UInt8Dtype()),
"uint16": value(7, pd.UInt16Dtype()),
"uint32": value(7, pd.UInt32Dtype()),
"uint64": value(7, pd.UInt64Dtype()),
"float16": value(-3.14, np.float16),
"float32": value(-3.14, np.float32),
"float64": value(-3.14, np.float64),
"time": value(datetime.time(1, 1, 1), None),
"timestamp_1": value(pd.Timestamp(2020, 1, 1), None),
"timestamp_2": value(pd.Timestamp(1513393355.5, unit="s"), None),
"timestamp_3": value(pd.Timestamp(1513393355500, unit="ms"), None),
"timestamp_tz": value(pd.Timestamp(year=2020, month=1, day=1, tz="US/Pacific"), None),
"string": value("a string", pd.StringDtype(storage="python")),
# other types of features
"class_label": other("positive", ClassLabel(names=["negative", "positive"])),
"dict": other({"a": 0}, None),
"list": other([{"a": 0}], None),
"sequence_simple": other([0], None),
"sequence": other([{"a": 0}], Sequence(feature={"a": Value(dtype="int64")})),
"array2d": other(np.zeros((2, 2), dtype="float32"), Array2D(shape=(2, 2), dtype="float32")),
"array3d": other(np.zeros((2, 2, 2), dtype="float32"), Array3D(shape=(2, 2, 2), dtype="float32")),
"array4d": other(np.zeros((2, 2, 2, 2), dtype="float32"), Array4D(shape=(2, 2, 2, 2), dtype="float32")),
"array5d": other(np.zeros((2, 2, 2, 2, 2), dtype="float32"), Array5D(shape=(2, 2, 2, 2, 2), dtype="float32")),
"audio": other({"array": [0.1, 0.2, 0.3], "sampling_rate": sampling_rate}, Audio(sampling_rate=sampling_rate)),
"audio_ogg": other(
str(Path(__file__).resolve().parent / "data" / "test_audio_vorbis.ogg"), Audio(sampling_rate=sampling_rate)
),
"image": other(str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"), Image()),
"translation": other({"en": "the cat", "fr": "le chat"}, Translation(languages=["en", "fr"])),
"translation_variable_languages": other(
{"en": "the cat", "fr": ["le chat", "la chatte"]},
TranslationVariableLanguages(languages=["en", "fr"]),
),
"images_list": other(
[
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
],
[Image()],
),
"audios_list": other(
[
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
[Audio()],
),
"images_sequence": other(
[
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
],
Sequence(feature=Image()),
),
"audios_sequence": other(
[
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
Sequence(feature=Audio()),
),
"dict_of_audios_and_images": other(
{
"a": 0,
"b": [
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
str(Path(__file__).resolve().parent / "data" / "test_image_rgb.jpg"),
],
"c": {
"ca": [
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
{"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
]
},
},
{"a": Value(dtype="int64"), "b": [Image()], "c": {"ca": [Audio()]}},
),
"sequence_of_dicts": other(
[{"a": {"b": 0}}, {"a": {"b": 1}}], Sequence(feature={"a": {"b": Value(dtype="int64")}})
),
"none_value": other({"a": None}, {"a": Value(dtype="int64")}),
"big": Dataset.from_pandas(
pd.DataFrame({"col": ["a" * 1_234 for _ in range(4_567)]}, dtype=pd.StringDtype(storage="python"))
),
}
| datasets-server-main | libs/libcommon/tests/fixtures/datasets.py |
datasets-server-main | libs/libcommon/tests/fixtures/__init__.py |
|
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import datetime
from collections.abc import Mapping
from typing import Any
from zoneinfo import ZoneInfo
import numpy as np
import pytest
from datasets import Audio, Dataset, Features, Image, Value
from libcommon.storage import StrPath
from libcommon.viewer_utils.features import (
get_cell_value,
get_supported_unsupported_columns,
)
# we need to know the correspondence between the feature type and the cell value, in order to:
# - document the API
# - implement the client on the Hub (dataset viewer)
# see https://github.com/huggingface/datasets/blob/a5192964dc4b76ee5c03593c11ee56f29bbd688d/...
# src/datasets/features/features.py#L1469
# ``FieldType`` can be one of the following:
# - a :class:`datasets.Value` feature specifies a single typed value, e.g. ``int64`` or ``string``
@pytest.mark.parametrize(
"dataset_type,output_value,output_dtype",
[
("null", None, "null"),
("bool", False, "bool"),
("int8", -7, "int8"),
("int16", -7, "int16"),
("int32", -7, "int32"),
("int64", -7, "int64"),
("uint8", 7, "uint8"),
("uint16", 7, "uint16"),
("uint32", 7, "uint32"),
("uint64", 7, "uint64"),
("float16", np.float16(-3.14), "float16"),
# (alias float)
("float32", np.float32(-3.14), "float32"),
# (alias double)
("float64", -3.14, "float64"),
("time", datetime.time(1, 1, 1), "time64[us]"),
("timestamp_1", datetime.datetime(2020, 1, 1, 0, 0), "timestamp[ns]"),
("timestamp_2", datetime.datetime(2017, 12, 16, 3, 2, 35, 500000), "timestamp[ns]"),
("timestamp_3", datetime.datetime(2017, 12, 16, 3, 2, 35, 500000), "timestamp[ns]"),
(
"timestamp_tz",
datetime.datetime(2020, 1, 1, 0, 0, tzinfo=ZoneInfo("US/Pacific")),
"timestamp[ns, tz=US/Pacific]",
),
("string", "a string", "string"),
],
)
def test_value(
dataset_type: str,
output_value: Any,
output_dtype: str,
datasets: Mapping[str, Dataset],
cached_assets_directory: StrPath,
) -> None:
dataset = datasets[dataset_type]
feature = dataset.features["col"]
assert feature._type == "Value"
assert feature.dtype == output_dtype
value = get_cell_value(
dataset="dataset",
config="config",
split="split",
row_idx=7,
cell=dataset[0]["col"],
featureName="col",
fieldType=feature,
assets_base_url="http://localhost/assets",
assets_directory=cached_assets_directory,
)
assert value == output_value
@pytest.mark.parametrize(
"dataset_type,output_value,output_type",
[
# - a :class:`datasets.ClassLabel` feature specifies a field with a predefined set of classes
# which can have labels associated to them and will be stored as integers in the dataset
("class_label", 1, "ClassLabel"),
# - a python :obj:`dict` which specifies that the field is a nested field containing a mapping of sub-fields
# to sub-fields features. It's possible to have nested fields of nested fields in an arbitrary manner
("dict", {"a": 0}, {"a": Value(dtype="int64", id=None)}),
# - a python :obj:`list` or a :class:`datasets.Sequence` specifies that the field contains a list of objects.
# The python :obj:`list` or :class:`datasets.Sequence` should be provided with a single sub-feature as an
# example of the feature type hosted in this list
# <Tip>
        # A :class:`datasets.Sequence` with an internal dictionary feature will be automatically converted into a
        # dictionary of lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets
        # library but may be unwanted in some cases. If you don't want this behavior, you can use a python
# :obj:`list` instead of the :class:`datasets.Sequence`.
# </Tip>
("list", [{"a": 0}], [{"a": Value(dtype="int64", id=None)}]),
("sequence_simple", [0], "Sequence"),
("sequence", {"a": [0]}, "Sequence"),
# - a :class:`Array2D`, :class:`Array3D`, :class:`Array4D` or :class:`Array5D` feature for multidimensional
# arrays
("array2d", [[0.0, 0.0], [0.0, 0.0]], "Array2D"),
("array3d", [[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]], "Array3D"),
(
"array4d",
[
[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]],
[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]],
],
"Array4D",
),
(
"array5d",
[
[
[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]],
[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]],
],
[
[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]],
[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]],
],
],
"Array5D",
),
# - an :class:`Audio` feature to store the absolute path to an audio file or a dictionary with the relative
# path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio
# data.
(
"audio",
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/audio.wav",
"type": "audio/wav",
}
],
"Audio",
),
(
"audio_ogg",
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/audio.wav",
"type": "audio/wav",
}
],
"Audio",
),
# - an :class:`Image` feature to store the absolute path to an image file, an :obj:`np.ndarray` object, a
# :obj:`PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and
# its bytes content ("bytes" key). This feature extracts the image data.
(
"image",
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/image.jpg",
"height": 480,
"width": 640,
},
"Image",
),
# - :class:`datasets.Translation` and :class:`datasets.TranslationVariableLanguages`, the two features
# specific to Machine Translation
("translation", {"en": "the cat", "fr": "le chat"}, "Translation"),
(
"translation_variable_languages",
{"language": ["en", "fr", "fr"], "translation": ["the cat", "la chatte", "le chat"]},
"TranslationVariableLanguages",
),
# special cases
(
"images_list",
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/image-1d100e9.jpg",
"height": 480,
"width": 640,
},
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/image-1d300ea.jpg",
"height": 480,
"width": 640,
},
],
[Image(decode=True, id=None)],
),
(
"audios_list",
[
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/audio-1d100e9.wav",
"type": "audio/wav",
},
],
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/audio-1d300ea.wav",
"type": "audio/wav",
},
],
],
[Audio()],
),
(
"images_sequence",
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/image-1d100e9.jpg",
"height": 480,
"width": 640,
},
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/image-1d300ea.jpg",
"height": 480,
"width": 640,
},
],
"Sequence",
),
(
"audios_sequence",
[
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/audio-1d100e9.wav",
"type": "audio/wav",
},
],
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/audio-1d300ea.wav",
"type": "audio/wav",
},
],
],
"Sequence",
),
(
"dict_of_audios_and_images",
{
"a": 0,
"b": [
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/image-89101db.jpg",
"height": 480,
"width": 640,
},
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/image-89301dc.jpg",
"height": 480,
"width": 640,
},
],
"c": {
"ca": [
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/audio-18360330.wav",
"type": "audio/wav",
},
],
[
{
"src": "http://localhost/assets/dataset/--/config/split/7/col/audio-18380331.wav",
"type": "audio/wav",
},
],
]
},
},
{"a": Value(dtype="int64"), "b": [Image(decode=True, id=None)], "c": {"ca": [Audio()]}},
),
("sequence_of_dicts", {"a": [{"b": 0}, {"b": 1}]}, "Sequence"),
("none_value", {"a": None}, {"a": Value(dtype="int64", id=None)}),
],
)
def test_others(
dataset_type: str,
output_value: Any,
output_type: Any,
datasets: Mapping[str, Dataset],
cached_assets_directory: StrPath,
) -> None:
dataset = datasets[dataset_type]
feature = dataset.features["col"]
if type(output_type) in [list, dict]:
assert feature == output_type
else:
assert feature._type == output_type
value = get_cell_value(
dataset="dataset",
config="config",
split="split",
row_idx=7,
cell=dataset[0]["col"],
featureName="col",
fieldType=feature,
assets_base_url="http://localhost/assets",
assets_directory=cached_assets_directory,
)
assert value == output_value
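# The test below exercises get_supported_unsupported_columns: a column is expected to be
# reported as unsupported when its feature (even nested inside a list, and whatever its
# parameters, e.g. sampling_rate) matches one of the given unsupported feature types.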
def test_get_supported_unsupported_columns() -> None:
features = Features(
{
"audio1": Audio(),
"audio2": Audio(sampling_rate=16_000),
"audio3": [Audio()],
"image1": Image(),
"image2": Image(decode=False),
"image3": [Image()],
"string": Value("string"),
"binary": Value("binary"),
}
)
unsupported_features = [Value("binary"), Audio()]
supported_columns, unsupported_columns = get_supported_unsupported_columns(features, unsupported_features)
assert supported_columns == ["image1", "image2", "image3", "string"]
assert unsupported_columns == ["audio1", "audio2", "audio3", "binary"]
| datasets-server-main | libs/libcommon/tests/viewer_utils/test_features.py |
datasets-server-main | libs/libcommon/tests/viewer_utils/__init__.py |
|
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import contextlib
import json
import logging
import time
import types
from collections import Counter
from collections.abc import Sequence
from datetime import datetime, timedelta
from itertools import groupby
from operator import itemgetter
from types import TracebackType
from typing import Generic, Literal, Optional, TypedDict, TypeVar
from uuid import uuid4
import pandas as pd
import pytz
from bson import ObjectId
from mongoengine import Document
from mongoengine.errors import DoesNotExist, NotUniqueError
from mongoengine.fields import (
DateTimeField,
EnumField,
IntField,
ObjectIdField,
StringField,
)
from mongoengine.queryset.queryset import QuerySet
from libcommon.constants import (
DEFAULT_DIFFICULTY_MAX,
DEFAULT_DIFFICULTY_MIN,
LOCK_TTL_SECONDS,
QUEUE_COLLECTION_JOBS,
QUEUE_COLLECTION_LOCKS,
QUEUE_METRICS_COLLECTION,
QUEUE_MONGOENGINE_ALIAS,
QUEUE_TTL_SECONDS,
)
from libcommon.utils import (
FlatJobInfo,
JobInfo,
Priority,
Status,
get_datetime,
inputs_to_string,
)
# START monkey patching ### hack ###
# see https://github.com/sbdchd/mongo-types#install
U = TypeVar("U", bound=Document)
def no_op(self, x): # type: ignore
return self
QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet)
class QuerySetManager(Generic[U]):
def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]:
return QuerySet(cls, cls._get_collection())
class StartedJobError(Exception):
pass
# END monkey patching ### hack ###
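# The no-op __class_getitem__ lets annotations such as QuerySet[JobDocument] evaluate at
# runtime, and QuerySetManager gives each Document subclass a typed `objects` attribute
# (see the `objects = QuerySetManager[...]()` lines below).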
class JobDict(TypedDict):
type: str
dataset: str
revision: str
config: Optional[str]
split: Optional[str]
unicity_id: str
namespace: str
priority: str
status: str
difficulty: int
created_at: datetime
started_at: Optional[datetime]
finished_at: Optional[datetime]
last_heartbeat: Optional[datetime]
class CountByStatus(TypedDict):
waiting: int
started: int
success: int
error: int
cancelled: int
class DumpByPendingStatus(TypedDict):
waiting: list[JobDict]
started: list[JobDict]
class EmptyQueueError(Exception):
pass
class JobDoesNotExistError(DoesNotExist):
pass
class AlreadyStartedJobError(Exception):
pass
class LockTimeoutError(Exception):
pass
class NoWaitingJobError(Exception):
pass
class JobQueryFilters(TypedDict, total=False):
type__nin: list[str]
type__in: list[str]
difficulty__gte: int
difficulty__lte: int
# States:
# - waiting: started_at is None and finished_at is None: waiting jobs
# - started: started_at is not None and finished_at is None: started jobs
# - finished: started_at is not None and finished_at is not None: finished jobs
# For a given set of arguments, only one job is allowed in the started state. No
# restriction for the other states
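# A minimal sketch of that mapping (illustrative only, not used by the code below):
def _state_from_dates(started_at: Optional[datetime], finished_at: Optional[datetime]) -> str:
    if started_at is None and finished_at is None:
        return "waiting"
    if started_at is not None and finished_at is None:
        return "started"
    return "finished"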
class JobDocument(Document):
"""A job in the mongoDB database
Args:
type (`str`): The type of the job, identifies the queue
dataset (`str`): The dataset on which to apply the job.
revision (`str`): The git revision of the dataset.
config (`str`, optional): The config on which to apply the job.
split (`str`, optional): The split on which to apply the job.
unicity_id (`str`): A string that identifies the job uniquely. Only one job with the same unicity_id can be in
the started state. The revision is not part of the unicity_id.
namespace (`str`): The dataset namespace (user or organization) if any, else the dataset name (canonical name).
priority (`Priority`, optional): The priority of the job. Defaults to Priority.LOW.
status (`Status`, optional): The status of the job. Defaults to Status.WAITING.
difficulty (`int`): The difficulty of the job: 0=easy, 100=hard as a convention.
created_at (`datetime`): The creation date of the job.
started_at (`datetime`, optional): When the job has started.
finished_at (`datetime`, optional): When the job has finished.
last_heartbeat (`datetime`, optional): Last time the running job got a heartbeat from the worker.
"""
meta = {
"collection": QUEUE_COLLECTION_JOBS,
"db_alias": QUEUE_MONGOENGINE_ALIAS,
"indexes": [
("type", "dataset", "status"),
("type", "dataset", "revision", "config", "split", "status", "priority"),
("priority", "status", "created_at", "namespace"),
("priority", "status", "type", "namespace", "unicity_id", "created_at", "-difficulty"),
("status", "type"),
("unicity_id", "-created_at", "status"),
{
"fields": ["finished_at"],
"expireAfterSeconds": QUEUE_TTL_SECONDS,
"partialFilterExpression": {"status": {"$in": [Status.SUCCESS, Status.ERROR, Status.CANCELLED]}},
},
],
}
type = StringField(required=True)
dataset = StringField(required=True)
revision = StringField(required=True)
config = StringField()
split = StringField()
unicity_id = StringField(required=True)
namespace = StringField(required=True)
priority = EnumField(Priority, default=Priority.LOW)
status = EnumField(Status, default=Status.WAITING)
difficulty = IntField(required=True)
created_at = DateTimeField(required=True)
started_at = DateTimeField()
finished_at = DateTimeField()
last_heartbeat = DateTimeField()
def to_dict(self) -> JobDict:
return {
"type": self.type,
"dataset": self.dataset,
"revision": self.revision,
"config": self.config,
"split": self.split,
"unicity_id": self.unicity_id,
"namespace": self.namespace,
"priority": self.priority.value,
"status": self.status.value,
"difficulty": self.difficulty,
"created_at": self.created_at,
"started_at": self.started_at,
"finished_at": self.finished_at,
"last_heartbeat": self.last_heartbeat,
}
objects = QuerySetManager["JobDocument"]()
def info(self) -> JobInfo:
return JobInfo(
{
"job_id": str(self.pk), # job.pk is the id. job.id is not recognized by mypy
"type": self.type,
"params": {
"dataset": self.dataset,
"revision": self.revision,
"config": self.config,
"split": self.split,
},
"priority": self.priority,
"difficulty": self.difficulty,
}
)
@classmethod
def get(cls, job_id: str) -> "JobDocument":
try:
return cls.objects(pk=job_id).get()
except DoesNotExist as e:
raise JobDoesNotExistError(f"Job does not exist: {job_id=}") from e
def flat_info(self) -> FlatJobInfo:
return FlatJobInfo(
{
"job_id": str(self.pk), # job.pk is the id. job.id is not recognized by mypy
"type": self.type,
"dataset": self.dataset,
"revision": self.revision,
"config": self.config,
"split": self.split,
"priority": self.priority.value,
"status": self.status.value,
"difficulty": self.difficulty,
"created_at": self.created_at,
}
)
DEFAULT_INCREASE_AMOUNT = 1
DEFAULT_DECREASE_AMOUNT = -1
class JobTotalMetricDocument(Document):
"""Jobs total metric in mongoDB database, used to compute prometheus metrics.
Args:
job_type (`str`): job type
status (`str`): job status see libcommon.queue.Status
total (`int`): total of jobs
created_at (`datetime`): when the metric has been created.
"""
id = ObjectIdField(db_field="_id", primary_key=True, default=ObjectId)
job_type = StringField(required=True, unique_with="status")
status = StringField(required=True)
total = IntField(required=True, default=0)
created_at = DateTimeField(default=get_datetime)
meta = {
"collection": QUEUE_METRICS_COLLECTION,
"db_alias": QUEUE_MONGOENGINE_ALIAS,
"indexes": [("job_type", "status")],
}
objects = QuerySetManager["JobTotalMetricDocument"]()
def _update_metrics(job_type: str, status: str, increase_by: int) -> None:
JobTotalMetricDocument.objects(job_type=job_type, status=status).update(
upsert=True,
write_concern={"w": "majority", "fsync": True},
read_concern={"level": "majority"},
inc__total=increase_by,
)
def increase_metric(job_type: str, status: str) -> None:
_update_metrics(job_type=job_type, status=status, increase_by=DEFAULT_INCREASE_AMOUNT)
def decrease_metric(job_type: str, status: str) -> None:
_update_metrics(job_type=job_type, status=status, increase_by=DEFAULT_DECREASE_AMOUNT)
def update_metrics_for_type(job_type: str, previous_status: str, new_status: str) -> None:
if job_type is not None:
decrease_metric(job_type=job_type, status=previous_status)
increase_metric(job_type=job_type, status=new_status)
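# For example, when a job of type "dataset-config-names" moves from WAITING to STARTED,
# update_metrics_for_type decrements the (job_type, WAITING) counter and increments the
# (job_type, STARTED) counter; both writes are upserts, so missing counter documents are
# created on the fly.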
class Lock(Document):
meta = {
"collection": QUEUE_COLLECTION_LOCKS,
"db_alias": QUEUE_MONGOENGINE_ALIAS,
"indexes": [
("key", "owner"),
{
"fields": ["updated_at"],
"expireAfterSeconds": LOCK_TTL_SECONDS,
"partialFilterExpression": {"$or": [{"owner": None}, {"ttl": LOCK_TTL_SECONDS}]},
},
],
}
key = StringField(primary_key=True)
owner = StringField()
ttl = IntField()
job_id = StringField() # deprecated
created_at = DateTimeField()
updated_at = DateTimeField()
objects = QuerySetManager["Lock"]()
class lock(contextlib.AbstractContextManager["lock"]):
"""
Provides a simple way of inter-applications communication using a MongoDB lock.
    An example usage is to indicate to another worker of your application that a resource
    or working directory is currently being used by a job.
Example of usage:
```python
key = json.dumps({"type": job.type, "dataset": job.dataset})
with lock(key=key, owner=job.pk):
...
```
Or using a try/except:
```python
try:
key = json.dumps({"type": job.type, "dataset": job.dataset})
lock(key=key, owner=job.pk).acquire()
except TimeoutError:
...
```
"""
_default_sleeps = (0.05, 0.05, 0.05, 1, 1, 1, 5)
def __init__(
self, key: str, owner: str, sleeps: Sequence[float] = _default_sleeps, ttl: Optional[int] = None
) -> None:
self.key = key
self.owner = owner
self.sleeps = sleeps
self.ttl = ttl
if ttl is not None and ttl != LOCK_TTL_SECONDS:
raise ValueError(f"Only TTL of LOCK_TTL_SECONDS={LOCK_TTL_SECONDS} is supported by the TTL index.")
def acquire(self) -> None:
for sleep in self.sleeps:
try:
Lock.objects(key=self.key, owner__in=[None, self.owner]).update(
upsert=True,
write_concern={"w": "majority", "fsync": True},
read_concern={"level": "majority"},
owner=self.owner,
updated_at=get_datetime(),
ttl=self.ttl,
)
return
except NotUniqueError:
logging.debug(f"Sleep {sleep}s to acquire lock '{self.key}' for owner='{self.owner}'")
time.sleep(sleep)
raise TimeoutError("lock couldn't be acquired")
def release(self) -> None:
Lock.objects(key=self.key, owner=self.owner).update(
write_concern={"w": "majority", "fsync": True},
read_concern={"level": "majority"},
owner=None,
updated_at=get_datetime(),
)
def __enter__(self) -> "lock":
self.acquire()
return self
def __exit__(
self, exctype: Optional[type[BaseException]], excinst: Optional[BaseException], exctb: Optional[TracebackType]
) -> Literal[False]:
self.release()
return False
@classmethod
def git_branch(cls, dataset: str, branch: str, owner: str, sleeps: Sequence[float] = _default_sleeps) -> "lock":
"""
Lock a git branch of a dataset on the hub for read/write
Args:
dataset (`str`): the dataset repository
branch (`str`): the branch to lock
owner (`str`): the current job id that holds the lock
sleeps (`Sequence[float]`): the time in seconds to sleep between each attempt to acquire the lock
"""
key = json.dumps({"dataset": dataset, "branch": branch})
return cls(key=key, owner=owner, sleeps=sleeps)
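# A minimal usage sketch of the git branch lock (illustrative only: the dataset, branch and
# owner values below are made up):
def _example_git_branch_lock() -> None:
    with lock.git_branch(dataset="user/dataset", branch="refs/convert/parquet", owner=str(uuid4())):
        ...  # operations on the branch go here, serialized across workers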
def release_locks(owner: str) -> None:
"""
Release all locks owned by the given owner
Args:
owner (`str`): the current owner that holds the locks
"""
Lock.objects(owner=owner).update(
write_concern={"w": "majority", "fsync": True},
read_concern={"level": "majority"},
owner=None,
updated_at=get_datetime(),
)
class Queue:
"""A queue manages jobs.
    Note that creating a Queue object does not create the queue in the database. It is a view that allows manipulating
    the jobs. You can create multiple Queue objects; it has no effect on the database.
It's a FIFO queue, with the following properties:
- a job is identified by its input arguments: unicity_id (type, dataset, config and split, NOT revision)
- a job can be in one of the following states: waiting, started, success, error, cancelled
- a job can be in the queue only once (unicity_id) in the "started" state
- a job can be in the queue multiple times in the other states
- a job has a priority (two levels: NORMAL and LOW)
- a job has a difficulty (from 0: easy to 100: hard, as a convention)
- the queue is ordered by priority then by the creation date of the jobs
- datasets and users that already have started jobs are de-prioritized (using namespace)
"""
def add_job(
self,
job_type: str,
dataset: str,
revision: str,
difficulty: int,
config: Optional[str] = None,
split: Optional[str] = None,
priority: Priority = Priority.LOW,
) -> JobDocument:
"""Add a job to the queue in the waiting state.
Note that the same "unicity_id" can have multiple jobs in the waiting state, with the same or different
        revisions and/or priorities.
Args:
job_type (`str`): The type of the job
dataset (`str`): The dataset on which to apply the job.
revision (`str`): The git revision of the dataset.
difficulty (`int`): The difficulty of the job.
config (`str`, optional): The config on which to apply the job.
            split (`str`, optional): The split on which to apply the job.
priority (`Priority`, optional): The priority of the job. Defaults to Priority.LOW.
Returns: the job
"""
increase_metric(job_type=job_type, status=Status.WAITING)
return JobDocument(
type=job_type,
dataset=dataset,
revision=revision,
config=config,
split=split,
unicity_id=inputs_to_string(dataset=dataset, config=config, split=split, prefix=job_type),
namespace=dataset.split("/")[0],
priority=priority,
created_at=get_datetime(),
status=Status.WAITING,
difficulty=difficulty,
).save()
def create_jobs(self, job_infos: list[JobInfo]) -> int:
"""Creates jobs in the queue.
They are created in the waiting state.
Args:
job_infos (`list[JobInfo]`): The jobs to be created.
Returns:
`int`: The number of created jobs. 0 if we had an exception.
"""
try:
jobs = [
JobDocument(
type=job_info["type"],
dataset=job_info["params"]["dataset"],
revision=job_info["params"]["revision"],
config=job_info["params"]["config"],
split=job_info["params"]["split"],
unicity_id=inputs_to_string(
dataset=job_info["params"]["dataset"],
config=job_info["params"]["config"],
split=job_info["params"]["split"],
prefix=job_info["type"],
),
namespace=job_info["params"]["dataset"].split("/")[0],
priority=job_info["priority"],
created_at=get_datetime(),
status=Status.WAITING,
difficulty=job_info["difficulty"],
)
for job_info in job_infos
]
for job in jobs:
increase_metric(job_type=job.type, status=Status.WAITING)
job_ids = JobDocument.objects.insert(jobs, load_bulk=False)
return len(job_ids)
except Exception:
return 0
def cancel_jobs_by_job_id(self, job_ids: list[str]) -> int:
"""Cancel jobs from the queue.
If the job ids are not valid, they are ignored.
Args:
job_ids (`list[str]`): The list of job ids to cancel.
Returns:
`int`: The number of canceled jobs
"""
try:
existing = JobDocument.objects(pk__in=job_ids)
previous_status = [(job.type, job.status) for job in existing.all()]
existing.update(finished_at=get_datetime(), status=Status.CANCELLED)
for job_type, status in previous_status:
update_metrics_for_type(job_type=job_type, previous_status=status, new_status=Status.CANCELLED)
return existing.count()
except Exception:
return 0
def _get_next_waiting_job_for_priority(
self,
priority: Priority,
difficulty_min: Optional[int] = None,
difficulty_max: Optional[int] = None,
job_types_blocked: Optional[list[str]] = None,
job_types_only: Optional[list[str]] = None,
) -> JobDocument:
"""Get the next job in the queue for a given priority.
For a given priority, get the waiting job with the oldest creation date:
- among the datasets that still have no started job.
- if none, among the datasets that have the least started jobs:
- ensuring that the unicity_id field is unique among the started jobs.
Args:
priority (`Priority`): The priority of the job.
            difficulty_min: if not None, only jobs with a difficulty greater than or equal to this value are considered.
            difficulty_max: if not None, only jobs with a difficulty lower than or equal to this value are considered.
job_types_blocked: if not None, jobs of the given types are not considered.
job_types_only: if not None, only jobs of the given types are considered.
Raises:
EmptyQueueError: if there is no waiting job in the queue that satisfies the restrictions above.
Returns: the job
"""
logging.debug(
f"Getting next waiting job for priority {priority}, blocked types: {job_types_blocked}, only types:"
f" {job_types_only}"
)
filters: JobQueryFilters = {}
if job_types_blocked:
filters["type__nin"] = job_types_blocked
if job_types_only:
filters["type__in"] = job_types_only
if difficulty_min is not None and difficulty_min > DEFAULT_DIFFICULTY_MIN:
filters["difficulty__gte"] = difficulty_min
if difficulty_max is not None and difficulty_max < DEFAULT_DIFFICULTY_MAX:
filters["difficulty__lte"] = difficulty_max
started_jobs = JobDocument.objects(status=Status.STARTED, **filters)
logging.debug(f"Number of started jobs: {started_jobs.count()}")
started_job_namespaces = [job.namespace for job in started_jobs.only("namespace")]
logging.debug(f"Started job namespaces: {started_job_namespaces}")
next_waiting_job = (
JobDocument.objects(
status=Status.WAITING, namespace__nin=set(started_job_namespaces), priority=priority, **filters
)
.order_by("+created_at")
.only("type", "dataset", "revision", "config", "split", "priority", "unicity_id")
.no_cache()
.first()
)
# ^ no_cache should generate a query on every iteration, which should solve concurrency issues between workers
if next_waiting_job is not None:
return next_waiting_job
logging.debug("No waiting job for namespace without started job")
# all the waiting jobs, if any, are for namespaces that already have started jobs.
#
# Let's:
        # - exclude the waiting jobs whose unicity_id is already in a started job
# and, among the remaining waiting jobs, let's:
# - select the oldest waiting job for the namespace with the least number of started jobs
started_unicity_ids = {job.unicity_id for job in started_jobs.only("unicity_id")}
descending_frequency_namespace_counts = [
[namespace, count] for namespace, count in Counter(started_job_namespaces).most_common()
]
logging.debug(f"Descending frequency namespace counts: {descending_frequency_namespace_counts}")
descending_frequency_namespace_groups = [
[item[0] for item in data] for (_, data) in groupby(descending_frequency_namespace_counts, itemgetter(1))
]
# maybe we could get rid of this loop
while descending_frequency_namespace_groups:
least_common_namespaces_group = descending_frequency_namespace_groups.pop()
logging.debug(f"Least common namespaces group: {least_common_namespaces_group}")
next_waiting_job = (
JobDocument.objects(
status=Status.WAITING,
namespace__in=least_common_namespaces_group,
unicity_id__nin=started_unicity_ids,
priority=priority,
**filters,
)
.order_by("+created_at")
.only("type", "dataset", "revision", "config", "split", "priority", "unicity_id")
.no_cache()
.first()
)
if next_waiting_job is not None:
return next_waiting_job
raise EmptyQueueError("no job available with the priority")
def get_next_waiting_job(
self,
difficulty_min: Optional[int] = None,
difficulty_max: Optional[int] = None,
job_types_blocked: Optional[list[str]] = None,
job_types_only: Optional[list[str]] = None,
) -> JobDocument:
"""Get the next job in the queue.
Get the waiting job with the oldest creation date with the following criteria:
- among the highest priority jobs,
- among the datasets that still have no started job.
- if none, among the datasets that have the least started jobs:
- ensuring that the unicity_id field is unique among the started jobs.
Args:
            difficulty_min: if not None, only jobs with a difficulty greater than or equal to this value are considered.
            difficulty_max: if not None, only jobs with a difficulty lower than or equal to this value are considered.
job_types_blocked: if not None, jobs of the given types are not considered.
job_types_only: if not None, only jobs of the given types are considered.
Raises:
EmptyQueueError: if there is no waiting job in the queue that satisfies the restrictions above.
Returns: the job
"""
for priority in [Priority.NORMAL, Priority.LOW]:
with contextlib.suppress(EmptyQueueError):
return self._get_next_waiting_job_for_priority(
priority=priority,
job_types_blocked=job_types_blocked,
job_types_only=job_types_only,
difficulty_min=difficulty_min,
difficulty_max=difficulty_max,
)
raise EmptyQueueError("no job available")
def _start_newest_job_and_cancel_others(self, job: JobDocument) -> JobDocument:
"""Start a job (the newest one for unicity_id) and cancel the other ones.
A lock is used to ensure that the job is not started by another worker.
Args:
job: the job to start
Returns:
the started job
Raises:
AlreadyStartedJobError: if a started job already exist for the same unicity_id.
LockTimeoutError: if the lock could not be acquired after 20 retries.
"""
# could be a method of Job
RETRIES = 20
# uuid is used to differentiate between workers
# otherwise another worker might acquire the lock
lock_owner = str(uuid4())
try:
# retry for 2 seconds
with lock(key=job.unicity_id, owner=lock_owner, sleeps=[0.1] * RETRIES, ttl=LOCK_TTL_SECONDS):
# get all the pending jobs for the same unicity_id
waiting_jobs = JobDocument.objects(
unicity_id=job.unicity_id, status__in=[Status.WAITING, Status.STARTED]
).order_by("-created_at")
datetime = get_datetime()
# raise if any job has already been started for unicity_id
num_started_jobs = waiting_jobs(status=Status.STARTED).count()
if num_started_jobs > 0:
if num_started_jobs > 1:
logging.critical(f"job {job.unicity_id} has been started {num_started_jobs} times. Max is 1.")
raise AlreadyStartedJobError(f"job {job.unicity_id} has been started by another worker")
# get the most recent one
first_job = waiting_jobs.first()
if not first_job:
raise NoWaitingJobError(f"no waiting job could be found for {job.unicity_id}")
# start it
if not JobDocument.objects(pk=str(first_job.pk), status=Status.WAITING).update(
started_at=datetime,
status=Status.STARTED,
write_concern={"w": "majority", "fsync": True},
read_concern={"level": "majority"},
):
raise AlreadyStartedJobError(f"job {job.unicity_id} has been started by another worker")
update_metrics_for_type(
job_type=first_job.type, previous_status=Status.WAITING, new_status=Status.STARTED
)
# and cancel the other ones, if any
waiting_jobs(status=Status.WAITING).update(
finished_at=datetime,
status=Status.CANCELLED,
write_concern={"w": "majority", "fsync": True},
read_concern={"level": "majority"},
)
for waiting_job in waiting_jobs(status=Status.WAITING):
update_metrics_for_type(
job_type=waiting_job.type, previous_status=Status.WAITING, new_status=Status.CANCELLED
)
return first_job.reload()
except TimeoutError as err:
raise LockTimeoutError(
f"could not acquire the lock for job {job.unicity_id} after {RETRIES} retries."
) from err
def start_job(
self,
difficulty_min: Optional[int] = None,
difficulty_max: Optional[int] = None,
job_types_blocked: Optional[list[str]] = None,
job_types_only: Optional[list[str]] = None,
) -> JobInfo:
"""Start the next job in the queue.
The job is moved from the waiting state to the started state. A lock is used to ensure that only one worker
can start a job at a time.
Args:
difficulty_min: if not None, only jobs with a difficulty greater or equal to this value are considered.
difficulty_max: if not None, only jobs with a difficulty lower or equal to this value are considered.
job_types_blocked: if not None, jobs of the given types are not considered.
job_types_only: if not None, only jobs of the given types are considered.
Raises:
EmptyQueueError: if there is no job in the queue, within the limit of the maximum number of started jobs
for a dataset
            AlreadyStartedJobError: if a started job already exists for the same unicity_id
LockTimeoutError: if the lock cannot be acquired
Returns: the job id, the type, the input arguments: dataset, revision, config and split
"""
logging.debug(f"looking for a job to start, blocked types: {job_types_blocked}, only types: {job_types_only}")
next_waiting_job = self.get_next_waiting_job(
job_types_blocked=job_types_blocked,
job_types_only=job_types_only,
difficulty_min=difficulty_min,
difficulty_max=difficulty_max,
)
logging.debug(f"job found: {next_waiting_job}")
# ^ can raise EmptyQueueError
if job_types_blocked and next_waiting_job.type in job_types_blocked:
raise RuntimeError(
f"The job type {next_waiting_job.type} is in the list of blocked job types {job_types_only}"
)
if job_types_only and next_waiting_job.type not in job_types_only:
raise RuntimeError(
f"The job type {next_waiting_job.type} is not in the list of allowed job types {job_types_only}"
)
started_job = self._start_newest_job_and_cancel_others(job=next_waiting_job)
return started_job.info()
def get_job_with_id(self, job_id: str) -> JobDocument:
"""Get the job for a given job id.
Args:
job_id (`str`, required): id of the job
Returns: the requested job
Raises:
DoesNotExist: if the job does not exist
"""
return JobDocument.objects(pk=job_id).get()
def get_job_type(self, job_id: str) -> str:
"""Get the job type for a given job id.
Args:
job_id (`str`, required): id of the job
Returns: the job type
Raises:
DoesNotExist: if the job does not exist
"""
job = self.get_job_with_id(job_id=job_id)
return job.type
def _get_started_job(self, job_id: str) -> JobDocument:
"""Get a started job, and raise if it's not in the correct format
(does not exist, not started, incorrect values for finished_at or started_at).
Args:
job_id (`str`, required): id of the job
Returns:
`Job`: the started job
"""
job = JobDocument.objects(pk=job_id).get()
if job.status is not Status.STARTED:
raise StartedJobError(f"job {job.unicity_id} has a not the STARTED status ({job.status.value}).")
if job.finished_at is not None:
raise StartedJobError(f"job {job.unicity_id} has a non-empty finished_at field.")
if job.started_at is None:
raise StartedJobError(f"job {job.unicity_id} has an empty started_at field.")
return job
def is_job_started(self, job_id: str) -> bool:
"""Check if a job is started, with the correct values for finished_at and started_at.
Args:
job_id (`str`, required): id of the job
Returns:
            `bool`: whether the job exists, is started, and has the expected format (STARTED status, non-empty
started_at, empty finished_at)
"""
try:
self._get_started_job(job_id=job_id)
except DoesNotExist:
logging.error(f"job {job_id} does not exist.")
return False
except StartedJobError as e:
logging.debug(f"job {job_id} has not the expected format for a started job: {e}")
return False
return True
def finish_job(self, job_id: str, is_success: bool) -> bool:
"""Finish a job in the queue.
The job is moved from the started state to the success or error state. The existing locks are released.
Args:
job_id (`str`, required): id of the job
is_success (`bool`, required): whether the job succeeded or not
Returns:
`bool`: whether the job existed, and had the expected format (STARTED status, non-empty started_at, empty
finished_at) before finishing
"""
try:
job = self._get_started_job(job_id=job_id)
except DoesNotExist:
logging.error(f"job {job_id} does not exist. Aborting.")
return False
except StartedJobError as e:
logging.error(f"job {job_id} has not the expected format for a started job. Aborting: {e}")
return False
finished_status = Status.SUCCESS if is_success else Status.ERROR
previous_status = job.status
job.update(finished_at=get_datetime(), status=finished_status)
update_metrics_for_type(job_type=job.type, previous_status=previous_status, new_status=finished_status)
release_locks(owner=job_id)
return True
def is_job_in_process(
self, job_type: str, dataset: str, revision: str, config: Optional[str] = None, split: Optional[str] = None
) -> bool:
"""Check if a job is in process (waiting or started).
Args:
job_type (`str`, required): job type
dataset (`str`, required): dataset name
revision (`str`, required): dataset git revision
config (`str`, optional): config name. Defaults to None.
split (`str`, optional): split name. Defaults to None.
Returns:
`bool`: whether the job is in process (waiting or started)
"""
return (
JobDocument.objects(
type=job_type,
dataset=dataset,
revision=revision,
config=config,
split=split,
status__in=[Status.WAITING, Status.STARTED],
).count()
> 0
)
def _get_df(self, jobs: list[FlatJobInfo]) -> pd.DataFrame:
return pd.DataFrame(
{
"job_id": pd.Series([job["job_id"] for job in jobs], dtype="str"),
"type": pd.Series([job["type"] for job in jobs], dtype="category"),
"dataset": pd.Series([job["dataset"] for job in jobs], dtype="str"),
"revision": pd.Series([job["revision"] for job in jobs], dtype="str"),
"config": pd.Series([job["config"] for job in jobs], dtype="str"),
"split": pd.Series([job["split"] for job in jobs], dtype="str"),
"priority": pd.Categorical(
[job["priority"] for job in jobs],
ordered=True,
categories=[Priority.LOW.value, Priority.NORMAL.value],
),
"status": pd.Categorical(
[job["status"] for job in jobs],
ordered=True,
categories=[
Status.WAITING.value,
Status.STARTED.value,
Status.SUCCESS.value,
Status.ERROR.value,
Status.CANCELLED.value,
],
),
"created_at": pd.Series([job["created_at"] for job in jobs], dtype="datetime64[ns]"),
}
)
# ^ does not seem optimal at all, but I get the types right
def get_pending_jobs_df(self, dataset: str, job_types: Optional[list[str]] = None) -> pd.DataFrame:
filters = {}
if job_types:
filters["type__in"] = job_types
return self._get_df(
[
job.flat_info()
for job in JobDocument.objects(status__in=[Status.WAITING, Status.STARTED], **filters, dataset=dataset)
]
)
def has_pending_jobs(self, dataset: str, job_types: Optional[list[str]] = None) -> bool:
filters = {}
if job_types:
filters["type__in"] = job_types
return JobDocument.objects(status__in=[Status.WAITING, Status.STARTED], **filters, dataset=dataset).count() > 0
# special reports
def count_jobs(self, status: Status, job_type: str) -> int:
"""Count the number of jobs with a given status and the given type.
Args:
status (`Status`, required): status of the jobs
job_type (`str`, required): job type
Returns: the number of jobs with the given status and the given type.
"""
return JobDocument.objects(type=job_type, status=status.value).count()
def get_jobs_count_by_status(self, job_type: str) -> CountByStatus:
"""Count the number of jobs by status for a given job type.
Returns: a dictionary with the number of jobs for each status
"""
# ensure that all the statuses are present, even if equal to zero
# note: we repeat the values instead of looping on Status because we don't know how to get the types right
# in mypy
# result: CountByStatus = {s.value: jobs(status=s.value).count() for s in Status} # <- doesn't work in mypy
# see https://stackoverflow.com/a/67292548/7351594
return {
"waiting": self.count_jobs(status=Status.WAITING, job_type=job_type),
"started": self.count_jobs(status=Status.STARTED, job_type=job_type),
"success": self.count_jobs(status=Status.SUCCESS, job_type=job_type),
"error": self.count_jobs(status=Status.ERROR, job_type=job_type),
"cancelled": self.count_jobs(status=Status.CANCELLED, job_type=job_type),
}
def get_dump_with_status(self, status: Status, job_type: str) -> list[JobDict]:
"""Get the dump of the jobs with a given status and a given type.
Args:
status (`Status`, required): status of the jobs
job_type (`str`, required): job type
Returns: a list of jobs with the given status and the given type
"""
return [d.to_dict() for d in JobDocument.objects(status=status.value, type=job_type)]
def get_dump_by_pending_status(self, job_type: str) -> DumpByPendingStatus:
"""Get the dump of the jobs by pending status for a given job type.
Returns: a dictionary with the dump of the jobs for each pending status
"""
return {
"waiting": self.get_dump_with_status(job_type=job_type, status=Status.WAITING),
"started": self.get_dump_with_status(job_type=job_type, status=Status.STARTED),
}
def get_dataset_pending_jobs_for_type(self, dataset: str, job_type: str) -> list[JobDict]:
"""Get the pending jobs of a dataset for a given job type.
Returns: an array of the pending jobs for the dataset and the given job type
"""
return [
d.to_dict()
for d in JobDocument.objects(
status__in=[Status.WAITING.value, Status.STARTED.value], type=job_type, dataset=dataset
)
]
def heartbeat(self, job_id: str) -> None:
"""Update the job `last_heartbeat` field with the current date.
This is used to keep track of running jobs.
If a job doesn't have recent heartbeats, it means it crashed at one point and is considered a zombie.
"""
try:
job = self.get_job_with_id(job_id)
except DoesNotExist:
logging.warning(f"Heartbeat skipped because job {job_id} doesn't exist in the queue.")
return
# no need to update metrics since it is just the last_heartbeat
job.update(last_heartbeat=get_datetime())
def get_zombies(self, max_seconds_without_heartbeat: float) -> list[JobInfo]:
"""Get the zombie jobs.
It returns jobs without recent heartbeats, which means they crashed at one point and became zombies.
        Usually `max_seconds_without_heartbeat` is a multiple of the time between two heartbeats.
Returns: an array of the zombie job infos.
"""
started_jobs = JobDocument.objects(status=Status.STARTED)
if max_seconds_without_heartbeat <= 0:
return []
zombies = [
job
for job in started_jobs
if (
job.last_heartbeat is not None
and get_datetime()
>= pytz.UTC.localize(job.last_heartbeat) + timedelta(seconds=max_seconds_without_heartbeat)
)
or (
job.last_heartbeat is None
and job.started_at is not None
and get_datetime()
>= pytz.UTC.localize(job.started_at) + timedelta(seconds=max_seconds_without_heartbeat)
)
]
return [zombie.info() for zombie in zombies]
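# Minimal sketch of a zombie-cleanup loop built on get_zombies(): jobs whose last
# heartbeat is too old are marked as errored so that they can be recreated later.
# It assumes the queue class defined above is exposed as `Queue` and that JobInfo
# exposes the job id under the "job_id" key; both names are assumptions used only
# for illustration, and a MongoDB connection must already be established.
def _example_kill_zombies(max_seconds_without_heartbeat: float = 600.0) -> int:
    queue = Queue()  # assumed name of the class defined above
    zombies = queue.get_zombies(max_seconds_without_heartbeat=max_seconds_without_heartbeat)
    for zombie in zombies:
        # finish_job releases the job's locks and updates the metrics
        queue.finish_job(job_id=zombie["job_id"], is_success=False)
    return len(zombies)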
# only for the tests
def _clean_queue_database() -> None:
"""Delete all the jobs in the database"""
JobDocument.drop_collection() # type: ignore
JobTotalMetricDocument.drop_collection() # type: ignore
Lock.drop_collection() # type: ignore
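# Minimal worker-loop sketch built on the queue methods above: pick a job, send a
# heartbeat while processing, then report success or failure. The class name
# `Queue` and the "job_id" key of JobInfo are assumptions for illustration, and a
# MongoDB connection must already be established.
def _example_process_one_job() -> None:
    queue = Queue()  # assumed name of the class defined above
    try:
        job_info = queue.start_job(difficulty_min=0, difficulty_max=100)
    except EmptyQueueError:
        logging.debug("no job to process")
        return
    try:
        # a real worker would send heartbeats periodically while the job is running
        queue.heartbeat(job_id=job_info["job_id"])
        # ... process the job here ...
        queue.finish_job(job_id=job_info["job_id"], is_success=True)
    except Exception:
        queue.finish_job(job_id=job_info["job_id"], is_success=False)
        raise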
| datasets-server-main | libs/libcommon/src/libcommon/queue.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
from dataclasses import dataclass, field
from typing import Optional
from environs import Env
from libcommon.constants import (
PROCESSING_STEP_CONFIG_INFO_VERSION,
PROCESSING_STEP_CONFIG_IS_VALID_VERSION,
PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION,
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_VERSION,
PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION,
PROCESSING_STEP_CONFIG_PARQUET_VERSION,
PROCESSING_STEP_CONFIG_SIZE_VERSION,
PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION,
PROCESSING_STEP_DATASET_HUB_CACHE_VERSION,
PROCESSING_STEP_DATASET_INFO_VERSION,
PROCESSING_STEP_DATASET_IS_VALID_VERSION,
PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VERSION,
PROCESSING_STEP_DATASET_PARQUET_VERSION,
PROCESSING_STEP_DATASET_SIZE_VERSION,
PROCESSING_STEP_DATASET_SPLIT_NAMES_VERSION,
PROCESSING_STEP_SPLIT_DESCRIPTIVE_STATISTICS_VERSION,
PROCESSING_STEP_SPLIT_DUCKDB_INDEX_VERSION,
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION,
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
PROCESSING_STEP_SPLIT_IS_VALID_VERSION,
PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION,
PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION,
)
from libcommon.processing_graph import ProcessingGraphSpecification
ASSETS_BASE_URL = "assets"
ASSETS_STORAGE_DIRECTORY = None
@dataclass(frozen=True)
class AssetsConfig:
base_url: str = ASSETS_BASE_URL
storage_directory: Optional[str] = ASSETS_STORAGE_DIRECTORY
@classmethod
def from_env(cls) -> "AssetsConfig":
env = Env(expand_vars=True)
with env.prefixed("ASSETS_"):
return cls(
base_url=env.str(name="BASE_URL", default=ASSETS_BASE_URL),
storage_directory=env.str(name="STORAGE_DIRECTORY", default=ASSETS_STORAGE_DIRECTORY),
)
CACHED_ASSETS_BASE_URL = "cached-assets"
CACHED_ASSETS_STORAGE_DIRECTORY = None
CACHED_ASSETS_CLEAN_CACHE_PROBA = 0.05
CACHED_ASSETS_KEEP_FIRST_ROWS_NUMBER = 100
CACHED_ASSETS_KEEP_MOST_RECENT_ROWS_NUMBER = 200
CACHED_ASSETS_MAX_CLEANED_ROWS_NUMBER = 10_000
@dataclass(frozen=True)
class CachedAssetsConfig:
    base_url: str = CACHED_ASSETS_BASE_URL
storage_directory: Optional[str] = CACHED_ASSETS_STORAGE_DIRECTORY
clean_cache_proba: float = CACHED_ASSETS_CLEAN_CACHE_PROBA
keep_first_rows_number: int = CACHED_ASSETS_KEEP_FIRST_ROWS_NUMBER
keep_most_recent_rows_number: int = CACHED_ASSETS_KEEP_MOST_RECENT_ROWS_NUMBER
max_cleaned_rows_number: int = CACHED_ASSETS_MAX_CLEANED_ROWS_NUMBER
@classmethod
def from_env(cls) -> "CachedAssetsConfig":
env = Env(expand_vars=True)
with env.prefixed("CACHED_ASSETS_"):
return cls(
base_url=env.str(name="BASE_URL", default=CACHED_ASSETS_BASE_URL),
storage_directory=env.str(name="STORAGE_DIRECTORY", default=CACHED_ASSETS_STORAGE_DIRECTORY),
clean_cache_proba=env.float(name="CLEAN_CACHE_PROBA", default=CACHED_ASSETS_CLEAN_CACHE_PROBA),
                keep_first_rows_number=env.int(
                    name="KEEP_FIRST_ROWS_NUMBER", default=CACHED_ASSETS_KEEP_FIRST_ROWS_NUMBER
                ),
                keep_most_recent_rows_number=env.int(
                    name="KEEP_MOST_RECENT_ROWS_NUMBER", default=CACHED_ASSETS_KEEP_MOST_RECENT_ROWS_NUMBER
                ),
                max_cleaned_rows_number=env.int(
                    name="MAX_CLEAN_SAMPLE_SIZE", default=CACHED_ASSETS_MAX_CLEANED_ROWS_NUMBER
                ),
)
PARQUET_METADATA_STORAGE_DIRECTORY = None
@dataclass(frozen=True)
class ParquetMetadataConfig:
storage_directory: Optional[str] = PARQUET_METADATA_STORAGE_DIRECTORY
@classmethod
def from_env(cls) -> "ParquetMetadataConfig":
env = Env(expand_vars=True)
with env.prefixed("PARQUET_METADATA_"):
return cls(
storage_directory=env.str(name="STORAGE_DIRECTORY", default=PARQUET_METADATA_STORAGE_DIRECTORY),
)
ROWS_INDEX_MAX_ARROW_DATA_IN_MEMORY = 300_000_000
@dataclass(frozen=True)
class RowsIndexConfig:
max_arrow_data_in_memory: int = ROWS_INDEX_MAX_ARROW_DATA_IN_MEMORY
@classmethod
def from_env(cls) -> "RowsIndexConfig":
env = Env(expand_vars=True)
with env.prefixed("ROWS_INDEX_"):
return cls(
max_arrow_data_in_memory=env.int(
name="MAX_ARROW_DATA_IN_MEMORY", default=ROWS_INDEX_MAX_ARROW_DATA_IN_MEMORY
),
)
COMMON_HF_ENDPOINT = "https://huggingface.co"
COMMON_HF_TOKEN = None
@dataclass(frozen=True)
class CommonConfig:
hf_endpoint: str = COMMON_HF_ENDPOINT
hf_token: Optional[str] = COMMON_HF_TOKEN
@classmethod
def from_env(cls) -> "CommonConfig":
env = Env(expand_vars=True)
with env.prefixed("COMMON_"):
return cls(
hf_endpoint=env.str(name="HF_ENDPOINT", default=COMMON_HF_ENDPOINT),
hf_token=env.str(name="HF_TOKEN", default=COMMON_HF_TOKEN), # nosec
)
LOG_LEVEL = logging.INFO
@dataclass(frozen=True)
class LogConfig:
level: int = LOG_LEVEL
@classmethod
def from_env(cls) -> "LogConfig":
env = Env(expand_vars=True)
with env.prefixed("LOG_"):
return cls(
level=env.log_level(name="LEVEL", default=LOG_LEVEL),
)
CACHE_MAX_DAYS = 90 # 3 months
CACHE_MONGO_DATABASE = "datasets_server_cache"
CACHE_MONGO_URL = "mongodb://localhost:27017"
@dataclass(frozen=True)
class CacheConfig:
max_days: int = CACHE_MAX_DAYS
mongo_database: str = CACHE_MONGO_DATABASE
mongo_url: str = CACHE_MONGO_URL
@classmethod
def from_env(cls) -> "CacheConfig":
env = Env(expand_vars=True)
with env.prefixed("CACHE_"):
return cls(
max_days=env.int(name="MAX_DAYS", default=CACHE_MAX_DAYS),
mongo_database=env.str(name="MONGO_DATABASE", default=CACHE_MONGO_DATABASE),
mongo_url=env.str(name="MONGO_URL", default=CACHE_MONGO_URL),
)
QUEUE_MONGO_DATABASE = "datasets_server_queue"
QUEUE_MONGO_URL = "mongodb://localhost:27017"
@dataclass(frozen=True)
class QueueConfig:
mongo_database: str = QUEUE_MONGO_DATABASE
mongo_url: str = QUEUE_MONGO_URL
@classmethod
def from_env(cls) -> "QueueConfig":
env = Env(expand_vars=True)
with env.prefixed("QUEUE_"):
return cls(
mongo_database=env.str(name="MONGO_DATABASE", default=QUEUE_MONGO_DATABASE),
mongo_url=env.str(name="MONGO_URL", default=QUEUE_MONGO_URL),
)
@dataclass(frozen=True)
class ProcessingGraphConfig:
specification: ProcessingGraphSpecification = field(
default_factory=lambda: {
"dataset-config-names": {
"input_type": "dataset",
"provides_dataset_config_names": True,
"job_runner_version": PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION,
"difficulty": 50,
},
"config-split-names-from-streaming": {
"input_type": "config",
"triggered_by": "dataset-config-names",
"provides_config_split_names": True,
"job_runner_version": PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
"difficulty": 60,
},
"split-first-rows-from-streaming": {
"input_type": "split",
"triggered_by": ["config-split-names-from-streaming", "config-split-names-from-info"],
"enables_preview": True,
"job_runner_version": PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
"difficulty": 70,
},
"config-parquet-and-info": {
"input_type": "config",
"triggered_by": "dataset-config-names",
"job_runner_version": PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_VERSION,
"difficulty": 70,
},
"config-parquet": {
"input_type": "config",
"triggered_by": "config-parquet-and-info",
"job_runner_version": PROCESSING_STEP_CONFIG_PARQUET_VERSION,
"provides_config_parquet": True,
"difficulty": 20,
},
"config-parquet-metadata": {
"input_type": "config",
"triggered_by": "config-parquet",
"job_runner_version": PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION,
"provides_config_parquet_metadata": True,
"difficulty": 50,
},
"split-first-rows-from-parquet": {
"input_type": "split",
"triggered_by": "config-parquet-metadata",
"enables_preview": True,
"job_runner_version": PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION,
"difficulty": 40,
},
"dataset-parquet": {
"input_type": "dataset",
"triggered_by": ["config-parquet", "dataset-config-names"],
"job_runner_version": PROCESSING_STEP_DATASET_PARQUET_VERSION,
"difficulty": 20,
},
"config-info": {
"input_type": "config",
"triggered_by": "config-parquet-and-info",
"job_runner_version": PROCESSING_STEP_CONFIG_INFO_VERSION,
"difficulty": 20,
},
"dataset-info": {
"input_type": "dataset",
"triggered_by": ["config-info", "dataset-config-names"],
"job_runner_version": PROCESSING_STEP_DATASET_INFO_VERSION,
"difficulty": 20,
},
"config-split-names-from-info": {
"input_type": "config",
"triggered_by": "config-info",
"provides_config_split_names": True,
"job_runner_version": PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
"difficulty": 20,
},
"config-size": {
"input_type": "config",
"triggered_by": "config-parquet-and-info",
"enables_viewer": True,
"job_runner_version": PROCESSING_STEP_CONFIG_SIZE_VERSION,
"difficulty": 20,
},
"dataset-size": {
"input_type": "dataset",
"triggered_by": ["config-size", "dataset-config-names"],
"job_runner_version": PROCESSING_STEP_DATASET_SIZE_VERSION,
"difficulty": 20,
},
"dataset-split-names": {
"input_type": "dataset",
"triggered_by": [
"config-split-names-from-info",
"config-split-names-from-streaming",
"dataset-config-names",
],
"job_runner_version": PROCESSING_STEP_DATASET_SPLIT_NAMES_VERSION,
"difficulty": 20,
},
"split-descriptive-statistics": {
"input_type": "split",
"triggered_by": [
"config-split-names-from-info",
"config-split-names-from-streaming",
],
"job_runner_version": PROCESSING_STEP_SPLIT_DESCRIPTIVE_STATISTICS_VERSION,
"difficulty": 70,
},
"split-is-valid": {
"input_type": "split",
# special case: triggered by all the steps that have enables_preview/enables_viewer/enables_search
"triggered_by": [
"config-size",
"split-first-rows-from-parquet",
"split-first-rows-from-streaming",
"split-duckdb-index",
],
"job_runner_version": PROCESSING_STEP_SPLIT_IS_VALID_VERSION,
"difficulty": 20,
},
"config-is-valid": {
"input_type": "config",
"triggered_by": [
"config-split-names-from-streaming",
"config-split-names-from-info",
"split-is-valid",
],
"job_runner_version": PROCESSING_STEP_CONFIG_IS_VALID_VERSION,
"difficulty": 20,
},
"dataset-is-valid": {
"input_type": "dataset",
"triggered_by": [
"dataset-config-names",
"config-is-valid",
],
"job_runner_version": PROCESSING_STEP_DATASET_IS_VALID_VERSION,
"difficulty": 20,
},
"split-image-url-columns": {
"input_type": "split",
"triggered_by": ["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
"job_runner_version": PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
"difficulty": 40,
},
"split-opt-in-out-urls-scan": {
"input_type": "split",
"triggered_by": ["split-image-url-columns"],
"job_runner_version": PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION,
"difficulty": 70,
},
"split-opt-in-out-urls-count": {
"input_type": "split",
"triggered_by": ["split-opt-in-out-urls-scan"],
"job_runner_version": PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION,
"difficulty": 20,
},
"config-opt-in-out-urls-count": {
"input_type": "config",
"triggered_by": [
"config-split-names-from-streaming",
"config-split-names-from-info",
"split-opt-in-out-urls-count",
],
"job_runner_version": PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION,
"difficulty": 20,
},
"dataset-opt-in-out-urls-count": {
"input_type": "dataset",
"triggered_by": ["dataset-config-names", "config-opt-in-out-urls-count"],
"job_runner_version": PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VERSION,
"difficulty": 20,
},
"split-duckdb-index": {
"input_type": "split",
"triggered_by": [
"config-split-names-from-info",
"config-split-names-from-streaming",
"config-parquet-and-info",
],
"enables_search": True,
"job_runner_version": PROCESSING_STEP_SPLIT_DUCKDB_INDEX_VERSION,
"difficulty": 70,
},
"dataset-hub-cache": {
"input_type": "dataset",
"triggered_by": ["dataset-is-valid", "dataset-size"],
"job_runner_version": PROCESSING_STEP_DATASET_HUB_CACHE_VERSION,
"difficulty": 20,
},
}
)
@classmethod
def from_env(cls) -> "ProcessingGraphConfig":
# TODO: allow passing the graph via env vars
return cls()
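# Minimal sketch of how these dataclasses are typically combined: each config reads
# its own "<PREFIX>_" environment variables through environs. The variable values
# below are placeholders, and the __main__ guard is only illustrative.
if __name__ == "__main__":
    import os

    os.environ.setdefault("QUEUE_MONGO_URL", "mongodb://localhost:27017")
    os.environ.setdefault("LOG_LEVEL", "DEBUG")
    log_config = LogConfig.from_env()
    queue_config = QueueConfig.from_env()
    processing_graph_config = ProcessingGraphConfig.from_env()
    logging.basicConfig(level=log_config.level)
    logging.debug(f"queue database: {queue_config.mongo_database} at {queue_config.mongo_url}")
    logging.debug(f"{len(processing_graph_config.specification)} processing steps configured")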
| datasets-server-main | libs/libcommon/src/libcommon/config.py |
import asyncio
import logging
import os
from dataclasses import dataclass, field
from functools import lru_cache
from typing import Literal, Optional, TypedDict, Union
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from datasets import Features
from datasets.features.features import FeatureType
from datasets.utils.py_utils import size_str
from fsspec.implementations.http import HTTPFile, HTTPFileSystem
from huggingface_hub import HfFileSystem
from libcommon.processing_graph import ProcessingGraph
from libcommon.prometheus import StepProfiler
from libcommon.simple_cache import get_previous_step_or_raise
from libcommon.storage import StrPath
from libcommon.viewer_utils.features import get_supported_unsupported_columns
class ParquetResponseEmptyError(Exception):
pass
class ParquetResponseFormatError(Exception):
pass
class FileSystemError(Exception):
pass
class TooBigRows(Exception):
pass
class ParquetFileMetadataItem(TypedDict):
dataset: str
config: str
split: str
url: str
filename: str
size: int
num_rows: int
parquet_metadata_subpath: str
@dataclass
class RowGroupReader:
parquet_file: pq.ParquetFile
group_id: int
def read(self, columns: list[str]) -> pa.Table:
return self.parquet_file.read_row_group(i=self.group_id, columns=columns)
def read_size(self) -> int:
return self.parquet_file.metadata.row_group(self.group_id).total_byte_size # type: ignore
@dataclass
class ParquetIndexWithMetadata:
features: Features
supported_columns: list[str]
unsupported_columns: list[str]
parquet_files_urls: list[str]
metadata_paths: list[str]
num_bytes: list[int]
num_rows: list[int]
httpfs: HTTPFileSystem
hf_token: Optional[str]
max_arrow_data_in_memory: int
num_rows_total: int = field(init=False)
def __post_init__(self) -> None:
if self.httpfs._session is None:
self.httpfs_session = asyncio.run(self.httpfs.set_session())
else:
self.httpfs_session = self.httpfs._session
self.num_rows_total = sum(self.num_rows)
def query(self, offset: int, length: int) -> pa.Table:
"""Query the parquet files
Note that this implementation will always read at least one row group, to get the list of columns and always
have the same schema, even if the requested rows are invalid (out of range).
Args:
offset (int): The first row to read.
length (int): The number of rows to read.
Returns:
pa.Table: The requested rows.
Raises:
TooBigRows: if the arrow data from the parquet row groups is bigger than max_arrow_data_in_memory
"""
with StepProfiler(
method="parquet_index_with_metadata.query", step="get the parquet files than contain the requested rows"
):
parquet_file_offsets = np.cumsum(self.num_rows)
last_row_in_parquet = parquet_file_offsets[-1] - 1
first_row = min(offset, last_row_in_parquet)
last_row = min(offset + length - 1, last_row_in_parquet)
first_parquet_file_id, last_parquet_file_id = np.searchsorted(
parquet_file_offsets, [first_row, last_row], side="right"
)
parquet_offset = (
offset - parquet_file_offsets[first_parquet_file_id - 1] if first_parquet_file_id > 0 else offset
)
urls = self.parquet_files_urls[first_parquet_file_id : last_parquet_file_id + 1] # noqa: E203
metadata_paths = self.metadata_paths[first_parquet_file_id : last_parquet_file_id + 1] # noqa: E203
num_bytes = self.num_bytes[first_parquet_file_id : last_parquet_file_id + 1] # noqa: E203
with StepProfiler(
method="parquet_index_with_metadata.query", step="load the remote parquet files using metadata from disk"
):
parquet_files = [
pq.ParquetFile(
HTTPFile(
self.httpfs,
url,
session=self.httpfs_session,
size=size,
loop=self.httpfs.loop,
cache_type=None,
**self.httpfs.kwargs,
),
metadata=pq.read_metadata(metadata_path),
pre_buffer=True,
)
for url, metadata_path, size in zip(urls, metadata_paths, num_bytes)
]
with StepProfiler(
method="parquet_index_with_metadata.query", step="get the row groups than contain the requested rows"
):
row_group_offsets = np.cumsum(
[
parquet_file.metadata.row_group(group_id).num_rows
for parquet_file in parquet_files
for group_id in range(parquet_file.metadata.num_row_groups)
]
)
row_group_readers = [
RowGroupReader(parquet_file=parquet_file, group_id=group_id)
for parquet_file in parquet_files
for group_id in range(parquet_file.metadata.num_row_groups)
]
last_row_in_parquet = row_group_offsets[-1] - 1
first_row = min(parquet_offset, last_row_in_parquet)
last_row = min(parquet_offset + length - 1, last_row_in_parquet)
first_row_group_id, last_row_group_id = np.searchsorted(
row_group_offsets, [first_row, last_row], side="right"
)
with StepProfiler(
method="parquet_index_with_metadata.row_groups_size_check", step="check if the rows can fit in memory"
):
row_groups_size = sum(
[row_group_readers[i].read_size() for i in range(first_row_group_id, last_row_group_id + 1)]
)
if row_groups_size > self.max_arrow_data_in_memory:
raise TooBigRows(
"Rows from parquet row groups are too big to be read:"
f" {size_str(row_groups_size)} (max={size_str(self.max_arrow_data_in_memory)})"
)
with StepProfiler(method="parquet_index_with_metadata.query", step="read the row groups"):
pa_table = pa.concat_tables(
[
row_group_readers[i].read(self.supported_columns)
for i in range(first_row_group_id, last_row_group_id + 1)
]
)
first_row_in_pa_table = row_group_offsets[first_row_group_id - 1] if first_row_group_id > 0 else 0
return pa_table.slice(parquet_offset - first_row_in_pa_table, length)
@staticmethod
def from_parquet_metadata_items(
parquet_file_metadata_items: list[ParquetFileMetadataItem],
features: Optional[Features],
parquet_metadata_directory: StrPath,
httpfs: HTTPFileSystem,
hf_token: Optional[str],
max_arrow_data_in_memory: int,
unsupported_features: list[FeatureType] = [],
) -> "ParquetIndexWithMetadata":
if not parquet_file_metadata_items:
raise ParquetResponseEmptyError("No parquet files found.")
with StepProfiler(
method="parquet_index_with_metadata.from_parquet_metadata_items",
step="get the index from parquet metadata",
):
try:
parquet_files_metadata = sorted(
parquet_file_metadata_items, key=lambda parquet_file_metadata: parquet_file_metadata["filename"]
)
parquet_files_urls = [parquet_file_metadata["url"] for parquet_file_metadata in parquet_files_metadata]
metadata_paths = [
os.path.join(parquet_metadata_directory, parquet_file_metadata["parquet_metadata_subpath"])
for parquet_file_metadata in parquet_files_metadata
]
num_bytes = [parquet_file_metadata["size"] for parquet_file_metadata in parquet_files_metadata]
num_rows = [parquet_file_metadata["num_rows"] for parquet_file_metadata in parquet_files_metadata]
except Exception as e:
raise ParquetResponseFormatError(f"Could not parse the list of parquet files: {e}") from e
with StepProfiler(
method="parquet_index_with_metadata.from_parquet_metadata_items", step="get the dataset's features"
):
if features is None: # config-parquet version<6 didn't have features
features = Features.from_arrow_schema(pq.read_schema(metadata_paths[0]))
supported_columns, unsupported_columns = get_supported_unsupported_columns(
features,
unsupported_features=unsupported_features,
)
return ParquetIndexWithMetadata(
features=features,
supported_columns=supported_columns,
unsupported_columns=unsupported_columns,
parquet_files_urls=parquet_files_urls,
metadata_paths=metadata_paths,
num_bytes=num_bytes,
num_rows=num_rows,
httpfs=httpfs,
hf_token=hf_token,
max_arrow_data_in_memory=max_arrow_data_in_memory,
)
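# Standalone sketch of the offset arithmetic used in ParquetIndexWithMetadata.query()
# above: a cumulative sum of per-file row counts plus np.searchsorted(..., side="right")
# selects the files (or row groups) that overlap a requested [offset, offset + length)
# window. The row counts below are made up for illustration.
def _example_row_window_to_file_range(num_rows_per_file: list[int], offset: int, length: int) -> tuple[int, int]:
    offsets = np.cumsum(num_rows_per_file)
    last_row = offsets[-1] - 1
    first_row = min(offset, last_row)
    last_requested_row = min(offset + length - 1, last_row)
    first_file_id, last_file_id = np.searchsorted(offsets, [first_row, last_requested_row], side="right")
    return int(first_file_id), int(last_file_id)
# e.g. with files of [100, 100, 50] rows, the window offset=150, length=100 overlaps
# the second and third files: _example_row_window_to_file_range([100, 100, 50], 150, 100) == (1, 2)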
class RowsIndex:
def __init__(
self,
dataset: str,
config: str,
split: str,
processing_graph: ProcessingGraph,
httpfs: HfFileSystem,
hf_token: Optional[str],
parquet_metadata_directory: StrPath,
max_arrow_data_in_memory: int,
unsupported_features: list[FeatureType] = [],
):
self.dataset = dataset
self.revision: Optional[str] = None
self.config = config
self.split = split
self.processing_graph = processing_graph
self.httpfs = httpfs
self.parquet_index = self._init_parquet_index(
hf_token=hf_token,
parquet_metadata_directory=parquet_metadata_directory,
max_arrow_data_in_memory=max_arrow_data_in_memory,
unsupported_features=unsupported_features,
)
def _init_parquet_index(
self,
hf_token: Optional[str],
parquet_metadata_directory: StrPath,
max_arrow_data_in_memory: int,
unsupported_features: list[FeatureType] = [],
) -> ParquetIndexWithMetadata:
with StepProfiler(method="rows_index._init_parquet_index", step="all"):
# get the list of parquet files
with StepProfiler(method="rows_index._init_parquet_index", step="get list of parquet files for split"):
config_parquet_metadata_processing_steps = (
self.processing_graph.get_config_parquet_metadata_processing_steps()
)
cache_kinds = [step.cache_kind for step in config_parquet_metadata_processing_steps]
result = get_previous_step_or_raise(
kinds=cache_kinds,
dataset=self.dataset,
config=self.config,
split=None,
)
self.revision = result.response["dataset_git_revision"]
content = result.response["content"]
if content.get("features"): # config-parquet-metadata version<2 didn't have features
features = Features.from_dict(content["features"])
else:
features = None
logging.info(
f"Create ParquetIndexWithMetadata for dataset={self.dataset}, config={self.config}, split={self.split}"
)
return ParquetIndexWithMetadata.from_parquet_metadata_items(
[
parquet_item
for parquet_item in content["parquet_files_metadata"]
if parquet_item["split"] == self.split and parquet_item["config"] == self.config
],
features=features,
parquet_metadata_directory=parquet_metadata_directory,
httpfs=self.httpfs,
hf_token=hf_token,
max_arrow_data_in_memory=max_arrow_data_in_memory,
unsupported_features=unsupported_features,
)
# note that this cache size is global for the class, not per instance
@lru_cache(maxsize=8)
def query(self, offset: int, length: int) -> pa.Table:
"""Query the parquet files
Note that this implementation will always read at least one row group, to get the list of columns and always
have the same schema, even if the requested rows are invalid (out of range).
Args:
offset (int): The first row to read.
length (int): The number of rows to read.
Returns:
pa.Table: The requested rows.
"""
logging.info(
f"Query {type(self.parquet_index).__name__} for dataset={self.dataset}, config={self.config},"
f" split={self.split}, offset={offset}, length={length}"
)
return self.parquet_index.query(offset=offset, length=length)
class Indexer:
def __init__(
self,
processing_graph: ProcessingGraph,
parquet_metadata_directory: StrPath,
httpfs: HTTPFileSystem,
max_arrow_data_in_memory: int,
unsupported_features: list[FeatureType] = [],
all_columns_supported_datasets_allow_list: Union[Literal["all"], list[str]] = "all",
hf_token: Optional[str] = None,
):
self.processing_graph = processing_graph
self.parquet_metadata_directory = parquet_metadata_directory
self.httpfs = httpfs
self.hf_token = hf_token
self.max_arrow_data_in_memory = max_arrow_data_in_memory
self.unsupported_features = unsupported_features
self.all_columns_supported_datasets_allow_list = all_columns_supported_datasets_allow_list
@lru_cache(maxsize=8)
def get_rows_index(
self,
dataset: str,
config: str,
split: str,
) -> RowsIndex:
filter_features = (
self.all_columns_supported_datasets_allow_list != "all"
and dataset not in self.all_columns_supported_datasets_allow_list
)
unsupported_features = self.unsupported_features if filter_features else []
return RowsIndex(
dataset=dataset,
config=config,
split=split,
processing_graph=self.processing_graph,
httpfs=self.httpfs,
hf_token=self.hf_token,
parquet_metadata_directory=self.parquet_metadata_directory,
max_arrow_data_in_memory=self.max_arrow_data_in_memory,
unsupported_features=unsupported_features,
)
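# Minimal usage sketch for Indexer/RowsIndex. The dataset name, config and split are
# placeholders, and the sketch assumes the cache already holds a config-parquet-metadata
# entry for that dataset and that the parquet metadata files are available on disk;
# it is illustrative, not a reference implementation.
def _example_query_first_rows(processing_graph: ProcessingGraph, parquet_metadata_directory: StrPath) -> pa.Table:
    indexer = Indexer(
        processing_graph=processing_graph,
        parquet_metadata_directory=parquet_metadata_directory,
        httpfs=HTTPFileSystem(),
        max_arrow_data_in_memory=300_000_000,
        hf_token=None,
    )
    rows_index = indexer.get_rows_index(dataset="user/dataset", config="default", split="train")
    return rows_index.query(offset=0, length=100)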
| datasets-server-main | libs/libcommon/src/libcommon/parquet_utils.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
import logging
def init_logging(level: int = logging.INFO) -> None:
logging.basicConfig(level=level, format="%(levelname)s: %(asctime)s - %(name)s - %(message)s")
logging.debug(f"Log level set to: {logging.getLevelName(logging.getLogger().getEffectiveLevel())}")
| datasets-server-main | libs/libcommon/src/libcommon/log.py |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 The HuggingFace Authors.
ASSETS_CACHE_APPNAME = "datasets_server_assets"
CACHE_COLLECTION_RESPONSES = "cachedResponsesBlue"
CACHE_MONGOENGINE_ALIAS = "cache"
CACHED_ASSETS_CACHE_APPNAME = "datasets_server_cached_assets"
HF_DATASETS_CACHE_APPNAME = "hf_datasets_cache"
PARQUET_METADATA_CACHE_APPNAME = "datasets_server_parquet_metadata"
DESCRIPTIVE_STATISTICS_CACHE_APPNAME = "datasets_server_descriptive_statistics"
DUCKDB_INDEX_CACHE_APPNAME = "datasets_server_duckdb_index"
CACHE_METRICS_COLLECTION = "cacheTotalMetric"
QUEUE_METRICS_COLLECTION = "jobTotalMetric"
METRICS_MONGOENGINE_ALIAS = "metrics"
QUEUE_COLLECTION_JOBS = "jobsBlue"
QUEUE_COLLECTION_LOCKS = "locks"
QUEUE_MONGOENGINE_ALIAS = "queue"
QUEUE_TTL_SECONDS = 600 # 10 minutes
LOCK_TTL_SECONDS = 600 # 10 minutes
DEFAULT_DIFFICULTY = 50
DEFAULT_DIFFICULTY_MAX = 100
DEFAULT_DIFFICULTY_MIN = 0
DEFAULT_INPUT_TYPE = "dataset"
DEFAULT_JOB_RUNNER_VERSION = 1
PROCESSING_STEP_CONFIG_INFO_VERSION = 2
PROCESSING_STEP_CONFIG_IS_VALID_VERSION = 1
PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION = 3
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_VERSION = 4
PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION = 2
PROCESSING_STEP_CONFIG_PARQUET_VERSION = 6
PROCESSING_STEP_CONFIG_SIZE_VERSION = 2
PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION = 3
PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION = 3
PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION = 1
PROCESSING_STEP_DATASET_HUB_CACHE_VERSION = 1
PROCESSING_STEP_DATASET_INFO_VERSION = 2
PROCESSING_STEP_DATASET_IS_VALID_VERSION = 5
PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VERSION = 2
PROCESSING_STEP_DATASET_PARQUET_VERSION = 2
PROCESSING_STEP_DATASET_SIZE_VERSION = 2
PROCESSING_STEP_DATASET_SPLIT_NAMES_VERSION = 3
PROCESSING_STEP_SPLIT_DUCKDB_INDEX_VERSION = 2
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION = 2
PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION = 3
PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION = 1
PROCESSING_STEP_SPLIT_IS_VALID_VERSION = 1
PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION = 2
PROCESSING_STEP_SPLIT_DESCRIPTIVE_STATISTICS_VERSION = 1
PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION = 4
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100
PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100
PARQUET_REVISION = "refs/convert/parquet"
ERROR_CODES_TO_RETRY = "CreateCommitError,LockedDatasetTimeoutError,StreamingRowsError"
| datasets-server-main | libs/libcommon/src/libcommon/constants.py |