| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
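Each row masks a class name in `source` with `____`; `target` holds the gold class name and `class_span` holds start/end offsets (presumably character offsets of the class in the original file). As a minimal sketch of consuming a record, the snippet below assumes the rows are stored as JSON Lines in a hypothetical local file `class_mask_rows.jsonl` with the columns listed above; the preview does not specify an actual loader, and `restore_class_name` is an illustrative helper, not part of the dataset.

```python
# Minimal sketch, assuming rows are available as JSON Lines in a local file
# named "class_mask_rows.jsonl" (hypothetical; the preview does not say how
# the data is distributed). Columns: language, repo, path, class_span, source, target.
import json


def restore_class_name(row: dict) -> str:
    """Substitute the gold class name (`target`) back into the masked `source`."""
    return row["source"].replace("____", row["target"], 1)


with open("class_mask_rows.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        span = row["class_span"]  # start/end offsets, presumably into the original file
        print(f'{row["repo"]}:{row["path"]} [{span["start"]}:{span["end"]}] -> {row["target"]}')
        # First line becomes e.g. "class BaseTraceHierarchyType(BasePlotlyType):"
        print(restore_class_name(row).splitlines()[0])
```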
| python | plotly__plotly.py | plotly/basedatatypes.py | {"start": 215845, "end": 216282} |
class ____(BasePlotlyType):
"""
Base class for all types in the trace hierarchy
"""
def __init__(self, plotly_name, **kwargs):
super(BaseTraceHierarchyType, self).__init__(plotly_name, **kwargs)
def _send_prop_set(self, prop_path_str, val):
if self.parent:
# ### Inform parent of restyle operation ###
self.parent._restyle_child(self, prop_path_str, val)
| BaseTraceHierarchyType |
| python | fluentpython__example-code-2e | 23-descriptor/bulkfood/model_v5.py | {"start": 707, "end": 1004} |
class ____(Validated):
"""a string with at least one non-space character"""
def validate(self, name, value):
value = value.strip()
if not value: # <2>
raise ValueError(f'{name} cannot be blank')
return value # <3>
# end::MODEL_V5_VALIDATED_SUB[]
| NonBlank |
| python | pandas-dev__pandas | asv_bench/benchmarks/indexing.py | {"start": 458, "end": 2279} |
class ____:
params = [
(np.int64, np.uint64, np.float64),
("unique_monotonic_inc", "nonunique_monotonic_inc"),
]
param_names = ["dtype", "index_structure"]
def setup(self, dtype, index_structure):
N = 10**6
indices = {
"unique_monotonic_inc": Index(range(N), dtype=dtype),
"nonunique_monotonic_inc": Index(
list(range(55)) + [54] + list(range(55, N - 1)), dtype=dtype
),
}
self.data = Series(np.random.rand(N), index=indices[index_structure])
self.array = np.arange(10000)
self.array_list = self.array.tolist()
def time_getitem_scalar(self, index, index_structure):
self.data[800000]
def time_getitem_slice(self, index, index_structure):
self.data[:800000]
def time_getitem_list_like(self, index, index_structure):
self.data[[800000]]
def time_getitem_array(self, index, index_structure):
self.data[self.array]
def time_getitem_lists(self, index, index_structure):
self.data[self.array_list]
def time_iloc_array(self, index, index_structure):
self.data.iloc[self.array]
def time_iloc_list_like(self, index, index_structure):
self.data.iloc[[800000]]
def time_iloc_scalar(self, index, index_structure):
self.data.iloc[800000]
def time_iloc_slice(self, index, index_structure):
self.data.iloc[:800000]
def time_loc_array(self, index, index_structure):
self.data.loc[self.array]
def time_loc_list_like(self, index, index_structure):
self.data.loc[[800000]]
def time_loc_scalar(self, index, index_structure):
self.data.loc[800000]
def time_loc_slice(self, index, index_structure):
self.data.loc[:800000]
| NumericSeriesIndexing |
| python | jazzband__django-waffle | waffle/tests/test_utils.py | {"start": 182, "end": 663} |
class ____(TestCase):
def test_overridden_setting(self):
prefix = get_setting('CACHE_PREFIX')
self.assertEqual(settings.WAFFLE_CACHE_PREFIX, prefix)
def test_default_setting(self):
age = get_setting('MAX_AGE')
self.assertEqual(defaults.MAX_AGE, age)
def test_override_settings(self):
assert not get_setting('OVERRIDE')
with override_settings(WAFFLE_OVERRIDE=True):
assert get_setting('OVERRIDE')
| GetSettingTests |
| python | ray-project__ray | python/ray/data/tests/test_split.py | {"start": 1250, "end": 35434} |
class ____:
def __init__(self):
self.value = 0
def increment(self):
self.value += 1
return self.value
def test_equal_split(shutdown_only):
ray.init(num_cpus=2)
def range2x(n):
return ray.data.range(2 * n)
def counts(shards):
@ray.remote(num_cpus=0)
def count(s):
return s.count()
return ray.get([count.remote(s) for s in shards])
r1 = counts(range2x(10).split(3, equal=True))
assert all(c == 6 for c in r1), r1
# The following test is failing and may be a regression.
# Splits appear to be based on existing block boundaries ([10, 5, 5], [8, 8, 4]).
# r2 = counts(range2x(10).split(3, equal=False))
# assert all(c >= 6 for c in r2), r2
# assert not all(c == 6 for c in r2), r2
@pytest.mark.parametrize(
"block_sizes,num_splits",
[
([3, 6, 3], 3), # Test baseline.
([3, 3, 3], 3), # Already balanced.
([3, 6, 4], 3), # Row truncation.
([3, 6, 2, 3], 3), # Row truncation, smaller number of blocks.
([5, 6, 2, 5], 5), # Row truncation, larger number of blocks.
([1, 1, 1, 1, 6], 5), # All smaller but one.
([4, 4, 4, 4, 1], 5), # All larger but one.
([2], 2), # Single block.
([2, 5], 1), # Single split.
],
)
def test_equal_split_balanced(ray_start_regular_shared_2_cpus, block_sizes, num_splits):
_test_equal_split_balanced(block_sizes, num_splits)
def _test_equal_split_balanced(block_sizes, num_splits):
ctx = DataContext.get_current()
blocks = []
metadata = []
ref_bundles = []
total_rows = 0
for block_size in block_sizes:
block = pd.DataFrame({"id": list(range(total_rows, total_rows + block_size))})
blocks.append(ray.put(block))
metadata.append(BlockAccessor.for_block(block).get_metadata())
schema = BlockAccessor.for_block(block).schema()
blk = (blocks[-1], metadata[-1])
ref_bundles.append(RefBundle((blk,), owns_blocks=True, schema=schema))
total_rows += block_size
logical_plan = LogicalPlan(InputData(input_data=ref_bundles), ctx)
stats = DatasetStats(metadata={"TODO": []}, parent=None)
ds = Dataset(
ExecutionPlan(stats, ctx),
logical_plan,
)
splits = ds.split(num_splits, equal=True)
split_counts = [split.count() for split in splits]
assert len(split_counts) == num_splits
expected_block_size = total_rows // num_splits
# Check that all splits are the expected size.
assert all([count == expected_block_size for count in split_counts])
expected_total_rows = sum(split_counts)
# Check that the expected number of rows were dropped.
assert total_rows - expected_total_rows == total_rows % num_splits
# Check that all rows are unique (content check).
split_rows = [row for split in splits for row in split.take(total_rows)]
assert len(set(extract_values("id", split_rows))) == len(split_rows)
def test_equal_split_balanced_grid(ray_start_regular_shared_2_cpus):
# Tests balanced equal splitting over a grid of configurations.
# Grid: num_blocks x num_splits x num_rows_block_1 x ... x num_rows_block_n
seed = int(time.time())
print(f"Seeding RNG for test_equal_split_balanced_grid with: {seed}")
random.seed(seed)
max_num_splits = 15
num_splits_samples = 3
max_num_blocks = 50
max_num_rows_per_block = 100
num_blocks_samples = 3
block_sizes_samples = 3
for num_splits in np.random.randint(2, max_num_splits + 1, size=num_splits_samples):
for num_blocks in np.random.randint(
1, max_num_blocks + 1, size=num_blocks_samples
):
block_sizes_list = [
np.random.randint(1, max_num_rows_per_block + 1, size=num_blocks)
for _ in range(block_sizes_samples)
]
for block_sizes in block_sizes_list:
if sum(block_sizes) < num_splits:
min_ = math.ceil(num_splits / num_blocks)
block_sizes = np.random.randint(
min_, max_num_rows_per_block + 1, size=num_blocks
)
_test_equal_split_balanced(block_sizes, num_splits)
def test_split_small(ray_start_regular_shared_2_cpus):
x = [Counter.remote() for _ in range(4)]
data = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
fail = []
@ray.remote(num_cpus=0)
def take(s):
return extract_values("item", s.take())
for m in [1, 3]:
for n in [1, 3]:
for locality_hints in [None, x[:n]]:
for equal in [True, False]:
print("Testing", m, n, equal, locality_hints)
ds = ray.data.from_items(data, override_num_blocks=m)
splits = ds.split(n, equal=equal, locality_hints=locality_hints)
assert len(splits) == n
outs = ray.get([take.remote(s) for s in splits])
out = []
for r in outs:
out.extend(r)
if equal:
lens = set([len(s) for s in outs]) # noqa
limit = len(data) - (len(data) % n)
allowed = [limit]
# Allow for some pipelining artifacts.
print(len(out), len(set(out)), allowed)
if (
len(out) not in allowed
or len(set(out)) != len(out)
# TODO(ekl) we should be able to enable this check, but
# there are some edge condition bugs in split.
# or len(lens) != 1
):
print("FAIL", m, n, equal, locality_hints)
fail.append((m, n, equal, locality_hints))
else:
if sorted(out) != data:
print("FAIL", m, n, equal, locality_hints)
fail.append((m, n, equal, locality_hints))
assert not fail, fail
def test_split_at_indices_simple(ray_start_regular_shared_2_cpus, restore_data_context):
# NOTE: It's critical to preserve ordering for assertions in this test to work
DataContext.get_current().execution_options.preserve_order = True
ds = ray.data.range(10, override_num_blocks=3)
with pytest.raises(ValueError):
ds.split_at_indices([])
with pytest.raises(ValueError):
ds.split_at_indices([-1])
with pytest.raises(ValueError):
ds.split_at_indices([3, 1])
splits = ds.split_at_indices([5])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
splits = ds.split_at_indices([2, 5])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]
splits = ds.split_at_indices([2, 5, 5, 100])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[0, 1], [2, 3, 4], [], [5, 6, 7, 8, 9], []]
splits = ds.split_at_indices([100])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], []]
splits = ds.split_at_indices([0])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
@pytest.mark.parametrize("num_blocks", list(range(1, 20)) + [25, 40])
@pytest.mark.parametrize(
"indices",
[
# Two-splits.
[5],
[10],
[15],
# Three-splits.
[5, 12],
[1, 18],
[9, 10],
# Misc.
[3, 10, 17],
[2, 4, 11, 12, 19],
list(range(20)),
list(range(0, 20, 2)),
# Empty splits.
[10, 10],
[5, 10, 10, 15],
# Out-of-bounds.
[25],
[7, 11, 23, 33],
],
)
def test_split_at_indices_coverage(
ray_start_regular_shared_2_cpus, num_blocks, indices, restore_data_context
):
# Test that split_at_indices() creates the expected splits on a set of partition and
# indices configurations.
# NOTE: It's critical to preserve ordering for assertions in this test to work
DataContext.get_current().execution_options.preserve_order = True
ds = ray.data.range(20, override_num_blocks=num_blocks)
splits = ds.split_at_indices(indices)
r = [extract_values("id", s.sort("id").take_all()) for s in splits]
# Use np.array_split() semantics as our correctness ground-truth.
assert r == [arr.tolist() for arr in np.array_split(list(range(20)), indices)]
@pytest.mark.parametrize("num_blocks", [1, 3, 5, 10])
@pytest.mark.parametrize(
"indices",
[
[2], # Single split
[1, 3], # Two splits
[0, 2, 4], # Three splits
[1, 2, 3, 4], # Four splits
[1, 2, 3, 4, 7], # Five splits
[1, 2, 3, 4, 6, 9], # Six splits
]
+ [
list(x) for x in itertools.combinations_with_replacement([1, 3, 4], 2)
] # Selected two-split cases
+ [
list(x) for x in itertools.combinations_with_replacement([0, 2, 4], 3)
], # Selected three-split cases
)
def test_split_at_indices_coverage_complete(
ray_start_regular_shared_2_cpus, num_blocks, indices, restore_data_context
):
# NOTE: It's critical to preserve ordering for assertions in this test to work
DataContext.get_current().execution_options.preserve_order = True
# Test that split_at_indices() creates the expected splits on a set of partition and
# indices configurations.
ds = ray.data.range(10, override_num_blocks=num_blocks)
splits = ds.split_at_indices(indices)
r = [extract_values("id", s.take_all()) for s in splits]
# Use np.array_split() semantics as our correctness ground-truth.
assert r == [arr.tolist() for arr in np.array_split(list(range(10)), indices)]
def test_split_proportionately(ray_start_regular_shared_2_cpus):
ds = ray.data.range(10, override_num_blocks=3)
with pytest.raises(ValueError):
ds.split_proportionately([])
with pytest.raises(ValueError):
ds.split_proportionately([-1])
with pytest.raises(ValueError):
ds.split_proportionately([0])
with pytest.raises(ValueError):
ds.split_proportionately([1])
with pytest.raises(ValueError):
ds.split_proportionately([0.5, 0.5])
splits = ds.split_proportionately([0.5])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
splits = ds.split_proportionately([0.2, 0.3])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]
splits = ds.split_proportionately([0.2, 0.3, 0.3])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[0, 1], [2, 3, 4], [5, 6, 7], [8, 9]]
splits = ds.split_proportionately([0.98, 0.01])
r = [extract_values("id", s.take()) for s in splits]
assert r == [[0, 1, 2, 3, 4, 5, 6, 7], [8], [9]]
with pytest.raises(ValueError):
ds.split_proportionately([0.90] + ([0.001] * 90))
def test_split(ray_start_regular_shared_2_cpus):
ds = ray.data.range(20, override_num_blocks=10)
assert ds._plan.initial_num_blocks() == 10
assert ds.sum() == 190
assert ds._block_num_rows() == [2] * 10
datasets = ds.split(5)
assert [2] * 5 == [len(dataset._plan.execute().blocks) for dataset in datasets]
assert 190 == sum([dataset.sum("id") for dataset in datasets])
datasets = ds.split(3)
assert [4, 3, 3] == [len(dataset._plan.execute().blocks) for dataset in datasets]
assert 190 == sum([dataset.sum("id") for dataset in datasets])
datasets = ds.split(1)
assert [10] == [len(dataset._plan.execute().blocks) for dataset in datasets]
assert 190 == sum([dataset.sum("id") for dataset in datasets])
datasets = ds.split(10)
assert [1] * 10 == [len(dataset._plan.execute().blocks) for dataset in datasets]
assert 190 == sum([dataset.sum("id") for dataset in datasets])
datasets = ds.split(11)
assert [1] * 10 + [0] == [
len(dataset._plan.execute().blocks) for dataset in datasets
]
assert 190 == sum([dataset.sum("id") or 0 for dataset in datasets])
def test_split_hints(ray_start_regular_shared_2_cpus):
@ray.remote
class Actor(object):
def __init__(self):
pass
def assert_split_assignment(block_node_ids, actor_node_ids, expected_split_result):
"""Helper function to setup split hints test.
Args:
block_node_ids: a list of blocks with their locations. For
example ["node1", "node2"] represents two blocks with
"node1", "node2" as their location respectively.
actor_node_ids: a list of actors with their locations. For
example ["node1", "node2"] represents two actors with
"node1", "node2" as their location respectively.
expected_split_result: a list of allocation result, each entry
in the list stores the block_index in the split dataset.
For example, [[0, 1], [2]] represents the split result has
two datasets, datasets[0] contains block 0 and 1; and
datasets[1] contains block 2.
"""
num_blocks = len(block_node_ids)
ds = ray.data.range(num_blocks, override_num_blocks=num_blocks).materialize()
bundles = ds.iter_internal_ref_bundles()
blocks = _ref_bundles_iterator_to_block_refs_list(bundles)
assert len(block_node_ids) == len(blocks)
actors = [Actor.remote() for i in range(len(actor_node_ids))]
with patch("ray.experimental.get_object_locations") as location_mock:
with patch("ray._private.state.actors") as state_mock:
block_locations = {}
for i, node_id in enumerate(block_node_ids):
if node_id:
block_locations[blocks[i]] = {"node_ids": [node_id]}
location_mock.return_value = block_locations
actor_state = {}
for i, node_id in enumerate(actor_node_ids):
actor_state[actors[i]._actor_id.hex()] = {
"Address": {"NodeID": node_id}
}
state_mock.return_value = actor_state
datasets = ds.split(len(actors), locality_hints=actors)
assert len(datasets) == len(actors)
for i in range(len(actors)):
assert {blocks[j] for j in expected_split_result[i]} == set(
_ref_bundles_iterator_to_block_refs_list(
datasets[i].iter_internal_ref_bundles()
)
)
assert_split_assignment(
["node2", "node1", "node1"], ["node1", "node2"], [[1, 2], [0]]
)
assert_split_assignment(
["node1", "node1", "node1"], ["node1", "node2"], [[2, 1], [0]]
)
assert_split_assignment(["node2", "node2", None], ["node1", "node2"], [[0, 2], [1]])
assert_split_assignment(["node2", "node2", None], [None, None], [[2, 1], [0]])
assert_split_assignment(
["n1", "n2", "n3", "n1", "n2"], ["n1", "n2"], [[0, 2, 3], [1, 4]]
)
assert_split_assignment(["n1", "n2"], ["n1", "n2", "n3"], [[0], [1], []])
# perfect split:
#
# split 300 blocks
# with node_ids interleaving between "n0", "n1", "n2"
#
# to 3 actors
# with node_ids "n1", "n2", "n0"
#
# expect that block 1, 4, 7... are assigned to actor with node_id n1
# block 2, 5, 8... are assigned to actor with node_id n2
# block 0, 3, 6... are assigned to actor with node_id n0
assert_split_assignment(
["n0", "n1", "n2"] * 100,
["n1", "n2", "n0"],
[range(1, 300, 3), range(2, 300, 3), range(0, 300, 3)],
)
# even split regardless of locality:
#
# split 301 blocks
# with block 0 to block 50 on "n0",
# block 51 to block 300 on "n1"
#
# to 3 actors
# with node_ids "n1", "n2", "n0"
#
# expect that block 200 to block 300 are assigned to actor with node_id n1
# block 100 to block 199 are assigned to actor with node_id n2
# block 0 to block 99 are assigned to actor with node_id n0
assert_split_assignment(
["n0"] * 50 + ["n1"] * 251,
["n1", "n2", "n0"],
[range(200, 301), range(100, 200), list(range(0, 50)) + list(range(50, 100))],
)
def test_generate_valid_indices():
assert [1, 2, 3] == _generate_valid_indices([10], [1, 2, 3])
assert [1, 2, 2] == _generate_valid_indices([1, 1], [1, 2, 3])
def test_generate_per_block_split_indices():
assert [[1], [1, 2], [], []] == _generate_per_block_split_indices(
[3, 3, 3, 1], [1, 4, 5]
)
assert [[3], [], [], [1, 1]] == _generate_per_block_split_indices(
[3, 3, 3, 1], [3, 10, 10]
)
assert [[], [], [], []] == _generate_per_block_split_indices([3, 3, 3, 1], [])
def _create_meta(num_rows):
return BlockMetadata(
num_rows=num_rows,
size_bytes=None,
input_files=None,
exec_stats=None,
)
def _create_block_and_metadata(data: Any) -> Tuple[ObjectRef[Block], BlockMetadata]:
block = pd.DataFrame({"id": data})
metadata = BlockAccessor.for_block(block).get_metadata()
return (ray.put(block), metadata)
def _create_blocklist(blocks):
block_refs = []
meta = []
for block in blocks:
block_ref, block_meta = _create_block_and_metadata(block)
block_refs.append(block_ref)
meta.append(block_meta)
return BlockList(block_refs, meta, owned_by_consumer=True)
def _create_bundle(blocks: List[List[Any]]) -> RefBundle:
schema = BlockAccessor.for_block(pd.DataFrame({"id": []})).schema()
return RefBundle(
[_create_block_and_metadata(block) for block in blocks],
owns_blocks=True,
schema=schema,
)
def _create_blocks_with_metadata(blocks):
return _create_blocklist(blocks).get_blocks_with_metadata()
def test_split_single_block(ray_start_regular_shared_2_cpus):
block = pd.DataFrame({"id": [1, 2, 3]})
metadata = _create_meta(3)
results = ray.get(
ray.remote(_split_single_block)
.options(num_returns=2)
.remote(234, block, metadata, [])
)
block_id, meta = results[0]
blocks = results[1:]
assert 234 == block_id
assert len(blocks) == 1
assert list(blocks[0]["id"]) == [1, 2, 3]
assert meta[0].num_rows == 3
results = ray.get(
ray.remote(_split_single_block)
.options(num_returns=3)
.remote(234, block, metadata, [1])
)
block_id, meta = results[0]
blocks = results[1:]
assert 234 == block_id
assert len(blocks) == 2
assert list(blocks[0]["id"]) == [1]
assert meta[0].num_rows == 1
assert list(blocks[1]["id"]) == [2, 3]
assert meta[1].num_rows == 2
results = ray.get(
ray.remote(_split_single_block)
.options(num_returns=6)
.remote(234, block, metadata, [0, 1, 1, 3])
)
block_id, meta = results[0]
blocks = results[1:]
assert 234 == block_id
assert len(blocks) == 5
assert list(blocks[0]["id"]) == []
assert list(blocks[1]["id"]) == [1]
assert list(blocks[2]["id"]) == []
assert list(blocks[3]["id"]) == [2, 3]
assert list(blocks[4]["id"]) == []
block = pd.DataFrame({"id": []})
metadata = _create_meta(0)
results = ray.get(
ray.remote(_split_single_block)
.options(num_returns=3)
.remote(234, block, metadata, [0])
)
block_id, meta = results[0]
blocks = results[1:]
assert 234 == block_id
assert len(blocks) == 2
assert list(blocks[0]["id"]) == []
assert list(blocks[1]["id"]) == []
def test_drop_empty_block_split():
assert [1, 2] == _drop_empty_block_split([0, 1, 2, 3], 3)
assert [1, 2] == _drop_empty_block_split([1, 1, 2, 2], 3)
assert [] == _drop_empty_block_split([0], 0)
def verify_splits(splits, blocks_by_split):
assert len(splits) == len(blocks_by_split)
for blocks, (block_refs, metas) in zip(blocks_by_split, splits):
assert len(blocks) == len(block_refs)
assert len(blocks) == len(metas)
for block, block_ref, meta in zip(blocks, block_refs, metas):
assert list(ray.get(block_ref)["id"]) == block
assert meta.num_rows == len(block)
def test_generate_global_split_results(ray_start_regular_shared_2_cpus):
inputs = [
_create_block_and_metadata([1]),
_create_block_and_metadata([2, 3]),
_create_block_and_metadata([4]),
]
splits = list(zip(*_generate_global_split_results(iter(inputs), [1, 2, 1])))
verify_splits(splits, [[[1]], [[2, 3]], [[4]]])
splits = list(zip(*_generate_global_split_results(iter(inputs), [3, 1])))
verify_splits(splits, [[[1], [2, 3]], [[4]]])
splits = list(zip(*_generate_global_split_results(iter(inputs), [3, 0, 1])))
verify_splits(splits, [[[1], [2, 3]], [], [[4]]])
inputs = []
splits = list(zip(*_generate_global_split_results(iter(inputs), [0, 0])))
verify_splits(splits, [[], []])
def test_private_split_at_indices(ray_start_regular_shared_2_cpus):
inputs = _create_blocks_with_metadata([])
splits = list(zip(*_split_at_indices(inputs, [0])))
verify_splits(splits, [[], []])
splits = list(zip(*_split_at_indices(inputs, [])))
verify_splits(splits, [[]])
inputs = _create_blocks_with_metadata([[1], [2, 3], [4]])
splits = list(zip(*_split_at_indices(inputs, [1])))
verify_splits(splits, [[[1]], [[2, 3], [4]]])
inputs = _create_blocks_with_metadata([[1], [2, 3], [4]])
splits = list(zip(*_split_at_indices(inputs, [2])))
verify_splits(splits, [[[1], [2]], [[3], [4]]])
inputs = _create_blocks_with_metadata([[1], [2, 3], [4]])
splits = list(zip(*_split_at_indices(inputs, [1])))
verify_splits(splits, [[[1]], [[2, 3], [4]]])
inputs = _create_blocks_with_metadata([[1], [2, 3], [4]])
splits = list(zip(*_split_at_indices(inputs, [2, 2])))
verify_splits(splits, [[[1], [2]], [], [[3], [4]]])
inputs = _create_blocks_with_metadata([[1], [2, 3], [4]])
splits = list(zip(*_split_at_indices(inputs, [])))
verify_splits(splits, [[[1], [2, 3], [4]]])
inputs = _create_blocks_with_metadata([[1], [2, 3], [4]])
splits = list(zip(*_split_at_indices(inputs, [0, 4])))
verify_splits(splits, [[], [[1], [2, 3], [4]], []])
def equalize_helper(input_block_lists: List[List[List[Any]]]):
result = _equalize(
[_create_bundle(block_list) for block_list in input_block_lists],
owned_by_consumer=True,
)
result_block_lists = []
for bundle in result:
block_list = []
for block_ref in bundle.block_refs:
block = ray.get(block_ref)
block_accessor = BlockAccessor.for_block(block)
block_list.append(list(block_accessor.to_default()["id"]))
result_block_lists.append(block_list)
return result_block_lists
def verify_equalize_result(input_block_lists, expected_block_lists):
result_block_lists = equalize_helper(input_block_lists)
assert result_block_lists == expected_block_lists
def test_equalize(ray_start_regular_shared_2_cpus):
verify_equalize_result([], [])
verify_equalize_result([[]], [[]])
verify_equalize_result([[[1]], []], [[], []])
verify_equalize_result([[[1], [2, 3]], [[4]]], [[[1], [2]], [[4], [3]]])
verify_equalize_result([[[1], [2, 3]], []], [[[1]], [[2]]])
verify_equalize_result(
[[[1], [2, 3], [4, 5]], [[6]], []], [[[1], [2]], [[6], [3]], [[4, 5]]]
)
verify_equalize_result(
[[[1, 2, 3], [4, 5]], [[6]], []], [[[4, 5]], [[6], [1]], [[2, 3]]]
)
def test_equalize_randomized(ray_start_regular_shared_2_cpus):
# verify the entries in the splits are in the range of 0 .. num_rows,
# unique, and the total number matches num_rows if exact_num == True.
def assert_unique_and_inrange(splits, num_rows, exact_num=False):
unique_set = set()
for split in splits:
for block in split:
for entry in block:
assert entry not in unique_set
assert entry >= 0 and entry < num_rows
unique_set.add(entry)
if exact_num:
assert len(unique_set) == num_rows
# verify that splits are equalized.
def assert_equal_split(splits, num_rows, num_split):
split_size = num_rows // num_split
for split in splits:
assert len((list(itertools.chain.from_iterable(split)))) == split_size
# create randomized splits containing entries from 0 ... num_rows.
def random_split(num_rows, num_split):
split_point = [int(random.random() * num_rows) for _ in range(num_split - 1)]
split_index_helper = [0] + sorted(split_point) + [num_rows]
splits = []
for i in range(1, len(split_index_helper)):
split_start = split_index_helper[i - 1]
split_end = split_index_helper[i]
num_entries = split_end - split_start
split = []
num_block_split = int(random.random() * num_entries)
block_split_point = [
split_start + int(random.random() * num_entries)
for _ in range(num_block_split)
]
block_index_helper = [split_start] + sorted(block_split_point) + [split_end]
for j in range(1, len(block_index_helper)):
split.append(
list(range(block_index_helper[j - 1], block_index_helper[j]))
)
splits.append(split)
assert_unique_and_inrange(splits, num_rows, exact_num=True)
return splits
for i in range(100):
num_rows = int(random.random() * 100)
num_split = int(random.random() * 10) + 1
input_splits = random_split(num_rows, num_split)
print(input_splits)
equalized_splits = equalize_helper(input_splits)
assert_unique_and_inrange(equalized_splits, num_rows)
assert_equal_split(equalized_splits, num_rows, num_split)
def test_train_test_split(ray_start_regular_shared_2_cpus):
ds = ray.data.range(8)
# float
train, test = ds.train_test_split(test_size=0.25)
assert extract_values("id", train.take()) == [0, 1, 2, 3, 4, 5]
assert extract_values("id", test.take()) == [6, 7]
# int
train, test = ds.train_test_split(test_size=2)
assert extract_values("id", train.take()) == [0, 1, 2, 3, 4, 5]
assert extract_values("id", test.take()) == [6, 7]
# shuffle
train, test = ds.train_test_split(test_size=0.25, shuffle=True, seed=1)
assert extract_values("id", train.take()) == [7, 4, 6, 0, 5, 2]
assert extract_values("id", test.take()) == [1, 3]
# error handling
with pytest.raises(TypeError):
ds.train_test_split(test_size=[1])
with pytest.raises(ValueError):
ds.train_test_split(test_size=-1)
with pytest.raises(ValueError):
ds.train_test_split(test_size=0)
with pytest.raises(ValueError):
ds.train_test_split(test_size=1.1)
with pytest.raises(ValueError):
ds.train_test_split(test_size=9)
def test_train_test_split_stratified(ray_start_regular_shared_2_cpus):
# Test basic stratification with simple dataset
data = [
{"id": 0, "label": "A"},
{"id": 1, "label": "A"},
{"id": 2, "label": "B"},
{"id": 3, "label": "B"},
{"id": 4, "label": "C"},
{"id": 5, "label": "C"},
]
ds = ray.data.from_items(data)
# Test stratified split
train, test = ds.train_test_split(test_size=0.5, stratify="label")
# Check that we have the right number of samples
assert train.count() == 3
assert test.count() == 3
# Check that class proportions are preserved
train_labels = [row["label"] for row in train.take()]
test_labels = [row["label"] for row in test.take()]
train_label_counts = {label: train_labels.count(label) for label in ["A", "B", "C"]}
test_label_counts = {label: test_labels.count(label) for label in ["A", "B", "C"]}
# Each class should have exactly 1 sample in each split
assert train_label_counts == {"A": 1, "B": 1, "C": 1}
assert test_label_counts == {"A": 1, "B": 1, "C": 1}
def test_train_test_split_shuffle_stratify_error(ray_start_regular_shared_2_cpus):
# Test that shuffle=True and stratify cannot be used together
data = [
{"id": 0, "label": "A"},
{"id": 1, "label": "A"},
{"id": 2, "label": "B"},
{"id": 3, "label": "B"},
]
ds = ray.data.from_items(data)
# Test that combining shuffle=True and stratify raises ValueError
with pytest.raises(
ValueError, match="Cannot specify both 'shuffle=True' and 'stratify'"
):
ds.train_test_split(test_size=0.5, shuffle=True, stratify="label")
def test_train_test_split_stratified_imbalanced(ray_start_regular_shared_2_cpus):
# Test stratified split with imbalanced class distribution
data = [
{"id": 0, "label": "A"},
{"id": 1, "label": "A"},
{"id": 2, "label": "A"},
{"id": 3, "label": "A"},
{"id": 4, "label": "A"},
{"id": 5, "label": "A"}, # 6 samples of class A
{"id": 6, "label": "B"},
{"id": 7, "label": "B"}, # 2 samples of class B
{"id": 8, "label": "C"}, # 1 sample of class C
]
ds = ray.data.from_items(data)
# Test with 0.3 test size
train, test = ds.train_test_split(test_size=0.3, stratify="label")
train_labels = [row["label"] for row in train.take()]
test_labels = [row["label"] for row in test.take()]
train_label_counts = {label: train_labels.count(label) for label in ["A", "B", "C"]}
test_label_counts = {label: test_labels.count(label) for label in ["A", "B", "C"]}
# Check proportions are maintained as closely as possible
# Class A: 6 samples -> test_count = int(6 * 0.3) = 1 -> train: 5, test: 1
# Class B: 2 samples -> test_count = int(2 * 0.3) = 0 -> train: 2, test: 0
# Class C: 1 sample -> test_count = int(1 * 0.3) = 0 -> train: 1, test: 0
assert train_label_counts["A"] == 5
assert test_label_counts["A"] == 1
assert train_label_counts["B"] == 2
assert test_label_counts["B"] == 0
assert train_label_counts["C"] == 1
assert test_label_counts["C"] == 0
def test_split_is_not_disruptive(ray_start_cluster):
ray.shutdown()
ds = ray.data.range(100, override_num_blocks=10).map_batches(lambda x: x)
def verify_integrity(splits):
for dss in splits:
for batch in dss.iter_batches():
pass
for batch in ds.iter_batches():
pass
# No block splitting involved: split 10 even blocks into 2 groups.
verify_integrity(ds.split(2, equal=True))
# Block splitting involved: split 10 even blocks into 3 groups.
verify_integrity(ds.split(3, equal=True))
# Same as above but with transforms applied after converting to lazy.
verify_integrity(ds.map_batches(lambda x: x).split(2, equal=True))
verify_integrity(ds.map_batches(lambda x: x).split(3, equal=True))
# Same as above but with in-place transforms applied after converting to lazy.
verify_integrity(ds.randomize_block_order().split(2, equal=True))
verify_integrity(ds.randomize_block_order().split(3, equal=True))
def test_streaming_train_test_split_hash(ray_start_regular_shared_2_cpus):
ds = ray.data.range(10000000, override_num_blocks=10)
ds_train, ds_test = ds.streaming_train_test_split(
test_size=0.2, split_type="hash", hash_column="id"
)
np.testing.assert_almost_equal(float(ds_train.count()) / 10000000.0, 0.8, decimal=3)
np.testing.assert_almost_equal(float(ds_test.count()) / 10000000.0, 0.2, decimal=3)
# Check if train and test are disjoint
assert (
ds_train.join(ds_test, join_type="inner", on=("id",), num_partitions=1).count()
== 0
)
@pytest.mark.parametrize("seed", [None, 42])
def test_streaming_train_test_split_random(ray_start_regular_shared_2_cpus, seed):
ds = ray.data.range(10000000, override_num_blocks=10)
ds_train, ds_test = ds.streaming_train_test_split(
test_size=0.2, split_type="random", seed=seed
)
np.testing.assert_almost_equal(float(ds_train.count()) / 10000000.0, 0.8, decimal=3)
np.testing.assert_almost_equal(float(ds_test.count()) / 10000000.0, 0.2, decimal=3)
# Check if train and test are disjoint
assert (
ds_train.join(ds_test, join_type="inner", on=("id",), num_partitions=1).count()
== 0
)
@pytest.mark.parametrize(
"test_size,split_type,hash_column,seed,error_msg",
[
(0.2, "hash", None, None, "hash_column is required for hash split"),
(0.2, "hash", "id", 42, "seed is not supported for hash split"),
(0, "hash", "id", None, "test_size must be between 0 and 1"),
(1, "hash", "id", None, "test_size must be between 0 and 1"),
(0.2, "random", "id", None, "hash_column is not supported for random split"),
(0, "random", None, None, "test_size must be between 0 and 1"),
(1, "random", None, None, "test_size must be between 0 and 1"),
(0.2, "unknown", "id", None, "Invalid split type: unknown"),
],
)
def test_streaming_train_test_split_wrong_params(
ray_start_regular_shared_2_cpus, test_size, split_type, hash_column, seed, error_msg
):
ds = ray.data.range(10)
with pytest.raises(ValueError, match=error_msg):
ds.streaming_train_test_split(
test_size=test_size,
split_type=split_type,
hash_column=hash_column,
seed=seed,
)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| Counter |
| python | streamlit__streamlit | lib/tests/streamlit/elements/text_area_test.py | {"start": 12261, "end": 13251} |
class ____:
pass
def test_text_input_interaction():
"""Test interactions with an empty text_area widget."""
def script():
import streamlit as st
st.text_area("the label", value=None)
at = AppTest.from_function(script).run()
text_area = at.text_area[0]
assert text_area.value is None
# Input a value:
at = text_area.input("Foo").run()
text_area = at.text_area[0]
assert text_area.value == "Foo"
# Clear the value
at = text_area.set_value(None).run()
text_area = at.text_area[0]
assert text_area.value is None
def test_None_session_state_value_retained():
def script():
import streamlit as st
if "text_area" not in st.session_state:
st.session_state["text_area"] = None
st.text_area("text_area", key="text_area")
st.button("button")
at = AppTest.from_function(script).run()
at = at.button[0].click().run()
assert at.text_area[0].value is None
| SomeObj |
| python | pytorch__pytorch | test/test_proxy_tensor.py | {"start": 4748, "end": 29699} |
class ____(TestCase):
# WARNING: if any of your inputs are index tensors, DO NOT use this
# function
def _test(self, f, inps):
fx_f = make_fx(f, tracing_mode=self.tracing_mode)(*inps)
new_inps = tree_map(_create_new_input, inps)
r1 = fx_f(*new_inps)
r2 = f(*new_inps)
self.assertEqual(r1, r2)
def test_pre_dispatch_mode_stack(self):
def f(a):
b = torch.ones(4, 4)
return torch.matmul(a, b)
# We expect to see matmul in the trace - it should NOT be decomposed into mm.
# Also, torch.ones() doesn't show up in the trace.
# This is annoying but expected: ones() never dispatches to the Autograd dispatch key,
# so our mode never sees it - it goes directly to the BackendSelect key.
inp = torch.ones(4, 4)
# Test that make_fx(pre_dispatch=True) clears caches properly.
from torch._dispatch.python import enable_python_dispatcher
with enable_python_dispatcher():
out1 = f(inp)
fx_g = make_fx(f, pre_dispatch=True)(inp)
self.assertExpectedInline(fx_g.code.strip(), """\
def forward(self, a_1):
ones = torch.ops.aten.ones.default([4, 4], device = device(type='cpu'), pin_memory = False)
matmul = torch.ops.aten.matmul.default(a_1, ones); a_1 = ones = None
return matmul""")
def test_pre_dispatch_linear(self):
def f(a, b, c):
return torch.nn.functional.linear(a, b, c)
a = torch.ones(4, 4)
b = torch.ones(4, 4)
c = torch.ones(4)
fx_g = make_fx(f, pre_dispatch=True)(a, b, c)
out1 = f(a, b, c)
out2 = fx_g(a, b, c)
self.assertEqual(out1, out2)
def test_pre_dispatch_no_grad(self):
def f(a):
b = a.sin()
torch.set_grad_enabled(False)
c = b.cos()
torch.set_grad_enabled(True)
return b + c.sin()
a1 = torch.randn(4, requires_grad=True)
a2 = a1.detach().clone().requires_grad_(True)
a_tmp = a1.detach().clone().requires_grad_(True)
fx_g = make_fx(f, pre_dispatch=True)(a_tmp)
out1 = f(a1)
out2 = fx_g(a2)
self.assertEqual(out1, out2)
out1.sum().backward()
out2.sum().backward()
self.assertEqual(a1.grad, a2.grad)
def test_make_fx_simple(self):
def f(x):
return torch.sin(x)
self._test(f, (torch.randn(3),))
def test_scalar_device(self, device='cpu'):
def f(a, b):
return a + b
self._test(f, [torch.randn(3, device=device), torch.tensor(5)])
def test_isolated_graphmodule(self):
def is_any_sum(gm):
return any(node.target == torch.ops.aten.sum.default for node in gm.graph.nodes)
def is_any_digamma(gm):
return any(node.target == torch.ops.aten.digamma.default for node in gm.graph.nodes)
def is_any_sigmoid(gm):
return any(node.target == torch.ops.aten.sigmoid.default for node in gm.graph.nodes)
def inner(x):
return torch.sum(x)
def f(x):
gm = get_isolated_graphmodule(inner, (x,), {})
self.assertTrue(is_any_sum(gm))
return x + torch.randn(x.shape)
# get_isolated_graphmodule uses make_fx internally that shouldn't be traced
# by the outer make_fx call
traced = make_fx(f)(torch.randn(3))
self.assertFalse(is_any_sum(traced))
# When factory functions are used, they should not be traced
# by the outer make_fx call
def inner_with_factory():
val = torch.tensor(float(1))
val.add_(2)
return torch.full((10, 10), val).sum()
def f1(x):
gm = get_isolated_graphmodule(inner_with_factory, (), {})
self.assertTrue(is_any_sum(gm))
return torch.sigmoid(x)
def f2(x):
gm = get_isolated_graphmodule(f1, (x,), {})
self.assertFalse(is_any_sum(gm))
self.assertTrue(is_any_sigmoid(gm))
return torch.digamma(x)
traced = make_fx(f2)(torch.randn(3))
self.assertFalse(is_any_sum(traced))
self.assertFalse(is_any_sigmoid(traced))
self.assertTrue(is_any_digamma(traced))
# Verify nested make_fx calls don't make factory functions to be leaked
# into the outer graph. Verify that `make_fx` itself does not leak its execution.
def f2(x):
gm = make_fx(f1)(x)
self.assertFalse(is_any_sum(gm))
self.assertTrue(is_any_sigmoid(gm))
return torch.digamma(x)
traced = make_fx(f2)(torch.randn(3))
self.assertFalse(is_any_sum(traced))
self.assertFalse(is_any_sigmoid(traced))
self.assertTrue(is_any_digamma(traced))
# Verify that the `forward` function of a graph module produced as a
# side effect of an interior `make_fx` is still traced
def f3(x):
gm = make_fx(f1)(x)
self.assertFalse(is_any_sum(gm))
self.assertTrue(is_any_sigmoid(gm))
# `gm.forward` is still traced
return torch.digamma(gm(x))
traced = make_fx(f3)(torch.randn(3))
self.assertFalse(is_any_sum(traced))
self.assertTrue(is_any_sigmoid(traced))
self.assertTrue(is_any_digamma(traced))
# Verify interaction with non-ProxyTensor modes
from torch.testing._internal.logging_tensor import LoggingTensorMode
def f1_logging(x):
with LoggingTensorMode():
gm = get_isolated_graphmodule(inner_with_factory, (), {})
self.assertTrue(is_any_sum(gm))
return torch.sigmoid(x)
def f2_logging(x):
with LoggingTensorMode(), LoggingTensorMode():
gm = get_isolated_graphmodule(f1_logging, (x,), {})
self.assertFalse(is_any_sum(gm))
self.assertTrue(is_any_sigmoid(gm))
return torch.digamma(x)
traced = make_fx(f2_logging)(torch.randn(3))
self.assertFalse(is_any_sum(traced))
self.assertFalse(is_any_sigmoid(traced))
self.assertTrue(is_any_digamma(traced))
# Verify interaction with another tensor subclass
# This case currently doesn't work and should raise an error
# See: https://github.com/pytorch/pytorch/pull/81764#issuecomment-1200472068
from torch.testing._internal.logging_tensor import LoggingTensor
def f1_logging_tensor(x):
gm = get_isolated_graphmodule(inner_with_factory, (), {})
self.assertTrue(is_any_sum(gm))
return torch.sigmoid(x)
def f2_logging_tensor(x):
x = LoggingTensor(x)
gm = get_isolated_graphmodule(f1_logging_tensor, (x,), {})
self.assertFalse(is_any_sum(gm))
self.assertTrue(is_any_sigmoid(gm))
return torch.digamma(x)
traced = make_fx(f2_logging_tensor)(torch.randn(3))
self.assertFalse(is_any_sum(traced))
self.assertFalse(is_any_sigmoid(traced)) # this fails, sigmoid is traced with LoggingTensor
self.assertTrue(is_any_digamma(traced))
# See https://github.com/pytorch/pytorch/issues/97541
def test_empty_like_doesnt_burn_in_defaults(self):
def f(x):
return torch.empty_like(x)
out = make_fx(f)(torch.randn(3))
self.assertExpectedInline(out.code.strip(), """\
def forward(self, x_1):
empty_like = torch.ops.aten.empty_like.default(x_1, pin_memory = False); x_1 = None
return empty_like""")
def test_proxy_tensor_mode_with_decomp_table_preserves_proxy(self):
def f(x):
y = x.new_zeros(x.size())
y.copy_(x)
return y
def _new_zeros_decomp(inp, size, dtype=None, layout=None, device=None, pin_memory=None):
return torch.zeros(size, dtype=inp.dtype, device=inp.device)
factory_func_decomp = {torch.ops.aten.new_zeros.default: _new_zeros_decomp}
# When new_zeros() decomposes into torch.zero(), we expect ProxyTensorMode
# to still be (re-entrantly) enabled, so that the `torch.zero()` call
# returns a ProxyTensor.
out = make_fx(f, decomposition_table=factory_func_decomp)(torch.ones(2))
self.assertExpectedInline(out.code, """\
def forward(self, x_1):
zeros = torch.ops.aten.zeros.default([2], dtype = torch.float32, device = device(type='cpu'), pin_memory = False)
copy_ = torch.ops.aten.copy_.default(zeros, x_1); zeros = x_1 = None
return copy_
""")
def test_make_fx_reentrant_dispatch(self):
def f(x):
return torch.ops.aten.norm.Scalar(x, 2.0)
def norm_decomp(x, p=2.0):
if p != 2.0:
raise RuntimeError("can't handle with p != 2")
return torch.sqrt(torch.sum(torch.square(x)))
decomp = {torch.ops.aten.norm.Scalar: norm_decomp}
traced = make_fx(f, decomposition_table=decomp, tracing_mode=self.tracing_mode)(torch.rand(3))
for n in traced.graph.nodes:
self.assertTrue("square" not in str(n.target))
self.assertTrue("norm" not in str(n.target))
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
def test_resnet18_backward_trace(self):
mod = torchvision.models.resnet18()
# An old version of this test called the module directly. This works
# for tracing_mode == "real", but for fake tensors, we also have to
# ensure that the parameters and buffers get wrapped in fake tensors
# because free fake tensors are not supported. Fortunately functional_call
# does precisely this for us.
def f(x, params, buffers):
for p in params.values():
p.grad = None
loss = torch.func.functional_call(mod, {**params, **buffers}, (x,)).sum()
# I could have done this with the functional API, but there is
# plenty of exercising this; I want to show mutating API still
# works
loss.backward()
return [p.grad for p in params.values()]
inp = torch.randn(3, 3, 250, 250)
self._test(f, [inp, dict(mod.named_parameters()), dict(mod.named_buffers())])
def test_varargs(self):
def f(*args):
return sum(args)
self._test(f, [torch.randn(2), torch.randn(2)])
def test_proxy_tensor(self):
def f_grad(x):
val = x.cos().cos().sum()
return torch.autograd.grad(val, x)
def f_backward(x):
val = x.cos().cos().sum()
val.backward()
return x.grad
for f in [f_grad, f_backward]:
self._test(f, [torch.randn(3, requires_grad=True)])
def test_pickle_issue89626(self):
import pickle
x = torch.randn(2)
make_fx(lambda x: x * 2, tracing_mode=self.tracing_mode)(x)
pickle.dumps(x)
def test_inplace_metadata(self):
def f(x):
x = x.clone()
x.unsqueeze_(-1)
assert x.shape[-1] == 1
return x
self._test(f, [torch.randn(5)])
def test_mode_tracing_factory_function(self):
def f(x):
return x + torch.randn(x.shape)
# default behavior should trace factory functions
traced = make_fx(f, tracing_mode=self.tracing_mode)(torch.randn(3))
self.assertTrue(
any(
node.target == aten.randn.default
for node in traced.graph.nodes
)
)
def test_pre_dispatch_functionalization(self):
def f(x):
a = FunctionalTensorMode(pre_dispatch=True, export=True)
with a:
x_unwrapped = FunctionalTensor.to_functional(x)
y = torch.matmul(x_unwrapped, x_unwrapped)
y = y + x_unwrapped
y.mul_(5)
y_unwrapped = torch._from_functional_tensor(y.elem)
return y_unwrapped
from torch._dispatch.python import enable_python_dispatcher
with enable_python_dispatcher():
inp = torch.randn(4, 4)
gm = make_fx(f, pre_dispatch=True)(inp)
# TODO actually not decompose
self.assertExpectedInline(gm.code.strip(), """\
def forward(self, x_1):
matmul = torch.ops.aten.matmul.default(x_1, x_1)
add = torch.ops.aten.add.Tensor(matmul, x_1); matmul = x_1 = None
mul = torch.ops.aten.mul.Tensor(add, 5); add = None
return mul""")
def test_pre_dispatch_functionalization_view_op(self):
def f(x):
a = FunctionalTensorMode(pre_dispatch=True, export=True)
with a:
x_unwrapped = FunctionalTensor.to_functional(x)
y = torch.matmul(x_unwrapped, x_unwrapped)
x_unwrapped = x_unwrapped.transpose(1, 0)
y = y + x_unwrapped
y = y.view(2, 8)
y_unwrapped = torch._from_functional_tensor(y.elem)
return y_unwrapped
from torch._dispatch.python import enable_python_dispatcher
with enable_python_dispatcher():
inp = torch.randn(4, 4)
gm = make_fx(f, pre_dispatch=True)(inp)
# TODO actually not decompose
self.assertExpectedInline(gm.code.strip(), """\
def forward(self, x_1):
matmul = torch.ops.aten.matmul.default(x_1, x_1)
transpose = torch.ops.aten.transpose.int(x_1, 1, 0); x_1 = None
add = torch.ops.aten.add.Tensor(matmul, transpose); matmul = transpose = None
view = torch.ops.aten.view.default(add, [2, 8]); add = None
return view""")
def test_val_metadata_mutation(self):
def f(x):
y = x.clone()
y.unsqueeze_(0)
return y
traced = make_fx(f, tracing_mode=self.tracing_mode)(torch.randn(3, requires_grad=True))
self.assertEqual([
tuple(node.meta['val'].shape)
for node in traced.graph.nodes
if 'val' in node.meta
], [(3,), (3,), (1, 3)])
def test_make_fx_overloads(self):
def f(x):
return x.cos() + torch.randn(x.shape)
traced = make_fx(f, tracing_mode=self.tracing_mode)(torch.randn(3))
self.assertTrue(all(isinstance(node.target, torch._ops.OpOverload)
for node in traced.graph.nodes if node.op == 'call_function'))
def test_tensor_constants(self):
def f():
val = torch.tensor(float('inf'))
return torch.full((100, 100), val)
self._test(f, [])
def test_allclose(self):
def f(a, b):
return torch.allclose(a, b)
def test_f():
make_fx(f, tracing_mode=self.tracing_mode)(
torch.zeros(3), torch.zeros(3)
)
if self.tracing_mode != "real":
self.assertRaises(DataDependentOutputException, test_f)
else:
self.assertRaisesRegex(RuntimeError, "data-dependent", test_f)
def test_constant_proxy_tensor_mut(self):
def f():
val = torch.tensor(float(1))
val.add_(2)
return torch.full((100, 100), val)
g = make_fx(f, tracing_mode=self.tracing_mode)()
self.assertEqual(g(), f())
# In case we mutated shared state in the g graph!
self.assertEqual(g(), f())
def test_constant_unbind(self):
def f():
val = torch.tensor([2])
r, = torch.unbind(val, 0)
return r.item()
g = make_fx(f, tracing_mode=self.tracing_mode)()
self.assertEqual(g(), f())
def test_constant_blowup(self):
def f():
val = torch.tensor([2])
blowup = val.repeat(1000)
return bool(blowup.sum().item() == 2)
def test_f():
make_fx(f, tracing_mode=self.tracing_mode)()
self.assertRaisesRegex(RuntimeError, "data-dependent", test_f)
def test_constant_random(self):
def f():
val = torch.tensor([2.0])
val.normal_()
return bool(val.item() == 2.1)
def test_f():
make_fx(f, tracing_mode=self.tracing_mode)()
self.assertRaisesRegex(RuntimeError, "data-dependent", test_f)
def test_decomposition_interpreter(self):
def fn(x):
return torch.nn.functional.silu(x)
x = torch.rand((4, 4))
fx_module = make_fx(fn, tracing_mode=self.tracing_mode, decomposition_table=None)(x)
found_silu = False
for n in fx_module.graph.nodes:
if n.target == torch.ops.aten.silu or n.target == torch.ops.aten.silu.default:
found_silu = True
self.assertTrue(found_silu)
new_graph = torch.fx.Graph()
silu_decomp_table = {torch.ops.aten.silu.default: decomposition_table[torch.ops.aten.silu.default]}
DecompositionInterpreter(
fx_module,
new_graph=new_graph,
decomposition_table=silu_decomp_table,
).run(x)
decomposed_module = torch.fx.GraphModule(fx_module, new_graph)
for n in decomposed_module.graph.nodes:
self.assertTrue(n.target != torch.ops.aten.silu)
self.assertTrue(n.target != torch.ops.aten.silu.default)
self.assertEqual(fx_module(x), decomposed_module(x))
def test_make_fx_model_fwd_bwd(self):
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x).relu()
model = Foo()
def f(x, params):
out = torch.func.functional_call(model, params, x).sum()
out.backward()
return list(params.values())
input = torch.randn(3, 5, requires_grad=True)
params = dict(model.named_parameters())
fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params)
# fx may change the order of parameters in the list, so compare against both orderings
self.assertTrue(
torch.allclose(fx_f(input, params)[0], f(input, params)[0])
or
torch.allclose(fx_f(input, params)[0], f(input, params)[1])
)
self.assertTrue(
torch.allclose(fx_f(input, params)[1], f(input, params)[0])
or
torch.allclose(fx_f(input, params)[1], f(input, params)[1])
)
def test_make_fx_model_double_param(self):
class Emformer(torch.nn.Module):
def __init__(
self,
input_dim: int = 256,
) -> None:
super().__init__()
self.layer_norm = torch.nn.LayerNorm(input_dim)
def forward(mod_self, x): # noqa: B902
self.assertTrue(isinstance(mod_self.layer_norm.weight, torch.Tensor))
y = mod_self.layer_norm(x)
self.assertTrue(isinstance(mod_self.layer_norm.weight, torch.Tensor))
z = mod_self.layer_norm(y)
return z
gm = make_fx(Emformer())(torch.randn(16, 1, 256))
ops = {n.target for n in gm.graph.nodes if n.op == 'call_function'}
self.assertEqual(len(ops), 2)
def test_make_fx_model_fwd_bwd_wgtupdate(self):
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x).relu()
model = Foo()
def f(args, params, buffers):
for p in params.values():
p.grad = None
if not isinstance(args, Iterable):
args = [args]
params_and_buffers = {**params, **buffers}
out = torch.func.functional_call(model, params_and_buffers, args)
out.sum().backward()
return [p - 1e-4 * p.grad for p in params.values()]
input = torch.randn(3, 5, requires_grad=True)
params = dict(model.named_parameters())
buffers = dict(model.named_buffers())
fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params, buffers)
# fx may change the order of parameters in the list, so compare against both orderings
# also there is a numerical difference in results, so atol is loosened from 1e-08 to 1e-03
self.assertTrue(
torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[0], atol=1e-03)
or
torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[1], atol=1e-03)
)
self.assertTrue(
torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[0], atol=1e-03)
or
torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[1], atol=1e-03)
)
def test_trace_subclasses(self):
def f1(x):
x = UnwrapTensor(x)
y = x * 2
return y
def f2(x):
wrapped = UnwrapTensor(x)
y = x * wrapped
return y
inp = [torch.randn(5)]
self._test(f1, inp)
self._test(f2, inp)
def test_partial_decomp(self):
def f(a, b, c):
x = torch.addmm(a, b, c)
y = torch.addmm(a, b, c, beta=2, alpha=1)
return x + y
inps = [torch.randn(5, 5), torch.randn(5, 5), torch.randn(5, 5)]
fx_g = make_fx(f)(*inps)
def addmm(a, b, c, beta=1, alpha=1):
if beta == 1 and alpha == 1:
return NotImplemented
return beta * a + alpha * (b @ c)
decomposed_fx = make_fx(f, decomposition_table={aten.addmm.default: addmm})(*inps)
self.assertEqual(fx_g(*inps), decomposed_fx(*inps))
self.assertEqual(len([n for n in fx_g.graph.nodes if n.target == aten.addmm.default]), 2)
self.assertEqual(len([n for n in decomposed_fx.graph.nodes if n.target == aten.addmm.default]), 1)
def test_decomp_of_capture(self):
val = torch.randn(5)
def f(x):
return x.t() + val.t()
def nop(x):
return x.cos()
traced = make_fx(f, decomposition_table={torch.ops.aten.t.default: nop})(torch.randn(5))
self.assertEqual(len([n for n in traced.graph.nodes if n.target == torch.ops.aten.t.default]), 0)
@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
def test_amp_cache(self):
layer = torch.nn.Conv2d(3, 3, 3).cuda()
def f(x, w):
return torch.nn.functional.conv2d(x, w, stride=layer.stride)
inp = torch.randn(4, 3, 10, 10, device='cuda')
with torch.autocast('cuda'):
out_graph = make_fx(f)(inp, layer.weight).graph
out_graph2 = make_fx(f)(inp, layer.weight).graph
self.assertEqual(len(out_graph.nodes), len(out_graph2.nodes))
for a, b in zip(out_graph.nodes, out_graph2.nodes):
self.assertEqual(a.op, b.op)
def test_strides(self):
def f(x):
self.assertTrue(x.is_contiguous())
self.assertFalse(x.is_contiguous(memory_format=torch.channels_last))
x = x.permute(0, 3, 1, 2)
self.assertFalse(x.is_contiguous())
self.assertTrue(x.is_contiguous(memory_format=torch.channels_last))
return x
make_fx(f)(torch.randn(2, 3, 4, 5))
def f(x):
self.assertTrue(x.is_contiguous())
y = x[:, 1]
self.assertFalse(y.is_contiguous())
y = x[:, ::2]
self.assertFalse(y.is_contiguous())
return x.cos()
make_fx(f)(torch.randn(2, 3, 4, 5))
def test_pr_86917(self):
# Tests the issue brought up here https://github.com/pytorch/pytorch/pull/86917#issuecomment-1283155344
def f(a, b):
return torch.ops.aten.nll_loss_forward(a, b, None, 1, 10)
self._test(f, [torch.randn(1, 10), torch.zeros(1, dtype=torch.long)])
@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
def test_T244632748(self):
class TestModule(torch.nn.Module):
def forward(self, x):
return x + (x.shape[0] * 2)
mod = TestModule()
sample = torch.randn((5, 5)).to("cuda")
dim0 = torch.export.Dim.DYNAMIC(max=100)
dynamic_shapes = {"x": (dim0, torch.export.Dim.STATIC)}
ep = torch.export.export(mod, (sample,), dynamic_shapes=dynamic_shapes)
gm = ep.module()
symint = list(gm.graph.nodes)[3].meta["val"]
list(gm.graph.nodes)[3].replace_all_uses_with(symint)
gm.graph.eliminate_dead_code()
inductor_fx = torch._inductor.aot_compile(
gm, (sample,), options={"fx_wrapper": True, "compile_threads": 1}
)
| TestGenericProxyTensor |
| python | sqlalchemy__sqlalchemy | test/sql/test_sequences.py | {"start": 4750, "end": 13558} |
class ____(fixtures.TestBase):
__requires__ = ("sequences",)
__sparse_driver_backend__ = True
@classmethod
def setup_test_class(cls):
cls.seq = normalize_sequence(config, Sequence("my_sequence"))
cls.seq.create(testing.db)
@classmethod
def teardown_test_class(cls):
cls.seq.drop(testing.db)
def _assert_seq_result(self, ret):
"""asserts return of next_value is an int"""
assert isinstance(ret, int)
assert ret >= testing.db.dialect.default_sequence_base
def test_execute(self, connection):
s = normalize_sequence(config, Sequence("my_sequence"))
self._assert_seq_result(connection.scalar(s))
def test_execute_deprecated(self, connection):
s = normalize_sequence(config, Sequence("my_sequence", optional=True))
with expect_deprecated(
r"Using the .execute\(\) method to invoke a "
r"DefaultGenerator object is deprecated; please use "
r"the .scalar\(\) method."
):
self._assert_seq_result(connection.execute(s))
def test_scalar_optional(self, connection):
"""test dialect executes a Sequence, returns nextval, whether
or not "optional" is set"""
s = normalize_sequence(config, Sequence("my_sequence", optional=True))
self._assert_seq_result(connection.scalar(s))
def test_execute_next_value(self, connection):
"""test func.next_value().execute()/.scalar() works
with connectionless execution."""
s = normalize_sequence(config, Sequence("my_sequence"))
self._assert_seq_result(connection.scalar(s.next_value()))
def test_execute_optional_next_value(self, connection):
"""test func.next_value().execute()/.scalar() works
with connectionless execution."""
s = normalize_sequence(config, Sequence("my_sequence", optional=True))
self._assert_seq_result(connection.scalar(s.next_value()))
def test_func_embedded_select(self, connection):
"""test can use next_value() in select column expr"""
s = normalize_sequence(config, Sequence("my_sequence"))
self._assert_seq_result(connection.scalar(select(s.next_value())))
@testing.requires.sequences_in_other_clauses
@testing.provide_metadata
def test_func_embedded_whereclause(self, connection):
"""test can use next_value() in whereclause"""
metadata = self.metadata
t1 = Table("t", metadata, Column("x", Integer))
t1.create(testing.db)
connection.execute(t1.insert(), [{"x": 1}, {"x": 300}, {"x": 301}])
s = normalize_sequence(config, Sequence("my_sequence"))
eq_(
list(
connection.execute(t1.select().where(t1.c.x > s.next_value()))
),
[(300,), (301,)],
)
@testing.provide_metadata
def test_func_embedded_valuesbase(self, connection):
"""test can use next_value() in values() of _ValuesBase"""
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x", Integer),
)
t1.create(testing.db)
s = normalize_sequence(config, Sequence("my_sequence"))
connection.execute(t1.insert().values(x=s.next_value()))
self._assert_seq_result(connection.scalar(t1.select()))
def test_inserted_pk_no_returning(self, metadata, connection):
"""test inserted_primary_key contains [None] when
pk_col=next_value(), implicit returning is not used."""
# I'm not really sure what this test wants to accomplish.
t1 = Table(
"t",
metadata,
Column("x", Integer, primary_key=True),
implicit_returning=False,
)
s = normalize_sequence(
config, Sequence("my_sequence_here", metadata=metadata)
)
conn = connection
t1.create(conn)
s.create(conn)
r = conn.execute(t1.insert().values(x=s.next_value()))
if testing.requires.emulated_lastrowid_even_with_sequences.enabled:
eq_(r.inserted_primary_key, (1,))
else:
eq_(r.inserted_primary_key, (None,))
@testing.combinations(
("implicit_returning",),
("no_implicit_returning",),
("explicit_returning", testing.requires.insert_returning),
(
"return_defaults_no_implicit_returning",
testing.requires.insert_returning,
),
(
"return_defaults_implicit_returning",
testing.requires.insert_returning,
),
argnames="returning",
)
@testing.requires.multivalues_inserts
def test_seq_multivalues_inline(self, metadata, connection, returning):
_implicit_returning = "no_implicit_returning" not in returning
t1 = Table(
"t",
metadata,
Column(
"x",
Integer,
normalize_sequence(config, Sequence("my_seq")),
primary_key=True,
),
Column("data", String(50)),
implicit_returning=_implicit_returning,
)
metadata.create_all(connection)
conn = connection
stmt = t1.insert().values(
[{"data": "d1"}, {"data": "d2"}, {"data": "d3"}]
)
if returning == "explicit_returning":
stmt = stmt.returning(t1.c.x)
elif "return_defaults" in returning:
stmt = stmt.return_defaults()
r = conn.execute(stmt)
if returning == "explicit_returning":
eq_(r.all(), [(1,), (2,), (3,)])
elif "return_defaults" in returning:
eq_(r.returned_defaults_rows, None)
# TODO: not sure what this is
eq_(r.inserted_primary_key_rows, [(None,)])
eq_(
conn.execute(t1.select().order_by(t1.c.x)).all(),
[(1, "d1"), (2, "d2"), (3, "d3")],
)
@testing.combinations(
("implicit_returning",),
("no_implicit_returning",),
(
"explicit_returning",
testing.requires.insert_returning
+ testing.requires.insert_executemany_returning,
),
(
"return_defaults_no_implicit_returning",
testing.requires.insert_returning
+ testing.requires.insert_executemany_returning,
),
(
"return_defaults_implicit_returning",
testing.requires.insert_returning
+ testing.requires.insert_executemany_returning,
),
argnames="returning",
)
def test_seq_multivalues_executemany(
self, connection, metadata, returning
):
_implicit_returning = "no_implicit_returning" not in returning
t1 = Table(
"t",
metadata,
Column(
"x",
Integer,
normalize_sequence(config, Sequence("my_seq")),
primary_key=True,
),
Column("data", String(50)),
implicit_returning=_implicit_returning,
)
metadata.create_all(connection)
conn = connection
stmt = t1.insert()
if returning == "explicit_returning":
stmt = stmt.returning(t1.c.x)
elif "return_defaults" in returning:
stmt = stmt.return_defaults()
r = conn.execute(
stmt, [{"data": "d1"}, {"data": "d2"}, {"data": "d3"}]
)
if returning == "explicit_returning":
eq_(r.all(), [(1,), (2,), (3,)])
elif "return_defaults" in returning:
if "no_implicit_returning" in returning:
eq_(r.returned_defaults_rows, None)
eq_(r.inserted_primary_key_rows, [(1,), (2,), (3,)])
else:
eq_(r.returned_defaults_rows, [(1,), (2,), (3,)])
eq_(r.inserted_primary_key_rows, [(1,), (2,), (3,)])
eq_(
conn.execute(t1.select().order_by(t1.c.x)).all(),
[(1, "d1"), (2, "d2"), (3, "d3")],
)
@testing.requires.insert_returning
def test_inserted_pk_implicit_returning(self, connection, metadata):
"""test inserted_primary_key contains the result when
pk_col=next_value(), when implicit returning is used."""
s = normalize_sequence(config, Sequence("my_sequence"))
t1 = Table(
"t",
metadata,
Column(
"x",
Integer,
primary_key=True,
),
implicit_returning=True,
)
t1.create(connection)
r = connection.execute(t1.insert().values(x=s.next_value()))
self._assert_seq_result(r.inserted_primary_key[0])
|
SequenceExecTest
|
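A minimal usage sketch for the sequence API exercised by the tests above; the PostgreSQL dialect is an assumption, chosen only because it supports sequences, and no live database is needed since the statement is only compiled.
from sqlalchemy import Sequence, select
from sqlalchemy.dialects import postgresql

seq = Sequence("my_sequence")
stmt = select(seq.next_value())
# Renders roughly: SELECT nextval('my_sequence') AS next_value_1
print(stmt.compile(dialect=postgresql.dialect()))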
python
|
django-import-export__django-import-export
|
tests/core/tests/resources.py
|
{
"start": 1577,
"end": 1695
}
|
class ____(resources.ModelResource):
class Meta:
model = Profile
exclude = ("user",)
|
ProfileResource
|
python
|
bokeh__bokeh
|
src/bokeh/models/tools.py
|
{
"start": 21660,
"end": 27179
}
|
class ____(Scroll):
''' *toolbar icon*: |wheel_zoom_icon|
The wheel zoom tool will zoom the plot in and out, centered on the
current mouse location.
The wheel zoom tool also activates the border regions of a Plot for
"single axis" zooming. For instance, zooming in the vertical border or
axis will effect a zoom in the vertical direction only, with the
horizontal dimension kept fixed.
.. |wheel_zoom_icon| image:: /_images/icons/wheel-zoom.svg
:height: 24px
:alt: Icon of a mouse shape next to an hourglass representing the wheel-zoom tool in the toolbar.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# ZoomBaseTool common {
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the wheel zoom tool is constrained to act in. By default
the wheel zoom tool will zoom in any dimension, but can be configured to
only zoom horizontally across the width of the plot, or vertically across
the height of the plot.
""")
renderers = Either(Auto, List(Instance(DataRenderer)), default="auto", help="""
Restrict zoom to ranges used by the provided data renderers. If ``"auto"``
then all ranges provided by the cartesian frame will be used.
""")
level = NonNegative(Int, default=0, help="""
When working with composite scales (sub-coordinates), this property
allows to configure which set of ranges to scale. The default is to
scale top-level (frame) ranges.
""")
# }
hit_test = Bool(default=False, help="""
Whether to zoom only those renderer that are being pointed at.
This setting only applies when zooming renderers that were configured with
sub-coordinates, otherwise it has no effect.
If ``True``, then ``hit_test_mode`` property defines how hit testing
is performed and ``hit_test_behavior`` allows to configure other aspects
of this setup. See respective properties for details.
.. note::
This property is experimental and may change at any point
""")
hit_test_mode = Enum("point", "hline", "vline", default="point", help="""
Allows to configure what geometry to use when ``hit_test`` is enabled.
Supported modes are ``"point"`` for single point hit testing, and ``hline``
and ``vline`` for either horizontal or vertical span hit testing.
.. note::
This property is experimental and may change at any point
""")
hit_test_behavior = Either(Instance(GroupBy), Enum("only_hit"), default="only_hit", help="""
Allows to configure which renderers will be zoomed when ``hit_test`` is enabled.
By default (``only_hit``) only the renderers that were actually hit will be zoomed. An
instance of ``GroupBy`` model can be used to tell what other renderers
should be zoomed when a given one is hit.
.. note::
This property is experimental and may change at any point
""").accepts(Enum("group_by_name"), lambda _: GroupByName()) \
.accepts(List(List(Instance(DataRenderer))), lambda groups: GroupByModels(groups=groups))
maintain_focus = Bool(default=True, help="""
If True, then hitting a range bound in any one dimension will prevent all
further zooming in all dimensions. If False, zooming can continue
independently in any dimension that has not yet reached its bounds, even if
that causes overall focus or aspect ratio to change.
""")
zoom_on_axis = Bool(default=True, help="""
Whether scrolling on an axis (outside the central plot area) should zoom
that dimension. If enabled, the behavior of this feature can be configured
with ``zoom_together`` property.
""")
zoom_together = Enum("none", "cross", "all", default="all", help="""
Defines the behavior of the tool when zooming on an axis:
- ``"none"``
zoom only the axis that's being interacted with. Neither the cross
axes nor any other axes in the dimension of this axis will be affected.
- ``"cross"``
zoom the axis that's being interacted with and its cross
axis, if configured. No other axes in this or cross dimension will be
affected.
- ``"all"``
zoom all axes in the dimension of the axis that's being
interacted with. All cross axes will be unaffected.
""")
speed = Float(default=1/600, help="""
Speed at which the wheel zooms. Default is 1/600. Optimal range is between
0.001 and 0.09. High values will be clipped. Speed may vary between browsers.
""")
modifiers = Modifiers(default={}, help="""
Allows to configure a combination of modifier keys, which need to
be pressed during the selected gesture for this tool to trigger.
For example, to zoom only when ``Ctrl`` and ``Shift`` keys are
pressed, use:
.. code-block:: python
tool = WheelZoomTool(modifiers=dict(ctrl=True, shift=True))
plot.add_tools(tool)
or alternatively using a concise syntax:
.. code-block:: python
tool = WheelZoomTool(modifiers="ctrl+shift")
plot.add_tools(tool)
.. note::
Setting modifiers allows this tool to be automatically activated,
if ``Toolbar.active_scroll`` is set to ``"auto"``.
.. warning::
Configuring modifiers is a platform dependent feature and
can make this tool unusable for example on mobile devices.
""").accepts(String, _parse_modifiers)
|
WheelZoomTool
|
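A small configuration sketch for the tool defined above, assuming Bokeh's standard bokeh.plotting API; the property values are purely illustrative.
from bokeh.models import WheelZoomTool
from bokeh.plotting import figure

p = figure()
# Constrain zooming to the horizontal dimension and scale axes independently.
wheel = WheelZoomTool(dimensions="width", zoom_together="none", speed=1 / 600)
p.add_tools(wheel)
p.toolbar.active_scroll = wheel  # make wheel zoom the active scroll gesture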
python
|
google__pytype
|
build_scripts/release.py
|
{
"start": 2587,
"end": 3833
}
|
class ____:
"""Context manager to build the pytype distribution package."""
def __enter__(self):
sdist_cmd = ["python", "setup.py", "sdist"]
print(f"Creating distribution package: {sdist_cmd}\n")
returncode, stdout = build_utils.run_cmd(sdist_cmd)
if returncode != 0:
raise ReleaseError(f"Running {sdist_cmd} failed:\n{stdout}")
# The sdist command creates the distribution package in a directory
# named "dist"
self.dist_path = os.path.join(build_utils.PYTYPE_SRC_ROOT, "dist")
return self.dist_path
def __exit__(self, exc_type, exc_val, exc_tb):
print("Deleting the distribution directory ...\n")
shutil.rmtree(self.dist_path)
print("Deleting the metadata directory ...\n")
shutil.rmtree(os.path.join(build_utils.PYTYPE_SRC_ROOT, "pytype.egg-info"))
return False
def main():
args = parse_args()
verify_no_pytype_installation_exists()
check_if_version_is_ok()
verify_pypirc_exists()
try:
with DistributionPackage() as pkg_path:
upload_package(pkg_path, args.mode == TEST_MODE)
except ReleaseError as error:
sys.exit(f">>> Release Failed <<<\n{error.msg}")
print("!!! Release Successful !!!\n")
if __name__ == "__main__":
main()
|
DistributionPackage
|
python
|
tornadoweb__tornado
|
demos/facebook/facebook.py
|
{
"start": 3613,
"end": 4146
}
|
class ____(tornado.web.UIModule):
def render(self, post):
return self.render_string("modules/post.html", post=post)
async def main():
tornado.options.parse_command_line()
if not (options.facebook_api_key and options.facebook_secret):
print("--facebook_api_key and --facebook_secret must be set")
return
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
await asyncio.Event().wait()
if __name__ == "__main__":
asyncio.run(main())
|
PostModule
|
python
|
sphinx-doc__sphinx
|
sphinx/transforms/__init__.py
|
{
"start": 3283,
"end": 5012
}
|
class ____(SphinxTransform):
"""Replace some substitutions if they aren't defined in the document."""
# run before the default Substitutions
default_priority = 210
def apply(self, **kwargs: Any) -> None:
# only handle those not otherwise defined in the document
to_handle = _DEFAULT_SUBSTITUTIONS - set(self.document.substitution_defs)
for ref in self.document.findall(nodes.substitution_reference):
if (name := ref['refname']) in to_handle:
ref.replace_self(self._handle_default_substitution(name))
def _handle_default_substitution(
self, name: _DEFAULT_SUBSTITUTION_NAMES
) -> nodes.Text:
if name == 'translation progress':
# special handling: calculate translation progress
return nodes.Text(_calculate_translation_progress(self.document))
if name == 'today':
if text := self.config.today:
return nodes.Text(text)
# special handling: can also specify a strftime format
today_fmt = self.config.today_fmt or _('%b %d, %Y')
return nodes.Text(format_date(today_fmt, language=self.config.language))
# config.version and config.release
return nodes.Text(getattr(self.config, name))
def _calculate_translation_progress(document: nodes.document) -> str:
try:
translation_progress = document['translation_progress']
except KeyError:
return _('could not calculate translation progress!')
total = translation_progress['total']
translated = translation_progress['translated']
if total <= 0:
return _('no translated elements!')
return f'{translated / total:.2%}'
|
DefaultSubstitutions
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/grpc.py
|
{
"start": 616,
"end": 816
}
|
class ____(str, BaseEnum):
"""Define how the query's hybrid fusion operation should be performed."""
RANKED = "FUSION_TYPE_RANKED"
RELATIVE_SCORE = "FUSION_TYPE_RELATIVE_SCORE"
|
HybridFusion
|
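A hypothetical usage sketch for the enum above with the v4 collections API; the collection name and query text are placeholders, and a locally running Weaviate instance is assumed.
import weaviate
from weaviate.collections.classes.grpc import HybridFusion

client = weaviate.connect_to_local()  # assumes a local Weaviate instance
try:
    articles = client.collections.get("Article")  # placeholder collection
    result = articles.query.hybrid(
        query="renewable energy",
        alpha=0.5,
        fusion_type=HybridFusion.RELATIVE_SCORE,
    )
    for obj in result.objects:
        print(obj.properties)
finally:
    client.close()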
python
|
wandb__wandb
|
wandb/vendor/pygments/styles/bw.py
|
{
"start": 357,
"end": 1355
}
|
class ____(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "italic",
Comment.Preproc: "noitalic",
Keyword: "bold",
Keyword.Pseudo: "nobold",
Keyword.Type: "nobold",
Operator.Word: "bold",
Name.Class: "bold",
Name.Namespace: "bold",
Name.Exception: "bold",
Name.Entity: "bold",
Name.Tag: "bold",
String: "italic",
String.Interpol: "bold",
String.Escape: "bold",
Generic.Heading: "bold",
Generic.Subheading: "bold",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold",
Error: "border:#FF0000"
}
|
BlackWhiteStyle
|
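The class above is a vendored copy of Pygments' black-and-white style; here is a sketch of how such a Style subclass is consumed, using the upstream pygments names rather than the vendored module.
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer
from pygments.styles.bw import BlackWhiteStyle  # upstream equivalent of the vendored class

code = "def f():\n    return 1\n"
print(highlight(code, PythonLexer(), HtmlFormatter(style=BlackWhiteStyle)))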
python
|
kubernetes-client__python
|
kubernetes/client/api/logs_api.py
|
{
"start": 543,
"end": 9507
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def log_file_handler(self, logpath, **kwargs): # noqa: E501
"""log_file_handler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.log_file_handler(logpath, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str logpath: path to the log (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.log_file_handler_with_http_info(logpath, **kwargs) # noqa: E501
def log_file_handler_with_http_info(self, logpath, **kwargs): # noqa: E501
"""log_file_handler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.log_file_handler_with_http_info(logpath, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str logpath: path to the log (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'logpath'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method log_file_handler" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'logpath' is set
if self.api_client.client_side_validation and ('logpath' not in local_var_params or # noqa: E501
local_var_params['logpath'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `logpath` when calling `log_file_handler`") # noqa: E501
collection_formats = {}
path_params = {}
if 'logpath' in local_var_params:
path_params['logpath'] = local_var_params['logpath'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/logs/{logpath}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def log_file_list_handler(self, **kwargs): # noqa: E501
"""log_file_list_handler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.log_file_list_handler(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.log_file_list_handler_with_http_info(**kwargs) # noqa: E501
def log_file_list_handler_with_http_info(self, **kwargs): # noqa: E501
"""log_file_list_handler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.log_file_list_handler_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method log_file_list_handler" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/logs/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
LogsApi
|
python
|
huggingface__transformers
|
src/transformers/core_model_loading.py
|
{
"start": 7296,
"end": 9084
}
|
class ____(ConversionOps):
"""Inverse of :class:`MergeModulelist` using explicit split sizes per group."""
def __init__(self, dim: int = 0):
self.dim = dim
@torch.no_grad
def convert(
self, input_dict: dict[str, torch.Tensor], source_patterns: list[str], target_patterns: list[str], **kwargs
) -> dict[str, torch.Tensor]:
all_tensors = {}
for source_pattern, tensors in input_dict.items():
tensor = tensors[0] if isinstance(tensors, list) else tensors
# We split in the number of tensors present in the given dim
sizes = tensor.size(self.dim)
targets = self.get_target_patterns(input_dict, source_pattern, target_patterns, sizes)
chunks = torch.chunk(tensor, sizes, dim=self.dim)
# We squeeze each chunk here as well to make sure to give them their original shape
all_tensors.update({target: chunk.squeeze() for target, chunk in zip(targets, chunks)})
return all_tensors
def get_target_patterns(
self, input_dict: dict, source_pattern: str, target_patterns: list[str], sizes: int
) -> list[str]:
# Here it's a single operation, so we use the target
if len(input_dict) == 1:
if len(target_patterns) == 1:
return [target_patterns[0].replace("*", f"{i}") for i in range(sizes)]
else:
raise ValueError("Undefined Operation encountered!")
# Here it's the last operation in a chain, so we use the source as they were replaced before in the chain
else:
return [source_pattern.replace("*", f"{i}") for i in range(sizes)]
@property
def reverse_op(self) -> ConversionOps:
return MergeModulelist(self.dim)
|
SplitModulelist
|
python
|
networkx__networkx
|
networkx/algorithms/tests/test_dominance.py
|
{
"start": 3442,
"end": 9811
}
|
class ____:
@pytest.mark.parametrize("G", [nx.Graph(), nx.MultiGraph()])
def test_raises_undirected(self, G):
"""Check that `dominance_frontiers` raises for undirected graphs."""
with pytest.raises(
nx.NetworkXNotImplemented, match=r"not implemented for undirected"
):
nx.dominance_frontiers(G, 0)
def test_raises_node(self):
"""Check that `dominance_frontiers` raises when `start` is not in the graph."""
G = nx.empty_graph(1, create_using=nx.DiGraph)
with pytest.raises(nx.NetworkXError, match=r"not in G"):
nx.dominance_frontiers(G, 1)
def test_singleton(self):
G = nx.DiGraph()
G.add_node(0)
assert nx.dominance_frontiers(G, 0) == {0: set()}
G.add_edge(0, 0)
assert nx.dominance_frontiers(G, 0) == {0: {0}}
@pytest.mark.parametrize("gen, df", [(nx.path_graph, set()), (nx.cycle_graph, {0})])
@pytest.mark.parametrize("n", [5, 10, 20])
def test_path_and_cycle(self, gen, df, n):
"""Check that `dominance_frontiers` is correct for path and cycle graphs."""
G = gen(n, create_using=nx.DiGraph())
assert nx.dominance_frontiers(G, 0) == dict.fromkeys(range(n), df)
def test_unreachable(self):
n = 5
G = nx.path_graph(n, create_using=nx.DiGraph())
assert nx.dominance_frontiers(G, 1) == dict.fromkeys(range(1, n), set())
def test_irreducible1(self):
"""
Graph taken from figure 2 of "A simple, fast dominance algorithm." (2006).
https://hdl.handle.net/1911/96345
"""
edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)]
G = nx.DiGraph(edges)
assert nx.dominance_frontiers(G, 5) == {
1: {2},
2: {1},
3: {2},
4: {1},
5: set(),
}
def test_irreducible2(self):
"""
Graph taken from figure 4 of "A simple, fast dominance algorithm." (2006).
https://hdl.handle.net/1911/96345
"""
edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1), (6, 4), (6, 5)]
G = nx.DiGraph(edges)
assert nx.dominance_frontiers(G, 6) == {
1: {2},
2: {1, 3},
3: {2},
4: {2, 3},
5: {1},
6: set(),
}
def test_domrel_png(self):
# Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png
edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)]
G = nx.DiGraph(edges)
assert nx.dominance_frontiers(G, 1) == {
1: set(),
2: {2},
3: {5},
4: {5},
5: {2},
6: set(),
}
# Test postdominance.
result = nx.dominance_frontiers(G.reverse(copy=False), 6)
assert result == {1: set(), 2: {2}, 3: {2}, 4: {2}, 5: {2}, 6: set()}
def test_boost_example(self):
# Graph taken from Figure 1 of
# http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm
edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6), (5, 7), (6, 4)]
G = nx.DiGraph(edges)
assert nx.dominance_frontiers(G, 0) == {
0: set(),
1: set(),
2: {7},
3: {7},
4: {4, 7},
5: {7},
6: {4},
7: set(),
}
# Test postdominance.
result = nx.dominance_frontiers(G.reverse(copy=False), 7)
expected = {
0: set(),
1: set(),
2: {1},
3: {1},
4: {1, 4},
5: {1},
6: {4},
7: set(),
}
assert result == expected
def test_discard_issue(self):
# https://github.com/networkx/networkx/issues/2071
g = nx.DiGraph()
g.add_edges_from(
[
("b0", "b1"),
("b1", "b2"),
("b2", "b3"),
("b3", "b1"),
("b1", "b5"),
("b5", "b6"),
("b5", "b8"),
("b6", "b7"),
("b8", "b7"),
("b7", "b3"),
("b3", "b4"),
]
)
df = nx.dominance_frontiers(g, "b0")
assert df == {
"b4": set(),
"b5": {"b3"},
"b6": {"b7"},
"b7": {"b3"},
"b0": set(),
"b1": {"b1"},
"b2": {"b3"},
"b3": {"b1"},
"b8": {"b7"},
}
def test_loop(self):
g = nx.DiGraph()
g.add_edges_from([("a", "b"), ("b", "c"), ("b", "a")])
df = nx.dominance_frontiers(g, "a")
assert df == {"a": {"a"}, "b": {"a"}, "c": set()}
def test_missing_immediate_doms(self):
# see https://github.com/networkx/networkx/issues/2070
g = nx.DiGraph()
edges = [
("entry_1", "b1"),
("b1", "b2"),
("b2", "b3"),
("b3", "exit"),
("entry_2", "b3"),
]
# entry_1
# |
# b1
# |
# b2 entry_2
# | /
# b3
# |
# exit
g.add_edges_from(edges)
# formerly raised KeyError on entry_2 when parsing b3
# because entry_2 does not have immediate doms (no path)
nx.dominance_frontiers(g, "entry_1")
def test_loops_larger(self):
# from
# http://ecee.colorado.edu/~waite/Darmstadt/motion.html
g = nx.DiGraph()
edges = [
("entry", "exit"),
("entry", "1"),
("1", "2"),
("2", "3"),
("3", "4"),
("4", "5"),
("5", "6"),
("6", "exit"),
("6", "2"),
("5", "3"),
("4", "4"),
]
g.add_edges_from(edges)
df = nx.dominance_frontiers(g, "entry")
answer = {
"entry": set(),
"1": {"exit"},
"2": {"exit", "2"},
"3": {"exit", "3", "2"},
"4": {"exit", "4", "3", "2"},
"5": {"exit", "3", "2"},
"6": {"exit", "2"},
"exit": set(),
}
for n in df:
assert set(df[n]) == set(answer[n])
|
TestDominanceFrontiers
|
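A minimal interactive sketch of the function under test, reproducing the small Domrel example from the suite above.
import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)])
# Expected: {1: set(), 2: {2}, 3: {5}, 4: {5}, 5: {2}, 6: set()}
print(nx.dominance_frontiers(G, 1))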
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_histograms.py
|
{
"start": 1002,
"end": 15855
}
|
class ____(TestCase):
def test_simple(self):
n = 100
v = np.random.rand(n)
(a, b) = histogram(v)
# check if the sum of the bins equals the number of samples
assert_equal(np.sum(a, axis=0), n)
# check that the bin counts are evenly spaced when the data is from
# a linear function
(a, b) = histogram(np.linspace(0, 10, 100))
assert_array_equal(a, 10)
def test_one_bin(self):
# Ticket 632
hist, edges = histogram([1, 2, 3, 4], [1, 2])
assert_array_equal(
hist,
[
2,
],
)
assert_array_equal(edges, [1, 2])
assert_raises((RuntimeError, ValueError), histogram, [1, 2], bins=0)
h, e = histogram([1, 2], bins=1)
assert_equal(h, np.array([2]))
assert_allclose(e, np.array([1.0, 2.0]))
def test_density(self):
# Check that the integral of the density equals 1.
n = 100
v = np.random.rand(n)
a, b = histogram(v, density=True)
area = np.sum(a * np.diff(b))
assert_almost_equal(area, 1)
# Check with non-constant bin widths
v = np.arange(10)
bins = [0, 1, 3, 6, 10]
a, b = histogram(v, bins, density=True)
assert_almost_equal(a, 0.1)
assert_equal(np.sum(a * np.diff(b)), 1)
# Test that passing False works too
a, b = histogram(v, bins, density=False)
assert_array_equal(a, [1, 2, 3, 4])
# Variable bin widths are especially useful to deal with
# infinities.
v = np.arange(10)
bins = [0, 1, 3, 6, np.inf]
a, b = histogram(v, bins, density=True)
assert_almost_equal(a, [0.1, 0.1, 0.1, 0.0])
# Taken from a bug report from N. Becker on the numpy-discussion
# mailing list Aug. 6, 2010.
counts, dmy = np.histogram([1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
assert_equal(counts, [0.25, 0])
def test_outliers(self):
# Check that outliers are not tallied
a = np.arange(10) + 0.5
# Lower outliers
h, b = histogram(a, range=[0, 9])
assert_equal(h.sum(), 9)
# Upper outliers
h, b = histogram(a, range=[1, 10])
assert_equal(h.sum(), 9)
# Normalization
h, b = histogram(a, range=[1, 9], density=True)
assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
# Weights
w = np.arange(10) + 0.5
h, b = histogram(a, range=[1, 9], weights=w, density=True)
assert_equal((h * np.diff(b)).sum(), 1)
h, b = histogram(a, bins=8, range=[1, 9], weights=w)
assert_equal(h, w[1:-1])
def test_arr_weights_mismatch(self):
a = np.arange(10) + 0.5
w = np.arange(11) + 0.5
with assert_raises((RuntimeError, ValueError)): # , "same shape as"):
h, b = histogram(a, range=[1, 9], weights=w, density=True)
def test_type(self):
# Check the type of the returned histogram
a = np.arange(10) + 0.5
h, b = histogram(a)
assert_(np.issubdtype(h.dtype, np.integer))
h, b = histogram(a, density=True)
assert_(np.issubdtype(h.dtype, np.floating))
h, b = histogram(a, weights=np.ones(10, int))
assert_(np.issubdtype(h.dtype, np.integer))
h, b = histogram(a, weights=np.ones(10, float))
assert_(np.issubdtype(h.dtype, np.floating))
def test_f32_rounding(self):
# gh-4799, check that the rounding of the edges works with float32
x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
assert_equal(counts_hist.sum(), 3.0)
def test_bool_conversion(self):
# gh-12107
# Reference integer histogram
a = np.array([1, 1, 0], dtype=np.uint8)
int_hist, int_edges = np.histogram(a)
# Should raise a warning on booleans
# Ensure that the histograms are equivalent, need to suppress
# the warnings to get the actual outputs
# with suppress_warnings() as sup:
# rec = sup.record(RuntimeWarning, 'Converting input from .*')
hist, edges = np.histogram([True, True, False])
# A warning should be issued
# assert_equal(len(rec), 1)
assert_array_equal(hist, int_hist)
assert_array_equal(edges, int_edges)
def test_weights(self):
v = np.random.rand(100)
w = np.ones(100) * 5
a, b = histogram(v)
na, nb = histogram(v, density=True)
wa, wb = histogram(v, weights=w)
nwa, nwb = histogram(v, weights=w, density=True)
assert_array_almost_equal(a * 5, wa)
assert_array_almost_equal(na, nwa)
# Check weights are properly applied.
v = np.linspace(0, 10, 10)
w = np.concatenate((np.zeros(5), np.ones(5)))
wa, wb = histogram(v, bins=np.arange(11), weights=w)
assert_array_almost_equal(wa, w)
# Check with integer weights
wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
assert_array_equal(wa, [4, 5, 0, 1])
wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10.0 / 3.0 * 4)
# Check weights with non-uniform bin widths
a, b = histogram(
np.arange(9),
[0, 1, 3, 6, 10],
weights=[2, 1, 1, 1, 1, 1, 1, 1, 1],
density=True,
)
assert_almost_equal(a, [0.2, 0.1, 0.1, 0.075])
@xpassIfTorchDynamo_np # (reason="histogram complex weights")
def test_exotic_weights(self):
# Test the use of weights that are not integer or floats, but e.g.
# complex numbers or object types.
# Complex weights
values = np.array([1.3, 2.5, 2.3])
weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
# Check with custom bins
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
# Check with even bins
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
# Decimal weights
from decimal import Decimal
values = np.array([1.3, 2.5, 2.3])
weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
# Check with custom bins
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
# Check with even bins
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
def test_no_side_effects(self):
# This is a regression test that ensures that values passed to
# ``histogram`` are unchanged.
values = np.array([1.3, 2.5, 2.3])
np.histogram(values, range=[-10, 10], bins=100)
assert_array_almost_equal(values, [1.3, 2.5, 2.3])
def test_empty(self):
a, b = histogram([], bins=([0, 1]))
assert_array_equal(a, np.array([0]))
assert_array_equal(b, np.array([0, 1]))
def test_error_binnum_type(self):
# Tests if right Error is raised if bins argument is float
vals = np.linspace(0.0, 1.0, num=100)
histogram(vals, 5)
assert_raises(TypeError, histogram, vals, 2.4)
def test_finite_range(self):
# Normal ranges should be fine
vals = np.linspace(0.0, 1.0, num=100)
histogram(vals, range=[0.25, 0.75])
assert_raises((RuntimeError, ValueError), histogram, vals, range=[np.nan, 0.75])
assert_raises((RuntimeError, ValueError), histogram, vals, range=[0.25, np.inf])
def test_invalid_range(self):
# start of range must be < end of range
vals = np.linspace(0.0, 1.0, num=100)
with assert_raises((RuntimeError, ValueError)):
np.histogram(vals, range=[0.1, 0.01])
@xfail # (reason="edge cases")
def test_bin_edge_cases(self):
# Ensure that floating-point computations correctly place edge cases.
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
mask = hist > 0
left_edges = edges[:-1][mask]
right_edges = edges[1:][mask]
for x, left, right in zip(arr, left_edges, right_edges):
assert_(x >= left)
assert_(x < right, msg=f"{x}, {right}")
def test_last_bin_inclusive_range(self):
arr = np.array([0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 4.0, 5.0])
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
assert_equal(hist[-1], 1)
def test_bin_array_dims(self):
# gracefully handle bins object > 1 dimension
vals = np.linspace(0.0, 1.0, num=100)
bins = np.array([[0, 0.5], [0.6, 1.0]])
with assert_raises((RuntimeError, ValueError)):
np.histogram(vals, bins=bins)
@xpassIfTorchDynamo_np # (reason="no uint64")
def test_unsigned_monotonicity_check(self):
# Ensures ValueError is raised if bins not increasing monotonically
# when bins contain unsigned values (see #9222)
arr = np.array([2])
bins = np.array([1, 3, 1], dtype="uint64")
with assert_raises((RuntimeError, ValueError)):
hist, edges = np.histogram(arr, bins=bins)
def test_object_array_of_0d(self):
# gh-7864
assert_raises(
(RuntimeError, ValueError),
histogram,
[np.array(0.4) for i in range(10)] + [-np.inf],
)
assert_raises(
(RuntimeError, ValueError),
histogram,
[np.array(0.4) for i in range(10)] + [np.inf],
)
# these should not crash
np.histogram([np.array(0.5) for i in range(10)] + [0.500000000000002])
np.histogram([np.array(0.5) for i in range(10)] + [0.5])
@xpassIfTorchDynamo_np # (reason="bins='auto'")
def test_some_nan_values(self):
# gh-7503
one_nan = np.array([0, 1, np.nan])
all_nan = np.array([np.nan, np.nan])
# the internal comparisons with NaN give warnings
# sup = suppress_warnings()
# sup.filter(RuntimeWarning)
# with sup:
# can't infer range with nan
assert_raises(ValueError, histogram, one_nan, bins="auto")
assert_raises(ValueError, histogram, all_nan, bins="auto")
# explicit range solves the problem
h, b = histogram(one_nan, bins="auto", range=(0, 1))
assert_equal(h.sum(), 2) # nan is not counted
h, b = histogram(all_nan, bins="auto", range=(0, 1))
assert_equal(h.sum(), 0) # nan is not counted
# as does an explicit set of bins
h, b = histogram(one_nan, bins=[0, 1])
assert_equal(h.sum(), 2) # nan is not counted
h, b = histogram(all_nan, bins=[0, 1])
assert_equal(h.sum(), 0) # nan is not counted
def do_signed_overflow_bounds(self, dtype):
exponent = 8 * np.dtype(dtype).itemsize - 1
arr = np.array([-(2**exponent) + 4, 2**exponent - 4], dtype=dtype)
hist, e = histogram(arr, bins=2)
assert_equal(e, [-(2**exponent) + 4, 0, 2**exponent - 4])
assert_equal(hist, [1, 1])
def test_signed_overflow_bounds(self):
self.do_signed_overflow_bounds(np.byte)
self.do_signed_overflow_bounds(np.short)
self.do_signed_overflow_bounds(np.intc)
@xfail # (reason="int->float conversion loses precision")
def test_signed_overflow_bounds_2(self):
self.do_signed_overflow_bounds(np.int_)
self.do_signed_overflow_bounds(np.longlong)
def do_precision_lower_bound(self, float_small, float_large):
eps = np.finfo(float_large).eps
arr = np.array([1.0], float_small)
range = np.array([1.0 + eps, 2.0], float_large)
# test is looking for behavior when the bounds change between dtypes
if range.astype(float_small)[0] != 1:
return
# previously crashed
count, x_loc = np.histogram(arr, bins=1, range=range)
assert_equal(count, [1])
# gh-10322 means that the type comes from arr - this may change
assert_equal(x_loc.dtype, float_small)
def do_precision_upper_bound(self, float_small, float_large):
eps = np.finfo(float_large).eps
arr = np.array([1.0], float_small)
range = np.array([0.0, 1.0 - eps], float_large)
# test is looking for behavior when the bounds change between dtypes
if range.astype(float_small)[-1] != 1:
return
# previously crashed
count, x_loc = np.histogram(arr, bins=1, range=range)
assert_equal(count, [1])
# gh-10322 means that the type comes from arr - this may change
assert_equal(x_loc.dtype, float_small)
def do_precision(self, float_small, float_large):
self.do_precision_lower_bound(float_small, float_large)
self.do_precision_upper_bound(float_small, float_large)
@xpassIfTorchDynamo_np # (reason="mixed dtypes")
def test_precision(self):
# not looping results in a useful stack trace upon failure
self.do_precision(np.half, np.single)
self.do_precision(np.half, np.double)
self.do_precision(np.single, np.double)
@xpassIfTorchDynamo_np # (reason="histogram_bin_edges")
def test_histogram_bin_edges(self):
hist, e = histogram([1, 2, 3, 4], [1, 2])
edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
assert_allclose(edges, e, atol=2e-15)
arr = np.array([0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 4.0, 5.0])
hist, e = histogram(arr, bins=30, range=(-0.5, 5))
edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
assert_allclose(edges, e, atol=2e-15)
hist, e = histogram(arr, bins="auto", range=(0, 1))
edges = histogram_bin_edges(arr, bins="auto", range=(0, 1))
assert_allclose(edges, e, atol=2e-15)
# @requires_memory(free_bytes=1e10)
@xpassIfTorchDynamo_np # (reason="pytorch does not support bins = [int, int, array]")
@slow
def test_big_arrays(self):
sample = np.zeros([100000000, 3])
xbins = 400
ybins = 400
zbins = np.arange(16000)
hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins))
assert_equal(type(hist), type((1, 2)))
@xpassIfTorchDynamo_np # (reason="TODO")
@instantiate_parametrized_tests
|
TestHistogram
|
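A quick sketch of the density invariant the suite above checks: with density=True the area under the histogram sums to 1.
import numpy as np

v = np.random.rand(100)
counts, edges = np.histogram(v, bins=10, range=(0, 1), density=True)
print(float(np.sum(counts * np.diff(edges))))  # -> 1.0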
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job_manager.py
|
{
"start": 12687,
"end": 16194
}
|
class ____:
def test_refresh_throttle_uses_max_and_pings_account(self, mocker, api):
# Arrange: per_account=42, per_application=77 -> expect 77
api.api.ads_insights_throttle = MyFacebookAdsApi.Throttle(42.0, 77.0)
acct = mocker.Mock()
api.get_account.return_value = acct
limit = APILimit(api=api, account_id="act_123")
limit.refresh_throttle()
api.get_account.assert_called_once_with(account_id="act_123")
acct.get_insights.assert_called_once() # the "ping"
assert limit.current_throttle == 77.0
def test_try_consume_success_and_release_accounting(self, mocker, api):
# Arrange: very low throttle so we never block on throttle
api.api.ads_insights_throttle = MyFacebookAdsApi.Throttle(0.0, 0.0)
api.get_account.return_value = mocker.Mock()
limit = APILimit(api=api, account_id="act_1", throttle_limit=90.0, max_jobs=2)
# Act / Assert
assert limit.try_consume() is True
assert limit.inflight == 1
assert limit.try_consume() is True
assert limit.inflight == 2
# At max_jobs => third should fail even with low throttle
assert limit.try_consume() is False
assert limit.inflight == 2
# Release twice brings inflight back to 0
limit.release()
limit.release()
assert limit.inflight == 0
# Extra release must not underflow
limit.release()
assert limit.inflight == 0
# refresh_throttle gets called inside try_consume (via get_account().get_insights())
assert api.get_account.call_count >= 2
def test_try_consume_blocks_on_throttle(self, mocker, api):
# Arrange: throttle too high -> block
api.api.ads_insights_throttle = MyFacebookAdsApi.Throttle(95.0, 10.0) # max()=95 >= limit
api.get_account.return_value = mocker.Mock()
limit = APILimit(api=api, account_id="act_2", throttle_limit=90.0, max_jobs=10)
# First attempt: blocked by throttle
assert limit.try_consume() is False
assert limit.inflight == 0
# Lower throttle -> allow
api.api.ads_insights_throttle = MyFacebookAdsApi.Throttle(10.0, 5.0)
assert limit.try_consume() is True
assert limit.inflight == 1
def test_limit_reached_property_works_for_both_conditions(self, api):
limit = APILimit(api=api, account_id="act_3", throttle_limit=80.0, max_jobs=1)
# Neither inflight nor throttle blocking
limit._current_throttle = 0.0
limit._inflight = 0
assert limit.limit_reached is False
# Concurrency cap reached
limit._inflight = 1
limit._current_throttle = 0.0
assert limit.limit_reached is True
# Throttle cap reached
limit._inflight = 0
limit._current_throttle = 80.0
assert limit.limit_reached is True
def test_try_consume_does_not_refresh_when_capacity_reached(self, mocker, api):
"""
When inflight >= max_jobs, try_consume must short-circuit (no throttle ping).
"""
acct = mocker.Mock()
api.get_account.return_value = acct
limit = APILimit(api=api, account_id="act_x", throttle_limit=90.0, max_jobs=1)
# Simulate capacity reached
limit._inflight = 1
ok = limit.try_consume()
assert ok is False
# No throttle refresh should have been attempted
api.get_account.assert_not_called()
|
TestAPILimit
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/methodOverride4.py
|
{
"start": 1107,
"end": 1247
}
|
class ____(BaseC):
# This should generate an error because of the upper bound.
def method1[T: SubclassC](self, x: T) -> T: ...
|
SubclassC
|
python
|
viewflow__viewflow
|
viewflow/this_object.py
|
{
"start": 360,
"end": 1390
}
|
class ____:
"""
Reference to a method for forward references in class bodies.
This class is used to defer the resolution of a method reference until the
class is fully constructed. This is particularly useful in workflow or state
machine implementations where the flow references are declared before the
methods are defined.
"""
def __init__(self, propname: str, methodname: str):
self._propname = propname
self._methodname = methodname
def resolve(self, instance) -> Any:
"""
Resolve the method reference on the given instance.
Args:
instance (object): The instance on which to resolve the method.
Returns:
Any: The result of the resolved method call.
Raises:
AttributeError: If the property or method does not exist on the instance.
"""
prop = getattr(instance, self._propname)
method = getattr(instance, f"_this_{self._methodname}")
return method(prop)
|
ThisMethod
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/c/_ast.py
|
{
"start": 41191,
"end": 42620
}
|
class ____(ASTDeclarator):
def __init__(self, declId: ASTNestedName, size: ASTExpression) -> None:
self.declId = declId
self.size = size
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTDeclaratorNameBitField):
return NotImplemented
return self.declId == other.declId and self.size == other.size
def __hash__(self) -> int:
return hash((self.declId, self.size))
@property
def name(self) -> ASTNestedName:
return self.declId
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
res.extend((' : ', transform(self.size)))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation(':', ':')
signode += addnodes.desc_sig_space()
self.size.describe_signature(signode, mode, env, symbol)
|
ASTDeclaratorNameBitField
|
python
|
PyCQA__pylint
|
tests/functional/p/protected_access.py
|
{
"start": 368,
"end": 561
}
|
class ____:
def __init__(self):
self._meta = 42
self._manager = 24
self._teta = 29
OBJ = Protected()
OBJ._meta
OBJ._manager
OBJ._teta # [protected-access]
|
Protected
|
python
|
pytorch__pytorch
|
test/dynamo/test_unspec.py
|
{
"start": 32050,
"end": 32878
}
|
class ____(torch._dynamo.test_case.TestCase):
def test_builtin_functions_on_device(self, device):
def fn(x, scaler):
m = torch.nn.ReLU()
m.to(device)
y = m(x) * scaler
return y
x = torch.randn([3, 6], device=device)
scaler = 0.23 # 0.23 is unspecialized
ref = fn(x, scaler)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
res = opt_fn(x, scaler)
self.assertTrue(same(ref, res))
self.assertEqual(ref.device, res.device)
devices = ["cuda", "hpu", "xpu"]
instantiate_device_type_tests(
UnspecTestsDevice, globals(), only_for=devices, allow_xpu=True
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
UnspecTestsDevice
|
python
|
doocs__leetcode
|
solution/1900-1999/1926.Nearest Exit from Entrance in Maze/Solution.py
|
{
"start": 0,
"end": 723
}
|
class ____:
def nearestExit(self, maze: List[List[str]], entrance: List[int]) -> int:
m, n = len(maze), len(maze[0])
i, j = entrance
q = deque([(i, j)])
maze[i][j] = "+"
ans = 0
while q:
ans += 1
for _ in range(len(q)):
i, j = q.popleft()
for a, b in [[0, -1], [0, 1], [-1, 0], [1, 0]]:
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and maze[x][y] == ".":
if x == 0 or x == m - 1 or y == 0 or y == n - 1:
return ans
q.append((x, y))
maze[x][y] = "+"
return -1
|
Solution
|
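A usage sketch for the BFS solution above, assuming the masked class is instantiated under its target name; note that the method needs deque from collections and List from typing, and that it mutates the maze in place by marking visited cells with "+".
from collections import deque
from typing import List

maze = [["+", "+", ".", "+"],
        [".", ".", ".", "+"],
        ["+", "+", "+", "."]]
print(Solution().nearestExit(maze, [1, 2]))  # -> 1 (nearest exit is the opening at row 0, column 2)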
python
|
getsentry__sentry
|
src/sentry/utils/sentry_apps/request_buffer.py
|
{
"start": 1468,
"end": 7558
}
|
class ____:
"""
Create a data structure to store basic information about Sentry App webhook requests in Redis
This should store the last 100 requests and last 100 errors (in different keys) for each event type, for each Sentry App
"""
def __init__(self, sentry_app: SentryApp | RpcSentryApp) -> None:
self.sentry_app = sentry_app
cluster_id = settings.SENTRY_WEBHOOK_LOG_REDIS_CLUSTER
self.client = redis.redis_clusters.get(cluster_id)
def _get_redis_key(self, event: str, error: bool = False) -> str:
sentry_app_id = self.sentry_app.id
if error:
return f"sentry-app-webhook-error:{{{sentry_app_id}}}:{event}"
else:
return f"sentry-app-webhook-request:{{{sentry_app_id}}}:{event}"
def _convert_redis_request(self, redis_request: str, event: str) -> SentryAppRequest:
"""
Convert the request string stored in Redis to a python dict
Add the event type to the dict so that the request can be identified correctly
"""
request = json.loads(redis_request)
request["event_type"] = event
return request
def _add_to_buffer_pipeline(
self, buffer_key: str, item: object, pipeline: Pipeline[str]
) -> None:
"""
Add the item to the buffer key specified, using the given pipeline.
This does not execute the pipeline's commands.
"""
pipeline.lpush(buffer_key, json.dumps(item))
pipeline.ltrim(buffer_key, 0, BUFFER_SIZE - 1)
pipeline.expire(buffer_key, KEY_EXPIRY)
@overload
def _get_all_from_buffer(self, buffer_key: str, pipeline: Pipeline[str]) -> None: ...
@overload
def _get_all_from_buffer(self, buffer_key: str) -> list[str]: ...
def _get_all_from_buffer(
self, buffer_key: str, pipeline: Pipeline[str] | None = None
) -> list[str] | None:
"""
Get the list at the buffer key, using the given pipeline if available.
If a pipeline is provided, this does not return a value as the pipeline must still be executed.
"""
if pipeline is not None:
pipeline.lrange(buffer_key, 0, BUFFER_SIZE - 1)
return None
else:
return self.client.lrange(buffer_key, 0, BUFFER_SIZE - 1)
def _get_requests(
self, event: str | list[str] | None = None, error: bool = False
) -> list[SentryAppRequest]:
if isinstance(event, str):
event = [event]
# If no event is specified, return the latest requests/errors for all event types
event_types = event or EXTENDED_VALID_EVENTS
pipe = self.client.pipeline()
all_requests: list[SentryAppRequest] = []
for evt in event_types:
self._get_all_from_buffer(self._get_redis_key(evt, error=error), pipeline=pipe)
values = pipe.execute()
for idx, evt in enumerate(event_types):
event_requests = [self._convert_redis_request(request, evt) for request in values[idx]]
all_requests.extend(event_requests)
all_requests.sort(key=lambda x: parse_date(x["date"]), reverse=True)
return all_requests[0:BUFFER_SIZE]
def get_requests(
self, event: str | list[str] | None = None, errors_only: bool = False
) -> list[SentryAppRequest]:
return self._get_requests(event=event, error=errors_only)
def add_request(
self,
response_code: int,
org_id: int,
event: str,
url: str,
error_id: str | None = None,
project_id: int | None = None,
response: Response | None = None,
headers: Mapping[str, str] | None = None,
) -> None:
from sentry.utils.sentry_apps.webhooks import TIMEOUT_STATUS_CODE
if event not in EXTENDED_VALID_EVENTS:
logger.warning("Event %s is not a valid event that can be stored.", event)
return
request_key = self._get_redis_key(event)
time = timezone.now()
request_data = {
"date": str(time),
"response_code": response_code,
"webhook_url": url,
}
MAX_SIZE = 1024
if response_code >= 400 or response_code == TIMEOUT_STATUS_CODE:
if headers:
request_data["request_headers"] = headers
if response is not None:
if response.content is not None:
try:
json.loads(response.content)
# if the type is jsonifiable, treat it as such
prettified_response_body = json.dumps(response.content)
request_data["response_body"] = prettified_response_body[:MAX_SIZE]
except (json.JSONDecodeError, TypeError):
request_data["response_body"] = response.content[:MAX_SIZE]
if response.request is not None:
request_body = response.request.body
if request_body is not None:
prettified_request_body = json.dumps(request_body)
request_data["request_body"] = prettified_request_body[:MAX_SIZE]
# Don't store the org id for internal apps because it will always be the org that owns the app anyway
if not self.sentry_app.is_internal:
request_data["organization_id"] = org_id
# We need both the error ID and project ID to link the error
if error_id is not None and project_id is not None:
request_data["error_id"] = error_id
request_data["project_id"] = project_id
pipe = self.client.pipeline()
self._add_to_buffer_pipeline(request_key, request_data, pipe)
# If it's an error add it to the error buffer
if 400 <= response_code <= 599 or response_code == TIMEOUT_STATUS_CODE:
error_key = self._get_redis_key(event, error=True)
self._add_to_buffer_pipeline(error_key, request_data, pipe)
pipe.execute()
|
SentryAppWebhookRequestsBuffer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/distributions/uniform.py
|
{
"start": 1319,
"end": 7093
}
|
class ____(distribution.Distribution):
"""Uniform distribution with `low` and `high` parameters.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; a, b) = I[a <= x < b] / Z
Z = b - a
```
where
- `low = a`,
- `high = b`,
- `Z` is the normalizing constant, and
- `I[predicate]` is the [indicator function](
https://en.wikipedia.org/wiki/Indicator_function) for `predicate`.
The parameters `low` and `high` must be shaped in a way that supports
broadcasting (e.g., `high - low` is a valid operation).
#### Examples
```python
# Without broadcasting:
u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4]
u2 = Uniform(low=[1.0, 2.0],
high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4]
u3 = Uniform(low=[[1.0, 2.0],
[3.0, 4.0]],
high=[[1.5, 2.5],
[3.5, 4.5]]) # 4 distributions
```
```python
# With broadcasting:
u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
low=0.,
high=1.,
validate_args=False,
allow_nan_stats=True,
name="Uniform"):
"""Initialize a batch of Uniform distributions.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
InvalidArgumentError: if `low >= high` and `validate_args=False`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[low, high]) as name:
with ops.control_dependencies([
check_ops.assert_less(
low, high, message="uniform not defined when low >= high.")
] if validate_args else []):
self._low = array_ops.identity(low, name="low")
self._high = array_ops.identity(high, name="high")
check_ops.assert_same_float_dtype([self._low, self._high])
super(Uniform, self).__init__(
dtype=self._low.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._low,
self._high],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("low", "high"),
([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))
@property
def low(self):
"""Lower boundary of the output interval."""
return self._low
@property
def high(self):
"""Upper boundary of the output interval."""
return self._high
def range(self, name="range"):
"""`high - low`."""
with self._name_scope(name):
return self.high - self.low
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.low),
array_ops.shape(self.high))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.low.get_shape(),
self.high.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
return self.low + self.range() * samples
def _prob(self, x):
broadcasted_x = x * array_ops.ones(
self.batch_shape_tensor(), dtype=x.dtype)
return array_ops.where_v2(
math_ops.is_nan(broadcasted_x), broadcasted_x,
array_ops.where_v2(
math_ops.logical_or(broadcasted_x < self.low,
broadcasted_x >= self.high),
array_ops.zeros_like(broadcasted_x),
array_ops.ones_like(broadcasted_x) / self.range()))
def _cdf(self, x):
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(x), self.batch_shape_tensor())
zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype)
ones = array_ops.ones(broadcast_shape, dtype=self.dtype)
broadcasted_x = x * ones
result_if_not_big = array_ops.where_v2(
x < self.low, zeros, (broadcasted_x - self.low) / self.range())
return array_ops.where_v2(x >= self.high, ones, result_if_not_big)
def _entropy(self):
return math_ops.log(self.range())
def _mean(self):
return (self.low + self.high) / 2.
def _variance(self):
return math_ops.square(self.range()) / 12.
def _stddev(self):
return self.range() / math.sqrt(12.)
|
Uniform
|
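A quick numeric sanity check of the mean and variance formulas implemented above, done in plain NumPy to avoid the deprecated tf.distributions API.
import numpy as np

low, high = 3.0, 4.0
samples = np.random.uniform(low, high, size=200_000)
print(samples.mean(), (low + high) / 2)        # both ~3.5
print(samples.var(), (high - low) ** 2 / 12)   # both ~0.0833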
python
|
dask__dask
|
dask/dataframe/dask_expr/_indexing.py
|
{
"start": 10703,
"end": 14359
}
|
class ____(LocBase):
_projection_passthrough = True
@functools.cached_property
def start(self):
if self.iindexer.start is not None:
start = _get_partitions(self.frame, self.iindexer.start)
else:
start = 0
return start
@functools.cached_property
def stop(self):
if self.iindexer.stop is not None:
stop = _get_partitions(self.frame, self.iindexer.stop)
else:
stop = self.frame.npartitions - 1
return stop
@functools.cached_property
def istart(self):
if self.iindexer.start is None and self.frame.known_divisions:
istart = (
self.frame.divisions[0]
if self.iindexer.stop is None
else min(self.frame.divisions[0], self.iindexer.stop)
)
else:
istart = coerce_loc_index(self.frame, self.iindexer.start)
return istart
@functools.cached_property
def istop(self):
if self.iindexer.stop is None and self.frame.known_divisions:
istop = (
self.frame.divisions[-1]
if self.iindexer.start is None
else max(self.frame.divisions[-1], self.iindexer.start)
)
else:
istop = coerce_loc_index(self.frame, self.iindexer.stop)
return istop
def _divisions(self):
if self.stop == self.start:
return (self.istart, self.istop)
if self.iindexer.start is None:
div_start = self.frame.divisions[0]
else:
div_start = max(self.istart, self.frame.divisions[self.start])
if self.iindexer.stop is None:
div_stop = self.frame.divisions[-1]
else:
div_stop = min(self.istop, self.frame.divisions[self.stop + 1])
return (
(div_start,)
+ self.frame.divisions[self.start + 1 : self.stop + 1]
+ (div_stop,)
)
def _lower(self):
parts = list(range(self.start, self.stop + 1))
if self.frame.npartitions == len(parts):
return
return type(self)(Partitions(self.frame, parts), self.iindexer, self.cindexer)
def _layer(self) -> dict:
if self.stop == self.start:
return {
(self._name, 0): Task(
(self._name, 0),
methods.loc,
TaskRef((self.frame._name, self.start)),
slice(self.iindexer.start, self.iindexer.stop),
self.cindexer,
)
}
dsk = {
(self._name, 0): Task(
(self._name, 0),
methods.loc,
TaskRef((self.frame._name, self.start)),
slice(self.iindexer.start, None),
self.cindexer,
)
}
for i in range(1, self.stop - self.start):
if self.cindexer is None:
dsk[self._name, i] = Alias((self.frame._name, self.start + i)) # type: ignore[assignment]
else:
dsk[self._name, i] = Task(
(self._name, i),
methods.loc,
TaskRef((self.frame._name, self.start + i)),
slice(None, None),
self.cindexer,
)
dsk[self._name, self.stop - self.start] = Task(
(self._name, self.stop - self.start),
methods.loc,
TaskRef((self.frame._name, self.stop)),
slice(None, self.iindexer.stop),
self.cindexer,
)
return dsk
|
LocSlice
|
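Side note on the LocSlice source above: a label slice is resolved to the first and last partitions that can contain it, and the outermost divisions are clipped to the slice bounds. A small, purely illustrative sketch of that bookkeeping (it is not dask's `_get_partitions` helper and ignores its edge cases):

import bisect

def loc_slice_divisions(divisions, start, stop):
    # Find the partitions that may hold labels in [start, stop] and clip the
    # outer divisions, in the spirit of LocSlice._divisions above.
    npartitions = len(divisions) - 1
    first = 0 if start is None else max(bisect.bisect_right(divisions, start) - 1, 0)
    last = (npartitions - 1 if stop is None
            else min(bisect.bisect_right(divisions, stop) - 1, npartitions - 1))
    new_divisions = (
        (divisions[0] if start is None else start,)
        + divisions[first + 1 : last + 1]
        + (divisions[-1] if stop is None else stop,)
    )
    return list(range(first, last + 1)), new_divisions

# Divisions (0, 10, 20, 30) describe 3 partitions; .loc[5:25] touches all three.
print(loc_slice_divisions((0, 10, 20, 30), 5, 25))  # ([0, 1, 2], (5, 10, 20, 25))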
python
|
keras-team__keras
|
keras/src/ops/image_test.py
|
{
"start": 490,
"end": 8937
}
|
class ____(testing.TestCase):
def setUp(self):
# Defaults to channels_last
self.data_format = backend.image_data_format()
backend.set_image_data_format("channels_last")
return super().setUp()
def tearDown(self):
backend.set_image_data_format(self.data_format)
return super().tearDown()
def test_rgb_to_grayscale(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
out = kimage.rgb_to_grayscale(x)
self.assertEqual(out.shape, (None, 20, 20, 1))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
out = kimage.rgb_to_grayscale(x)
self.assertEqual(out.shape, (None, 1, 20, 20))
def test_rgb_to_hsv(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
out = kimage.rgb_to_hsv(x)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
out = kimage.rgb_to_hsv(x)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_hsv_to_rgb(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
out = kimage.hsv_to_rgb(x)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
out = kimage.hsv_to_rgb(x)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_resize(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (None, 15, 15, 3))
x = KerasTensor([None, None, 3])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (15, 15, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (None, 3, 15, 15))
x = KerasTensor([3, None, None])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (3, 15, 15))
def test_affine_transform(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
transform = KerasTensor([None, 8])
out = kimage.affine_transform(x, transform)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
transform = KerasTensor([None, 8])
out = kimage.affine_transform(x, transform)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_extract_patches(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
p_h, p_w = 5, 5
out = kimage.extract_patches(x, (p_h, p_w))
self.assertEqual(out.shape, (None, 4, 4, 75))
out = kimage.extract_patches(x, 5)
self.assertEqual(out.shape, (None, 4, 4, 75))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
p_h, p_w = 5, 5
out = kimage.extract_patches(x, (p_h, p_w))
self.assertEqual(out.shape, (None, 75, 4, 4))
out = kimage.extract_patches(x, 5)
self.assertEqual(out.shape, (None, 75, 4, 4))
def test_extract_patches_3d(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 20, 3])
p_d, p_h, p_w = 5, 5, 5
out = kimage.extract_patches_3d(x, (p_d, p_h, p_w))
self.assertEqual(out.shape, (None, 4, 4, 4, 375))
out = kimage.extract_patches_3d(x, 5)
self.assertEqual(out.shape, (None, 4, 4, 4, 375))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20, 20])
p_d, p_h, p_w = 5, 5, 5
out = kimage.extract_patches_3d(x, (p_d, p_h, p_w))
self.assertEqual(out.shape, (None, 375, 4, 4, 4))
out = kimage.extract_patches_3d(x, 5)
self.assertEqual(out.shape, (None, 375, 4, 4, 4))
def test_map_coordinates(self):
input = KerasTensor([20, 20, None])
coordinates = KerasTensor([3, 15, 15, None])
out = kimage.map_coordinates(input, coordinates, 0)
self.assertEqual(out.shape, coordinates.shape[1:])
def test_pad_images(self):
# Test channels_last
x = KerasTensor([None, 15, 25, 3])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (None, 20, 30, 3))
x = KerasTensor([None, None, 3])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (20, 30, 3))
# Test unknown shape
x = KerasTensor([None, None, 3])
out = kimage.pad_images(x, 2, 3, 2, 3)
self.assertEqual(out.shape, (None, None, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 15, 25])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (None, 3, 20, 30))
x = KerasTensor([3, None, None])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (3, 20, 30))
def test_crop_images(self):
# Test channels_last
x = KerasTensor([None, 15, 25, 3])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (None, 10, 20, 3))
x = KerasTensor([None, None, 3])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (10, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 15, 25])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (None, 3, 10, 20))
x = KerasTensor([3, None, None])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (3, 10, 20))
def test_perspective_transform(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
start_points = KerasTensor([None, 4, 2])
end_points = KerasTensor([None, 4, 2])
out = kimage.perspective_transform(x, start_points, end_points)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
start_points = KerasTensor([None, 4, 2])
end_points = KerasTensor([None, 4, 2])
out = kimage.perspective_transform(x, start_points, end_points)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_gaussian_blur(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
out = kimage.gaussian_blur(x)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
out = kimage.gaussian_blur(x)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_elastic_transform(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
out = kimage.elastic_transform(x)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
out = kimage.elastic_transform(x)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_scale_and_translate(self):
images = KerasTensor([None, 20, 20, 3])
output_shape = (None, 25, 25, 3)
scale = KerasTensor([2])
translation = KerasTensor([2])
out = kimage.scale_and_translate(
images,
output_shape=output_shape,
scale=scale,
translation=translation,
spatial_dims=(1, 2),
method="linear",
)
self.assertEqual(out.shape, output_shape)
|
ImageOpsDynamicShapeTest
|
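Side note on the Keras test above: it only exercises static shape inference on `KerasTensor`s, but the same ops run eagerly on real arrays. A short usage sketch, assuming Keras 3's `keras.ops.image` namespace and the default channels_last layout:

import numpy as np
from keras import ops

batch = np.random.uniform(size=(2, 20, 20, 3)).astype("float32")  # channels_last
resized = ops.image.resize(batch, size=(15, 15))
gray = ops.image.rgb_to_grayscale(batch)
print(resized.shape, gray.shape)  # (2, 15, 15, 3) (2, 20, 20, 1)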
python
|
huggingface__transformers
|
tests/models/owlvit/test_modeling_owlvit.py
|
{
"start": 4434,
"end": 7268
}
|
class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as OWLVIT does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (OwlViTVisionModel,) if is_torch_available() else ()
test_resize_embeddings = False
def setUp(self):
self.model_tester = OwlViTVisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=OwlViTVisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="OWLVIT does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
|
OwlViTVisionModelTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-replacements-to-sort-the-array.py
|
{
"start": 44,
"end": 435
}
|
class ____(object):
def minimumReplacement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
result = 0
curr = nums[-1]
for x in reversed(nums):
cnt = ceil_divide(x, curr)
result += cnt-1
curr = x//cnt
return result
|
Solution
|
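Side note on the solution above (LeetCode 2366): walking right to left, each element x must be split into cnt = ceil(x / curr) pieces no larger than the running bound, which costs cnt - 1 replacement operations, and the new bound becomes x // cnt, the smallest piece of a balanced split. A quick check, using the class name given in the target column:

print(Solution().minimumReplacement([3, 9, 3]))        # 2: split 9 into 3 + 3 + 3
print(Solution().minimumReplacement([1, 2, 3, 4, 5]))  # 0: already non-decreasing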
python
|
GoogleCloudPlatform__python-docs-samples
|
service_extensions/callouts/add_header/server.py
|
{
"start": 2672,
"end": 3737
}
|
class ____(service_pb2_grpc.ExternalProcessorServicer):
def Process(
self,
request_iterator: Iterator[service_pb2.ProcessingRequest],
context: ServicerContext,
) -> Iterator[service_pb2.ProcessingResponse]:
"Process the client request and add example headers"
for request in request_iterator:
if request.HasField("response_headers"):
response_header_mutation = add_headers_mutation(
[("hello", "service-extensions")]
)
yield service_pb2.ProcessingResponse(
response_headers=response_header_mutation
)
elif request.HasField("request_headers"):
request_header_mutation = add_headers_mutation(
[("host", "service-extensions.com"), (":path", "/")],
clear_route_cache=True,
)
yield service_pb2.ProcessingResponse(
request_headers=request_header_mutation
)
|
CalloutProcessor
|
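Side note on the callout servicer above: it only defines the `Process` stream handler, so serving it still needs the usual gRPC bootstrapping. A hedged sketch, assuming the generated `service_pb2_grpc` module from the snippet's imports follows gRPC's standard `add_<Service>Servicer_to_server` naming and that an insecure local port is acceptable:

from concurrent import futures
import grpc

def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    # Helper name assumed from gRPC codegen conventions for the
    # ExternalProcessor service; adjust if the generated stubs differ.
    service_pb2_grpc.add_ExternalProcessorServicer_to_server(CalloutProcessor(), server)
    server.add_insecure_port("[::]:8080")
    server.start()
    server.wait_for_termination()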
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/worksheet/test_write_sheet_views3.py
|
{
"start": 301,
"end": 4762
}
|
class ____(unittest.TestCase):
"""
Test the Worksheet _write_sheet_views() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_views1(self):
"""Test the _write_sheet_views() method with split panes"""
self.worksheet.select()
self.worksheet.split_panes(15, 0)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="600" topLeftCell="A2"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views2(self):
"""Test the _write_sheet_views() method with split panes"""
self.worksheet.select()
self.worksheet.split_panes(30, 0)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="900" topLeftCell="A3"/><selection pane="bottomLeft" activeCell="A3" sqref="A3"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views3(self):
"""Test the _write_sheet_views() method with split panes"""
self.worksheet.select()
self.worksheet.split_panes(105, 0)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="2400" topLeftCell="A8"/><selection pane="bottomLeft" activeCell="A8" sqref="A8"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views4(self):
"""Test the _write_sheet_views() method with split panes"""
self.worksheet.select()
self.worksheet.split_panes(0, 8.43)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1350" topLeftCell="B1"/><selection pane="topRight" activeCell="B1" sqref="B1"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views5(self):
"""Test the _write_sheet_views() method with split panes"""
self.worksheet.select()
self.worksheet.split_panes(0, 17.57)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="2310" topLeftCell="C1"/><selection pane="topRight" activeCell="C1" sqref="C1"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views6(self):
"""Test the _write_sheet_views() method with split panes"""
self.worksheet.select()
self.worksheet.split_panes(0, 45)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="5190" topLeftCell="F1"/><selection pane="topRight" activeCell="F1" sqref="F1"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views7(self):
"""Test the _write_sheet_views() method with split panes"""
self.worksheet.select()
self.worksheet.split_panes(15, 8.43)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1350" ySplit="600" topLeftCell="B2"/><selection pane="topRight" activeCell="B1" sqref="B1"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/><selection pane="bottomRight" activeCell="B2" sqref="B2"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views8(self):
"""Test the _write_sheet_views() method with split panes"""
self.worksheet.select()
self.worksheet.split_panes(45, 54.14)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6150" ySplit="1200" topLeftCell="G4"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight" activeCell="G4" sqref="G4"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestWriteSheetViews
|
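Side note on the pane tests above: `split_panes(y, x)` takes the vertical split position in row-height units and the horizontal one in column-width units, and the expected XML strings encode those positions as the `ySplit`/`xSplit` pane coordinates. A minimal end-to-end usage sketch (the file name is arbitrary):

import xlsxwriter

workbook = xlsxwriter.Workbook("panes_demo.xlsx")
worksheet = workbook.add_worksheet()
worksheet.write("A1", "header")
# Split after one default row (height 15) and one default column (width 8.43),
# matching test_write_sheet_views7 above.
worksheet.split_panes(15, 8.43)
workbook.close()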
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py
|
{
"start": 23619,
"end": 27392
}
|
class ____(test.TestCase):
def _checkProperties(self, input_shape, block_shape, base_paddings, paddings,
crops):
"""Checks that `paddings` and `crops` satisfy invariants."""
num_block_dims = len(block_shape)
self.assertEqual(len(input_shape), num_block_dims)
if base_paddings is None:
base_paddings = np.zeros((num_block_dims, 2), np.int32)
self.assertEqual(base_paddings.shape, (num_block_dims, 2))
self.assertEqual(paddings.shape, (num_block_dims, 2))
self.assertEqual(crops.shape, (num_block_dims, 2))
for i in range(num_block_dims):
self.assertEqual(paddings[i, 0], base_paddings[i, 0])
self.assertLessEqual(0, paddings[i, 1] - base_paddings[i, 1])
self.assertLess(paddings[i, 1] - base_paddings[i, 1], block_shape[i])
self.assertEqual(
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i],
0)
self.assertEqual(crops[i, 0], 0)
self.assertEqual(crops[i, 1], paddings[i, 1] - base_paddings[i, 1])
def _test(self, input_shape, block_shape, base_paddings):
input_shape = np.array(input_shape)
block_shape = np.array(block_shape)
if base_paddings is not None:
base_paddings = np.array(base_paddings)
# Check with constants.
paddings, crops = array_ops.required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings)
paddings_const = tensor_util.constant_value(paddings)
crops_const = tensor_util.constant_value(crops)
self.assertIsNotNone(paddings_const)
self.assertIsNotNone(crops_const)
self._checkProperties(input_shape, block_shape, base_paddings,
paddings_const, crops_const)
# Check with non-constants.
assignments = {}
input_shape_placeholder = array_ops.placeholder(dtypes.int32)
assignments[input_shape_placeholder] = input_shape
block_shape_placeholder = array_ops.placeholder(dtypes.int32,
[len(block_shape)])
assignments[block_shape_placeholder] = block_shape
if base_paddings is not None:
base_paddings_placeholder = array_ops.placeholder(dtypes.int32,
[len(block_shape), 2])
assignments[base_paddings_placeholder] = base_paddings
else:
base_paddings_placeholder = None
t_paddings, t_crops = array_ops.required_space_to_batch_paddings(
input_shape_placeholder, block_shape_placeholder,
base_paddings_placeholder)
with self.cached_session():
paddings_result = t_paddings.eval(assignments)
crops_result = t_crops.eval(assignments)
self.assertAllEqual(paddings_result, paddings_const)
self.assertAllEqual(crops_result, crops_const)
@test_util.run_deprecated_v1
def testSimple(self):
self._test(
input_shape=np.zeros((0,), np.int32),
block_shape=np.zeros((0,), np.int32),
base_paddings=None)
self._test(
input_shape=np.zeros((0,), np.int32),
block_shape=np.zeros((0,), np.int32),
base_paddings=np.zeros((0, 2), np.int32))
self._test(input_shape=[1], block_shape=[2], base_paddings=None)
self._test(input_shape=[1], block_shape=[2], base_paddings=[[1, 0]])
self._test(input_shape=[3], block_shape=[1], base_paddings=[[1, 2]])
self._test(input_shape=[1], block_shape=[2], base_paddings=[[2, 3]])
self._test(input_shape=[4, 5], block_shape=[3, 2], base_paddings=None)
self._test(
input_shape=[4, 5], block_shape=[3, 2], base_paddings=[[0, 0], [0, 1]])
if __name__ == "__main__":
test.main()
|
RequiredSpaceToBatchPaddingsTest
|
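Side note on the paddings test above: `_checkProperties` pins down the contract: `paddings` keeps the requested base padding at the start, grows the end padding just enough to make each padded dimension divisible by its block size, and `crops` removes only that extra amount. A pure-Python, one-dimensional sketch of the invariant (not TensorFlow's implementation):

def required_paddings_1d(input_size, block, base_pad=(0, 0)):
    pad_start, base_end = base_pad
    padded = input_size + pad_start + base_end
    extra = (-padded) % block  # grow the end pad up to the next multiple of block
    return (pad_start, base_end + extra), (0, extra)

print(required_paddings_1d(5, 2, (0, 1)))  # ((0, 1), (0, 0)): 5 + 1 is already divisible by 2
print(required_paddings_1d(4, 3))          # ((0, 2), (0, 2)): pad 4 up to 6, crop the 2 extras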
python
|
plotly__plotly.py
|
plotly/graph_objs/_icicle.py
|
{
"start": 215,
"end": 66356
}
|
class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "icicle"
_valid_props = {
"branchvalues",
"count",
"customdata",
"customdatasrc",
"domain",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextfont",
"labels",
"labelssrc",
"leaf",
"legend",
"legendgrouptitle",
"legendrank",
"legendwidth",
"level",
"marker",
"maxdepth",
"meta",
"metasrc",
"name",
"opacity",
"outsidetextfont",
"parents",
"parentssrc",
"pathbar",
"root",
"sort",
"stream",
"text",
"textfont",
"textinfo",
"textposition",
"textsrc",
"texttemplate",
"texttemplatefallback",
"texttemplatesrc",
"tiling",
"type",
"uid",
"uirevision",
"values",
"valuessrc",
"visible",
}
@property
def branchvalues(self):
"""
Determines how the items in `values` are summed. When set to
"total", items in `values` are taken to be value of all its
descendants. When set to "remainder", items in `values`
corresponding to the root and the branches sectors are taken to
be the extra part not part of the sum of the values at their
leaves.
The 'branchvalues' property is an enumeration that may be specified as:
- One of the following enumeration values:
['remainder', 'total']
Returns
-------
Any
"""
return self["branchvalues"]
@branchvalues.setter
def branchvalues(self, val):
self["branchvalues"] = val
@property
def count(self):
"""
Determines default for `values` when it is not provided, by
inferring a 1 for each of the "leaves" and/or "branches",
otherwise 0.
The 'count' property is a flaglist and may be specified
as a string containing:
- Any combination of ['branches', 'leaves'] joined with '+' characters
(e.g. 'branches+leaves')
Returns
-------
Any
"""
return self["count"]
@count.setter
def count(self, val):
self["count"] = val
@property
def customdata(self):
"""
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note that
"scatter" traces also append customdata items in the markers'
DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Returns
-------
plotly.graph_objs.icicle.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'name', 'current path', 'percent root', 'percent entry', 'percent parent'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.icicle.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appears
in the hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variables
`currentPath`, `root`, `entry`, `percentRoot`, `percentEntry`
and `percentParent`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Sets hover text elements associated with each sector. If a
single string, the same string appears for all data points. If
an array of string, the items are mapped in order of this
trace's sectors. To be seen, trace `hoverinfo` must contain a
"text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids are used for object
constancy of data points during animation. They should be an
array of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def insidetextfont(self):
"""
Sets the font used for `textinfo` lying inside the sector.
The 'insidetextfont' property is an instance of Insidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Insidetextfont`
- A dict of string/value properties that will be passed
to the Insidetextfont constructor
Returns
-------
plotly.graph_objs.icicle.Insidetextfont
"""
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
@property
def labels(self):
"""
Sets the labels of each of the sectors.
The 'labels' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["labels"]
@labels.setter
def labels(self, val):
self["labels"] = val
@property
def labelssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `labels`.
The 'labelssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelssrc"]
@labelssrc.setter
def labelssrc(self, val):
self["labelssrc"] = val
@property
def leaf(self):
"""
The 'leaf' property is an instance of Leaf
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Leaf`
- A dict of string/value properties that will be passed
to the Leaf constructor
Returns
-------
plotly.graph_objs.icicle.Leaf
"""
return self["leaf"]
@leaf.setter
def leaf(self, val):
self["leaf"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.icicle.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def level(self):
"""
Sets the level from which this trace hierarchy is rendered. Set
`level` to `''` to start from the root node in the hierarchy.
Must be an "id" if `ids` is filled in, otherwise plotly
attempts to find a matching item in `labels`.
The 'level' property accepts values of any type
Returns
-------
Any
"""
return self["level"]
@level.setter
def level(self, val):
self["level"] = val
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.icicle.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def maxdepth(self):
"""
Sets the number of rendered sectors from any given `level`. Set
`maxdepth` to "-1" to render all the levels in the hierarchy.
The 'maxdepth' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["maxdepth"]
@maxdepth.setter
def maxdepth(self, val):
self["maxdepth"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def outsidetextfont(self):
"""
Sets the font used for `textinfo` lying outside the sector.
This option refers to the root of the hierarchy presented on
top left corner of a treemap graph. Please note that if a
hierarchy has multiple root nodes, this option won't have any
effect and `insidetextfont` would be used.
The 'outsidetextfont' property is an instance of Outsidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Outsidetextfont`
- A dict of string/value properties that will be passed
to the Outsidetextfont constructor
Returns
-------
plotly.graph_objs.icicle.Outsidetextfont
"""
return self["outsidetextfont"]
@outsidetextfont.setter
def outsidetextfont(self, val):
self["outsidetextfont"] = val
@property
def parents(self):
"""
Sets the parent sectors for each of the sectors. Empty string
items '' are understood to reference the root node in the
hierarchy. If `ids` is filled, `parents` items are understood
to be "ids" themselves. When `ids` is not set, plotly attempts
to find matching items in `labels`, but beware they must be
unique.
The 'parents' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["parents"]
@parents.setter
def parents(self, val):
self["parents"] = val
@property
def parentssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `parents`.
The 'parentssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["parentssrc"]
@parentssrc.setter
def parentssrc(self, val):
self["parentssrc"] = val
@property
def pathbar(self):
"""
The 'pathbar' property is an instance of Pathbar
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Pathbar`
- A dict of string/value properties that will be passed
to the Pathbar constructor
Returns
-------
plotly.graph_objs.icicle.Pathbar
"""
return self["pathbar"]
@pathbar.setter
def pathbar(self, val):
self["pathbar"] = val
@property
def root(self):
"""
The 'root' property is an instance of Root
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Root`
- A dict of string/value properties that will be passed
to the Root constructor
Returns
-------
plotly.graph_objs.icicle.Root
"""
return self["root"]
@root.setter
def root(self, val):
self["root"] = val
@property
def sort(self):
"""
Determines whether or not the sectors are reordered from
largest to smallest.
The 'sort' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["sort"]
@sort.setter
def sort(self, val):
self["sort"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.icicle.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
"""
Sets text elements associated with each sector. If trace
`textinfo` contains a "text" flag, these elements will be seen
on the chart. If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in the
hover labels.
The 'text' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textfont(self):
"""
Sets the font used for `textinfo`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.icicle.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def textinfo(self):
"""
Determines which trace information appear on the graph.
The 'textinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'current path', 'percent root', 'percent entry', 'percent parent'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["textinfo"]
@textinfo.setter
def textinfo(self, val):
self["textinfo"] = val
@property
def textposition(self):
"""
Sets the positions of the `text` elements.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
Returns
-------
Any
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
appears on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`) are
available. Finally, the template string has access to variables
`currentPath`, `root`, `entry`, `percentRoot`, `percentEntry`,
`percentParent`, `label` and `value`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
@property
def texttemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'texttemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["texttemplatefallback"]
@texttemplatefallback.setter
def texttemplatefallback(self, val):
self["texttemplatefallback"] = val
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
@property
def tiling(self):
"""
The 'tiling' property is an instance of Tiling
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.Tiling`
- A dict of string/value properties that will be passed
to the Tiling constructor
Returns
-------
plotly.graph_objs.icicle.Tiling
"""
return self["tiling"]
@tiling.setter
def tiling(self, val):
self["tiling"] = val
@property
def uid(self):
"""
Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def values(self):
"""
Sets the values associated with each of the sectors. Use with
`branchvalues` to determine how the values are summed.
The 'values' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["values"]
@values.setter
def values(self, val):
self["values"] = val
@property
def valuessrc(self):
"""
Sets the source reference on Chart Studio Cloud for `values`.
The 'valuessrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["valuessrc"]
@valuessrc.setter
def valuessrc(self, val):
self["valuessrc"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def type(self):
return self._props["type"]
@property
def _prop_descriptions(self):
return """\
branchvalues
Determines how the items in `values` are summed. When
set to "total", items in `values` are taken to be value
of all its descendants. When set to "remainder", items
in `values` corresponding to the root and the branches
sectors are taken to be the extra part not part of the
sum of the values at their leaves.
count
Determines default for `values` when it is not
provided, by inferring a 1 for each of the "leaves"
and/or "branches", otherwise 0.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items in
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
domain
:class:`plotly.graph_objects.icicle.Domain` instance or
dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.icicle.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `currentPath`, `root`, `entry`,
`percentRoot`, `percentEntry` and `percentParent`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids are used for
object constancy of data points during animation. They
should be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
labels
Sets the labels of each of the sectors.
labelssrc
Sets the source reference on Chart Studio Cloud for
`labels`.
leaf
:class:`plotly.graph_objects.icicle.Leaf` instance or
dict with compatible properties
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgrouptitle
:class:`plotly.graph_objects.icicle.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
level
Sets the level from which this trace hierarchy is
rendered. Set `level` to `''` to start from the root
node in the hierarchy. Must be an "id" if `ids` is
filled in, otherwise plotly attempts to find a matching
item in `labels`.
marker
:class:`plotly.graph_objects.icicle.Marker` instance or
dict with compatible properties
maxdepth
Sets the number of rendered sectors from any given
`level`. Set `maxdepth` to "-1" to render all the
levels in the hierarchy.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector. This option refers to the root of the hierarchy
presented on top left corner of a treemap graph. Please
note that if a hierarchy has multiple root nodes, this
option won't have any effect and `insidetextfont` would
be used.
parents
Sets the parent sectors for each of the sectors. Empty
string items '' are understood to reference the root
node in the hierarchy. If `ids` is filled, `parents`
items are understood to be "ids" themselves. When `ids`
is not set, plotly attempts to find matching items in
`labels`, but beware they must be unique.
parentssrc
Sets the source reference on Chart Studio Cloud for
`parents`.
pathbar
:class:`plotly.graph_objects.icicle.Pathbar` instance
or dict with compatible properties
root
:class:`plotly.graph_objects.icicle.Root` instance or
dict with compatible properties
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.icicle.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Sets the positions of the `text` elements.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `currentPath`, `root`, `entry`,
`percentRoot`, `percentEntry`, `percentParent`, `label`
and `value`.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
tiling
:class:`plotly.graph_objects.icicle.Tiling` instance or
dict with compatible properties
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values associated with each of the sectors.
Use with `branchvalues` to determine how the values are
summed.
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
branchvalues=None,
count=None,
customdata=None,
customdatasrc=None,
domain=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
labels=None,
labelssrc=None,
leaf=None,
legend=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
level=None,
marker=None,
maxdepth=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
outsidetextfont=None,
parents=None,
parentssrc=None,
pathbar=None,
root=None,
sort=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textposition=None,
textsrc=None,
texttemplate=None,
texttemplatefallback=None,
texttemplatesrc=None,
tiling=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
**kwargs,
):
"""
Construct a new Icicle object
Visualize hierarchical data from leaves (and/or outer branches)
towards root with rectangles. The icicle sectors are determined
by the entries in "labels" or "ids" and in "parents".
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Icicle`
branchvalues
Determines how the items in `values` are summed. When
set to "total", items in `values` are taken to be value
of all its descendants. When set to "remainder", items
in `values` corresponding to the root and the branches
sectors are taken to be the extra part not part of the
sum of the values at their leaves.
count
Determines default for `values` when it is not
provided, by inferring a 1 for each of the "leaves"
and/or "branches", otherwise 0.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items in
the markers' DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
domain
:class:`plotly.graph_objects.icicle.Domain` instance or
dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.icicle.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `currentPath`, `root`, `entry`,
`percentRoot`, `percentEntry` and `percentParent`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids are used for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
labels
Sets the labels of each of the sectors.
labelssrc
Sets the source reference on Chart Studio Cloud for
`labels`.
leaf
:class:`plotly.graph_objects.icicle.Leaf` instance or
dict with compatible properties
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgrouptitle
:class:`plotly.graph_objects.icicle.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
level
Sets the level from which this trace hierarchy is
rendered. Set `level` to `''` to start from the root
node in the hierarchy. Must be an "id" if `ids` is
filled in, otherwise plotly attempts to find a matching
item in `labels`.
marker
:class:`plotly.graph_objects.icicle.Marker` instance or
dict with compatible properties
maxdepth
Sets the number of rendered sectors from any given
`level`. Set `maxdepth` to "-1" to render all the
levels in the hierarchy.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector. This option refers to the root of the hierarchy
presented in the top left corner of a treemap graph. Please
note that if a hierarchy has multiple root nodes, this
option won't have any effect and `insidetextfont` would
be used.
parents
Sets the parent sectors for each of the sectors. Empty
string items '' are understood to reference the root
node in the hierarchy. If `ids` is filled, `parents`
items are understood to be "ids" themselves. When `ids`
is not set, plotly attempts to find matching items in
`labels`, but beware they must be unique.
parentssrc
Sets the source reference on Chart Studio Cloud for
`parents`.
pathbar
:class:`plotly.graph_objects.icicle.Pathbar` instance
or dict with compatible properties
root
:class:`plotly.graph_objects.icicle.Root` instance or
dict with compatible properties
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.icicle.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Sets the positions of the `text` elements.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `currentPath`, `root`, `entry`,
`percentRoot`, `percentEntry`, `percentParent`, `label`
and `value`.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
tiling
:class:`plotly.graph_objects.icicle.Tiling` instance or
dict with compatible properties
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values associated with each of the sectors.
Use with `branchvalues` to determine how the values are
summed.
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Icicle
"""
super().__init__("icicle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Icicle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Icicle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("branchvalues", arg, branchvalues)
self._set_property("count", arg, count)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("domain", arg, domain)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("insidetextfont", arg, insidetextfont)
self._set_property("labels", arg, labels)
self._set_property("labelssrc", arg, labelssrc)
self._set_property("leaf", arg, leaf)
self._set_property("legend", arg, legend)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("level", arg, level)
self._set_property("marker", arg, marker)
self._set_property("maxdepth", arg, maxdepth)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("outsidetextfont", arg, outsidetextfont)
self._set_property("parents", arg, parents)
self._set_property("parentssrc", arg, parentssrc)
self._set_property("pathbar", arg, pathbar)
self._set_property("root", arg, root)
self._set_property("sort", arg, sort)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textfont", arg, textfont)
self._set_property("textinfo", arg, textinfo)
self._set_property("textposition", arg, textposition)
self._set_property("textsrc", arg, textsrc)
self._set_property("texttemplate", arg, texttemplate)
self._set_property("texttemplatefallback", arg, texttemplatefallback)
self._set_property("texttemplatesrc", arg, texttemplatesrc)
self._set_property("tiling", arg, tiling)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("values", arg, values)
self._set_property("valuessrc", arg, valuessrc)
self._set_property("visible", arg, visible)
self._props["type"] = "icicle"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Icicle
|
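Usage sketch (editor's addition, not part of the dataset row above): a minimal, hedged example of wiring up the `labels`/`parents`/`values` attributes described in the Icicle docstring. The hierarchy and the numbers are invented for illustration.

import plotly.graph_objects as go

# Hypothetical three-level hierarchy: one root, two branches, three leaves.
fig = go.Figure(
    go.Icicle(
        labels=["root", "A", "B", "A1", "A2", "B1"],
        parents=["", "root", "root", "A", "A", "B"],
        values=[11, 6, 5, 4, 2, 5],
        branchvalues="total",  # each parent's value covers the sum of its descendants
        texttemplate="%{label}: %{value}",
    )
)
# fig.show()  # render in a notebook or browser session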
python
|
django__django
|
tests/generic_relations/models.py
|
{
"start": 852,
"end": 934
}
|
class ____(TaggedItem):
value = models.PositiveIntegerField()
|
ValuableTaggedItem
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/context.py
|
{
"start": 32066,
"end": 35797
}
|
class ____(GroupedElement, Generative, TypedReturnsRows[Unpack[_Ts]]):
"""Core construct that represents a load of ORM objects from various
:class:`.ReturnsRows` and other classes including:
:class:`.Select`, :class:`.TextClause`, :class:`.TextualSelect`,
:class:`.CompoundSelect`, :class:`.Insert`, :class:`.Update`,
and in theory, :class:`.Delete`.
"""
__visit_name__ = "orm_from_statement"
_compile_options = _ORMFromStatementCompileState.default_compile_options
_compile_state_factory = _ORMFromStatementCompileState.create_for_statement
_for_update_arg = None
element: Union[ExecutableReturnsRows, TextClause]
_adapt_on_names: bool
_traverse_internals = [
("_raw_columns", InternalTraversal.dp_clauseelement_list),
("element", InternalTraversal.dp_clauseelement),
] + ExecutableStatement._executable_traverse_internals
_cache_key_traversal = _traverse_internals + [
("_compile_options", InternalTraversal.dp_has_cache_key)
]
is_from_statement = True
def __init__(
self,
entities: Iterable[_ColumnsClauseArgument[Any]],
element: Union[ExecutableReturnsRows, TextClause],
_adapt_on_names: bool = True,
):
self._raw_columns = [
coercions.expect(
roles.ColumnsClauseRole,
ent,
apply_propagate_attrs=self,
post_inspect=True,
)
for ent in util.to_list(entities)
]
self.element = element
self.is_dml = element.is_dml
self.is_select = element.is_select
self.is_delete = element.is_delete
self.is_insert = element.is_insert
self.is_update = element.is_update
self._label_style = (
element._label_style if is_select_base(element) else None
)
self._adapt_on_names = _adapt_on_names
def _compiler_dispatch(self, compiler, **kw):
"""provide a fixed _compiler_dispatch method.
This is roughly similar to using the sqlalchemy.ext.compiler
``@compiles`` extension.
"""
compile_state = self._compile_state_factory(self, compiler, **kw)
toplevel = not compiler.stack
if toplevel:
compiler.compile_state = compile_state
return compiler.process(compile_state.statement, **kw)
@property
def column_descriptions(self):
"""Return a :term:`plugin-enabled` 'column descriptions' structure
referring to the columns which are SELECTed by this statement.
See the section :ref:`queryguide_inspection` for an overview
of this feature.
.. seealso::
:ref:`queryguide_inspection` - ORM background
"""
meth = cast(
_ORMSelectCompileState, SelectState.get_plugin_class(self)
).get_column_descriptions
return meth(self)
def _ensure_disambiguated_names(self):
return self
def get_children(self, **kw):
yield from itertools.chain.from_iterable(
element._from_objects for element in self._raw_columns
)
yield from super().get_children(**kw)
@property
def _all_selected_columns(self):
return self.element._all_selected_columns
@property
def _return_defaults(self):
return self.element._return_defaults if is_dml(self.element) else None
@property
def _returning(self):
return self.element._returning if is_dml(self.element) else None
@property
def _inline(self):
return self.element._inline if is_insert_update(self.element) else None
@sql.base.CompileState.plugin_for("orm", "compound_select")
|
FromStatement
|
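For orientation (editor's addition): `FromStatement` is normally produced indirectly, by calling `.from_statement()` on a `select()` construct, rather than instantiated directly. A hedged sketch assuming SQLAlchemy 2.0-style declarative mapping; the `User` model and the raw SQL are invented for illustration.

from sqlalchemy import create_engine, select, text
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class User(Base):
    __tablename__ = "user_account"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(User(name="spongebob"))
    session.commit()

    # select(...).from_statement(...) yields a FromStatement that adapts the
    # textual columns onto the User entity by name.
    stmt = select(User).from_statement(
        text("SELECT id, name FROM user_account WHERE name = :name")
    )
    users = session.scalars(stmt, {"name": "spongebob"}).all()
    print(users)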
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 11784,
"end": 12053
}
|
class ____(PydanticValueError):
code = 'decimal.max_digits'
msg_template = 'ensure that there are no more than {max_digits} digits in total'
def __init__(self, *, max_digits: int) -> None:
super().__init__(max_digits=max_digits)
|
DecimalMaxDigitsError
|
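A tiny check (editor's addition) of how the error's message template is rendered; this assumes pydantic 2.x, where the v1 compatibility layer lives under `pydantic.v1`.

from pydantic.v1.errors import DecimalMaxDigitsError

err = DecimalMaxDigitsError(max_digits=4)
print(str(err))  # ensure that there are no more than 4 digits in total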
python
|
pytorch__pytorch
|
test/dynamo/test_modules.py
|
{
"start": 14976,
"end": 15460
}
|
class ____(LazyModuleMixin, torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def initialize_parameters(self, input):
with torch.no_grad():
self._param = torch.nn.Parameter(
torch.empty(input.x["a"][0].shape).fill_(0.5)
)
def forward(self, input):
input = input.x["a"]
x = 0
for i in range(len(input)):
x = x + input[i]
return x
|
LazyLayerWithNamedTupleInput
|
python
|
sanic-org__sanic
|
sanic/server/websockets/impl.py
|
{
"start": 962,
"end": 36326
}
|
class ____:
ws_proto: ServerProtocol
io_proto: Optional[SanicProtocol]
loop: Optional[asyncio.AbstractEventLoop]
max_queue: int
close_timeout: float
ping_interval: Optional[float]
ping_timeout: Optional[float]
assembler: WebsocketFrameAssembler
# Dict[bytes, asyncio.Future[None]]
pings: dict[bytes, asyncio.Future]
conn_mutex: asyncio.Lock
recv_lock: asyncio.Lock
recv_cancel: Optional[asyncio.Future]
process_event_mutex: asyncio.Lock
can_pause: bool
# Optional[asyncio.Future[None]]
data_finished_fut: Optional[asyncio.Future]
# Optional[asyncio.Future[None]]
pause_frame_fut: Optional[asyncio.Future]
# Optional[asyncio.Future[None]]
connection_lost_waiter: Optional[asyncio.Future]
keepalive_ping_task: Optional[asyncio.Task]
auto_closer_task: Optional[asyncio.Task]
def __init__(
self,
ws_proto,
max_queue=None,
ping_interval: Optional[float] = 20,
ping_timeout: Optional[float] = 20,
close_timeout: float = 10,
loop=None,
):
self.ws_proto = ws_proto
self.io_proto = None
self.loop = None
self.max_queue = max_queue
self.close_timeout = close_timeout
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
self.assembler = WebsocketFrameAssembler(self)
self.pings = {}
self.conn_mutex = asyncio.Lock()
self.recv_lock = asyncio.Lock()
self.recv_cancel = None
self.process_event_mutex = asyncio.Lock()
self.data_finished_fut = None
self.can_pause = True
self.pause_frame_fut = None
self.keepalive_ping_task = None
self.auto_closer_task = None
self.connection_lost_waiter = None
@property
def subprotocol(self):
return self.ws_proto.subprotocol
def pause_frames(self):
if not self.can_pause:
return False
if self.pause_frame_fut:
websockets_logger.debug("Websocket connection already paused.")
return False
if (not self.loop) or (not self.io_proto):
return False
if self.io_proto.transport:
self.io_proto.transport.pause_reading()
self.pause_frame_fut = self.loop.create_future()
websockets_logger.debug("Websocket connection paused.")
return True
def resume_frames(self):
if not self.pause_frame_fut:
websockets_logger.debug("Websocket connection not paused.")
return False
if (not self.loop) or (not self.io_proto):
websockets_logger.debug(
"Websocket attempting to resume reading frames, "
"but connection is gone."
)
return False
if self.io_proto.transport:
self.io_proto.transport.resume_reading()
self.pause_frame_fut.set_result(None)
self.pause_frame_fut = None
websockets_logger.debug("Websocket connection unpaused.")
return True
async def connection_made(
self,
io_proto: SanicProtocol,
loop: Optional[asyncio.AbstractEventLoop] = None,
):
if not loop:
try:
loop = getattr(io_proto, "loop")
except AttributeError:
loop = asyncio.get_event_loop()
if not loop:
# This catch is for mypy type checker
# to assert loop is not None here.
raise ServerError("Connection received with no asyncio loop.")
if self.auto_closer_task:
raise ServerError(
"Cannot call connection_made more than once "
"on a websocket connection."
)
self.loop = loop
self.io_proto = io_proto
self.connection_lost_waiter = self.loop.create_future()
self.data_finished_fut = asyncio.shield(self.loop.create_future())
if self.ping_interval:
self.keepalive_ping_task = asyncio.create_task(
self.keepalive_ping()
)
self.auto_closer_task = asyncio.create_task(
self.auto_close_connection()
)
async def wait_for_connection_lost(self, timeout=None) -> bool:
"""
Wait until the TCP connection is closed or ``timeout`` elapses.
If timeout is None, wait forever.
It is recommended to pass self.close_timeout as the timeout.
Return ``True`` if the connection is closed and ``False`` otherwise.
"""
if not self.connection_lost_waiter:
return False
if self.connection_lost_waiter.done():
return True
else:
try:
await asyncio.wait_for(
asyncio.shield(self.connection_lost_waiter), timeout
)
return True
except asyncio.TimeoutError:
# Re-check self.connection_lost_waiter.done() synchronously
# because connection_lost() could run between the moment the
# timeout occurs and the moment this coroutine resumes running
return self.connection_lost_waiter.done()
async def process_events(self, events: Sequence[Event]) -> None:
"""
Process a list of incoming events.
"""
# Wrapped in a mutex lock, to prevent other incoming events
# from processing at the same time
async with self.process_event_mutex:
for event in events:
if not isinstance(event, Frame):
# Event is not a frame. Ignore it.
continue
if event.opcode == Opcode.PONG:
await self.process_pong(event)
elif event.opcode == Opcode.CLOSE:
if self.recv_cancel:
self.recv_cancel.cancel()
else:
await self.assembler.put(event)
async def process_pong(self, frame: Frame) -> None:
if frame.data in self.pings:
# Acknowledge all pings up to the one matching this pong.
ping_ids = []
for ping_id, ping in self.pings.items():
ping_ids.append(ping_id)
if not ping.done():
ping.set_result(None)
if ping_id == frame.data:
break
else: # noqa
raise ServerError("ping_id is not in self.pings")
# Remove acknowledged pings from self.pings.
for ping_id in ping_ids:
del self.pings[ping_id]
async def keepalive_ping(self) -> None:
"""
Send a Ping frame and wait for a Pong frame at regular intervals.
This coroutine exits when the connection terminates and one of the
following happens:
- :meth:`ping` raises :exc:`ConnectionClosed`, or
- :meth:`auto_close_connection` cancels :attr:`keepalive_ping_task`.
"""
if self.ping_interval is None:
return
try:
while True:
await asyncio.sleep(self.ping_interval)
# ping() raises CancelledError if the connection is closed,
# when auto_close_connection() cancels keepalive_ping_task.
# ping() raises ConnectionClosed if the connection is lost,
# when connection_lost() calls abort_pings().
ping_waiter = await self.ping()
if self.ping_timeout is not None:
try:
await asyncio.wait_for(ping_waiter, self.ping_timeout)
except asyncio.TimeoutError:
websockets_logger.warning(
"Websocket timed out waiting for pong"
)
self.fail_connection(1011)
break
except asyncio.CancelledError:
# It is expected for this task to be cancelled during
# normal operation, when the connection is closed.
websockets_logger.debug(
"Websocket keepalive ping task was cancelled."
)
except (ConnectionClosed, WebsocketClosed):
websockets_logger.debug(
"Websocket closed. Keepalive ping task exiting."
)
except Exception as e:
websockets_logger.warning(
"Unexpected exception in websocket keepalive ping task."
)
websockets_logger.debug(str(e))
def _force_disconnect(self) -> bool:
"""
Internal method used by end_connection and fail_connection
only when the graceful auto-closer cannot be used
"""
if self.auto_closer_task and not self.auto_closer_task.done():
self.auto_closer_task.cancel()
if self.data_finished_fut and not self.data_finished_fut.done():
self.data_finished_fut.cancel()
self.data_finished_fut = None
if self.keepalive_ping_task and not self.keepalive_ping_task.done():
self.keepalive_ping_task.cancel()
self.keepalive_ping_task = None
if self.loop and self.io_proto and self.io_proto.transport:
self.io_proto.transport.close()
self.loop.call_later(
self.close_timeout, self.io_proto.transport.abort
)
# We were never open, or already closed
return True
def fail_connection(self, code: int = 1006, reason: str = "") -> bool:
"""
Fail the WebSocket Connection
This requires:
1. Stopping all processing of incoming data, which means cancelling
or pausing the underlying io protocol. The close code will be 1006
unless a close frame was received earlier.
2. Sending a close frame with an appropriate code if the opening
handshake succeeded and the other side is likely to process it.
3. Closing the connection. :meth:`auto_close_connection` takes care
of this.
(The specification describes these steps in the opposite order.)
"""
if self.io_proto and self.io_proto.transport:
# Stop new data coming in
# In Python Version 3.7: pause_reading is idempotent
# i.e. it can be called when the transport is already paused or closed
self.io_proto.transport.pause_reading()
# Keeping fail_connection() synchronous guarantees it can't
# get stuck and simplifies the implementation of the callers.
# Not draining the write buffer is acceptable in this context.
# clear the send buffer
_ = self.ws_proto.data_to_send()
# If we're not already CLOSED or CLOSING, then send the close.
if self.ws_proto.state is OPEN:
if code in (1000, 1001):
self.ws_proto.send_close(code, reason)
else:
self.ws_proto.fail(code, reason)
try:
data_to_send = self.ws_proto.data_to_send()
while (
len(data_to_send)
and self.io_proto
and self.io_proto.transport
):
frame_data = data_to_send.pop(0)
self.io_proto.transport.write(frame_data)
except Exception:
# sending close frames may fail if the
# transport closes during this period
...
if code == 1006:
# Special case: 1006 considers the transport already closed
self.ws_proto.state = CLOSED
if self.data_finished_fut and not self.data_finished_fut.done():
# We have a graceful auto-closer. Use it to close the connection.
self.data_finished_fut.cancel()
self.data_finished_fut = None
if (not self.auto_closer_task) or self.auto_closer_task.done():
return self._force_disconnect()
return False
def end_connection(self, code=1000, reason=""):
# This is a slightly more graceful form of fail_connection
# Use this instead of close() when you need an immediate
# close and cannot await websocket.close() handshake.
if code == 1006 or not self.io_proto or not self.io_proto.transport:
return self.fail_connection(code, reason)
# Stop new data coming in
# In Python Version 3.7: pause_reading is idempotent
# i.e. it can be called when the transport is already paused or closed.
self.io_proto.transport.pause_reading()
if self.ws_proto.state == OPEN:
data_to_send = self.ws_proto.data_to_send()
self.ws_proto.send_close(code, reason)
data_to_send.extend(self.ws_proto.data_to_send())
try:
while (
len(data_to_send)
and self.io_proto
and self.io_proto.transport
):
frame_data = data_to_send.pop(0)
self.io_proto.transport.write(frame_data)
except Exception:
# sending close frames may fail if the
# transport closes during this period
# But that doesn't matter at this point
...
if self.data_finished_fut and not self.data_finished_fut.done():
# We have the ability to signal the auto-closer
# try to trigger it to auto-close the connection
self.data_finished_fut.cancel()
self.data_finished_fut = None
if (not self.auto_closer_task) or self.auto_closer_task.done():
# Auto-closer is not running, do force disconnect
return self._force_disconnect()
return False
async def auto_close_connection(self) -> None:
"""
Close the WebSocket Connection
When the opening handshake succeeds, :meth:`connection_open` starts
this coroutine in a task. It waits for the data transfer phase to
complete then it closes the TCP connection cleanly.
When the opening handshake fails, :meth:`fail_connection` does the
same. There's no data transfer phase in that case.
"""
try:
# Wait for the data transfer phase to complete.
if self.data_finished_fut:
try:
await self.data_finished_fut
websockets_logger.debug(
"Websocket task finished. Closing the connection."
)
except asyncio.CancelledError:
# Cancelled error is called when data phase is cancelled
# if an error occurred or the client closed the connection
websockets_logger.debug(
"Websocket handler cancelled. Closing the connection."
)
# Cancel the keepalive ping task.
if self.keepalive_ping_task:
self.keepalive_ping_task.cancel()
self.keepalive_ping_task = None
# Half-close the TCP connection if possible (when there's no TLS).
if (
self.io_proto
and self.io_proto.transport
and self.io_proto.transport.can_write_eof()
):
websockets_logger.debug(
"Websocket half-closing TCP connection"
)
self.io_proto.transport.write_eof()
if self.connection_lost_waiter:
if await self.wait_for_connection_lost(timeout=0):
return
except asyncio.CancelledError:
...
except BaseException:
websockets_logger.exception("Error closing websocket connection")
finally:
# Does this still exist?
if self.keepalive_ping_task:
self.keepalive_ping_task.cancel()
self.keepalive_ping_task = None
# The try/finally ensures that the transport never remains open,
# even if this coroutine is cancelled (for example).
if (not self.io_proto) or (not self.io_proto.transport):
# we were never open, or done. Can't do any finalization.
return
elif (
self.connection_lost_waiter
and self.connection_lost_waiter.done()
):
# connection confirmed closed already, proceed to abort waiter
...
elif self.io_proto.transport.is_closing():
# Connection is already closing (due to half-close above)
# proceed to abort waiter
...
else:
self.io_proto.transport.close()
if not self.connection_lost_waiter:
# Our connection monitor task isn't running.
try:
await asyncio.sleep(self.close_timeout)
except asyncio.CancelledError:
...
if self.io_proto and self.io_proto.transport:
self.io_proto.transport.abort()
else:
if await self.wait_for_connection_lost(
timeout=self.close_timeout
):
# Connection aborted before the timeout expired.
return
websockets_logger.warning(
"Timeout waiting for TCP connection to close. Aborting"
)
if self.io_proto and self.io_proto.transport:
self.io_proto.transport.abort()
def abort_pings(self) -> None:
"""
Raise ConnectionClosed in pending keepalive pings.
They'll never receive a pong once the connection is closed.
"""
if self.ws_proto.state is not CLOSED:
raise ServerError(
"Webscoket about_pings should only be called "
"after connection state is changed to CLOSED"
)
for ping in self.pings.values():
ping.set_exception(ConnectionClosedError(None, None))
# If the exception is never retrieved, it will be logged when ping
# is garbage-collected. This is confusing for users.
# Given that ping is done (with an exception), canceling it does
# nothing, but it prevents logging the exception.
ping.cancel()
async def close(self, code: int = 1000, reason: str = "") -> None:
"""
Perform the closing handshake.
This is a websocket-protocol level close.
:meth:`close` waits for the other end to complete the handshake and
for the TCP connection to terminate.
:meth:`close` is idempotent: it doesn't do anything once the
connection is closed.
:param code: WebSocket close code
:param reason: WebSocket close reason
"""
if code == 1006:
self.fail_connection(code, reason)
return
async with self.conn_mutex:
if self.ws_proto.state is OPEN:
self.ws_proto.send_close(code, reason)
data_to_send = self.ws_proto.data_to_send()
await self.send_data(data_to_send)
async def recv(self, timeout: Optional[float] = None) -> Optional[Data]:
"""
Receive the next message.
Return a :class:`str` for a text frame and :class:`bytes` for a binary
frame.
When the end of the message stream is reached, :meth:`recv` raises
:exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it
raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal
connection closure and
:exc:`~websockets.exceptions.ConnectionClosedError` after a protocol
error or a network failure.
If ``timeout`` is ``None``, block until a message is received. Else,
if no message is received within ``timeout`` seconds, return ``None``.
Set ``timeout`` to ``0`` to check if a message was already received.
:raises ~websockets.exceptions.ConnectionClosed: when the
connection is closed
:raises asyncio.CancelledError: if the websocket closes while waiting
:raises ServerError: if two tasks call :meth:`recv` or
:meth:`recv_streaming` concurrently
"""
if self.recv_lock.locked():
raise ServerError(
"cannot call recv while another task is "
"already waiting for the next message"
)
await self.recv_lock.acquire()
if self.ws_proto.state is CLOSED:
self.recv_lock.release()
raise WebsocketClosed(
"Cannot receive from websocket interface after it is closed."
)
assembler_get: Optional[asyncio.Task] = None
try:
self.recv_cancel = asyncio.Future()
assembler_get = asyncio.create_task(self.assembler.get(timeout))
tasks = (self.recv_cancel, assembler_get)
done, pending = await asyncio.wait(
tasks,
return_when=asyncio.FIRST_COMPLETED,
)
done_task = next(iter(done))
if done_task is self.recv_cancel:
# recv was cancelled
for p in pending:
p.cancel()
raise asyncio.CancelledError()
else:
self.recv_cancel.cancel()
return done_task.result()
except asyncio.CancelledError:
# recv was cancelled
if assembler_get:
assembler_get.cancel()
raise
finally:
self.recv_cancel = None
self.recv_lock.release()
async def recv_burst(self, max_recv=256) -> Sequence[Data]:
"""
Receive the messages which have arrived since last checking.
Return a :class:`list` containing :class:`str` for a text frame
and :class:`bytes` for a binary frame.
When the end of the message stream is reached, :meth:`recv_burst`
raises :exc:`~websockets.exceptions.ConnectionClosed`. Specifically,
it raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a
normal connection closure and
:exc:`~websockets.exceptions.ConnectionClosedError` after a protocol
error or a network failure.
:raises ~websockets.exceptions.ConnectionClosed: when the
connection is closed
:raises ServerError: if two tasks call :meth:`recv_burst` or
:meth:`recv_streaming` concurrently
"""
if self.recv_lock.locked():
raise ServerError(
"cannot call recv_burst while another task is already waiting "
"for the next message"
)
await self.recv_lock.acquire()
if self.ws_proto.state is CLOSED:
self.recv_lock.release()
raise WebsocketClosed(
"Cannot receive from websocket interface after it is closed."
)
messages = []
assembler_get: Optional[asyncio.Task] = None
try:
# Prevent pausing the transport when we're
# receiving a burst of messages
self.can_pause = False
self.recv_cancel = asyncio.Future()
while True:
assembler_get = asyncio.create_task(self.assembler.get(0))
tasks = (self.recv_cancel, assembler_get)
done, pending = await asyncio.wait(
tasks,
return_when=asyncio.FIRST_COMPLETED,
)
done_task = next(iter(done))
if done_task is self.recv_cancel:
# recv_burst was cancelled
for p in pending:
p.cancel()
raise asyncio.CancelledError()
m = done_task.result()
if m is None:
# None left in the burst. This is good!
break
messages.append(m)
if len(messages) >= max_recv:
# Too much data in the pipe. Hit our burst limit.
break
# Allow an eventloop iteration for the
# next message to pass into the Assembler
await asyncio.sleep(0)
self.recv_cancel.cancel()
except asyncio.CancelledError:
# recv_burst was cancelled
if assembler_get:
assembler_get.cancel()
raise
finally:
self.recv_cancel = None
self.can_pause = True
self.recv_lock.release()
return messages
async def recv_streaming(self) -> AsyncIterator[Data]:
"""
Receive the next message frame by frame.
Return an iterator of :class:`str` for a text frame and :class:`bytes`
for a binary frame. The iterator should be exhausted, or else the
connection will become unusable.
With the exception of the return value, :meth:`recv_streaming` behaves
like :meth:`recv`.
"""
if self.recv_lock.locked():
raise ServerError(
"Cannot call recv_streaming while another task "
"is already waiting for the next message"
)
await self.recv_lock.acquire()
if self.ws_proto.state is CLOSED:
self.recv_lock.release()
raise WebsocketClosed(
"Cannot receive from websocket interface after it is closed."
)
try:
cancelled = False
self.recv_cancel = asyncio.Future()
self.can_pause = False
async for m in self.assembler.get_iter():
if self.recv_cancel.done():
cancelled = True
break
yield m
if cancelled:
raise asyncio.CancelledError()
finally:
self.can_pause = True
self.recv_cancel = None
self.recv_lock.release()
async def send(self, message: Union[Data, Iterable[Data]]) -> None:
"""
Send a message.
A string (:class:`str`) is sent as a `Text frame`_. A bytestring or
bytes-like object (:class:`bytes`, :class:`bytearray`, or
:class:`memoryview`) is sent as a `Binary frame`_.
.. _Text frame: https://tools.ietf.org/html/rfc6455#section-5.6
.. _Binary frame: https://tools.ietf.org/html/rfc6455#section-5.6
:meth:`send` also accepts an iterable of strings, bytestrings, or
bytes-like objects. In that case the message is fragmented. Each item
is treated as a message fragment and sent in its own frame. All items
must be of the same type, or else :meth:`send` will raise a
:exc:`TypeError` and the connection will be closed.
:meth:`send` rejects dict-like objects because this is often an error.
If you wish to send the keys of a dict-like object as fragments, call
its :meth:`~dict.keys` method and pass the result to :meth:`send`.
:raises TypeError: for unsupported inputs
"""
async with self.conn_mutex:
if self.ws_proto.state in (CLOSED, CLOSING):
raise WebsocketClosed(
"Cannot write to websocket interface after it is closed."
)
if (not self.data_finished_fut) or self.data_finished_fut.done():
raise ServerError(
"Cannot write to websocket interface after it is finished."
)
# Unfragmented message -- this case must be handled first because
# strings and bytes-like objects are iterable.
if isinstance(message, str):
self.ws_proto.send_text(message.encode("utf-8"))
await self.send_data(self.ws_proto.data_to_send())
elif isinstance(message, (bytes, bytearray, memoryview)):
self.ws_proto.send_binary(message)
await self.send_data(self.ws_proto.data_to_send())
elif isinstance(message, Mapping):
# Catch a common mistake -- passing a dict to send().
raise TypeError("data is a dict-like object")
elif isinstance(message, Iterable):
# Fragmented message -- regular iterator.
raise NotImplementedError(
"Fragmented websocket messages are not supported."
)
else:
raise TypeError("Websocket data must be bytes, str.")
async def ping(self, data: Optional[Data] = None) -> asyncio.Future:
"""
Send a ping.
Return an :class:`~asyncio.Future` that will be resolved when the
corresponding pong is received. You can ignore it if you don't intend
to wait.
A ping may serve as a keepalive or as a check that the remote endpoint
received all messages up to this point::
pong_event = await ws.ping()
await pong_event # only if you want to wait for the pong
By default, the ping contains four random bytes. This payload may be
overridden with the optional ``data`` argument which must be a string
(which will be encoded to UTF-8) or a bytes-like object.
"""
async with self.conn_mutex:
if self.ws_proto.state in (CLOSED, CLOSING):
raise WebsocketClosed(
"Cannot send a ping when the websocket interface "
"is closed."
)
if (not self.io_proto) or (not self.io_proto.loop):
raise ServerError(
"Cannot send a ping when the websocket has no I/O "
"protocol attached."
)
if data is not None:
if isinstance(data, str):
data = data.encode("utf-8")
elif isinstance(data, (bytearray, memoryview)):
data = bytes(data)
# Protect against duplicates if a payload is explicitly set.
if data in self.pings:
raise ValueError(
"already waiting for a pong with the same data"
)
# Generate a unique random payload otherwise.
while data is None or data in self.pings:
data = struct.pack("!I", random.getrandbits(32))
self.pings[data] = self.io_proto.loop.create_future()
self.ws_proto.send_ping(data)
await self.send_data(self.ws_proto.data_to_send())
return asyncio.shield(self.pings[data])
async def pong(self, data: Data = b"") -> None:
"""
Send a pong.
An unsolicited pong may serve as a unidirectional heartbeat.
The payload may be set with the optional ``data`` argument which must
be a string (which will be encoded to UTF-8) or a bytes-like object.
"""
async with self.conn_mutex:
if self.ws_proto.state in (CLOSED, CLOSING):
# Cannot send pong after transport is shutting down
return
if isinstance(data, str):
data = data.encode("utf-8")
elif isinstance(data, (bytearray, memoryview)):
data = bytes(data)
self.ws_proto.send_pong(data)
await self.send_data(self.ws_proto.data_to_send())
async def send_data(self, data_to_send):
for data in data_to_send:
if data:
await self.io_proto.send(data)
else:
# Send an EOF - We don't actually send it,
# just trigger to autoclose the connection
if (
self.auto_closer_task
and not self.auto_closer_task.done()
and self.data_finished_fut
and not self.data_finished_fut.done()
):
# Auto-close the connection
self.data_finished_fut.set_result(None)
else:
# This will fail the connection appropriately
SanicProtocol.close(self.io_proto, timeout=1.0)
async def async_data_received(self, data_to_send, events_to_process):
if self.ws_proto.state in (OPEN, CLOSING) and len(data_to_send) > 0:
# receiving data can generate data to send (eg, pong for a ping)
# send connection.data_to_send()
await self.send_data(data_to_send)
if len(events_to_process) > 0:
await self.process_events(events_to_process)
def data_received(self, data):
self.ws_proto.receive_data(data)
data_to_send = self.ws_proto.data_to_send()
events_to_process = self.ws_proto.events_received()
if len(data_to_send) > 0 or len(events_to_process) > 0:
asyncio.create_task(
self.async_data_received(data_to_send, events_to_process)
)
async def async_eof_received(self, data_to_send, events_to_process):
# receiving EOF can generate data to send
# send connection.data_to_send()
if self.ws_proto.state in (OPEN, CLOSING) and len(data_to_send) > 0:
await self.send_data(data_to_send)
if len(events_to_process) > 0:
await self.process_events(events_to_process)
if self.recv_cancel:
self.recv_cancel.cancel()
if (
self.auto_closer_task
and not self.auto_closer_task.done()
and self.data_finished_fut
and not self.data_finished_fut.done()
):
# Auto-close the connection
self.data_finished_fut.set_result(None)
# Cancel the running handler if its waiting
else:
# This will fail the connection appropriately
SanicProtocol.close(self.io_proto, timeout=1.0)
def eof_received(self) -> Optional[bool]:
self.ws_proto.receive_eof()
data_to_send = self.ws_proto.data_to_send()
events_to_process = self.ws_proto.events_received()
asyncio.create_task(
self.async_eof_received(data_to_send, events_to_process)
)
return False
def connection_lost(self, exc):
"""
The WebSocket Connection is Closed.
"""
if not self.ws_proto.state == CLOSED:
# signal to the websocket connection handler
# we've lost the connection
self.ws_proto.fail(code=1006)
self.ws_proto.state = CLOSED
self.abort_pings()
if self.connection_lost_waiter:
self.connection_lost_waiter.set_result(None)
async def __aiter__(self):
try:
while True:
yield await self.recv()
except ConnectionClosedOK:
return
|
WebsocketImplProtocol
|
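How this protocol surfaces in application code (editor's addition, hedged): a Sanic websocket handler receives a connection object backed by the implementation above and talks to it through `recv()`/`send()`. The route name and echo behaviour are illustrative only.

from sanic import Sanic

app = Sanic("EchoApp")

@app.websocket("/echo")
async def echo(request, ws):
    # recv() waits for the next text/binary message; send() writes one back.
    while True:
        message = await ws.recv()
        await ws.send(message)

if __name__ == "__main__":
    app.run(host="127.0.0.1", port=8000)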
python
|
doocs__leetcode
|
solution/1300-1399/1382.Balance a Binary Search Tree/Solution.py
|
{
"start": 192,
"end": 768
}
|
class ____:
def balanceBST(self, root: TreeNode) -> TreeNode:
def dfs(root: TreeNode):
if root is None:
return
dfs(root.left)
nums.append(root.val)
dfs(root.right)
def build(i: int, j: int) -> TreeNode:
if i > j:
return None
mid = (i + j) >> 1
left = build(i, mid - 1)
right = build(mid + 1, j)
return TreeNode(nums[mid], left, right)
nums = []
dfs(root)
return build(0, len(nums) - 1)
|
Solution
|
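To exercise the solution above outside the LeetCode harness (editor's addition): the `TreeNode` definition below is the standard LeetCode shape (assumed, not part of the record), and the masked class is referred to by its target name `Solution`.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def depth(node):
    return 0 if node is None else 1 + max(depth(node.left), depth(node.right))

# Degenerate right-leaning tree 1 -> 2 -> 3 -> 4.
root = TreeNode(1, None, TreeNode(2, None, TreeNode(3, None, TreeNode(4))))
balanced = Solution().balanceBST(root)
print(depth(root), depth(balanced))  # 4 3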
python
|
getsentry__sentry
|
src/sentry/hybridcloud/rpc/caching/service.py
|
{
"start": 668,
"end": 1134
}
|
class ____(RpcService):
key = "region_caching"
local_mode = SiloMode.REGION
@classmethod
def get_local_implementation(cls) -> RpcService:
from .impl import LocalRegionCachingService
return LocalRegionCachingService()
@regional_rpc_method(resolve=ByRegionName())
@abc.abstractmethod
def clear_key(self, *, region_name: str, key: str) -> int:
pass
_R = TypeVar("_R", bound=pydantic.BaseModel)
|
RegionCachingService
|
python
|
coleifer__peewee
|
tests/fields.py
|
{
"start": 16928,
"end": 17097
}
|
class ____(TestModel):
first = CharField()
last = CharField()
data = TextField()
class Meta:
primary_key = CompositeKey('first', 'last')
|
Composite
|
python
|
spyder-ide__spyder
|
spyder/plugins/explorer/widgets/explorer.py
|
{
"start": 1877,
"end": 1935
}
|
class ____:
Main = 'Main'
|
DirViewOpenWithSubMenuSections
|
python
|
doocs__leetcode
|
solution/2500-2599/2511.Maximum Enemy Forts That Can Be Captured/Solution.py
|
{
"start": 0,
"end": 392
}
|
class ____:
def captureForts(self, forts: List[int]) -> int:
n = len(forts)
i = ans = 0
while i < n:
j = i + 1
if forts[i]:
while j < n and forts[j] == 0:
j += 1
if j < n and forts[i] + forts[j] == 0:
ans = max(ans, j - i - 1)
i = j
return ans
|
Solution
|
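A quick check of the two-pointer scan above (editor's addition), again using the target name `Solution` for the masked class and assuming `from typing import List` is in scope; the array encodes 1 = our fort, -1 = enemy fort, 0 = empty.

forts = [1, 0, 0, -1, 0, 0, 0, 0, 1]
print(Solution().captureForts(forts))  # 4: indices 4-7 lie between our fort at 8 and the enemy fort at 3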
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/resource_requirement.py
|
{
"start": 6175,
"end": 6617
}
|
class ____(ResourceKeyRequirement):
key: str # pyright: ignore[reportIncompatibleMethodOverride]
attached_to: Optional[str]
hook_name: str
def describe_requirement(self) -> str:
attached_to_desc = f"attached to {self.attached_to}" if self.attached_to else ""
return (
f"resource with key '{self.key}' required by hook '{self.hook_name}' {attached_to_desc}"
)
@record
|
HookResourceRequirement
|
python
|
django__django
|
tests/gis_tests/geoapp/models.py
|
{
"start": 928,
"end": 991
}
|
class ____(NamedModel):
line = models.LineStringField()
|
Track
|
python
|
kamyu104__LeetCode-Solutions
|
Python/synonymous-sentences.py
|
{
"start": 664,
"end": 1814
}
|
class ____(object):
def generateSentences(self, synonyms, text):
"""
:type synonyms: List[List[str]]
:type text: str
:rtype: List[str]
"""
def assign_id(x, lookup, inv_lookup):
if x in lookup:
return
lookup[x] = len(lookup)
inv_lookup[lookup[x]] = x
lookup, inv_lookup = {}, {}
for u, v in synonyms:
assign_id(u, lookup, inv_lookup), assign_id(v, lookup, inv_lookup)
union_find = UnionFind(len(lookup))
for u, v in synonyms:
union_find.union_set(lookup[u], lookup[v])
groups = collections.defaultdict(list)
for i in xrange(len(union_find.set)):
groups[union_find.find_set(i)].append(i)
result = []
for w in text.split(' '):
if w not in lookup:
result.append([w])
continue
result.append(sorted(map(lambda x: inv_lookup[x],
groups[union_find.find_set(lookup[w])])))
return [" ".join(sentense) for sentense in itertools.product(*result)]
|
Solution
|
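The snippet above is Python 2 style (`xrange`) and relies on a `UnionFind` helper defined elsewhere in that repository. A minimal union-find sketch matching the attributes it uses (`set`, `find_set`, `union_set`) might look like the following; this is an editor's reconstruction, not the repository's actual class.

class UnionFind(object):
    def __init__(self, n):
        self.set = list(range(n))  # parent pointers, exposed as `.set`

    def find_set(self, x):
        if self.set[x] != x:
            self.set[x] = self.find_set(self.set[x])  # path compression
        return self.set[x]

    def union_set(self, x, y):
        x_root, y_root = self.find_set(x), self.find_set(y)
        if x_root == y_root:
            return False
        self.set[x_root] = y_root
        return True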
python
|
optuna__optuna
|
optuna/importance/_ped_anova/evaluator.py
|
{
"start": 2393,
"end": 9346
}
|
class ____(BaseImportanceEvaluator):
"""PED-ANOVA importance evaluator.
Implements the PED-ANOVA hyperparameter importance evaluation algorithm.
PED-ANOVA fits Parzen estimators of :class:`~optuna.trial.TrialState.COMPLETE` trials better
than a user-specified baseline. Users can specify the baseline by a quantile.
The importance can be interpreted as how important each hyperparameter is to get
the performance better than baseline.
For further information about PED-ANOVA algorithm, please refer to the following paper:
- `PED-ANOVA: Efficiently Quantifying Hyperparameter Importance in Arbitrary Subspaces
<https://arxiv.org/abs/2304.10255>`__
.. note::
The performance of PED-ANOVA depends on how many trials are considered above the baseline.
To stabilize the analysis, it is preferable to include at least 5 trials above baseline.
.. note::
Please refer to `the original work <https://github.com/nabenabe0928/local-anova>`__.
Args:
baseline_quantile:
Compute the importance of achieving top-``baseline_quantile`` quantile objective value.
For example, ``baseline_quantile=0.1`` means that the importances give the information
of which parameters were important to achieve the top-10% performance during
optimization.
evaluate_on_local:
Whether we measure the importance in the local or global space.
If :obj:`True`, the importances imply how important each parameter is during
optimization. Meanwhile, ``evaluate_on_local=False`` gives the importances in the
specified search_space. ``evaluate_on_local=True`` is especially useful when users
modify search space during optimization.
Example:
An example of using PED-ANOVA is as follows:
.. testcode::
import optuna
from optuna.importance import PedAnovaImportanceEvaluator
def objective(trial):
x1 = trial.suggest_float("x1", -10, 10)
x2 = trial.suggest_float("x2", -10, 10)
return x1 + x2 / 1000
study = optuna.create_study()
study.optimize(objective, n_trials=100)
evaluator = PedAnovaImportanceEvaluator()
importance = optuna.importance.get_param_importances(study, evaluator=evaluator)
"""
def __init__(
self,
*,
baseline_quantile: float = 0.1,
evaluate_on_local: bool = True,
):
assert 0.0 <= baseline_quantile <= 1.0, "baseline_quantile must be in [0, 1]."
self._baseline_quantile = baseline_quantile
self._evaluate_on_local = evaluate_on_local
# Advanced Setups.
# Discretize a domain [low, high] as `np.linspace(low, high, n_steps)`.
self._n_steps: int = 50
# Control the regularization effect by prior.
self._prior_weight = 1.0
# How many `trials` must be included in `top_trials`.
self._min_n_top_trials = 2
def _get_top_trials(
self,
study: Study,
trials: list[FrozenTrial],
params: list[str],
target: Callable[[FrozenTrial], float] | None,
) -> list[FrozenTrial]:
is_lower_better = study.directions[0] == StudyDirection.MINIMIZE
if target is not None:
optuna_warn(
f"{self.__class__.__name__} computes the importances of params to achieve "
"low `target` values. If this is not what you want, "
"please modify target, e.g., by multiplying the output by -1."
)
is_lower_better = True
top_trials = _QuantileFilter(
self._baseline_quantile, is_lower_better, self._min_n_top_trials, target
).filter(trials)
if len(trials) == len(top_trials):
_logger.warning("All trials are in top trials, which gives equal importances.")
return top_trials
def _compute_pearson_divergence(
self,
param_name: str,
dist: BaseDistribution,
top_trials: list[FrozenTrial],
all_trials: list[FrozenTrial],
) -> float:
# When pdf_all == pdf_top, i.e. all_trials == top_trials, this method will give 0.0.
prior_weight = self._prior_weight
pe_top = _build_parzen_estimator(param_name, dist, top_trials, self._n_steps, prior_weight)
# NOTE: pe_top.n_steps could be different from self._n_steps.
grids = np.arange(pe_top.n_steps)
pdf_top = pe_top.pdf(grids) + 1e-12
if self._evaluate_on_local: # The importance of param during the study.
pe_local = _build_parzen_estimator(
param_name, dist, all_trials, self._n_steps, prior_weight
)
pdf_local = pe_local.pdf(grids) + 1e-12
else: # The importance of param in the search space.
pdf_local = np.full(pe_top.n_steps, 1.0 / pe_top.n_steps)
return float(pdf_local @ ((pdf_top / pdf_local - 1) ** 2))
def evaluate(
self,
study: Study,
params: list[str] | None = None,
*,
target: Callable[[FrozenTrial], float] | None = None,
) -> dict[str, float]:
dists = _get_distributions(study, params=params)
if params is None:
params = list(dists.keys())
assert params is not None
# PED-ANOVA does not support parameter distributions with a single value,
# because the importance of such params become zero.
non_single_dists = {name: dist for name, dist in dists.items() if not dist.single()}
single_dists = {name: dist for name, dist in dists.items() if dist.single()}
if len(non_single_dists) == 0:
return {}
trials = _get_filtered_trials(study, params=params, target=target)
n_params = len(non_single_dists)
# The following should be tested at _get_filtered_trials.
assert target is not None or max([len(t.values) for t in trials], default=1) == 1
if len(trials) <= self._min_n_top_trials:
param_importances = {k: 1.0 / n_params for k in non_single_dists}
param_importances.update({k: 0.0 for k in single_dists})
return {k: 0.0 for k in param_importances}
top_trials = self._get_top_trials(study, trials, params, target)
quantile = len(top_trials) / len(trials)
importance_sum = 0.0
param_importances = {}
for param_name, dist in non_single_dists.items():
param_importances[param_name] = quantile * self._compute_pearson_divergence(
param_name, dist, top_trials=top_trials, all_trials=trials
)
importance_sum += param_importances[param_name]
param_importances.update({k: 0.0 for k in single_dists})
return _sort_dict_by_importance(param_importances)
|
PedAnovaImportanceEvaluator
|
python
|
python__mypy
|
test-data/unit/plugins/class_callable.py
|
{
"start": 223,
"end": 1288
}
|
class ____(Plugin):
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
if fullname.startswith("mod.Attr"):
return attr_hook
return None
def attr_hook(ctx: FunctionContext) -> Type:
default = get_proper_type(ctx.default_return_type)
assert isinstance(default, Instance)
if default.type.fullname == "mod.Attr":
attr_base = default
else:
attr_base = None
for base in default.type.bases:
if base.type.fullname == "mod.Attr":
attr_base = base
break
assert attr_base is not None
last_arg_exprs = ctx.args[-1]
if any(isinstance(expr, NameExpr) and expr.name == "True" for expr in last_arg_exprs):
return attr_base
assert len(attr_base.args) == 1
arg_type = attr_base.args[0]
return Instance(
attr_base.type,
[UnionType([arg_type, NoneType()])],
line=default.line,
column=default.column,
)
def plugin(version: str) -> type[AttrPlugin]:
return AttrPlugin
|
AttrPlugin
|
python
|
getsentry__sentry
|
src/sentry/preprod/vcs/status_checks/size/tasks.py
|
{
"start": 14122,
"end": 21655
}
|
class ____(_StatusCheckProvider):
def create_status_check(
self,
repo: str,
sha: str,
status: StatusCheckStatus,
title: str,
subtitle: str,
text: str | None,
summary: str,
external_id: str,
started_at: datetime,
completed_at: datetime | None = None,
target_url: str | None = None,
) -> str | None:
with self._create_scm_interaction_event().capture() as lifecycle:
mapped_status = GITHUB_STATUS_CHECK_STATUS_MAPPING.get(status)
mapped_conclusion = GITHUB_STATUS_CHECK_CONCLUSION_MAPPING.get(status)
if not mapped_status:
logger.error(
"preprod.status_checks.create.invalid_status_mapping",
extra={"status": status},
)
return None
truncated_text = _truncate_to_byte_limit(text, GITHUB_MAX_TEXT_FIELD_LENGTH)
truncated_summary = _truncate_to_byte_limit(summary, GITHUB_MAX_SUMMARY_FIELD_LENGTH)
if text and truncated_text and len(truncated_text) != len(text):
logger.warning(
"preprod.status_checks.create.text_truncated",
extra={
"original_bytes": len(text.encode("utf-8")),
"truncated_bytes": len(truncated_text.encode("utf-8")),
"organization_id": self.organization_id,
"organization_slug": self.organization_slug,
},
)
if summary and truncated_summary and len(truncated_summary) != len(summary):
logger.warning(
"preprod.status_checks.create.summary_truncated",
extra={
"original_bytes": len(summary.encode("utf-8")),
"truncated_bytes": len(truncated_summary.encode("utf-8")),
"organization_id": self.organization_id,
"organization_slug": self.organization_slug,
},
)
check_data: dict[str, Any] = {
"name": title,
"head_sha": sha,
"external_id": external_id,
"output": {
"title": subtitle,
"summary": truncated_summary,
},
"status": mapped_status.value,
}
if truncated_text:
check_data["output"]["text"] = truncated_text
if mapped_conclusion:
check_data["conclusion"] = mapped_conclusion.value
if started_at:
check_data["started_at"] = started_at.isoformat()
if completed_at:
check_data["completed_at"] = completed_at.isoformat()
if target_url:
if target_url.startswith("http"):
check_data["details_url"] = target_url
else:
logger.warning(
"preprod.status_checks.create.invalid_target_url",
extra={"target_url": target_url},
)
try:
response = self.client.create_check_run(repo=repo, data=check_data)
check_id = response.get("id")
return str(check_id) if check_id else None
except ApiError as e:
lifecycle.record_failure(e)
# Only convert specific permission-related 403s to IntegrationConfigurationError
# GitHub can return 403 for various reasons (rate limits, temporary issues, permissions)
if e.code == 403:
error_message = str(e).lower()
if (
"resource not accessible" in error_message
or "insufficient" in error_message
or "permission" in error_message
):
logger.exception(
"preprod.status_checks.create.insufficient_permissions",
extra={
"organization_id": self.organization_id,
"integration_id": self.integration_id,
"repo": repo,
"error_message": str(e),
},
)
raise IntegrationConfigurationError(
"GitHub App lacks permissions to create check runs. "
"Please ensure the app has the required permissions and that "
"the organization has accepted any updated permissions."
) from e
elif e.code and 400 <= e.code < 500 and e.code != 429:
logger.exception(
"preprod.status_checks.create.client_error",
extra={
"organization_id": self.organization_id,
"integration_id": self.integration_id,
"repo": repo,
"status_code": e.code,
},
)
raise IntegrationConfigurationError(
f"GitHub API returned {e.code} client error when creating check run"
) from e
# For non-permission 403s, 429s, 5xx, and other errors
raise
# See: https://docs.github.com/en/rest/checks/runs?apiVersion=2022-11-28#create-a-check-run
GITHUB_MAX_SUMMARY_FIELD_LENGTH = 65535
GITHUB_MAX_TEXT_FIELD_LENGTH = 65535
def _truncate_to_byte_limit(text: str | None, byte_limit: int) -> str | None:
"""Truncate text to fit within byte limit while ensuring valid UTF-8."""
if not text:
return text
TRUNCATE_AMOUNT = 10
encoded = text.encode("utf-8")
if len(encoded) <= byte_limit:
return text
if byte_limit <= TRUNCATE_AMOUNT:
# This shouldn't happen, but just in case.
truncated = encoded[:byte_limit].decode("utf-8", errors="ignore")
return truncated
# Truncate to byte_limit - 10 (a bit of wiggle room) to make room for "..."
# Note: this can break any formatting the text has and is more of a catch-all;
# broken formatting is better than silently erroring for the user.
# Templating logic itself should try to trim the content more contextually where possible.
truncated = encoded[: byte_limit - TRUNCATE_AMOUNT].decode("utf-8", errors="ignore")
return truncated + "..."
GITHUB_STATUS_CHECK_STATUS_MAPPING: dict[StatusCheckStatus, GitHubCheckStatus] = {
StatusCheckStatus.ACTION_REQUIRED: GitHubCheckStatus.COMPLETED,
StatusCheckStatus.IN_PROGRESS: GitHubCheckStatus.IN_PROGRESS,
StatusCheckStatus.FAILURE: GitHubCheckStatus.COMPLETED,
StatusCheckStatus.NEUTRAL: GitHubCheckStatus.COMPLETED,
StatusCheckStatus.SUCCESS: GitHubCheckStatus.COMPLETED,
}
GITHUB_STATUS_CHECK_CONCLUSION_MAPPING: dict[StatusCheckStatus, GitHubCheckConclusion | None] = {
StatusCheckStatus.ACTION_REQUIRED: GitHubCheckConclusion.ACTION_REQUIRED,
StatusCheckStatus.IN_PROGRESS: None,
StatusCheckStatus.FAILURE: GitHubCheckConclusion.FAILURE,
StatusCheckStatus.NEUTRAL: GitHubCheckConclusion.NEUTRAL,
StatusCheckStatus.SUCCESS: GitHubCheckConclusion.SUCCESS,
}
|
_GitHubStatusCheckProvider
|
python
|
django__django
|
django/db/models/expressions.py
|
{
"start": 72635,
"end": 75885
}
|
class ____(Expression):
"""
Model the frame clause in window expressions. There are two types of frame
clauses which are subclasses, however, all processing and validation (by no
means intended to be complete) is done here. Thus, providing an end for a
frame is optional (the default is UNBOUNDED FOLLOWING, which is the last
row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s%(exclude)s"
def __init__(self, start=None, end=None, exclusion=None):
self.start = Value(start)
self.end = Value(end)
if not isinstance(exclusion, (NoneType, WindowFrameExclusion)):
raise TypeError(
f"{self.__class__.__qualname__}.exclusion must be a "
"WindowFrameExclusion instance."
)
self.exclusion = exclusion
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def get_exclusion(self):
if self.exclusion is None:
return ""
return f" EXCLUDE {self.exclusion.value}"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
if self.exclusion and not connection.features.supports_frame_exclusion:
raise NotSupportedError(
"This backend does not support window frame exclusions."
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
"exclude": self.get_exclusion(),
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
elif self.start.value is not None and self.start.value > 0:
start = "%d %s" % (self.start.value, connection.ops.FOLLOWING)
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
elif self.end.value is not None and self.end.value < 0:
end = "%d %s" % (abs(self.end.value), connection.ops.PRECEDING)
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
"exclude": self.get_exclusion(),
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
|
WindowFrame
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-google/llama_index/readers/google/chat/base.py
|
{
"start": 330,
"end": 9482
}
|
class ____(BasePydanticReader):
"""
Google Chat Reader.
Reads messages from Google Chat
"""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Gets name identifier of class."""
return "GoogleChatReader"
def load_data(
self,
space_names: List[str],
num_messages: int = -1,
after: datetime = None,
before: datetime = None,
order_asc: bool = True,
) -> List[Document]:
"""
Loads documents from Google Chat.
Args:
space_names (List[str]): List of Space ID names found at the top of the URL (without the "space/" prefix).
num_messages (int, optional): Number of messages to load (may exceed this number). If -1, then loads all messages. Defaults to -1.
after (datetime, optional): Only search for messages after this datetime (UTC). Defaults to None.
before (datetime, optional): Only search for messages before this datetime (UTC). Defaults to None.
order_asc (bool, optional): If messages should be ordered by ascending time order. Defaults to True.
Returns:
List[Document]: List of document objects
"""
from googleapiclient.discovery import build
# get credentials and create chat service
credentials = self._get_credentials()
service = build("chat", "v1", credentials=credentials)
logger.info("Credentials successfully obtained.")
res = []
for space_name in space_names:
all_msgs = self._get_msgs(
service, space_name, num_messages, after, before, order_asc
) # gets raw API output in list of dict
msgs_sorted = self._sort_msgs(
space_name, all_msgs
) # puts messages into list of Document objects
res.extend(msgs_sorted)
logger.info(f"Successfully retrieved messages from {space_name}")
return res
def _sort_msgs(self, space_name: str, all_msgs: List[Dict[str, Any]]) -> List[Document]:
"""
Sorts messages from a space and puts them into Document objects.
Args:
space_name (str): Space ID
all_msgs (List[Dict[str, Any]]): All messages
Returns:
List[Document]: List of Document objects built from the messages
"""
res = []
id_to_text = self._id_to_text(
all_msgs
) # maps message ID to text (useful for retrieving info about quote replies)
thread_msg_cnt = self._get_thread_msg_cnt(
all_msgs
) # gets message count in each thread
for msg in all_msgs:
if any(
i not in msg for i in ("name", "text", "thread", "sender", "createTime")
):
# invalid message
continue
if "name" not in msg["thread"] or "name" not in msg["sender"]:
# invalid message
continue
metadata = {
"space_id": space_name,
"sender_id": msg["sender"]["name"],
"timestamp": msg["createTime"],
}
if (
"quotedMessageMetadata" in msg
and msg["quotedMessageMetadata"]["name"] in id_to_text
):
# metadata for a quote reply
metadata["quoted_msg"] = id_to_text[
msg["quotedMessageMetadata"]["name"]
]
# adds metadata for threads
# all threads with a message count of 1 gets counted as the "main thread"
thread_id = msg["thread"]["name"]
if thread_msg_cnt[thread_id] > 1:
metadata["thread_id"] = thread_id
else:
metadata["thread_id"] = "Main Thread"
doc = Document(id_=msg["name"], text=msg["text"], metadata=metadata)
res.append(doc)
return res
def _id_to_text(self, all_msgs: List[Dict[str, Any]]) -> Dict[str, str]:
"""
Maps message ID to text, used for quote replies.
Args:
all_msgs (List[Dict[str, Any]]): All messages
Returns:
Dict[str, str]: Map message ID -> message text
"""
res = {}
for msg in all_msgs:
if "text" not in msg or "name" not in msg:
continue
res[msg["name"]] = msg["text"]
return res
def _get_thread_msg_cnt(self, all_msgs: List[Dict[str, Any]]) -> Dict[str, int]:
"""
Gets message count for each thread ID.
Args:
all_msgs (List[Dict[str, Any]]): All messages
Returns:
Dict[str, int]: Maps thread ID -> count of messages that were in that thread
"""
# maps thread ID -> count
threads_dict = {}
for msg in all_msgs:
thread_name = msg["thread"]["name"]
if thread_name not in threads_dict:
# add thread name to dict
threads_dict[thread_name] = 1
else:
threads_dict[thread_name] += 1
return threads_dict
def _get_msgs(
self,
service: Any,
space_name: str,
num_messages: int = -1,
after: datetime = None,
before: datetime = None,
order_asc: bool = True,
) -> List[Dict[str, Any]]:
"""
Puts raw API output of chat messages from one space into a list.
Args:
service (Any): Google Chat API service object
space_name (str): Space ID name found at top of URL (without the "space/").
num_messages (int, optional): Number of messages to load (may exceed this number). If -1, then loads all messages. Defaults to -1.
after (datetime, optional): Only search for messages after this datetime (UTC). Defaults to None.
before (datetime, optional): Only search for messages before this datetime (UTC). Defaults to None.
order_asc (bool, optional): If messages should be ordered by ascending time order. Defaults to True.
Returns:
List[Dict[str, Any]]: List of message objects
"""
all_msgs = []
# API parameters
parent = f"spaces/{space_name}"
page_token = ""
filter_str = ""
if after is not None:
offset_str = ""
if after.utcoffset() is None:
offset_str = "+00:00"
filter_str += f'createTime > "{after.isoformat("T") + offset_str}" AND '
if before is not None:
offset_str = ""
if before.utcoffset() is None:
offset_str = "+00:00"
filter_str += f'createTime < "{before.isoformat("T") + offset_str}" AND '
filter_str = filter_str[:-4]
order_by = f"createTime {'ASC' if order_asc else 'DESC'}"
# Get all messages from space
while num_messages == -1 or len(all_msgs) < num_messages:
req_msg = num_messages - len(all_msgs)
result = (
service.spaces()
.messages()
.list(
parent=parent,
pageSize=req_msg if num_messages != -1 else 1000,
pageToken=page_token,
filter=filter_str,
orderBy=order_by,
showDeleted=False,
)
.execute()
)
if result and "messages" in result:
all_msgs.extend(result["messages"])
# if no more messages to load
if not result or "nextPageToken" not in result:
break
page_token = result["nextPageToken"]
return all_msgs
def _get_credentials(self) -> Any:
"""
Get valid user credentials from storage.
The file token.json stores the user's access and refresh tokens, and is
created automatically when the authorization flow completes for the first
time.
Returns:
Credentials, the obtained credential.
"""
import os
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
creds = None
if os.path.exists("token.json"):
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
"credentials.json", SCOPES
)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open("token.json", "w") as token:
token.write(creds.to_json())
return creds
|
GoogleChatReader
|
python
|
Netflix__metaflow
|
metaflow/plugins/datatools/s3/s3op.py
|
{
"start": 22236,
"end": 50253
}
|
class ____(object):
def __init__(self, s3config):
self.s3 = None
self.s3config = s3config
self.client_error = None
def reset_client(self, hard_reset=False):
from metaflow.plugins.datatools.s3.s3util import get_s3_client
if hard_reset or self.s3 is None:
self.s3, self.client_error = get_s3_client(
s3_role_arn=self.s3config.role,
s3_session_vars=self.s3config.session_vars,
s3_client_params=self.s3config.client_params,
)
@aws_retry
def get_info(self, url):
self.reset_client()
try:
head = self.s3.head_object(Bucket=url.bucket, Key=url.path)
return (
True,
url,
[
(
S3Url(
bucket=url.bucket,
path=url.path,
url=url.url,
local=url.local,
prefix=url.prefix,
content_type=head["ContentType"],
metadata=head["Metadata"],
encryption=head.get("ServerSideEncryption"),
range=url.range,
),
head["ContentLength"],
)
],
)
except self.client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
return False, url, ERROR_URL_NOT_FOUND
elif error_code == 403:
return False, url, ERROR_URL_ACCESS_DENIED
# Transient errors are going to be retried by the aws_retry decorator
else:
raise
@aws_retry
def list_prefix(self, prefix_url, delimiter=""):
self.reset_client()
url_base = "s3://%s/" % prefix_url.bucket
try:
paginator = self.s3.get_paginator("list_objects_v2")
urls = []
for page in paginator.paginate(
Bucket=prefix_url.bucket, Prefix=prefix_url.path, Delimiter=delimiter
):
# note that a URL may be both a prefix and an object
# - the trailing slash is significant in S3
if "Contents" in page:
for key in page.get("Contents", []):
key_path = key["Key"].lstrip("/")
url = url_base + key_path
urlobj = S3Url(
url=url,
bucket=prefix_url.bucket,
path=key_path,
local=generate_local_path(url),
prefix=prefix_url.url,
)
urls.append((urlobj, key["Size"]))
if "CommonPrefixes" in page:
# we get CommonPrefixes if Delimiter is a non-empty string
for key in page.get("CommonPrefixes", []):
url = url_base + key["Prefix"]
urlobj = S3Url(
url=url,
bucket=prefix_url.bucket,
path=key["Prefix"],
local=None,
prefix=prefix_url.url,
)
urls.append((urlobj, None))
return True, prefix_url, urls
except self.s3.exceptions.NoSuchBucket:
return False, prefix_url, ERROR_URL_NOT_FOUND
except self.client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
return False, prefix_url, ERROR_URL_NOT_FOUND
elif error_code == 403:
return False, prefix_url, ERROR_URL_ACCESS_DENIED
# Transient errors are going to be retried by the aws_retry decorator
else:
raise
# We want to reuse an S3 client instance over multiple operations.
# This is accomplished by op_ functions below.
def op_get_info(s3config, urls):
s3 = S3Ops(s3config)
return [s3.get_info(url) for url in urls]
def op_list_prefix(s3config, prefix_urls):
s3 = S3Ops(s3config)
return [s3.list_prefix(prefix) for prefix in prefix_urls]
def op_list_prefix_nonrecursive(s3config, prefix_urls):
s3 = S3Ops(s3config)
return [s3.list_prefix(prefix, delimiter="/") for prefix in prefix_urls]
def exit(exit_code, url):
if exit_code == ERROR_INVALID_URL:
msg = "Invalid url: %s" % url.url
elif exit_code == ERROR_NOT_FULL_PATH:
msg = "URL not a full path: %s" % url.url
elif exit_code == ERROR_URL_NOT_FOUND:
msg = "URL not found: %s" % url.url
elif exit_code == ERROR_URL_ACCESS_DENIED:
msg = "Access denied to URL: %s" % url.url
elif exit_code == ERROR_WORKER_EXCEPTION:
msg = "Download failed"
elif exit_code == ERROR_VERIFY_FAILED:
msg = "Verification failed for URL %s, local file %s" % (url.url, url.local)
elif exit_code == ERROR_LOCAL_FILE_NOT_FOUND:
msg = "Local file not found: %s" % url
elif exit_code == ERROR_TRANSIENT:
msg = "Transient error for url: %s" % url
elif exit_code == ERROR_OUT_OF_DISK_SPACE:
msg = "Out of disk space when downloading URL: %s" % url
else:
msg = "Unknown error"
print("s3op failed:\n%s" % msg, file=sys.stderr)
sys.exit(exit_code)
def verify_results(urls, verbose=False):
for url, expected in urls:
if verbose:
print("verifying %s, expected %s" % (url, expected), file=sys.stderr)
try:
got = os.stat(url.local).st_size
except OSError:
raise
if expected != got:
exit(ERROR_VERIFY_FAILED, url)
if url.content_type or url.metadata or url.encryption:
# Verify that we also have a metadata file present
try:
os.stat("%s_meta" % url.local)
except OSError:
exit(ERROR_VERIFY_FAILED, url)
def generate_local_path(url, range="whole", suffix=None):
# this function generates a safe local file name corresponding to
# an S3 URL. URLs may be longer than maximum file length limit on Linux,
# so we mostly hash the URL but retain the leaf part as a convenience
# feature to ease eyeballing
# We also call out "range" specifically to allow multiple ranges for the same
# file to be downloaded in parallel.
if range is None:
range = "whole"
if range != "whole":
# It will be of the form `bytes=%d-` or `bytes=-%d` or `bytes=%d-%d`
range = range[6:].replace("-", "_")
quoted = url_quote(url)
fname = quoted.split(b"/")[-1].replace(b".", b"_").replace(b"-", b"_")
sha = sha1(quoted).hexdigest()
# Truncate fname to ensure the final filename doesn't exceed filesystem limits.
# Most filesystems have a 255 character limit. The structure is:
# <40-char-sha>-<fname>-<range>[-<suffix>]
# We need to leave room for: sha (40) + hyphens (2-3) + range (~10) + suffix (~10)
# This leaves roughly 190 characters for fname. We use 150 to be safe.
fname_decoded = fname.decode("utf-8")
max_fname_len = 150
if len(fname_decoded) > max_fname_len:
# Truncate and add an ellipsis to indicate truncation
fname_decoded = fname_decoded[:max_fname_len] + "..."
if suffix:
return "-".join((sha, fname_decoded, range, suffix))
return "-".join((sha, fname_decoded, range))
def parallel_op(op, lst, num_workers):
# parallel op divides work equally amongst num_workers
# processes. This is a good strategy if the cost is
# uniform over the units of work, e.g. op_get_info, which
# is a single HEAD request to S3.
#
# This approach is less optimal with op_list_prefix where
# the cost of S3 listing per prefix can vary drastically.
# We could optimize this case by using a worker model with
# a queue, like for downloads but the difference here is
# that we need to return a value, which would require a
# bit more work - something to consider if this turns out
# to be a bottleneck.
if lst:
num = min(len(lst), num_workers)
batch_size = math.ceil(len(lst) / float(num))
batches = []
it = iter(lst)
while True:
batch = list(islice(it, batch_size))
if batch:
batches.append(batch)
else:
break
it = parallel_map(op, batches, max_parallel=num)
for x in chain.from_iterable(it):
yield x
# CLI
def common_options(func):
@click.option(
"--inputs",
type=click.Path(exists=True),
help="Read input prefixes from the given file.",
)
@click.option(
"--num-workers",
default=S3_WORKER_COUNT,
show_default=True,
help="Number of concurrent connections.",
)
@click.option(
"--s3role",
default=None,
show_default=True,
required=False,
help="Role to assume when getting the S3 client",
)
@click.option(
"--s3sessionvars",
default=None,
show_default=True,
required=False,
help="Session vars to set when getting the S3 client",
)
@click.option(
"--s3clientparams",
default=None,
show_default=True,
required=False,
help="Client parameters to set when getting the S3 client",
)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
def non_lst_common_options(func):
@click.option(
"--verbose/--no-verbose",
default=True,
show_default=True,
help="Print status information on stderr.",
)
@click.option(
"--listing/--no-listing",
default=False,
show_default=True,
help="Print S3 URL -> local file mapping on stdout.",
)
@click.option(
"--inject-failure",
default=0,
show_default=True,
type=int,
help="Simulate transient failures -- percentage (int) of injected failures",
hidden=True,
)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@click.group()
def cli():
pass
@cli.command("list", help="List S3 objects")
@tracing.cli("s3op/list")
@click.option(
"--recursive/--no-recursive",
default=False,
show_default=True,
help="List prefixes recursively.",
)
@common_options
@click.argument("prefixes", nargs=-1)
def lst(
prefixes,
inputs=None,
num_workers=None,
recursive=None,
s3role=None,
s3sessionvars=None,
s3clientparams=None,
):
s3config = S3Config(
s3role,
json.loads(s3sessionvars) if s3sessionvars else None,
json.loads(s3clientparams) if s3clientparams else None,
)
urllist = []
to_iterate, _ = _populate_prefixes(prefixes, inputs)
for _, prefix, url, _ in to_iterate:
src = urlparse(url, allow_fragments=False)
# We always consider the path being passed in to be a directory path so
# we add a trailing slash to the path if it doesn't already have one.
path_with_slash = src.path.lstrip("/")
if not path_with_slash.endswith("/"):
path_with_slash += "/"
url = S3Url(
url=url,
bucket=src.netloc,
path=path_with_slash,
local=None,
prefix=prefix,
)
if src.scheme != "s3":
exit(ERROR_INVALID_URL, url)
urllist.append(url)
op = (
partial(op_list_prefix, s3config)
if recursive
else partial(op_list_prefix_nonrecursive, s3config)
)
urls = []
for success, prefix_url, ret in parallel_op(op, urllist, num_workers):
if success:
urls.extend(ret)
else:
exit(ret, prefix_url)
for idx, (url, size) in enumerate(urls):
if size is None:
print(format_result_line(idx, url.prefix, url.url))
else:
print(format_result_line(idx, url.prefix, url.url, str(size)))
@cli.command(help="Upload files to S3")
@tracing.cli("s3op/put")
@click.option(
"--file",
"files",
type=(click.Path(exists=True), str),
multiple=True,
help="Local file->S3Url pair to upload. Can be specified multiple times.",
)
@click.option(
"--filelist",
type=click.Path(exists=True),
help="Read local file -> S3 URL mappings from the given file. Use --inputs instead",
)
@click.option(
"--overwrite/--no-overwrite",
default=True,
show_default=True,
help="Overwrite key if it already exists in S3.",
)
@common_options
@non_lst_common_options
def put(
files=None,
filelist=None,
inputs=None,
num_workers=None,
verbose=None,
overwrite=True,
listing=None,
s3role=None,
s3sessionvars=None,
s3clientparams=None,
inject_failure=0,
):
if inputs is not None and filelist is not None:
raise RuntimeError("Cannot specify inputs and filelist at the same time")
if inputs is not None and filelist is None:
filelist = inputs
is_transient_retry = False
def _files():
nonlocal is_transient_retry
line_idx = 0
for local, url in files:
local_file = url_unquote(local)
if not os.path.exists(local_file):
exit(ERROR_LOCAL_FILE_NOT_FOUND, local_file)
# no content_type, metadata, or encryption for files passed via --file
yield line_idx, local_file, url_unquote(url), None, None, None
line_idx += 1
if filelist:
# NOTE: We are assuming that the idx is properly set. This is only used
# by the transient failure retry mechanism and users should not use it
# directly. This will not work, for example, if only some lines have
# an idx specified (in some cases)
for line in open(filelist, mode="rb"):
r = json.loads(line)
input_line_idx = r.get("idx")
if input_line_idx is not None:
# We only have input indices if we have a transient retry.
is_transient_retry = True
else:
input_line_idx = line_idx
line_idx += 1
local = r["local"]
url = r["url"]
content_type = r.get("content_type", None)
metadata = r.get("metadata", None)
encryption = r.get("encryption", None)
if not os.path.exists(local):
exit(ERROR_LOCAL_FILE_NOT_FOUND, local)
yield input_line_idx, local, url, content_type, metadata, encryption
def _make_url(idx, local, user_url, content_type, metadata, encryption):
src = urlparse(user_url, allow_fragments=False)
url = S3Url(
url=user_url,
bucket=src.netloc,
path=src.path.lstrip("/"),
local=local,
prefix=None,
content_type=content_type,
metadata=metadata,
idx=idx,
encryption=encryption,
)
if src.scheme != "s3":
exit(ERROR_INVALID_URL, url)
if not src.path:
exit(ERROR_NOT_FULL_PATH, url)
return url
s3config = S3Config(
s3role,
json.loads(s3sessionvars) if s3sessionvars else None,
json.loads(s3clientparams) if s3clientparams else None,
)
urls = list(starmap(_make_url, _files()))
ul_op = "upload"
if not overwrite:
ul_op = "info_upload"
sz_results, transient_error_type = process_urls(
ul_op, urls, verbose, inject_failure, num_workers, s3config
)
retry_lines = []
out_lines = []
denied_url = None
for url, sz in zip(urls, sz_results):
# sz is None if the file wasn't uploaded (no overwrite), 0 if uploaded OK
# or the error code if not (error code here will only be
# ERROR_TRANSIENT or ERROR_URL_ACCESS_DENIED
if sz is None:
if listing:
# We keep a position for it in our out list in case of retries
out_lines.append("%d %s\n" % (url.idx, TRANSIENT_RETRY_LINE_CONTENT))
continue
elif listing and sz == 0:
out_lines.append(format_result_line(url.idx, url.url) + "\n")
elif sz == -ERROR_TRANSIENT:
retry_data = {
"idx": url.idx,
"url": url.url,
"local": url.local,
"content_type": url.content_type,
"metadata": url.metadata,
"encryption": url.encryption,
}
if transient_error_type:
retry_data["transient_error_type"] = transient_error_type
retry_lines.append(json.dumps(retry_data) + "\n")
# Output something to get a total count the first time around
if not is_transient_retry:
out_lines.append("%d %s\n" % (url.idx, TRANSIENT_RETRY_LINE_CONTENT))
elif sz == -ERROR_URL_ACCESS_DENIED:
# We do NOT break because we want to be able to accurately report all
# the files uploaded after retries.
denied_url = url
if denied_url is not None:
exit(ERROR_URL_ACCESS_DENIED, denied_url)
if out_lines:
sys.stdout.writelines(out_lines)
sys.stdout.flush()
if retry_lines:
sys.stderr.write("%s\n" % TRANSIENT_RETRY_START_LINE)
sys.stderr.writelines(retry_lines)
sys.stderr.flush()
sys.exit(ERROR_TRANSIENT)
def _populate_prefixes(prefixes, inputs):
# Returns a tuple of (entries, is_transient_retry). Each entry is a 4-tuple of
# (index, prefix, url, range), where range is None if the entire prefix/URL
# is requested.
# We again assume that the indices, if provided, are correct. This is again only
# used for the transient error retry so users should not use this directly.
is_transient_retry = False
if prefixes:
prefixes = [(idx, url_unquote(p), url_unquote(p), None) for idx, p in enumerate(prefixes)]
else:
prefixes = []
if inputs:
with open(inputs, mode="rb") as f:
for idx, l in enumerate(f, start=len(prefixes)):
s = l.split(b" ")
if len(s) == 1:
# User input format: <url>
url = url_unquote(s[0].strip())
prefixes.append((idx, url, url, None))
elif len(s) == 2:
# User input format: <url> <range>
url = url_unquote(s[0].strip())
prefixes.append((idx, url, url, url_unquote(s[1].strip())))
elif len(s) in (4, 5):
# Retry format: <idx> <prefix> <url> <range> [<transient_error_type>]
# The transient_error_type (5th field) is optional and only used for logging.
# Lines with other field counts (e.g., 3) are silently ignored as invalid.
is_transient_retry = True
prefix = url_unquote(s[1].strip())
url = url_unquote(s[2].strip())
range_info = url_unquote(s[3].strip())
if range_info == "<norange>":
range_info = None
prefixes.append(
(int(url_unquote(s[0].strip())), prefix, url, range_info)
)
return prefixes, is_transient_retry
@cli.command(help="Download files from S3")
@tracing.cli("s3op/get")
@click.option(
"--recursive/--no-recursive",
default=False,
show_default=True,
help="Download prefixes recursively.",
)
@click.option(
"--verify/--no-verify",
default=True,
show_default=True,
help="Verify that files were loaded correctly.",
)
@click.option(
"--info/--no-info",
default=True,
show_default=True,
help="Return user tags and content-type",
)
@click.option(
"--allow-missing/--no-allow-missing",
default=False,
show_default=True,
help="Do not exit if missing files are detected. Implies --verify.",
)
@common_options
@non_lst_common_options
@click.argument("prefixes", nargs=-1)
def get(
prefixes,
recursive=None,
num_workers=None,
inputs=None,
verify=None,
info=None,
allow_missing=None,
verbose=None,
listing=None,
s3role=None,
s3sessionvars=None,
s3clientparams=None,
inject_failure=0,
):
s3config = S3Config(
s3role,
json.loads(s3sessionvars) if s3sessionvars else None,
json.loads(s3clientparams) if s3clientparams else None,
)
# Construct a list of URL (prefix) objects
urllist = []
to_iterate, is_transient_retry = _populate_prefixes(prefixes, inputs)
for idx, prefix, url, r in to_iterate:
src = urlparse(url, allow_fragments=False)
url = S3Url(
url=url,
bucket=src.netloc,
path=src.path.lstrip("/"),
local=generate_local_path(url, range=r),
prefix=prefix,
range=r,
idx=idx,
)
if src.scheme != "s3":
exit(ERROR_INVALID_URL, url)
if not recursive and not src.path:
exit(ERROR_NOT_FULL_PATH, url)
urllist.append(url)
# Construct a URL->size mapping and get content-type and metadata if needed
op = None
dl_op = "download"
if recursive:
op = partial(op_list_prefix, s3config)
if verify or verbose or info:
dl_op = "info_download"
if op:
if is_transient_retry:
raise RuntimeError("--recursive not allowed for transient retries")
urls = []
# NOTE - we must retain the order of prefixes requested
# and the listing order returned by S3
for success, prefix_url, ret in parallel_op(op, urllist, num_workers):
if success:
urls.extend(ret)
elif ret == ERROR_URL_NOT_FOUND and allow_missing:
urls.append((prefix_url, None))
else:
exit(ret, prefix_url)
# We re-index here since we may have pulled in a bunch more stuff. On a transient
# retry, we never have recursive so we would not re-index
for idx, (url, _) in enumerate(urls):
url.idx = idx
else:
# pretend zero size since we don't need it for anything.
# it can't be None though, to make sure the listing below
# works correctly (None denotes a missing file)
urls = [(prefix_url, 0) for prefix_url in urllist]
# exclude the non-existent files from loading
to_load = [url for url, size in urls if size is not None]
sz_results, transient_error_type = process_urls(
dl_op, to_load, verbose, inject_failure, num_workers, s3config
)
# We check if there is any access denied
retry_lines = []
out_lines = []
denied_url = None
missing_url = None
verify_info = []
idx_in_sz = 0
for url, _ in urls:
sz = None
# to_load contains an ordered subset of urls
if idx_in_sz != len(to_load) and url.url == to_load[idx_in_sz].url:
sz = sz_results[idx_in_sz]
idx_in_sz += 1
if listing and sz is None:
out_lines.append(format_result_line(url.idx, url.url) + "\n")
elif listing and sz >= 0:
out_lines.append(
format_result_line(url.idx, url.prefix, url.url, url.local) + "\n"
)
if verify:
verify_info.append((url, sz))
elif sz == -ERROR_OUT_OF_DISK_SPACE:
exit(ERROR_OUT_OF_DISK_SPACE, url)
elif sz == -ERROR_URL_ACCESS_DENIED:
denied_url = url
break
elif sz == -ERROR_URL_NOT_FOUND:
if missing_url is None:
missing_url = url
if not allow_missing:
break
out_lines.append(format_result_line(url.idx, url.url) + "\n")
elif sz == -ERROR_TRANSIENT:
retry_line_parts = [
str(url.idx),
url_quote(url.prefix).decode(encoding="utf-8"),
url_quote(url.url).decode(encoding="utf-8"),
(
url_quote(url.range).decode(encoding="utf-8")
if url.range
else "<norange>"
),
]
if transient_error_type:
retry_line_parts.append(transient_error_type)
retry_lines.append(" ".join(retry_line_parts) + "\n")
# First time around, we output something to indicate the total length
if not is_transient_retry:
out_lines.append("%d %s\n" % (url.idx, TRANSIENT_RETRY_LINE_CONTENT))
if denied_url is not None:
exit(ERROR_URL_ACCESS_DENIED, denied_url)
if not allow_missing and missing_url is not None:
exit(ERROR_URL_NOT_FOUND, missing_url)
# Postprocess
if verify:
verify_results(verify_info, verbose=verbose)
if out_lines:
sys.stdout.writelines(out_lines)
sys.stdout.flush()
if retry_lines:
sys.stderr.write("%s\n" % TRANSIENT_RETRY_START_LINE)
sys.stderr.writelines(retry_lines)
sys.stderr.flush()
sys.exit(ERROR_TRANSIENT)
@cli.command(help="Get info about files from S3")
@common_options
@non_lst_common_options
@click.argument("prefixes", nargs=-1)
def info(
prefixes,
num_workers=None,
inputs=None,
verbose=None,
listing=None,
s3role=None,
s3sessionvars=None,
s3clientparams=None,
inject_failure=0,
):
s3config = S3Config(
s3role,
json.loads(s3sessionvars) if s3sessionvars else None,
json.loads(s3clientparams) if s3clientparams else None,
)
# Construct a list of URL (prefix) objects
urllist = []
to_iterate, is_transient_retry = _populate_prefixes(prefixes, inputs)
for idx, prefix, url, _ in to_iterate:
src = urlparse(url, allow_fragments=False)
url = S3Url(
url=url,
bucket=src.netloc,
path=src.path.lstrip("/"),
local=generate_local_path(url, suffix="info"),
prefix=prefix,
range=None,
idx=idx,
)
if src.scheme != "s3":
exit(ERROR_INVALID_URL, url)
urllist.append(url)
sz_results, transient_error_type = process_urls(
"info", urllist, verbose, inject_failure, num_workers, s3config
)
retry_lines = []
out_lines = []
for idx, sz in enumerate(sz_results):
url = urllist[idx]
if listing and sz != -ERROR_TRANSIENT:
out_lines.append(
format_result_line(url.idx, url.prefix, url.url, url.local) + "\n"
)
else:
retry_line_parts = [
str(url.idx),
url_quote(url.prefix).decode(encoding="utf-8"),
url_quote(url.url).decode(encoding="utf-8"),
"<norange>",
]
if transient_error_type:
retry_line_parts.append(transient_error_type)
retry_lines.append(" ".join(retry_line_parts) + "\n")
if not is_transient_retry:
out_lines.append("%d %s\n" % (url.idx, TRANSIENT_RETRY_LINE_CONTENT))
if out_lines:
sys.stdout.writelines(out_lines)
sys.stdout.flush()
if retry_lines:
sys.stderr.write("%s\n" % TRANSIENT_RETRY_START_LINE)
sys.stderr.writelines(retry_lines)
sys.stderr.flush()
sys.exit(ERROR_TRANSIENT)
if __name__ == "__main__":
cli(auto_envvar_prefix="S3OP")
|
S3Ops
|
python
|
python__mypy
|
mypy/test/data.py
|
{
"start": 27091,
"end": 29542
}
|
class ____(pytest.Collector):
"""Represents a single `.test` data driven test file.
More context: https://github.com/python/mypy/issues/11662
"""
parent: DataSuiteCollector
_fixes: list[DataFileFix]
@classmethod # We have to fight with pytest here:
def from_parent(
cls, parent: DataSuiteCollector, *, name: str # type: ignore[override]
) -> DataFileCollector:
collector = super().from_parent(parent, name=name)
assert isinstance(collector, DataFileCollector)
return collector
def collect(self) -> Iterator[DataDrivenTestCase]:
yield from split_test_cases(
parent=self,
suite=self.parent.obj,
file=os.path.join(self.parent.obj.data_prefix, self.name),
)
def setup(self) -> None:
super().setup()
self._fixes = []
def teardown(self) -> None:
super().teardown()
self._apply_fixes()
def enqueue_fix(self, fix: DataFileFix) -> None:
self._fixes.append(fix)
def _apply_fixes(self) -> None:
if not self._fixes:
return
data_path = Path(self.parent.obj.data_prefix) / self.name
lines = data_path.read_text().split("\n")
# start from end to prevent line offsets from shifting as we update
for fix in sorted(self._fixes, reverse=True):
lines[fix.lineno - 1 : fix.end_lineno - 1] = fix.lines
data_path.write_text("\n".join(lines))
def add_test_name_suffix(name: str, suffix: str) -> str:
# Find magic suffix of form "-foobar" (used for things like "-skip").
m = re.search(r"-[-A-Za-z0-9]+$", name)
if m:
# Insert suite-specific test name suffix before the magic suffix
# which must be the last thing in the test case name since we
# are using endswith() checks.
magic_suffix = m.group(0)
return name[: -len(magic_suffix)] + suffix + magic_suffix
else:
return name + suffix
def is_incremental(testcase: DataDrivenTestCase) -> bool:
return "incremental" in testcase.name.lower() or "incremental" in testcase.file
def has_stable_flags(testcase: DataDrivenTestCase) -> bool:
if any(re.match(r"# flags[2-9]:", line) for line in testcase.input):
return False
for filename, contents in testcase.files:
if os.path.basename(filename).startswith("mypy.ini."):
return False
return True
|
DataFileCollector
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/asset.py
|
{
"start": 2240,
"end": 2443
}
|
class ____(BaseModel):
"""GET /api/assets response."""
items: list[DgApiAsset]
cursor: Optional[str] # Next cursor for pagination
has_more: bool # Whether more results exist
|
DgApiAssetList
|
python
|
pytorch__pytorch
|
test/test_multiprocessing_spawn.py
|
{
"start": 7880,
"end": 8484
}
|
class ____(TestCase, _TestMultiProcessing):
orig_paralell_env_val = None
def setUp(self):
super().setUp()
self.orig_paralell_env_val = os.environ.get(mp.ENV_VAR_PARALLEL_START)
os.environ[mp.ENV_VAR_PARALLEL_START] = "1"
def tearDown(self):
super().tearDown()
if self.orig_paralell_env_val is None:
del os.environ[mp.ENV_VAR_PARALLEL_START]
else:
os.environ[mp.ENV_VAR_PARALLEL_START] = self.orig_paralell_env_val
@unittest.skipIf(
IS_WINDOWS,
"Fork is only available on Unix",
)
|
ParallelForkServerShouldWorkTest
|
python
|
getsentry__sentry
|
tests/sentry/seer/explorer/test_tools.py
|
{
"start": 74389,
"end": 80120
}
|
class ____(APITransactionTestCase, SnubaTestCase, TraceMetricsTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.ten_mins_ago = before_now(minutes=10)
self.nine_mins_ago = before_now(minutes=9)
self.trace_id = uuid.uuid4().hex
# Create metrics with various attributes
self.metrics = [
self.create_trace_metric(
metric_name="http.request.duration",
metric_value=125.5,
metric_type="distribution",
metric_unit="millisecond",
trace_id=self.trace_id,
attributes={
"http.method": "GET",
"http.status_code": 200,
"my-string-attribute": "custom value",
"my-boolean-attribute": True,
"my-double-attribute": 1.23,
"my-integer-attribute": 123,
},
timestamp=self.ten_mins_ago,
),
self.create_trace_metric(
metric_name="database.query.count",
metric_value=5.0,
metric_type="counter",
# No trace_id - should not be returned in trace queries
timestamp=self.nine_mins_ago,
),
self.create_trace_metric(
metric_name="http.request.duration",
metric_value=200.3,
metric_type="distribution",
metric_unit="millisecond",
trace_id=self.trace_id,
attributes={
"http.method": "POST",
"http.status_code": 201,
},
timestamp=self.nine_mins_ago,
),
self.create_trace_metric(
metric_name="cache.hit.rate",
metric_value=0.85,
metric_type="gauge",
trace_id=self.trace_id,
attributes={
"cache.type": "redis",
},
timestamp=self.nine_mins_ago,
),
]
self.store_trace_metrics(self.metrics)
@staticmethod
def get_id_str(item: TraceItem) -> str:
return item.item_id[::-1].hex()
def test_get_metric_attributes_for_trace_basic(self) -> None:
result = get_metric_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
)
assert result is not None
assert len(result["data"]) == 3
# Find the first http.request.duration metric
http_metric_expected = self.metrics[0]
http_metric = None
for item in result["data"]:
if item["id"] == self.get_id_str(http_metric_expected):
http_metric = item
assert http_metric is not None
ts = datetime.fromisoformat(http_metric["timestamp"]).timestamp()
assert int(ts) == http_metric_expected.timestamp.seconds
for name, value, type in [
("metric.name", "http.request.duration", "str"),
("metric.type", "distribution", "str"),
("value", 125.5, "double"),
("project", self.project.slug, "str"),
("project.id", self.project.id, "int"),
("http.method", "GET", "str"),
("http.status_code", 200, "double"),
("my-string-attribute", "custom value", "str"),
("my-boolean-attribute", True, "double"),
("my-double-attribute", 1.23, "double"),
("my-integer-attribute", 123, "double"),
]:
assert http_metric["attributes"][name]["value"] == value, name
assert http_metric["attributes"][name]["type"] == type, f"{name} type mismatch"
def test_get_metric_attributes_for_trace_name_filter(self) -> None:
# Test substring match (fails)
result = get_metric_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
metric_name="http.",
)
assert result is not None
assert len(result["data"]) == 0
# Test an exact match (case-insensitive)
result = get_metric_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
metric_name="Cache.hit.rate",
)
assert result is not None
assert len(result["data"]) == 1
assert result["data"][0]["id"] == self.get_id_str(self.metrics[3])
def test_get_metric_attributes_for_trace_limit_no_filter(self) -> None:
result = get_metric_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
limit=1,
)
assert result is not None
assert len(result["data"]) == 1
assert result["data"][0]["id"] in [
self.get_id_str(self.metrics[0]),
self.get_id_str(self.metrics[2]),
self.get_id_str(self.metrics[3]),
]
def test_get_metric_attributes_for_trace_limit_with_filter(self) -> None:
result = get_metric_attributes_for_trace(
org_id=self.organization.id,
trace_id=self.trace_id,
stats_period="1d",
metric_name="http.request.duration",
limit=2,
)
assert result is not None
assert len(result["data"]) == 2
ids = [item["id"] for item in result["data"]]
assert self.get_id_str(self.metrics[0]) in ids
assert self.get_id_str(self.metrics[2]) in ids
|
TestMetricsTraceQuery
|
python
|
numba__numba
|
numba/core/lowering.py
|
{
"start": 923,
"end": 13110
}
|
class ____(object):
"""
Lower IR to LLVM
"""
def __init__(self, context, library, fndesc, func_ir, metadata=None):
self.library = library
self.fndesc = fndesc
self.blocks = utils.SortedMap(func_ir.blocks.items())
self.func_ir = func_ir
self.generator_info = func_ir.generator_info
self.metadata = metadata
self.flags = targetconfig.ConfigStack.top_or_none()
# Initialize LLVM
self.module = self.library.create_ir_module(self.fndesc.unique_name)
# Python execution environment (will be available to the compiled
# function).
self.env = Environment.from_fndesc(self.fndesc)
# Internal states
self.blkmap = {}
self.pending_phis = {}
self.varmap = {}
self.firstblk = min(self.blocks.keys())
self.loc = -1
# Specializes the target context as seen inside the Lowerer
# This adds:
# - environment: the python execution environment
self.context = context.subtarget(environment=self.env,
fndesc=self.fndesc)
# Debuginfo
dibuildercls = (self.context.DIBuilder
if self.context.enable_debuginfo
else debuginfo.DummyDIBuilder)
# debuginfo def location
self.defn_loc = self._compute_def_location()
directives_only = self.flags.dbg_directives_only
self.debuginfo = dibuildercls(module=self.module,
filepath=func_ir.loc.filename,
cgctx=context,
directives_only=directives_only)
# Loc notify objects
self._loc_notify_registry = get_registered_loc_notify()
# Subclass initialization
self.init()
@property
def call_conv(self):
return self.context.call_conv
def init(self):
pass
def init_pyapi(self):
"""
Init the Python API and Environment Manager for the function being
lowered.
"""
if self.pyapi is not None:
return
self.pyapi = self.context.get_python_api(self.builder)
# Store environment argument for later use
self.env_manager = self.context.get_env_manager(self.builder)
self.env_body = self.env_manager.env_body
self.envarg = self.env_manager.env_ptr
def _compute_def_location(self):
# Debuginfo requires source to be accurate. Find it and warn if not
# found. If it's not found, use the func_ir line + 1, this assumes that
# the function definition is decorated with a 1 line jit decorator.
defn_loc = self.func_ir.loc.with_lineno(self.func_ir.loc.line + 1)
if self.context.enable_debuginfo:
fn = self.func_ir.func_id.func
optional_lno = get_func_body_first_lineno(fn)
if optional_lno is not None:
# -1 as lines start at 1 and this is an offset.
offset = optional_lno - 1
defn_loc = self.func_ir.loc.with_lineno(offset)
else:
msg = ("Could not find source for function: "
f"{self.func_ir.func_id.func}. Debug line information "
"may be inaccurate.")
warnings.warn(NumbaDebugInfoWarning(msg))
return defn_loc
def pre_lower(self):
"""
Called before lowering all blocks.
"""
# A given Lower object can be used for several LL functions
# (for generators) and it's important to use a new API and
# EnvironmentManager.
self.pyapi = None
self.debuginfo.mark_subprogram(function=self.builder.function,
qualname=self.fndesc.qualname,
argnames=self.fndesc.args,
argtypes=self.fndesc.argtypes,
line=self.defn_loc.line)
# When full debug info is enabled, disable inlining where possible, to
# improve the quality of the debug experience. 'alwaysinline' functions
# cannot have inlining disabled.
attributes = self.builder.function.attributes
full_debug = self.flags.debuginfo and not self.flags.dbg_directives_only
if full_debug and 'alwaysinline' not in attributes:
attributes.add('noinline')
def post_lower(self):
"""
Called after all blocks are lowered
"""
self.debuginfo.finalize()
for notify in self._loc_notify_registry:
notify.close()
def pre_block(self, block):
"""
Called before lowering a block.
"""
def post_block(self, block):
"""
Called after lowering a block.
"""
def return_dynamic_exception(self, exc_class, exc_args, nb_types, loc=None):
self.call_conv.return_dynamic_user_exc(
self.builder, exc_class, exc_args, nb_types,
loc=loc, func_name=self.func_ir.func_id.func_name,
)
def return_exception(self, exc_class, exc_args=None, loc=None):
"""Propagate exception to the caller.
"""
self.call_conv.return_user_exc(
self.builder, exc_class, exc_args,
loc=loc, func_name=self.func_ir.func_id.func_name,
)
def set_exception(self, exc_class, exc_args=None, loc=None):
"""Set exception state in the current function.
"""
self.call_conv.set_static_user_exc(
self.builder, exc_class, exc_args,
loc=loc, func_name=self.func_ir.func_id.func_name,
)
def emit_environment_object(self):
"""Emit a pointer to hold the Environment object.
"""
# Define global for the environment and initialize it to NULL
envname = self.context.get_env_name(self.fndesc)
self.context.declare_env_global(self.module, envname)
def lower(self):
# Emit the Env into the module
self.emit_environment_object()
if self.generator_info is None:
self.genlower = None
self.lower_normal_function(self.fndesc)
else:
self.genlower = self.GeneratorLower(self)
self.gentype = self.genlower.gentype
self.genlower.lower_init_func(self)
self.genlower.lower_next_func(self)
if self.gentype.has_finalizer:
self.genlower.lower_finalize_func(self)
if config.DUMP_LLVM:
utils.dump_llvm(self.fndesc, self.module)
# Special optimization to remove NRT on functions that do not need it.
if self.context.enable_nrt and self.generator_info is None:
removerefctpass.remove_unnecessary_nrt_usage(self.function,
context=self.context,
fndesc=self.fndesc)
# Run target specific post lowering transformation
self.context.post_lowering(self.module, self.library)
# Materialize LLVM Module
self.library.add_ir_module(self.module)
def extract_function_arguments(self):
self.fnargs = self.call_conv.decode_arguments(self.builder,
self.fndesc.argtypes,
self.function)
return self.fnargs
def lower_normal_function(self, fndesc):
"""
Lower non-generator *fndesc*.
"""
self.setup_function(fndesc)
# Init argument values
self.extract_function_arguments()
entry_block_tail = self.lower_function_body()
# Close tail of entry block, do not emit debug metadata else the
# unconditional jump gets associated with the metadata from the function
# body end.
with debuginfo.suspend_emission(self.builder):
self.builder.position_at_end(entry_block_tail)
self.builder.branch(self.blkmap[self.firstblk])
def lower_function_body(self):
"""
Lower the current function's body, and return the entry block.
"""
# Init Python blocks
for offset in self.blocks:
bname = "B%s" % offset
self.blkmap[offset] = self.function.append_basic_block(bname)
self.pre_lower()
# pre_lower() may have changed the current basic block
entry_block_tail = self.builder.basic_block
self.debug_print("# function begin: {0}".format(
self.fndesc.unique_name))
# Lower all blocks
for offset, block in sorted(self.blocks.items()):
bb = self.blkmap[offset]
self.builder.position_at_end(bb)
self.debug_print(f"# lower block: {offset}")
self.lower_block(block)
self.post_lower()
return entry_block_tail
def lower_block(self, block):
"""
Lower the given block.
"""
self.pre_block(block)
for inst in block.body:
self.loc = inst.loc
defaulterrcls = partial(LoweringError, loc=self.loc)
with new_error_context('lowering "{inst}" at {loc}', inst=inst,
loc=self.loc, errcls_=defaulterrcls):
self.lower_inst(inst)
self.post_block(block)
def create_cpython_wrapper(self, release_gil=False):
"""
Create CPython wrapper(s) around this function (or generator).
"""
if self.genlower:
self.context.create_cpython_wrapper(self.library,
self.genlower.gendesc,
self.env, self.call_helper,
release_gil=release_gil)
self.context.create_cpython_wrapper(self.library, self.fndesc,
self.env, self.call_helper,
release_gil=release_gil)
def create_cfunc_wrapper(self):
"""
Create C wrapper around this function.
"""
if self.genlower:
raise UnsupportedError('generator as a first-class function type')
self.context.create_cfunc_wrapper(self.library, self.fndesc,
self.env, self.call_helper)
def setup_function(self, fndesc):
# Setup function
self.function = self.context.declare_function(self.module, fndesc)
if self.flags.dbg_optnone:
attrset = self.function.attributes
if "alwaysinline" not in attrset:
attrset.add("optnone")
attrset.add("noinline")
self.entry_block = self.function.append_basic_block('entry')
self.builder = IRBuilder(self.entry_block)
self.call_helper = self.call_conv.init_call_helper(self.builder)
def typeof(self, varname):
return self.fndesc.typemap[varname]
def notify_loc(self, loc: ir.Loc) -> None:
"""Called when a new instruction with the given `loc` is about to be
lowered.
"""
for notify_obj in self._loc_notify_registry:
notify_obj.notify(loc)
def debug_print(self, msg):
if config.DEBUG_JIT:
self.context.debug_print(
self.builder, f"DEBUGJIT [{self.fndesc.qualname}]: {msg}")
def print_variable(self, msg, varname):
"""Helper to emit ``print(msg, varname)`` for debugging.
Parameters
----------
msg : str
Literal string to be printed.
varname : str
A variable name whose value will be printed.
"""
argtys = (
types.literal(msg),
self.fndesc.typemap[varname]
)
args = (
self.context.get_dummy_value(),
self.loadvar(varname),
)
sig = typing.signature(types.none, *argtys)
impl = self.context.get_function(print, sig)
impl(self.builder, args)
|
BaseLower
|
python
|
explosion__spaCy
|
spacy/schemas.py
|
{
"start": 12345,
"end": 14090
}
|
class ____(BaseModel):
# fmt: off
lang: StrictStr = Field(..., title="Two-letter language code, e.g. 'en'")
name: StrictStr = Field(..., title="Model name")
version: StrictStr = Field(..., title="Model version")
spacy_version: StrictStr = Field("", title="Compatible spaCy version identifier")
parent_package: StrictStr = Field("spacy", title="Name of parent spaCy package, e.g. spacy or spacy-nightly")
requirements: List[StrictStr] = Field([], title="Additional Python package dependencies, used for the Python package setup")
pipeline: List[StrictStr] = Field([], title="Names of pipeline components")
description: StrictStr = Field("", title="Model description")
license: StrictStr = Field("", title="Model license")
author: StrictStr = Field("", title="Model author name")
email: StrictStr = Field("", title="Model author email")
url: StrictStr = Field("", title="Model author URL")
sources: Optional[Union[List[StrictStr], List[Dict[str, str]]]] = Field(None, title="Training data sources")
vectors: Dict[str, Any] = Field({}, title="Included word vectors")
labels: Dict[str, List[str]] = Field({}, title="Component labels, keyed by component name")
performance: Dict[str, Any] = Field({}, title="Accuracy and speed numbers")
spacy_git_version: StrictStr = Field("", title="Commit of spaCy version used")
# fmt: on
# Config schema
# We're not setting any defaults here (which is too messy) and are making all
# fields required, so we can raise validation errors for missing values. To
# provide a default, we include a separate .cfg file with all values and
# check that against this schema in the test suite to make sure it's always
# up to date.
|
ModelMetaSchema
|
python
|
Pylons__pyramid
|
tests/test_settings.py
|
{
"start": 18,
"end": 1014
}
|
class ____(unittest.TestCase):
def _callFUT(self, s):
from pyramid.settings import asbool
return asbool(s)
def test_s_is_None(self):
result = self._callFUT(None)
self.assertEqual(result, False)
def test_s_is_True(self):
result = self._callFUT(True)
self.assertEqual(result, True)
def test_s_is_False(self):
result = self._callFUT(False)
self.assertEqual(result, False)
def test_s_is_true(self):
result = self._callFUT('True')
self.assertEqual(result, True)
def test_s_is_false(self):
result = self._callFUT('False')
self.assertEqual(result, False)
def test_s_is_yes(self):
result = self._callFUT('yes')
self.assertEqual(result, True)
def test_s_is_on(self):
result = self._callFUT('on')
self.assertEqual(result, True)
def test_s_is_1(self):
result = self._callFUT(1)
self.assertEqual(result, True)
|
Test_asbool
|
python
|
huggingface__transformers
|
src/transformers/models/x_clip/modeling_x_clip.py
|
{
"start": 46710,
"end": 47475
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
embed_dim = config.projection_dim
self.cross_attn = XCLIPCrossAttention(config)
self.norm1 = nn.LayerNorm(embed_dim, eps=config.text_config.layer_norm_eps)
self.norm3 = nn.LayerNorm(embed_dim, eps=config.text_config.layer_norm_eps)
self.mlp = nn.Sequential(
nn.Linear(embed_dim, embed_dim * 4),
ACT2FN[config.prompt_hidden_act],
nn.Dropout(config.prompt_attention_dropout),
nn.Linear(embed_dim * 4, embed_dim),
)
def forward(self, x, visual):
x = x + self.cross_attn(self.norm1(x), visual, visual)
x = x + self.mlp(self.norm3(x))
return x
|
PromptGeneratorLayer
|
python
|
cython__cython
|
Cython/Compiler/Symtab.py
|
{
"start": 59077,
"end": 90405
}
|
class ____(Scope):
# module_name string Python name of the module
# module_cname string C name of Python module object
# #module_dict_cname string C name of module dict object
# method_table_cname string C name of method table
# doc string Module doc string
# doc_cname string C name of module doc string
# utility_code_list [UtilityCode] Queuing utility codes for forwarding to Code.py
# c_includes {key: IncludeCode} C headers or verbatim code to be generated
# See process_include() for more documentation
# identifier_to_entry {string : Entry} Map identifier string const to entry
# context Context
# parent_module Scope Parent in the import namespace
# module_entries {string : Entry} For cimport statements
# type_names {string : 1} Set of type names (used during parsing)
# included_files [string] Cython sources included with 'include'
# pxd_file_loaded boolean Corresponding .pxd file has been processed
# cimported_modules [ModuleScope] Modules imported with cimport
# types_imported {PyrexType} Set of types for which import code generated
# has_import_star boolean Module contains import *
# cpp boolean Compiling a C++ file
# is_cython_builtin boolean Is this the Cython builtin scope (or a child scope)
# is_package boolean Is this a package module? (__init__)
is_module_scope = 1
has_import_star = 0
is_cython_builtin = 0
old_style_globals = 0
namespace_cname_is_type = False
scope_predefined_names = [
'__builtins__', '__name__', '__file__', '__doc__', '__path__',
'__spec__', '__loader__', '__package__', '__cached__',
]
def __init__(self, name, parent_module, context, is_package=False):
from . import Builtin
self.parent_module = parent_module
outer_scope = Builtin.builtin_scope
Scope.__init__(self, name, outer_scope, parent_module)
self.is_package = is_package
self.module_name = name
self.module_name = EncodedString(self.module_name)
self._context = context
self.module_cname = Naming.module_cname
self.module_dict_cname = Naming.moddict_cname
self.method_table_cname = Naming.methtable_cname
self.doc = ""
self.doc_cname = Naming.moddoc_cname
self.utility_code_list = []
self.module_entries = {}
self.c_includes = {}
self.type_names = dict(outer_scope.type_names)
self.pxd_file_loaded = 0
self.cimported_modules = []
self.types_imported = set()
self.included_files = []
self.has_extern_class = 0
self.cached_builtins = []
self.undeclared_cached_builtins = []
self.namespace_cname = self.module_cname
self._cached_tuple_types = {}
self._cached_defaults_c_class_entries = {}
self.process_include(Code.IncludeCode("Python.h", initial=True))
def qualifying_scope(self):
return self.parent_module
@property
def context(self):
return self._context
def global_scope(self):
return self
def lookup(self, name, language_level=None):
entry = self.lookup_here(name)
if entry is not None:
return entry
if language_level is None:
language_level = self.context.language_level if self.context is not None else 3
return self.outer_scope.lookup(name, language_level=language_level)
def declare_tuple_type(self, pos, components):
components = tuple(components)
try:
ttype = self._cached_tuple_types[components]
except KeyError:
ttype = self._cached_tuple_types[components] = PyrexTypes.c_tuple_type(components)
cname = ttype.cname
entry = self.lookup_here(cname)
if not entry:
scope = StructOrUnionScope(cname)
for ix, component in enumerate(components):
scope.declare_var(name="f%s" % ix, type=component, pos=pos)
struct_entry = self.declare_struct_or_union(
cname + '_struct', 'struct', scope, typedef_flag=True, pos=pos, cname=cname)
self.type_entries.remove(struct_entry)
ttype.struct_entry = struct_entry
entry = self.declare_type(cname, ttype, pos, cname)
ttype.entry = entry
return entry
def declare_defaults_c_class(self, pos, components):
# returns an entry (for the c-class)
components = tuple(components)
try:
return self._cached_defaults_c_class_entries[components]
except KeyError:
pass
cname = self.next_id(Naming.defaults_struct_prefix)
cname = EncodedString(cname)
entry = self._cached_defaults_c_class_entries[components] = self.declare_c_class(
cname, pos, defining=True, implementing=True,
objstruct_cname=cname)
self.check_c_class(entry)
entry.type.is_final_type = True
scope = entry.type.scope
scope.is_internal = True
scope.is_defaults_class_scope = True
# zero pad the argument number so they can be sorted
num_zeros = len(str(len(components)))
build_argname = ("arg{:0>%dd}" % num_zeros).format
for n, type_ in enumerate(components):
arg_name = EncodedString(build_argname(n))
scope.declare_var(arg_name, type_, pos=None, is_cdef=True)
return entry
def declare_builtin(self, name, pos):
if name not in Code.KNOWN_PYTHON_BUILTINS \
and name not in Code.renamed_py2_builtins_map \
and name not in Code.uncachable_builtins:
if self.has_import_star:
entry = self.declare_var(name, py_object_type, pos)
return entry
else:
if Options.error_on_unknown_names:
error(pos, "undeclared name not builtin: %s" % name)
else:
warning(pos, "undeclared name not builtin: %s" % name, 2)
# unknown - assume it's builtin and look it up at runtime
entry = self.declare(name, None, py_object_type, pos, 'private')
entry.is_builtin = 1
return entry
if Options.cache_builtins:
for entry in self.cached_builtins:
if entry.name == name:
return entry
if name == 'globals' and not self.old_style_globals:
return self.outer_scope.lookup('__Pyx_Globals')
else:
entry = self.declare(None, None, py_object_type, pos, 'private')
if Options.cache_builtins and name not in Code.uncachable_builtins:
entry.is_builtin = 1
entry.is_const = 1 # cached
entry.name = name
entry.cname = Naming.builtin_prefix + name
self.cached_builtins.append(entry)
self.undeclared_cached_builtins.append(entry)
else:
entry.is_builtin = 1
entry.name = name
entry.qualified_name = self.builtin_scope().qualify_name(name)
return entry
def find_module(self, module_name, pos, relative_level=-1):
# Find a module in the import namespace, interpreting
# relative imports relative to this module's parent.
# Finds and parses the module's .pxd file if the module
# has not been referenced before.
is_relative_import = relative_level is not None and relative_level > 0
from_module = None
absolute_fallback = False
if relative_level is not None and relative_level > 0:
# explicit relative cimport
# error of going beyond top-level is handled in cimport node
from_module = self
top_level = 1 if self.is_package else 0
# * top_level == 1 when file is __init__.pyx, current package (from_module) is the current module
# i.e. dot in `from . import ...` points to the current package
# * top_level == 0 when file is regular module, current package (from_module) is parent module
# i.e. dot in `from . import ...` points to the package where module is placed
while relative_level > top_level and from_module:
from_module = from_module.parent_module
relative_level -= 1
elif relative_level != 0:
# -1 or None: try relative cimport first, then absolute
from_module = self.parent_module
absolute_fallback = True
module_scope = self.global_scope()
return module_scope.context.find_module(
module_name, from_module=from_module, pos=pos, absolute_fallback=absolute_fallback, relative_import=is_relative_import)
def find_submodule(self, name, as_package=False):
# Find and return scope for a submodule of this module,
# creating a new empty one if necessary. Doesn't parse .pxd.
if '.' in name:
name, submodule = name.split('.', 1)
else:
submodule = None
scope = self.lookup_submodule(name)
if not scope:
scope = ModuleScope(name, parent_module=self, context=self.context, is_package=True if submodule else as_package)
self.module_entries[name] = scope
if submodule:
scope = scope.find_submodule(submodule, as_package=as_package)
return scope
def lookup_submodule(self, name):
# Return scope for submodule of this module, or None.
if '.' in name:
name, submodule = name.split('.', 1)
else:
submodule = None
module = self.module_entries.get(name, None)
if submodule and module is not None:
module = module.lookup_submodule(submodule)
return module
def add_include_file(self, filename, verbatim_include=None, late=False):
"""
Add `filename` as include file. Add `verbatim_include` as
verbatim text in the C file.
Both `filename` and `verbatim_include` can be `None` or empty.
"""
inc = Code.IncludeCode(filename, verbatim_include, late=late)
self.process_include(inc)
def process_include(self, inc):
"""
Add `inc`, which is an instance of `IncludeCode`, to this
`ModuleScope`. This either adds a new element to the
`c_includes` dict or it updates an existing entry.
In detail: the values of the dict `self.c_includes` are
instances of `IncludeCode` containing the code to be put in the
generated C file. The keys of the dict are needed to ensure
uniqueness in two ways: if an include file is specified in
multiple "cdef extern" blocks, only one `#include` statement is
generated. Second, the same include might occur multiple times
if we find it through multiple "cimport" paths. So we use the
generated code (of the form `#include "header.h"`) as dict key.
If verbatim code does not belong to any include file (i.e. it
was put in a `cdef extern from *` block), then we use a unique
dict key: namely, the `sortkey()`.
One `IncludeCode` object can contain multiple pieces of C code:
one optional "main piece" for the include file and several other
pieces for the verbatim code. The `IncludeCode.dict_update`
method merges the pieces of two different `IncludeCode` objects
if needed.
"""
key = inc.mainpiece()
if key is None:
key = inc.sortkey()
inc.dict_update(self.c_includes, key)
inc = self.c_includes[key]
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
for inc in scope.c_includes.values():
self.process_include(inc)
self.cimported_modules.append(scope)
for m in scope.cimported_modules:
self.add_imported_module(m)
def add_imported_entry(self, name, entry, pos):
if entry.is_pyglobal:
# Allow cimports to follow imports.
entry.is_variable = True
if entry not in self.entries:
self.entries[name] = entry
else:
warning(pos, "'%s' redeclared " % name, 0)
def declare_module(self, name, scope, pos):
# Declare a cimported module. This is represented as a
# Python module-level variable entry with a module
# scope attached to it. Reports an error and returns
# None if previously declared as something else.
entry = self.lookup_here(name)
if entry:
if entry.is_pyglobal and entry.as_module is scope:
return entry # Already declared as the same module
if not (entry.is_pyglobal and not entry.as_module):
# SAGE -- I put this here so Pyrex
# cimport's work across directories.
# Currently it tries to multiply define
# every module appearing in an import list.
# It shouldn't be an error for a module
# name to appear again, and indeed the generated
# code compiles fine.
return entry
else:
entry = self.declare_var(name, py_object_type, pos)
entry.is_variable = 0
entry.as_module = scope
self.add_imported_module(scope)
return entry
def declare_var(self, name, type, pos,
cname=None, visibility='private',
api=False, in_pxd=False, is_cdef=False, pytyping_modifiers=None):
# Add an entry for a global variable. If it is a Python
# object type, and not declared with cdef, it will live
# in the module dictionary, otherwise it will be a C
# global variable.
if visibility not in ('private', 'public', 'extern'):
error(pos, "Module-level variable cannot be declared %s" % visibility)
self._reject_pytyping_modifiers(pos, pytyping_modifiers, ('typing.Optional',)) # let's allow at least this one
if not is_cdef:
if type is unspecified_type:
type = py_object_type
if not (type.is_pyobject and not type.is_extension_type):
raise InternalError(
"Non-cdef global variable is not a generic Python object")
if (is_cdef and visibility != "extern"
and self.directives['subinterpreters_compatible'] != "no"):
extra_warning = ""
pyobject_warning = ""
if type.is_pyobject:
extra_warning = "\nPython objects should not be shared between interpreters"
pyobject_warning = "Python "
warning(
pos,
f"Global cdef {pyobject_warning}variable used with subinterpreter support enabled.\n"
"This variable is not currently in the per-interpreter module state "
"but this will likely change in future releases." +
extra_warning,
2+(1 if extra_warning else 0))
if not cname:
defining = not in_pxd
if visibility == 'extern' or (visibility == 'public' and defining):
cname = name
else:
cname = self.mangle(Naming.var_prefix, name)
entry = self.lookup_here(name)
if entry and entry.defined_in_pxd:
#if visibility != 'private' and visibility != entry.visibility:
# warning(pos, "Variable '%s' previously declared as '%s'" % (name, entry.visibility), 1)
if not entry.type.same_as(type):
if visibility == 'extern' and entry.visibility == 'extern':
warning(pos, "Variable '%s' type does not match previous declaration" % name, 1)
entry.type = type
#else:
# error(pos, "Variable '%s' type does not match previous declaration" % name)
if entry.visibility != "private":
mangled_cname = self.mangle(Naming.var_prefix, name)
if entry.cname == mangled_cname:
cname = name
entry.cname = name
if not entry.is_implemented:
entry.is_implemented = True
return entry
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef, pytyping_modifiers=pytyping_modifiers)
if is_cdef:
entry.is_cglobal = 1
if entry.type.declaration_value:
entry.init = entry.type.declaration_value
self.var_entries.append(entry)
else:
entry.is_pyglobal = 1
if Options.cimport_from_pyx:
entry.used = 1
return entry
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
if not defining and 'inline' in modifiers:
# TODO(github/1736): Make this an error.
warning(pos, "Declarations should not be declared inline.", 1)
# Add an entry for a C function.
if not cname:
if visibility == 'extern' or (visibility == 'public' and defining):
cname = name
else:
cname = self.mangle(Naming.func_prefix, name)
if visibility == 'extern' and type.optional_arg_count:
error(pos, "Extern functions cannot have default arguments values.")
entry = self.lookup_here(name)
if entry and entry.defined_in_pxd:
if entry.visibility != "private":
mangled_cname = self.mangle(Naming.func_prefix, name)
if entry.cname == mangled_cname:
cname = name
entry.cname = cname
entry.func_cname = cname
entry = Scope.declare_cfunction(
self, name, type, pos,
cname=cname, visibility=visibility, api=api, in_pxd=in_pxd,
defining=defining, modifiers=modifiers, utility_code=utility_code,
overridable=overridable)
return entry
def declare_global(self, name, pos):
entry = self.lookup_here(name)
if not entry:
self.declare_var(name, py_object_type, pos)
def use_utility_code(self, new_code):
if new_code is not None:
self.utility_code_list.append(new_code)
def use_entry_utility_code(self, entry):
if entry is None:
return
if entry.utility_code:
self.utility_code_list.append(entry.utility_code)
if entry.utility_code_definition:
self.utility_code_list.append(entry.utility_code_definition)
for tp in PyrexTypes.get_all_subtypes(entry.type):
if hasattr(tp, "entry") and tp.entry is not entry:
self.use_entry_utility_code(tp.entry)
def declare_c_class(self, name, pos, defining=0, implementing=0,
module_name=None, base_type=None, objstruct_cname=None,
typeobj_cname=None, typeptr_cname=None, visibility='private',
typedef_flag=0, api=0, check_size=None,
buffer_defaults=None, shadow=0):
# If this is a non-extern typedef class, expose the typedef, but use
# the non-typedef struct internally to avoid needing forward
# declarations for anonymous structs.
if typedef_flag and visibility != 'extern':
if not (visibility == 'public' or api):
warning(pos, "ctypedef only valid for 'extern' , 'public', and 'api'", 2)
objtypedef_cname = objstruct_cname
typedef_flag = 0
else:
objtypedef_cname = None
#
# Look for previous declaration as a type
#
entry = self.lookup_here(name)
if entry and not shadow:
type = entry.type
if not (entry.is_type and type.is_extension_type):
entry = None # Will cause redeclaration and produce an error
else:
scope = type.scope
if typedef_flag and (not scope or scope.defined):
self.check_previous_typedef_flag(entry, typedef_flag, pos)
if (scope and scope.defined) or (base_type and type.base_type):
if base_type and base_type is not type.base_type:
error(pos, "Base type does not match previous declaration")
if base_type and not type.base_type:
type.base_type = base_type
#
# Make a new entry if needed
#
if not entry or shadow:
type = PyrexTypes.PyExtensionType(
name, typedef_flag, base_type, visibility == 'extern', check_size=check_size)
type.pos = pos
type.buffer_defaults = buffer_defaults
if objtypedef_cname is not None:
type.objtypedef_cname = objtypedef_cname
if visibility == 'extern':
type.module_name = module_name
else:
type.module_name = self.qualified_name
if typeptr_cname:
type.typeptr_cname = typeptr_cname
else:
type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
entry = self.declare_type(name, type, pos, visibility = visibility,
defining = 0, shadow = shadow)
entry.is_cclass = True
if objstruct_cname:
type.objstruct_cname = objstruct_cname
elif not entry.in_cinclude:
type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
else:
error(entry.pos,
"Object name required for 'public' or 'extern' C class")
self.attach_var_entry_to_c_class(entry)
self.c_class_entries.append(entry)
#
# Check for re-definition and create scope if needed
#
if not type.scope:
if defining or implementing:
scope = CClassScope(name = name, outer_scope = self,
visibility=visibility,
parent_type=type)
scope.directives = self.directives.copy()
if base_type and base_type.scope:
scope.declare_inherited_c_attributes(base_type.scope)
type.set_scope(scope)
self.type_entries.append(entry)
else:
if defining and type.scope.defined:
error(pos, "C class '%s' already defined" % name)
elif implementing and type.scope.implemented:
error(pos, "C class '%s' already implemented" % name)
#
# Fill in options, checking for compatibility with any previous declaration
#
if defining:
entry.defined_in_pxd = 1
if implementing: # So that filenames in runtime exceptions refer to
entry.pos = pos # the .pyx file and not the .pxd file
if visibility != 'private' and entry.visibility != visibility:
error(pos, "Class '%s' previously declared as '%s'"
% (name, entry.visibility))
if api:
entry.api = 1
if objstruct_cname:
if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
error(pos, "Object struct name differs from previous declaration")
type.objstruct_cname = objstruct_cname
if typeobj_cname:
if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
error(pos, "Type object name differs from previous declaration")
type.typeobj_cname = typeobj_cname
if self.directives.get('final'):
entry.type.is_final_type = True
collection_type = self.directives.get('collection_type')
if collection_type:
from .UtilityCode import NonManglingModuleScope
if not isinstance(self, NonManglingModuleScope):
# TODO - DW would like to make it public, but I'm making it internal-only
# for now to avoid adding new features without consensus
error(pos, "'collection_type' is not a public cython directive")
if collection_type == 'sequence':
entry.type.has_sequence_flag = True
# cdef classes are always exported, but we need to set it to
# distinguish between unused Cython utility code extension classes
entry.used = True
#
# Return new or existing entry
#
return entry
def allocate_vtable_names(self, entry):
# If extension type has a vtable, allocate vtable struct and
# slot names for it.
type = entry.type
if type.base_type and type.base_type.vtabslot_cname:
#print "...allocating vtabslot_cname because base type has one" ###
type.vtabslot_cname = "%s.%s" % (
Naming.obj_base_cname, type.base_type.vtabslot_cname)
elif type.scope and type.scope.cfunc_entries:
# one special case here: when inheriting from builtin
# types, the methods may also be built-in, in which
# case they won't need a vtable
entry_count = len(type.scope.cfunc_entries)
base_type = type.base_type
while base_type:
# FIXME: this will break if we ever get non-inherited C methods
if not base_type.scope or entry_count > len(base_type.scope.cfunc_entries):
break
if base_type.is_builtin_type:
# builtin base type defines all methods => no vtable needed
return
base_type = base_type.base_type
#print "...allocating vtabslot_cname because there are C methods" ###
type.vtabslot_cname = Naming.vtabslot_cname
if type.vtabslot_cname:
#print "...allocating other vtable related cnames" ###
type.vtabstruct_cname = self.mangle(Naming.vtabstruct_prefix, entry.name)
type.vtabptr_cname = self.mangle(Naming.vtabptr_prefix, entry.name)
def check_c_classes_pxd(self):
# Performs post-analysis checking and finishing up of extension types
# being implemented in this module. This is called only for the .pxd.
#
# Checks all extension types declared in this scope to
# make sure that:
#
# * The extension type is fully declared
#
# Also allocates a name for the vtable if needed.
#
for entry in self.c_class_entries:
# Check defined
if not entry.type.scope:
error(entry.pos, "C class '%s' is declared but not defined" % entry.name)
def check_c_class(self, entry):
type = entry.type
name = entry.name
visibility = entry.visibility
# Check defined
if not type.scope:
error(entry.pos, "C class '%s' is declared but not defined" % name)
# Generate typeobj_cname
if visibility != 'extern' and not type.typeobj_cname:
type.typeobj_cname = self.mangle(Naming.typeobj_prefix, name)
## Generate typeptr_cname
#type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
# Check C methods defined
if type.scope:
for method_entry in type.scope.cfunc_entries:
if not method_entry.is_inherited and not method_entry.func_cname:
error(method_entry.pos, "C method '%s' is declared but not defined" %
method_entry.name)
# Allocate vtable name if necessary
if type.vtabslot_cname:
#print "ModuleScope.check_c_classes: allocating vtable cname for", self ###
type.vtable_cname = self.mangle(Naming.vtable_prefix, entry.name)
def check_c_classes(self):
# Performs post-analysis checking and finishing up of extension types
# being implemented in this module. This is called only for the main
# .pyx file scope, not for cimported .pxd scopes.
#
# Checks all extension types declared in this scope to
# make sure that:
#
# * The extension type is implemented
# * All required object and type names have been specified or generated
# * All non-inherited C methods are implemented
#
# Also allocates a name for the vtable if needed.
#
debug_check_c_classes = 0
if debug_check_c_classes:
print("Scope.check_c_classes: checking scope " + self.qualified_name)
for entry in self.c_class_entries:
if debug_check_c_classes:
print("...entry %s %s" % (entry.name, entry))
print("......type = ", entry.type)
print("......visibility = ", entry.visibility)
self.check_c_class(entry)
def check_c_functions(self):
# Performs post-analysis checking making sure all
# defined c functions are actually implemented.
for name, entry in self.entries.items():
if entry.is_cfunction:
if (entry.defined_in_pxd
and entry.scope is self
and entry.visibility != 'extern'
and not entry.in_cinclude
and not entry.is_implemented):
error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)
def attach_var_entry_to_c_class(self, entry):
# The name of an extension class has to serve as both a type
# name and a variable name holding the type object. It is
# represented in the symbol table by a type entry with a
# variable entry attached to it. For the variable entry,
# we use a read-only C global variable whose name is an
# expression that refers to the type object.
from . import Builtin
var_entry = Entry(name = entry.name,
type = Builtin.type_type,
pos = entry.pos,
cname = entry.type.typeptr_cname)
var_entry.qualified_name = entry.qualified_name
var_entry.is_variable = 1
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
var_entry.is_cclass_var_entry = True
var_entry.scope = entry.scope
entry.as_variable = var_entry
def is_cpp(self):
return self.cpp
def infer_types(self):
from .TypeInference import PyObjectTypeInferer
PyObjectTypeInferer().infer_types(self)
|
ModuleScope
|
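Most of ModuleScope is Cython-specific bookkeeping, but the find_submodule / lookup_submodule pair above shows a reusable pattern: resolving a dotted module path one component at a time, creating intermediate package scopes only on the "find" path. A minimal sketch of that recursion follows, with plain dicts standing in for scope objects; the function names and dict representation are illustrative, not Cython's API.

def find_submodule(tree: dict, name: str) -> dict:
    # "find" creates missing package scopes on the way down, like ModuleScope.find_submodule.
    head, _, rest = name.partition(".")
    scope = tree.setdefault(head, {})
    return find_submodule(scope, rest) if rest else scope

def lookup_submodule(tree: dict, name: str):
    # "lookup" only reads, returning None for unknown names, like ModuleScope.lookup_submodule.
    head, _, rest = name.partition(".")
    scope = tree.get(head)
    if scope is not None and rest:
        return lookup_submodule(scope, rest)
    return scope

registry: dict = {}
find_submodule(registry, "pkg.sub.mod")          # builds pkg -> sub -> mod on demand
assert lookup_submodule(registry, "pkg.sub") == {"mod": {}}
assert lookup_submodule(registry, "missing") is None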
python
|
Lightning-AI__lightning
|
tests/tests_fabric/helpers/datasets.py
|
{
"start": 429,
"end": 704
}
|
class ____(IterableDataset):
def __init__(self, size: int, count: int) -> None:
self.count = count
self.size = size
def __iter__(self) -> Iterator[Tensor]:
for _ in range(self.count):
yield torch.randn(self.size)
|
RandomIterableDataset
|
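The helper above is a self-contained torch IterableDataset that yields count random vectors of length size. The sketch below shows how such a dataset is typically consumed through a DataLoader; the batch size is arbitrary and only for illustration.

from typing import Iterator

import torch
from torch import Tensor
from torch.utils.data import DataLoader, IterableDataset

class RandomIterableDataset(IterableDataset):
    def __init__(self, size: int, count: int) -> None:
        self.count = count
        self.size = size

    def __iter__(self) -> Iterator[Tensor]:
        for _ in range(self.count):
            yield torch.randn(self.size)

# 10 samples of length 4, batched lazily into two batches of 5.
loader = DataLoader(RandomIterableDataset(size=4, count=10), batch_size=5)
for batch in loader:
    print(batch.shape)  # torch.Size([5, 4])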
python
|
chroma-core__chroma
|
chromadb/utils/embedding_functions/chroma_langchain_embedding_function.py
|
{
"start": 755,
"end": 6158
}
|
class ____(EmbeddingFunction[Embeddable]):
"""
This class is used as bridge between langchain embedding functions and custom chroma embedding functions.
"""
def __init__(self, embedding_function: Any) -> None:
"""
Initialize the ChromaLangchainEmbeddingFunction
Args:
embedding_function: The embedding function implementing Embeddings from langchain_core.
"""
try:
import langchain_core.embeddings
LangchainEmbeddings = langchain_core.embeddings.Embeddings
except ImportError:
raise ValueError(
"The langchain_core python package is not installed. Please install it with `pip install langchain-core`"
)
if not isinstance(embedding_function, LangchainEmbeddings):
raise ValueError(
"The embedding_function must implement the Embeddings interface from langchain_core."
)
self.embedding_function = embedding_function
# Store the class name for serialization
self._embedding_function_class = embedding_function.__class__.__name__
def embed_documents(self, documents: Sequence[str]) -> List[List[float]]:
"""
Embed documents using the langchain embedding function.
Args:
documents: The documents to embed.
Returns:
The embeddings for the documents.
"""
return cast(
List[List[float]], self.embedding_function.embed_documents(list(documents))
)
def embed_query(self, query: str) -> List[float]:
"""
Embed a query using the langchain embedding function.
Args:
query: The query to embed.
Returns:
The embedding for the query.
"""
return cast(List[float], self.embedding_function.embed_query(query))
def embed_image(self, uris: List[str]) -> List[List[float]]:
"""
Embed images using the langchain embedding function.
Args:
uris: The URIs of the images to embed.
Returns:
The embeddings for the images.
"""
if hasattr(self.embedding_function, "embed_image"):
return cast(List[List[float]], self.embedding_function.embed_image(uris))
else:
raise ValueError(
"The provided embedding function does not support image embeddings."
)
def __call__(self, input: Union[Documents, Images]) -> Embeddings:
"""
Get the embeddings for a list of texts or images.
Args:
input: A list of texts or images to get embeddings for.
Images should be provided as a list of URIs passed through the langchain data loader
Returns:
The embeddings for the texts or images.
Example:
>>> from langchain_openai import OpenAIEmbeddings
>>> langchain_embedding = ChromaLangchainEmbeddingFunction(embedding_function=OpenAIEmbeddings(model="text-embedding-3-large"))
>>> texts = ["Hello, world!", "How are you?"]
>>> embeddings = langchain_embedding(texts)
"""
# Due to langchain quirks, the dataloader returns a tuple if the input is uris of images
if isinstance(input, tuple) and len(input) == 2 and input[0] == "images":
embeddings = self.embed_image(list(input[1]))
else:
# Cast to Sequence[str] to satisfy the type checker
embeddings = self.embed_documents(cast(Sequence[str], input))
# Convert to numpy arrays
return [np.array(embedding, dtype=np.float32) for embedding in embeddings]
@staticmethod
def name() -> str:
return "langchain"
@staticmethod
def build_from_config(
config: Dict[str, Any]
) -> "EmbeddingFunction[Union[Documents, Images]]":
# This is a placeholder implementation since we can't easily serialize and deserialize
# langchain embedding functions. Users will need to recreate the langchain embedding function
# and pass it to create_langchain_embedding.
raise NotImplementedError(
"Building a ChromaLangchainEmbeddingFunction from config is not supported. "
"Please recreate the langchain embedding function and pass it to create_langchain_embedding."
)
def get_config(self) -> Dict[str, Any]:
return {
"embedding_function_class": self._embedding_function_class,
"note": "This is a placeholder config. You will need to recreate the langchain embedding function.",
}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
raise NotImplementedError(
"Updating a ChromaLangchainEmbeddingFunction config is not supported. "
"Please recreate the langchain embedding function and pass it to create_langchain_embedding."
)
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "chroma_langchain")
|
ChromaLangchainEmbeddingFunction
|
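The bridge class above adapts any langchain Embeddings implementation to Chroma's EmbeddingFunction protocol. A hedged usage sketch, assuming langchain_core is installed and that its FakeEmbeddings test helper is available (an assumption; any real implementation such as OpenAIEmbeddings is used the same way), and reusing the class defined above:

# FakeEmbeddings is assumed to exist in langchain_core (a test helper returning
# random vectors of the requested size); swap in a real Embeddings implementation,
# e.g. OpenAIEmbeddings, for actual use.
from langchain_core.embeddings import FakeEmbeddings

ef = ChromaLangchainEmbeddingFunction(embedding_function=FakeEmbeddings(size=8))
doc_vectors = ef(["Hello, world!", "How are you?"])  # list of float32 numpy arrays
query_vector = ef.embed_query("Hello?")              # plain list[float]
print(len(doc_vectors), doc_vectors[0].shape, len(query_vector))  # 2 (8,) 8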
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 70126,
"end": 78208
}
|
class ____(sgqlc.types.Enum):
"""The possible item types found in a timeline.
Enumeration Choices:
* `ADDED_TO_MERGE_QUEUE_EVENT`: Represents an
'added_to_merge_queue' event on a given pull request.
* `ADDED_TO_PROJECT_EVENT`: Represents a 'added_to_project' event
on a given issue or pull request.
* `ASSIGNED_EVENT`: Represents an 'assigned' event on any
assignable object.
    * `AUTOMATIC_BASE_CHANGE_FAILED_EVENT`: Represents an
      'automatic_base_change_failed' event on a given pull request.
    * `AUTOMATIC_BASE_CHANGE_SUCCEEDED_EVENT`: Represents an
      'automatic_base_change_succeeded' event on a given pull request.
    * `AUTO_MERGE_DISABLED_EVENT`: Represents an 'auto_merge_disabled'
      event on a given pull request.
    * `AUTO_MERGE_ENABLED_EVENT`: Represents an 'auto_merge_enabled'
      event on a given pull request.
    * `AUTO_REBASE_ENABLED_EVENT`: Represents an 'auto_rebase_enabled'
      event on a given pull request.
    * `AUTO_SQUASH_ENABLED_EVENT`: Represents an 'auto_squash_enabled'
      event on a given pull request.
* `BASE_REF_CHANGED_EVENT`: Represents a 'base_ref_changed' event
on a given issue or pull request.
* `BASE_REF_DELETED_EVENT`: Represents a 'base_ref_deleted' event
on a given pull request.
* `BASE_REF_FORCE_PUSHED_EVENT`: Represents a
'base_ref_force_pushed' event on a given pull request.
* `CLOSED_EVENT`: Represents a 'closed' event on any `Closable`.
* `COMMENT_DELETED_EVENT`: Represents a 'comment_deleted' event on
a given issue or pull request.
* `CONNECTED_EVENT`: Represents a 'connected' event on a given
issue or pull request.
* `CONVERTED_NOTE_TO_ISSUE_EVENT`: Represents a
'converted_note_to_issue' event on a given issue or pull
request.
* `CONVERTED_TO_DISCUSSION_EVENT`: Represents a
'converted_to_discussion' event on a given issue.
* `CONVERT_TO_DRAFT_EVENT`: Represents a 'convert_to_draft' event
on a given pull request.
* `CROSS_REFERENCED_EVENT`: Represents a mention made by one issue
or pull request to another.
* `DEMILESTONED_EVENT`: Represents a 'demilestoned' event on a
given issue or pull request.
* `DEPLOYED_EVENT`: Represents a 'deployed' event on a given pull
request.
* `DEPLOYMENT_ENVIRONMENT_CHANGED_EVENT`: Represents a
'deployment_environment_changed' event on a given pull request.
* `DISCONNECTED_EVENT`: Represents a 'disconnected' event on a
given issue or pull request.
* `HEAD_REF_DELETED_EVENT`: Represents a 'head_ref_deleted' event
on a given pull request.
* `HEAD_REF_FORCE_PUSHED_EVENT`: Represents a
'head_ref_force_pushed' event on a given pull request.
* `HEAD_REF_RESTORED_EVENT`: Represents a 'head_ref_restored'
event on a given pull request.
* `ISSUE_COMMENT`: Represents a comment on an Issue.
* `LABELED_EVENT`: Represents a 'labeled' event on a given issue
or pull request.
* `LOCKED_EVENT`: Represents a 'locked' event on a given issue or
pull request.
* `MARKED_AS_DUPLICATE_EVENT`: Represents a 'marked_as_duplicate'
event on a given issue or pull request.
* `MENTIONED_EVENT`: Represents a 'mentioned' event on a given
issue or pull request.
* `MERGED_EVENT`: Represents a 'merged' event on a given pull
request.
* `MILESTONED_EVENT`: Represents a 'milestoned' event on a given
issue or pull request.
* `MOVED_COLUMNS_IN_PROJECT_EVENT`: Represents a
'moved_columns_in_project' event on a given issue or pull
request.
* `PINNED_EVENT`: Represents a 'pinned' event on a given issue or
pull request.
* `PULL_REQUEST_COMMIT`: Represents a Git commit part of a pull
request.
* `PULL_REQUEST_COMMIT_COMMENT_THREAD`: Represents a commit
comment thread part of a pull request.
* `PULL_REQUEST_REVIEW`: A review object for a given pull request.
* `PULL_REQUEST_REVIEW_THREAD`: A threaded list of comments for a
given pull request.
* `PULL_REQUEST_REVISION_MARKER`: Represents the latest point in
the pull request timeline for which the viewer has seen the pull
request's commits.
* `READY_FOR_REVIEW_EVENT`: Represents a 'ready_for_review' event
on a given pull request.
* `REFERENCED_EVENT`: Represents a 'referenced' event on a given
`ReferencedSubject`.
* `REMOVED_FROM_MERGE_QUEUE_EVENT`: Represents a
'removed_from_merge_queue' event on a given pull request.
* `REMOVED_FROM_PROJECT_EVENT`: Represents a
'removed_from_project' event on a given issue or pull request.
* `RENAMED_TITLE_EVENT`: Represents a 'renamed' event on a given
issue or pull request
* `REOPENED_EVENT`: Represents a 'reopened' event on any
`Closable`.
* `REVIEW_DISMISSED_EVENT`: Represents a 'review_dismissed' event
on a given issue or pull request.
    * `REVIEW_REQUESTED_EVENT`: Represents a 'review_requested' event
      on a given pull request.
    * `REVIEW_REQUEST_REMOVED_EVENT`: Represents a
      'review_request_removed' event on a given pull request.
* `SUBSCRIBED_EVENT`: Represents a 'subscribed' event on a given
`Subscribable`.
* `TRANSFERRED_EVENT`: Represents a 'transferred' event on a given
issue or pull request.
* `UNASSIGNED_EVENT`: Represents an 'unassigned' event on any
assignable object.
* `UNLABELED_EVENT`: Represents an 'unlabeled' event on a given
issue or pull request.
* `UNLOCKED_EVENT`: Represents an 'unlocked' event on a given
issue or pull request.
* `UNMARKED_AS_DUPLICATE_EVENT`: Represents an
'unmarked_as_duplicate' event on a given issue or pull request.
* `UNPINNED_EVENT`: Represents an 'unpinned' event on a given
issue or pull request.
* `UNSUBSCRIBED_EVENT`: Represents an 'unsubscribed' event on a
given `Subscribable`.
* `USER_BLOCKED_EVENT`: Represents a 'user_blocked' event on a
given user.
"""
__schema__ = github_schema
__choices__ = (
"ADDED_TO_MERGE_QUEUE_EVENT",
"ADDED_TO_PROJECT_EVENT",
"ASSIGNED_EVENT",
"AUTOMATIC_BASE_CHANGE_FAILED_EVENT",
"AUTOMATIC_BASE_CHANGE_SUCCEEDED_EVENT",
"AUTO_MERGE_DISABLED_EVENT",
"AUTO_MERGE_ENABLED_EVENT",
"AUTO_REBASE_ENABLED_EVENT",
"AUTO_SQUASH_ENABLED_EVENT",
"BASE_REF_CHANGED_EVENT",
"BASE_REF_DELETED_EVENT",
"BASE_REF_FORCE_PUSHED_EVENT",
"CLOSED_EVENT",
"COMMENT_DELETED_EVENT",
"CONNECTED_EVENT",
"CONVERTED_NOTE_TO_ISSUE_EVENT",
"CONVERTED_TO_DISCUSSION_EVENT",
"CONVERT_TO_DRAFT_EVENT",
"CROSS_REFERENCED_EVENT",
"DEMILESTONED_EVENT",
"DEPLOYED_EVENT",
"DEPLOYMENT_ENVIRONMENT_CHANGED_EVENT",
"DISCONNECTED_EVENT",
"HEAD_REF_DELETED_EVENT",
"HEAD_REF_FORCE_PUSHED_EVENT",
"HEAD_REF_RESTORED_EVENT",
"ISSUE_COMMENT",
"LABELED_EVENT",
"LOCKED_EVENT",
"MARKED_AS_DUPLICATE_EVENT",
"MENTIONED_EVENT",
"MERGED_EVENT",
"MILESTONED_EVENT",
"MOVED_COLUMNS_IN_PROJECT_EVENT",
"PINNED_EVENT",
"PULL_REQUEST_COMMIT",
"PULL_REQUEST_COMMIT_COMMENT_THREAD",
"PULL_REQUEST_REVIEW",
"PULL_REQUEST_REVIEW_THREAD",
"PULL_REQUEST_REVISION_MARKER",
"READY_FOR_REVIEW_EVENT",
"REFERENCED_EVENT",
"REMOVED_FROM_MERGE_QUEUE_EVENT",
"REMOVED_FROM_PROJECT_EVENT",
"RENAMED_TITLE_EVENT",
"REOPENED_EVENT",
"REVIEW_DISMISSED_EVENT",
"REVIEW_REQUESTED_EVENT",
"REVIEW_REQUEST_REMOVED_EVENT",
"SUBSCRIBED_EVENT",
"TRANSFERRED_EVENT",
"UNASSIGNED_EVENT",
"UNLABELED_EVENT",
"UNLOCKED_EVENT",
"UNMARKED_AS_DUPLICATE_EVENT",
"UNPINNED_EVENT",
"UNSUBSCRIBED_EVENT",
"USER_BLOCKED_EVENT",
)
|
PullRequestTimelineItemsItemType
|
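The enum above is generated client-side metadata: at runtime it mainly pins down the set of string values the GitHub GraphQL schema accepts for timeline item types. A small illustrative check against its __choices__ tuple (the helper name below is hypothetical):

# Illustrative only: __choices__ is the tuple defined above, so plain membership
# tests suffice when validating user-supplied timeline item type names.
def is_timeline_item_type(value: str) -> bool:
    return value in PullRequestTimelineItemsItemType.__choices__

assert is_timeline_item_type("ISSUE_COMMENT")
assert not is_timeline_item_type("NOT_A_TIMELINE_EVENT")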
python
|
getsentry__sentry
|
tests/sentry/incidents/test_logic.py
|
{
"start": 32995,
"end": 77728
}
|
class ____(TestCase, BaseIncidentsTest):
@cached_property
def alert_rule(self):
return self.create_alert_rule(name="hello")
def create_error_event(self, **kwargs):
two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
data = {
"event_id": "a" * 32,
"message": "super bad",
"timestamp": two_weeks_ago + timedelta(minutes=1),
"tags": {"sentry:user": self.user.email},
"exception": [{"value": "BadError"}],
}
data.update(**kwargs)
self.store_event(
data=data,
project_id=self.project.id,
)
def test(self) -> None:
name = "uh oh"
query = "level:warning"
aggregate = "count_unique(tags[sentry:user])"
time_window = 50
threshold_type = AlertRuleThresholdType.BELOW
threshold_period = 2
event_types = [SnubaQueryEventType.EventType.ERROR, SnubaQueryEventType.EventType.DEFAULT]
updated_projects = [self.project, self.create_project(fire_project_created=True)]
updated_rule = update_alert_rule(
self.alert_rule,
projects=updated_projects,
name=name,
query=query,
aggregate=aggregate,
time_window=time_window,
threshold_type=threshold_type,
threshold_period=threshold_period,
event_types=event_types,
)
assert self.alert_rule.id == updated_rule.id
assert self.alert_rule.name == name
updated_subscriptions = self.alert_rule.snuba_query.subscriptions.all()
assert {sub.project for sub in updated_subscriptions} == set(updated_projects)
for subscription in updated_subscriptions:
assert subscription.snuba_query.query == query
assert subscription.snuba_query.aggregate == aggregate
assert subscription.snuba_query.time_window == int(
timedelta(minutes=time_window).total_seconds()
)
assert self.alert_rule.snuba_query.query == query
assert self.alert_rule.snuba_query.aggregate == aggregate
assert self.alert_rule.snuba_query.time_window == time_window * 60
assert set(self.alert_rule.snuba_query.event_types) == set(event_types)
assert self.alert_rule.threshold_type == threshold_type.value
assert self.alert_rule.threshold_period == threshold_period
assert self.alert_rule.projects.all().count() == 2
assert self.alert_rule.projects.all()[0] == updated_projects[0]
def test_update_subscription(self) -> None:
old_subscription_id = self.alert_rule.snuba_query.subscriptions.get().subscription_id
with self.tasks():
update_alert_rule(self.alert_rule, query="some new query")
assert (
old_subscription_id != self.alert_rule.snuba_query.subscriptions.get().subscription_id
)
def test_snapshot_alert_rule_with_only_owner(self) -> None:
# Force the alert rule into an invalid state
AlertRule.objects.filter(id=self.alert_rule.id).update(user_id=None, team_id=None)
self.alert_rule.refresh_from_db()
snapshot_alert_rule(self.alert_rule, self.user)
def test_empty_query(self) -> None:
alert_rule = update_alert_rule(self.alert_rule, query="")
assert alert_rule.snuba_query.query == ""
def test_delete_projects(self) -> None:
# Testing delete projects from update
alert_rule = self.create_alert_rule(
projects=[self.project, self.create_project(fire_project_created=True)]
)
unaffected_alert_rule = self.create_alert_rule(
projects=[self.project, self.create_project(fire_project_created=True)]
)
with self.tasks():
update_alert_rule(alert_rule, projects=[self.project])
# NOTE: subscribing alert rule to projects creates a new subscription per project
subscriptions = alert_rule.snuba_query.subscriptions.all()
assert subscriptions.count() == 1
assert alert_rule.snuba_query.subscriptions.get().project == self.project
assert alert_rule.projects.all().count() == 1
assert unaffected_alert_rule.projects.all().count() == 2
def test_new_updated_deleted_projects(self) -> None:
alert_rule = self.create_alert_rule(
projects=[self.project, self.create_project(fire_project_created=True)]
)
query_update = "level:warning"
new_project = self.create_project(fire_project_created=True)
project_updates = [self.project, new_project]
with self.tasks():
update_alert_rule(alert_rule, projects=project_updates, query=query_update)
updated_subscriptions = alert_rule.snuba_query.subscriptions.all()
updated_projects = alert_rule.projects.all()
assert {sub.project for sub in updated_subscriptions} == set(project_updates)
assert set(updated_projects) == set(project_updates)
for sub in updated_subscriptions:
assert sub.snuba_query.query == query_update
def test_with_attached_incident(self) -> None:
# A snapshot of the pre-updated rule should be created, and the incidents should also be resolved.
with self.tasks():
incident = self.create_incident()
incident.update(alert_rule=self.alert_rule)
incident_2 = self.create_incident()
incident_2.update(alert_rule=self.alert_rule)
# Give the rule some actions and triggers so we can verify they've been snapshotted correctly.
trigger = create_alert_rule_trigger(self.alert_rule, "hello", 1000)
action = create_alert_rule_trigger_action(
trigger,
AlertRuleTriggerAction.Type.EMAIL,
AlertRuleTriggerAction.TargetType.USER,
target_identifier=str(self.user.id),
)
trigger_count = AlertRuleTrigger.objects.all().count()
action_count = AlertRuleTriggerAction.objects.all().count()
updated_projects = [self.project, self.create_project(fire_project_created=True)]
updated_rule = update_alert_rule(
self.alert_rule,
projects=updated_projects,
query="level:warning",
aggregate="count_unique(tags[sentry:user])",
time_window=50,
threshold_period=2,
threshold_type=AlertRuleThresholdType.BELOW,
resolve_threshold=1200,
)
incident.refresh_from_db()
incident_2.refresh_from_db()
rule_snapshot_query = AlertRule.objects_with_snapshots.filter(
name=self.alert_rule.name
).exclude(id=updated_rule.id)
assert rule_snapshot_query.count() == 1
rule_snapshot = rule_snapshot_query.get()
assert rule_snapshot.status == AlertRuleStatus.SNAPSHOT.value
# Rule snapshot should have properties of the rule before it was updated.
assert rule_snapshot.id != updated_rule.id
assert rule_snapshot.snuba_query_id != updated_rule.snuba_query_id
assert rule_snapshot.name == updated_rule.name
assert rule_snapshot.snuba_query.query == "level:error"
assert rule_snapshot.snuba_query.time_window == 600
assert rule_snapshot.threshold_type == AlertRuleThresholdType.ABOVE.value
assert rule_snapshot.resolve_threshold is None
assert rule_snapshot.snuba_query.aggregate == "count()"
assert rule_snapshot.threshold_period == 1
for incident in (incident, incident_2):
# Incidents should now be pointing to the rule snapshot.
assert incident.alert_rule.id == rule_snapshot.id
assert incident.alert_rule.name == updated_rule.name
# Incidents should be resolved
assert incident.status == IncidentStatus.CLOSED.value
# Action and trigger counts should double (from 1 to 2)
assert AlertRuleTrigger.objects.all().count() == trigger_count * 2
assert AlertRuleTriggerAction.objects.all().count() == action_count * 2
# Verify actions and triggers have the same properties...and are not the same actions & triggers as the original rule.
assert AlertRuleTrigger.objects.filter(alert_rule=rule_snapshot).exists()
trigger_snapshot = AlertRuleTrigger.objects.get(alert_rule=rule_snapshot)
assert trigger_snapshot.id != trigger.id
assert trigger_snapshot.label == trigger.label
assert trigger_snapshot.alert_threshold == trigger.alert_threshold
assert AlertRuleTriggerAction.objects.filter(
alert_rule_trigger=trigger_snapshot
).exists()
action_snapshot = AlertRuleTriggerAction.objects.get(
alert_rule_trigger=trigger_snapshot
)
assert action_snapshot.id != action.id
assert action_snapshot.type == action.type
assert action_snapshot.target_type == action.target_type
assert action_snapshot.target_identifier == action.target_identifier
assert action_snapshot.target_display == action.target_display
def test_alert_rule_owner(self) -> None:
alert_rule = create_alert_rule(
self.organization,
[self.project],
"alert rule 1",
"level:error",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
owner=Actor.from_identifier(self.user.id),
)
assert alert_rule.user_id == self.user.id
assert alert_rule.team_id is None
update_alert_rule(
alert_rule=alert_rule,
owner=Actor.from_identifier(f"team:{self.team.id}"),
)
assert alert_rule.team_id == self.team.id
assert alert_rule.user_id is None
# Ignore "unreachable" because Mypy sees the `user_id` field declaration on
# the AlertRule model class and assumes that it's always non-null.
update_alert_rule( # type: ignore[unreachable]
alert_rule=alert_rule,
owner=Actor.from_identifier(f"user:{self.user.id}"),
)
assert alert_rule.user_id == self.user.id
assert alert_rule.team_id is None
update_alert_rule(
alert_rule=alert_rule,
owner=Actor.from_identifier(self.user.id),
)
assert alert_rule.user_id == self.user.id
assert alert_rule.team_id is None
update_alert_rule(
alert_rule=alert_rule,
name="not updating owner",
)
assert alert_rule.user_id == self.user.id
assert alert_rule.team_id is None
update_alert_rule(
alert_rule=alert_rule,
owner=None,
)
assert alert_rule.user_id is None
assert alert_rule.team_id is None
def test_comparison_delta(self) -> None:
comparison_delta = 60
update_alert_rule(self.alert_rule, comparison_delta=comparison_delta)
assert self.alert_rule.comparison_delta == comparison_delta * 60
assert (
self.alert_rule.snuba_query.resolution
== DEFAULT_CMP_ALERT_RULE_RESOLUTION_MULTIPLIER * 60
)
# Should be no change if we don't specify `comparison_delta` for update at all.
update_alert_rule(self.alert_rule)
assert self.alert_rule.comparison_delta == comparison_delta * 60
assert (
self.alert_rule.snuba_query.resolution
== DEFAULT_CMP_ALERT_RULE_RESOLUTION_MULTIPLIER * 60
)
# Should change if we explicitly set it to None.
update_alert_rule(self.alert_rule, comparison_delta=None)
assert self.alert_rule.comparison_delta is None
assert self.alert_rule.snuba_query.resolution == DEFAULT_ALERT_RULE_RESOLUTION * 60
def test_performance_metric_alert(self) -> None:
alert_rule = create_alert_rule(
self.organization,
[self.project],
"performance alert",
"",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
query_type=SnubaQuery.Type.ERROR,
dataset=Dataset.Events,
)
alert_rule = update_alert_rule(
alert_rule,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.PerformanceMetrics,
)
assert alert_rule.snuba_query.type == SnubaQuery.Type.PERFORMANCE.value
assert alert_rule.snuba_query.dataset == Dataset.PerformanceMetrics.value
@patch("sentry.incidents.logic.schedule_update_project_config")
def test_on_demand_metric_alert(self, mocked_schedule_update_project_config: MagicMock) -> None:
alert_rule = create_alert_rule(
self.organization,
[self.project],
"custom metric alert",
"",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.Metrics,
)
mocked_schedule_update_project_config.assert_called_with(alert_rule, [self.project])
alert_rule = update_alert_rule(
alert_rule, name="updated alert", query="transaction.duration:>=100"
)
mocked_schedule_update_project_config.assert_called_with(alert_rule, None)
def test_update_alert_load_shedding_on_window(self) -> None:
time_window = 1440
alert_rule = create_alert_rule(
self.organization,
[self.project],
"custom metric alert",
"transaction.duration:>=1000",
"count()",
time_window,
AlertRuleThresholdType.ABOVE,
1440,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.Metrics,
)
assert (
alert_rule.snuba_query.resolution
== DEFAULT_ALERT_RULE_WINDOW_TO_RESOLUTION[time_window] * 60
)
time_window = 90
updated_alert_rule = update_alert_rule(alert_rule, time_window=time_window)
assert (
updated_alert_rule.snuba_query.resolution
== DEFAULT_ALERT_RULE_WINDOW_TO_RESOLUTION[time_window] * 60
)
def test_update_alert_load_shedding_on_window_with_comparison(self) -> None:
time_window = 1440
comparison_delta = 60
alert_rule = create_alert_rule(
self.organization,
[self.project],
"custom metric alert",
"transaction.duration:>=1000",
"count()",
time_window,
AlertRuleThresholdType.ABOVE,
1440,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.Metrics,
comparison_delta=comparison_delta,
detection_type=AlertRuleDetectionType.PERCENT,
)
assert (
alert_rule.snuba_query.resolution
== DEFAULT_ALERT_RULE_WINDOW_TO_RESOLUTION[time_window]
* DEFAULT_CMP_ALERT_RULE_RESOLUTION_MULTIPLIER
* 60
)
time_window = 90
updated_alert_rule = update_alert_rule(alert_rule, time_window=time_window)
assert (
updated_alert_rule.snuba_query.resolution
== DEFAULT_ALERT_RULE_WINDOW_TO_RESOLUTION[time_window]
* DEFAULT_CMP_ALERT_RULE_RESOLUTION_MULTIPLIER
* 60
)
def test_update_alert_load_shedding_on_comparison(self) -> None:
time_window = 1440
comparison_delta = 60
alert_rule = create_alert_rule(
self.organization,
[self.project],
"custom metric alert",
"transaction.duration:>=1000",
"count()",
time_window,
AlertRuleThresholdType.ABOVE,
1440,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.Metrics,
comparison_delta=comparison_delta,
detection_type=AlertRuleDetectionType.PERCENT,
)
assert alert_rule.snuba_query.resolution == 1800
updated_alert_rule = update_alert_rule(alert_rule, comparison_delta=90)
assert (
updated_alert_rule.snuba_query.resolution
== DEFAULT_ALERT_RULE_WINDOW_TO_RESOLUTION[time_window]
* DEFAULT_CMP_ALERT_RULE_RESOLUTION_MULTIPLIER
* 60
)
def test_update_alert_load_shedding_on_comparison_and_window(self) -> None:
time_window = 1440
comparison_delta = 60
alert_rule = create_alert_rule(
self.organization,
[self.project],
"custom metric alert",
"transaction.duration:>=1000",
"count()",
time_window,
AlertRuleThresholdType.ABOVE,
1440,
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.Metrics,
comparison_delta=comparison_delta,
detection_type=AlertRuleDetectionType.PERCENT,
)
assert alert_rule.snuba_query.resolution == 1800
time_window = 30
updated_alert_rule = update_alert_rule(
alert_rule, time_window=time_window, comparison_delta=90
)
assert (
updated_alert_rule.snuba_query.resolution
== DEFAULT_ALERT_RULE_WINDOW_TO_RESOLUTION[time_window]
* DEFAULT_CMP_ALERT_RULE_RESOLUTION_MULTIPLIER
* 60
)
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
@patch(
"sentry.seer.anomaly_detection.delete_rule.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_detection_type(
self, mock_seer_delete_request: MagicMock, mock_seer_request: MagicMock
) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
mock_seer_delete_request.return_value = HTTPResponse(
orjson.dumps(seer_return_value), status=200
)
comparison_delta = 60
# test percent to dynamic
rule = self.create_alert_rule(
comparison_delta=comparison_delta,
detection_type=AlertRuleDetectionType.PERCENT,
)
updated_rule = update_alert_rule(
rule,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=30,
)
assert updated_rule.comparison_delta is None
assert updated_rule.sensitivity == AlertRuleSensitivity.HIGH
assert updated_rule.seasonality == AlertRuleSeasonality.AUTO
assert updated_rule.detection_type == AlertRuleDetectionType.DYNAMIC
# test dynamic to percent
rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=15,
)
updated_rule = update_alert_rule(
rule, comparison_delta=comparison_delta, detection_type=AlertRuleDetectionType.PERCENT
)
assert updated_rule.comparison_delta == comparison_delta * 60
assert updated_rule.sensitivity is None
assert updated_rule.seasonality is None
assert updated_rule.detection_type == AlertRuleDetectionType.PERCENT
# test static to percent
rule = self.create_alert_rule()
updated_rule = update_alert_rule(
rule, comparison_delta=comparison_delta, detection_type=AlertRuleDetectionType.PERCENT
)
assert updated_rule.comparison_delta == comparison_delta * 60
assert updated_rule.detection_type == AlertRuleDetectionType.PERCENT
# test static to dynamic
rule = self.create_alert_rule()
updated_rule = update_alert_rule(
rule,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=30,
)
assert updated_rule.sensitivity == AlertRuleSensitivity.HIGH
assert updated_rule.seasonality == AlertRuleSeasonality.AUTO
assert updated_rule.detection_type == AlertRuleDetectionType.DYNAMIC
# test percent to static
rule = self.create_alert_rule(
comparison_delta=comparison_delta,
detection_type=AlertRuleDetectionType.PERCENT,
)
updated_rule = update_alert_rule(rule, detection_type=AlertRuleDetectionType.STATIC)
assert updated_rule.comparison_delta is None
assert updated_rule.sensitivity is None
assert updated_rule.seasonality is None
assert updated_rule.detection_type == AlertRuleDetectionType.STATIC
# test dynamic to static
rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=15,
)
updated_rule = update_alert_rule(rule, detection_type=AlertRuleDetectionType.STATIC)
assert updated_rule.comparison_delta is None
assert updated_rule.sensitivity is None
assert updated_rule.seasonality is None
assert updated_rule.detection_type == AlertRuleDetectionType.STATIC
# test dynamic to dynamic
rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=15,
)
updated_rule = update_alert_rule(
rule, detection_type=AlertRuleDetectionType.DYNAMIC, time_window=30
)
assert updated_rule.detection_type == AlertRuleDetectionType.DYNAMIC
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_infer_detection_type(self, mock_seer_request: MagicMock) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
# static to static
rule = self.create_alert_rule()
updated_rule = update_alert_rule(rule, time_window=15)
assert updated_rule.detection_type == AlertRuleDetectionType.STATIC
# static to percent
rule = self.create_alert_rule()
updated_rule = update_alert_rule(rule, comparison_delta=60)
assert updated_rule.detection_type == AlertRuleDetectionType.PERCENT
# percent to percent
rule = self.create_alert_rule(
comparison_delta=60, detection_type=AlertRuleDetectionType.PERCENT
)
updated_rule = update_alert_rule(rule, time_window=15)
assert updated_rule.detection_type == AlertRuleDetectionType.PERCENT
# percent to static
rule = self.create_alert_rule(
comparison_delta=60, detection_type=AlertRuleDetectionType.PERCENT
)
updated_rule = update_alert_rule(rule, comparison_delta=None)
assert updated_rule.detection_type == AlertRuleDetectionType.STATIC
# dynamic to percent
rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
time_window=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
updated_rule = update_alert_rule(
rule, comparison_delta=60, sensitivity=None, seasonality=None
)
assert updated_rule.detection_type == AlertRuleDetectionType.PERCENT
# dynamic to static
rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
time_window=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
updated_rule = update_alert_rule(
rule, comparison_delta=None, sensitivity=None, seasonality=None
)
assert updated_rule.detection_type == AlertRuleDetectionType.STATIC
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_dynamic_alerts(self, mock_seer_request: MagicMock) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
dynamic_rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
time_window=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
snuba_query = SnubaQuery.objects.get(id=dynamic_rule.snuba_query_id)
assert dynamic_rule.snuba_query.resolution == 60 * 60
assert mock_seer_request.call_count == 1
mock_seer_request.reset_mock()
# update time_window
update_alert_rule(
dynamic_rule,
time_window=30,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
snuba_query.refresh_from_db()
assert snuba_query.resolution == 30 * 60
assert mock_seer_request.call_count == 0
mock_seer_request.reset_mock()
# update name
update_alert_rule(dynamic_rule, name="everything is broken")
dynamic_rule.refresh_from_db()
assert dynamic_rule.name == "everything is broken"
assert mock_seer_request.call_count == 0
mock_seer_request.reset_mock()
# update query
update_alert_rule(
dynamic_rule,
query="message:*post_process*",
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
snuba_query.refresh_from_db()
assert snuba_query.query == "message:*post_process*"
mock_seer_request.reset_mock()
# update aggregate
update_alert_rule(
dynamic_rule,
aggregate="count_unique(user)",
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
snuba_query.refresh_from_db()
assert snuba_query.aggregate == "count_unique(user)"
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_dynamic_alert_static_to_dynamic(self, mock_seer_request: MagicMock) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
static_rule = self.create_alert_rule(time_window=30)
update_alert_rule(
static_rule,
time_window=30,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_dynamic_alert_percent_to_dynamic(self, mock_seer_request: MagicMock) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
percent_rule = self.create_alert_rule(
comparison_delta=60, time_window=30, detection_type=AlertRuleDetectionType.PERCENT
)
update_alert_rule(
percent_rule,
time_window=30,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_alert_rule_static_to_dynamic_enough_data(
self, mock_seer_request: MagicMock
) -> None:
"""
Assert that the status is PENDING if enough data exists.
"""
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
self.create_event(two_weeks_ago + timedelta(minutes=1))
self.create_event(two_weeks_ago + timedelta(days=10))
alert_rule = self.create_alert_rule(
time_window=30, detection_type=AlertRuleDetectionType.STATIC
)
update_alert_rule(
alert_rule,
time_window=30,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
assert alert_rule.status == AlertRuleStatus.PENDING.value
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_dynamic_alert_not_enough_to_pending(self, mock_seer_request: MagicMock) -> None:
"""
Update a dynamic rule's aggregate so the rule's status changes from not enough data to enough/pending
"""
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
dynamic_rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
time_window=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
assert dynamic_rule.status == AlertRuleStatus.NOT_ENOUGH_DATA.value
mock_seer_request.reset_mock()
two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
self.create_error_event(timestamp=(two_weeks_ago + timedelta(minutes=1)).isoformat())
self.create_error_event(
timestamp=(two_weeks_ago + timedelta(days=10)).isoformat()
) # 4 days ago
# update aggregate
update_alert_rule(
dynamic_rule,
aggregate="count_unique(user)",
time_window=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
assert dynamic_rule.status == AlertRuleStatus.PENDING.value
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_dynamic_alert_pending_to_not_enough(self, mock_seer_request: MagicMock) -> None:
"""
Update a dynamic rule's aggregate so the rule's status changes from enough/pending to not enough data
"""
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
self.create_error_event(timestamp=(two_weeks_ago + timedelta(minutes=1)).isoformat())
self.create_error_event(
timestamp=(two_weeks_ago + timedelta(days=10)).isoformat()
) # 4 days ago
dynamic_rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
time_window=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
assert dynamic_rule.status == AlertRuleStatus.PENDING.value
mock_seer_request.reset_mock()
# update aggregate
update_alert_rule(
dynamic_rule,
aggregate="p95(measurements.fid)", # first input delay data we don't have stored
dataset=Dataset.Transactions,
event_types=[SnubaQueryEventType.EventType.TRANSACTION],
query="",
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
assert dynamic_rule.status == AlertRuleStatus.NOT_ENOUGH_DATA.value
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_alert_rule_static_to_dynamic_not_enough_data(
self, mock_seer_request: MagicMock
) -> None:
"""
Assert that the status is NOT_ENOUGH_DATA if we don't have 7 days of data.
"""
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
two_days_ago = before_now(days=2).replace(hour=10, minute=0, second=0, microsecond=0)
self.create_event(two_days_ago + timedelta(minutes=1))
self.create_event(two_days_ago + timedelta(days=1))
alert_rule = self.create_alert_rule(
time_window=30, detection_type=AlertRuleDetectionType.STATIC
)
update_alert_rule(
alert_rule,
time_window=30,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
assert alert_rule.status == AlertRuleStatus.NOT_ENOUGH_DATA.value
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_alert_rule_dynamic_to_static_status(self, mock_seer_request: MagicMock) -> None:
"""
Assert that the alert rule status changes to PENDING if we switch from a dynamic alert to another type of alert.
"""
# just setting up an alert
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
two_days_ago = before_now(days=2).replace(hour=10, minute=0, second=0, microsecond=0)
self.create_event(two_days_ago + timedelta(minutes=1))
self.create_event(two_days_ago + timedelta(days=1))
alert_rule = self.create_alert_rule()
update_alert_rule(
alert_rule,
time_window=30,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
assert alert_rule.status == AlertRuleStatus.NOT_ENOUGH_DATA.value
# okay, here's the test :)
update_alert_rule(
alert_rule,
sensitivity=None,
seasonality=None,
detection_type=AlertRuleDetectionType.STATIC,
)
assert alert_rule.status == AlertRuleStatus.PENDING.value
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
@patch("sentry.seer.anomaly_detection.store_data.logger")
def test_update_alert_rule_anomaly_detection_seer_timeout_max_retry(
self, mock_logger, mock_seer_request
):
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
dynamic_rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
time_window=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 1
mock_seer_request.reset_mock()
mock_seer_request.side_effect = TimeoutError
with pytest.raises(TimeoutError):
# attempt to update query
update_alert_rule(
dynamic_rule,
time_window=30,
query="message:*post_process*",
detection_type=AlertRuleDetectionType.DYNAMIC,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
)
assert mock_logger.warning.call_count == 1
assert mock_seer_request.call_count == 1
mock_seer_request.reset_mock()
mock_logger.reset_mock()
mock_seer_request.side_effect = MaxRetryError(
seer_anomaly_detection_connection_pool, SEER_ANOMALY_DETECTION_STORE_DATA_URL
)
with pytest.raises(TimeoutError):
# attempt to update query
update_alert_rule(
dynamic_rule,
time_window=30,
query="message:*post_process*",
detection_type=AlertRuleDetectionType.DYNAMIC,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
)
assert mock_logger.warning.call_count == 1
assert mock_seer_request.call_count == 1
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
@patch("sentry.seer.anomaly_detection.store_data.logger")
def test_update_alert_rule_static_to_anomaly_detection_seer_timeout(
self, mock_logger, mock_seer_request
):
mock_seer_request.side_effect = MaxRetryError(
seer_anomaly_detection_connection_pool, SEER_ANOMALY_DETECTION_STORE_DATA_URL
)
static_rule = self.create_alert_rule(time_window=30)
with pytest.raises(TimeoutError):
update_alert_rule(
static_rule,
time_window=30,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
static_rule.refresh_from_db()
assert static_rule.detection_type == AlertRuleDetectionType.STATIC
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.delete_rule.seer_anomaly_detection_connection_pool.urlopen"
)
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_alert_rule_dynamic_to_static_delete_call(
self, mock_store_request, mock_delete_request
):
seer_return_value = {"success": True}
mock_store_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
mock_delete_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
alert_rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
time_window=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
update_alert_rule(alert_rule, detection_type=AlertRuleDetectionType.STATIC)
assert mock_delete_request.call_count == 1
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_alert_rule_anomaly_detection_no_feature(
self, mock_seer_request: MagicMock
) -> None:
static_rule = self.create_alert_rule(time_window=30)
with pytest.raises(ResourceDoesNotExist):
update_alert_rule(
static_rule,
time_window=30,
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
)
assert mock_seer_request.call_count == 0
assert static_rule.detection_type == AlertRuleDetectionType.STATIC
@with_feature("organizations:anomaly-detection-alerts")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_update_invalid_time_window(self, mock_seer_request: MagicMock) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
rule = self.create_alert_rule(
sensitivity=AlertRuleSensitivity.HIGH,
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=15,
)
with pytest.raises(ValidationError):
update_alert_rule(rule, detection_type=AlertRuleDetectionType.DYNAMIC, time_window=300)
def test_snapshot_alert_rule_with_event_types(self) -> None:
# Create alert rule with event types
alert_rule = create_alert_rule(
self.organization,
[self.project],
"test alert rule",
"severity:error",
"count()",
1,
AlertRuleThresholdType.ABOVE,
1,
event_types=[SnubaQueryEventType.EventType.TRACE_ITEM_LOG],
query_type=SnubaQuery.Type.PERFORMANCE,
dataset=Dataset.EventsAnalyticsPlatform,
)
# Create incident to trigger snapshot
incident = self.create_incident()
incident.update(alert_rule=alert_rule)
# Verify original event types exist
original_event_types = SnubaQueryEventType.objects.filter(
snuba_query=alert_rule.snuba_query
)
assert [snuba_event_type.type for snuba_event_type in original_event_types] == [
SnubaQueryEventType.EventType.TRACE_ITEM_LOG.value
]
# Update alert rule to trigger snapshot
with self.tasks():
updated_rule = update_alert_rule(
alert_rule,
query="level:warning",
event_types=[SnubaQueryEventType.EventType.TRACE_ITEM_SPAN],
)
# Find the snapshot
rule_snapshot = (
AlertRule.objects_with_snapshots.filter(name=alert_rule.name)
.exclude(id=updated_rule.id)
.get()
)
# Verify snapshot has its own SnubaQuery
assert rule_snapshot.snuba_query_id != updated_rule.snuba_query_id
# Verify snapshot has the original event types
snapshot_event_types = SnubaQueryEventType.objects.filter(
snuba_query=rule_snapshot.snuba_query
)
assert [snuba_event_type.type for snuba_event_type in snapshot_event_types] == [
SnubaQueryEventType.EventType.TRACE_ITEM_LOG.value
]
# Verify event types are different objects but have same values
original_types = {snuba_event_type.type for snuba_event_type in original_event_types}
snapshot_types = {snuba_event_type.type for snuba_event_type in snapshot_event_types}
assert original_types == snapshot_types
# Verify updated rule has new event types
updated_event_types = SnubaQueryEventType.objects.filter(
snuba_query=updated_rule.snuba_query
)
assert [snuba_event_type.type for snuba_event_type in updated_event_types] == [
SnubaQueryEventType.EventType.TRACE_ITEM_SPAN.value
]
|
UpdateAlertRuleTest
|
python
|
huggingface__transformers
|
tests/test_tokenization_common.py
|
{
"start": 127070,
"end": 127466
}
|
class ____(TokenizersBackendTesterMixin, unittest.TestCase):
"""
A single test class that runs all tokenizers-backend tests once.
Uses BertTokenizer as a representative tokenizer.
"""
tokenizer_class = BertTokenizer
rust_tokenizer_class = BertTokenizerFast
from_pretrained_id = "google-bert/bert-base-uncased"
from_pretrained_kwargs = {}
|
TokenizersBackendCommonTest
|
python
|
numpy__numpy
|
numpy/lib/tests/test__datasource.py
|
{
"start": 1898,
"end": 4115
}
|
class ____:
def test_ValidHTTP(self, tmp_path):
ds = datasource.DataSource(tmp_path)
fh = ds.open(valid_httpurl())
assert_(fh)
fh.close()
def test_InvalidHTTP(self, tmp_path):
ds = datasource.DataSource(tmp_path)
url = invalid_httpurl()
assert_raises(OSError, ds.open, url)
try:
ds.open(url)
except OSError as e:
# Regression test for bug fixed in r4342.
assert_(e.errno is None)
def test_InvalidHTTPCacheURLError(self, tmp_path):
ds = datasource.DataSource(tmp_path)
assert_raises(URLError, ds._cache, invalid_httpurl())
def test_ValidFile(self, tmp_path):
ds = datasource.DataSource(tmp_path)
local_file = valid_textfile(tmp_path)
fh = ds.open(local_file)
assert_(fh)
fh.close()
def test_InvalidFile(self, tmp_path):
ds = datasource.DataSource(tmp_path)
invalid_file = invalid_textfile(tmp_path)
assert_raises(OSError, ds.open, invalid_file)
def test_ValidGzipFile(self, tmp_path):
try:
import gzip
except ImportError:
# We don't have the gzip capabilities to test.
pytest.skip()
# Test datasource's internal file_opener for Gzip files.
ds = datasource.DataSource(tmp_path)
filepath = os.path.join(tmp_path, 'foobar.txt.gz')
fp = gzip.open(filepath, 'w')
fp.write(magic_line)
fp.close()
fp = ds.open(filepath)
result = fp.readline()
fp.close()
assert_equal(magic_line, result)
def test_ValidBz2File(self, tmp_path):
try:
import bz2
except ImportError:
# We don't have the bz2 capabilities to test.
pytest.skip()
# Test datasource's internal file_opener for BZip2 files.
ds = datasource.DataSource(tmp_path)
filepath = os.path.join(tmp_path, 'foobar.txt.bz2')
fp = bz2.BZ2File(filepath, 'w')
fp.write(magic_line)
fp.close()
fp = ds.open(filepath)
result = fp.readline()
fp.close()
assert_equal(magic_line, result)
|
TestDataSourceOpen
|
python
|
ApeWorX__ape
|
src/ape/api/config.py
|
{
"start": 879,
"end": 1760
}
|
class ____(str, Enum):
"""
A configuration `Enum <https://docs.python.org/3/library/enum.html>`__ type.
Use this to limit the values of a config item, such as colors ``"RED"``, ``"BLUE"``,
``"GREEN"``, rather than any arbitrary ``str``.
Usage example::
class MyEnum(ConfigEnum):
FOO = "FOO"
BAR = "BAR"
class MyConfig(PluginConfig):
my_enum: MyEnum
model = MyConfig(my_enum="FOO")
"""
def _find_config_yaml_files(base_path: Path) -> list[Path]:
"""
    Find all ape config files in the given path.
"""
found: list[Path] = []
if (base_path / "ape-config.yaml").is_file():
found.append(base_path / "ape-config.yaml")
if (base_path / "ape-config.yml").is_file():
found.append(base_path / "ape-config.yml")
return found
|
ConfigEnum
|
python
|
doocs__leetcode
|
solution/1000-1099/1049.Last Stone Weight II/Solution2.py
|
{
"start": 0,
"end": 308
}
|
class ____:
def lastStoneWeightII(self, stones: List[int]) -> int:
s = sum(stones)
m, n = len(stones), s >> 1
dp = [0] * (n + 1)
for v in stones:
for j in range(n, v - 1, -1):
dp[j] = max(dp[j], dp[j - v] + v)
return s - dp[-1] * 2
|
Solution
|
python
|
scikit-learn__scikit-learn
|
sklearn/linear_model/_omp.py
|
{
"start": 30243,
"end": 38358
}
|
class ____(RegressorMixin, LinearModel):
"""Cross-validated Orthogonal Matching Pursuit model (OMP).
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
copy : bool, default=True
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
max_iter : int, default=None
Maximum numbers of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : int, cross-validation generator or iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool or int, default=False
Sets the verbosity amount.
Attributes
----------
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function.
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
problems using only the Gram matrix X.T * X and the product X.T * y.
lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
Lars : Least Angle Regression model a.k.a. LAR.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP).
LarsCV : Cross-validated Least Angle Regression model.
LassoLarsCV : Cross-validated Lasso model fit with Least Angle Regression.
sklearn.decomposition.sparse_encode : Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
Notes
-----
In `fit`, once the optimal number of non-zero coefficients is found through
cross-validation, the model is fit again using the entire training set.
Examples
--------
>>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=100, n_informative=10,
... noise=4, random_state=0)
>>> reg = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9991
>>> reg.n_nonzero_coefs_
np.int64(10)
>>> reg.predict(X[:1,])
array([-78.3854])
"""
_parameter_constraints: dict = {
"copy": ["boolean"],
"fit_intercept": ["boolean"],
"max_iter": [Interval(Integral, 0, None, closed="left"), None],
"cv": ["cv_object"],
"n_jobs": [Integral, None],
"verbose": ["verbose"],
}
def __init__(
self,
*,
copy=True,
fit_intercept=True,
max_iter=None,
cv=None,
n_jobs=None,
verbose=False,
):
self.copy = copy
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, **fit_params):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
**fit_params : dict
Parameters to pass to the underlying splitter.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns an instance of self.
"""
_raise_for_params(fit_params, self, "fit")
X, y = validate_data(self, X, y, y_numeric=True, ensure_min_features=2)
X = as_float_array(X, copy=False, ensure_all_finite=False)
cv = check_cv(self.cv, classifier=False)
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
routed_params.splitter = Bunch(split={})
max_iter = (
min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter
)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train],
y[train],
X[test],
y[test],
self.copy,
self.fit_intercept,
max_iter,
)
for train, test in cv.split(X, **routed_params.splitter.split)
)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array(
[(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]
)
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(
n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
).fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self).add(
splitter=self.cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
return router
|
OrthogonalMatchingPursuitCV
|
python
|
huggingface__transformers
|
src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py
|
{
"start": 5087,
"end": 7469
}
|
class ____(nn.Module):
def __init__(self, config: ASTConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(
self,
query_layer,
key_layer,
value_layer,
None,
is_causal=self.is_causal,
scaling=self.scaling,
dropout=0.0 if not self.training else self.dropout_prob,
)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer, attention_probs
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->AST
|
ASTSelfAttention
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-openai-like/tests/test_openai_like.py
|
{
"start": 511,
"end": 5339
}
|
class ____(Tokenizer):
def encode(self, text: str) -> List[int]:
return [sum(ord(letter) for letter in word) for word in text.split(" ")]
STUB_MODEL_NAME = "models/stub.gguf"
STUB_API_KEY = "stub_key"
# Use these as kwargs for OpenAILike to connect to LocalAIs
DEFAULT_LOCALAI_PORT = 8080
# TODO: move to MappingProxyType[str, Any] once Python 3.9+
LOCALAI_DEFAULTS: Dict[str, Any] = MappingProxyType( # type: ignore[assignment]
{
"api_key": "localai_fake",
"api_type": "localai_fake",
"api_base": f"http://localhost:{DEFAULT_LOCALAI_PORT}/v1",
}
)
def test_interfaces() -> None:
llm = OpenAILike(model=STUB_MODEL_NAME, api_key=STUB_API_KEY)
assert llm.class_name() == type(llm).__name__
assert llm.model == STUB_MODEL_NAME
def mock_chat_completion(text: str) -> ChatCompletion:
return ChatCompletion(
id="chatcmpl-abc123",
object="chat.completion",
created=1677858242,
model=STUB_MODEL_NAME,
usage={"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
choices=[
Choice(
message=ChatCompletionMessage(role="assistant", content=text),
finish_reason="stop",
index=0,
)
],
)
def mock_completion(text: str) -> Completion:
return Completion(
id="cmpl-abc123",
object="text_completion",
created=1677858242,
model=STUB_MODEL_NAME,
usage={"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
choices=[
CompletionChoice(
text=text,
finish_reason="stop",
index=0,
)
],
)
@patch("llama_index.llms.openai.base.SyncOpenAI")
def test_completion(MockSyncOpenAI: MagicMock) -> None:
mock_instance = MockSyncOpenAI.return_value
mock_instance.completions.create.side_effect = [
mock_completion("1"),
mock_completion("2"),
]
llm = OpenAILike(
**LOCALAI_DEFAULTS, model=STUB_MODEL_NAME, context_window=1024, max_tokens=None
)
response = llm.complete("A long time ago in a galaxy far, far away")
expected_calls = [
# NOTE: has no max_tokens or tokenizer, so won't infer max_tokens
call(
prompt="A long time ago in a galaxy far, far away",
stream=False,
model=STUB_MODEL_NAME,
temperature=0.1,
)
]
assert response.text == "1"
mock_instance.completions.create.assert_has_calls(expected_calls)
llm = OpenAILike(
model=STUB_MODEL_NAME,
context_window=1024,
tokenizer=StubTokenizer(),
)
response = llm.complete("A long time ago in a galaxy far, far away")
expected_calls += [
# NOTE: has tokenizer, so will infer max_tokens
call(
prompt="A long time ago in a galaxy far, far away",
stream=False,
model=STUB_MODEL_NAME,
temperature=0.1,
max_tokens=1014,
)
]
assert response.text == "2"
mock_instance.completions.create.assert_has_calls(expected_calls)
@patch("llama_index.llms.openai.base.SyncOpenAI")
def test_chat(MockSyncOpenAI: MagicMock) -> None:
content = "placeholder"
mock_instance = MockSyncOpenAI.return_value
mock_instance.chat.completions.create.return_value = mock_chat_completion(content)
llm = OpenAILike(
model=STUB_MODEL_NAME,
is_chat_model=True,
tokenizer=StubTokenizer(),
)
response = llm.chat([ChatMessage(role=MessageRole.USER, content="test message")])
assert response.message.content == content
mock_instance.chat.completions.create.assert_called_once_with(
messages=[{"role": "user", "content": "test message"}],
stream=False,
model=STUB_MODEL_NAME,
temperature=0.1,
)
llm = OpenAILike(
model=STUB_MODEL_NAME,
is_chat_model=True,
tokenizer=StubTokenizer(),
)
response = llm.chat([ChatMessage(role=MessageRole.USER, content="test message")])
assert response.message.content == content
mock_instance.chat.completions.create.assert_called_with(
messages=[{"role": "user", "content": "test message"}],
stream=False,
model=STUB_MODEL_NAME,
temperature=0.1,
)
def test_serialization() -> None:
llm = OpenAILike(
model=STUB_MODEL_NAME,
is_chat_model=True,
max_tokens=42,
context_window=43,
tokenizer=StubTokenizer(),
)
serialized = llm.to_dict()
# Check OpenAI base class specifics
assert serialized["max_tokens"] == 42
# Check OpenAILike subclass specifics
assert serialized["context_window"] == 43
assert serialized["is_chat_model"]
|
StubTokenizer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver.py
|
{
"start": 1593,
"end": 8614
}
|
class ____(ClusterResolver):
"""ClusterResolver for Kubernetes.
  This is an implementation of cluster resolvers for Kubernetes. When given
the Kubernetes namespace and label selector for pods, we will retrieve the
pod IP addresses of all running pods matching the selector, and return a
ClusterSpec based on that information.
Note: it cannot retrieve `task_type`, `task_id` or `rpc_layer`. To use it
with some distribution strategies like
`tf.distribute.experimental.MultiWorkerMirroredStrategy`, you will need to
specify `task_type` and `task_id` by setting these attributes.
Usage example with tf.distribute.Strategy:
```Python
# On worker 0
cluster_resolver = KubernetesClusterResolver(
{"worker": ["job-name=worker-cluster-a", "job-name=worker-cluster-b"]})
cluster_resolver.task_type = "worker"
cluster_resolver.task_id = 0
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=cluster_resolver)
# On worker 1
cluster_resolver = KubernetesClusterResolver(
{"worker": ["job-name=worker-cluster-a", "job-name=worker-cluster-b"]})
cluster_resolver.task_type = "worker"
cluster_resolver.task_id = 1
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=cluster_resolver)
```
"""
def __init__(
self,
job_to_label_mapping=None,
tf_server_port=8470,
rpc_layer='grpc',
override_client=None,
executable_location=ExecutableLocation.WITHIN_CLUSTER,
):
"""Initializes a new KubernetesClusterResolver.
This initializes a new Kubernetes ClusterResolver. The ClusterResolver
will attempt to talk to the Kubernetes master to retrieve all the instances
of pods matching a label selector.
Args:
job_to_label_mapping: A mapping of TensorFlow jobs to label selectors.
This allows users to specify many TensorFlow jobs in one Cluster
        Resolver, and each job can have pods that belong to different label
selectors. For example, a sample mapping might be
```
{'worker': ['job-name=worker-cluster-a', 'job-name=worker-cluster-b'],
'ps': ['job-name=ps-1', 'job-name=ps-2']}
```
tf_server_port: The port the TensorFlow server is listening on.
rpc_layer: (Optional) The RPC layer TensorFlow should use to communicate
between tasks in Kubernetes. Defaults to 'grpc'.
override_client: The Kubernetes client (usually automatically retrieved
using `from kubernetes import client as k8sclient`). If you pass this
in, you are responsible for setting Kubernetes credentials manually and
calling `k8sconfig.load_kube_config()` or
`k8sconfig.load_incluster_config()` before using this ClusterResolver.
executable_location: Parameter that specifies whether or not this
TensorFlow code is running from within a K8S cluster or not.
Raises:
ImportError: If the Kubernetes Python client is not installed and no
`override_client` is passed in.
RuntimeError: If autoresolve_task is not a boolean or a callable.
ValueError: If `executable_location` is not a valid value.
"""
try:
from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top
if not override_client:
if executable_location == ExecutableLocation.OFF_CLUSTER:
k8sconfig.load_kube_config()
elif executable_location == ExecutableLocation.WITHIN_CLUSTER:
k8sconfig.load_incluster_config()
else:
raise ValueError('The executable location provided is invalid.')
except ImportError:
if not override_client:
raise ImportError('The Kubernetes Python client must be installed '
'before using the Kubernetes Cluster Resolver. '
'To install the Kubernetes Python client, run '
'`pip install kubernetes` on your command line.')
if not job_to_label_mapping:
job_to_label_mapping = {'worker': ['job-name=tensorflow']}
self._job_to_label_mapping = job_to_label_mapping
self._tf_server_port = tf_server_port
self._override_client = override_client
self.task_type = None
self.task_id = None
self.rpc_layer = rpc_layer
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Returns the master address to use when creating a session.
You must have set the task_type and task_id object properties before
calling this function, or pass in the `task_type` and `task_id`
parameters when using this function. If you do both, the function parameters
will override the object properties.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (Optional) The type of the TensorFlow task of the master.
task_id: (Optional) The index of the TensorFlow task of the master.
rpc_layer: (Optional) The RPC protocol for the given cluster.
Returns:
The name or URL of the session master.
"""
task_type = task_type if task_type is not None else self.task_type
task_id = task_id if task_id is not None else self.task_id
if task_type is not None and task_id is not None:
return format_master_url(
self.cluster_spec().task_address(task_type, task_id),
rpc_layer or self.rpc_layer)
return ''
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest info from Kubernetes.
We retrieve the information from the Kubernetes master every time this
method is called.
Returns:
A ClusterSpec containing host information returned from Kubernetes.
Raises:
RuntimeError: If any of the pods returned by the master is not in the
`Running` phase.
"""
if self._override_client:
client = self._override_client
else:
from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top
from kubernetes import client as k8sclient # pylint: disable=g-import-not-at-top
k8sconfig.load_kube_config()
client = k8sclient.CoreV1Api()
cluster_map = {}
for tf_job in self._job_to_label_mapping:
all_pods = []
for selector in self._job_to_label_mapping[tf_job]:
ret = client.list_pod_for_all_namespaces(label_selector=selector)
selected_pods = []
# Sort the list by the name to make sure it doesn't change call to call.
for pod in sorted(ret.items, key=lambda x: x.metadata.name):
if pod.status.phase == 'Running':
selected_pods.append(
'%s:%s' % (pod.status.host_ip, self._tf_server_port))
else:
raise RuntimeError('Pod "%s" is not running; phase: "%s"' %
(pod.metadata.name, pod.status.phase))
all_pods.extend(selected_pods)
cluster_map[tf_job] = all_pods
return server_lib.ClusterSpec(cluster_map)
|
KubernetesClusterResolver
|
python
|
numpy__numpy
|
numpy/_core/tests/test_conversion_utils.py
|
{
"start": 5696,
"end": 6506
}
|
class ____:
""" Tests of PyArray_IntpConverter """
conv = mt.run_intp_converter
def test_basic(self):
assert self.conv(1) == (1,)
assert self.conv((1, 2)) == (1, 2)
assert self.conv([1, 2]) == (1, 2)
assert self.conv(()) == ()
def test_none(self):
with pytest.raises(TypeError):
assert self.conv(None) == ()
def test_float(self):
with pytest.raises(TypeError):
self.conv(1.0)
with pytest.raises(TypeError):
self.conv([1, 1.0])
def test_too_large(self):
with pytest.raises(ValueError):
self.conv(2**64)
def test_too_many_dims(self):
assert self.conv([1] * 64) == (1,) * 64
with pytest.raises(ValueError):
self.conv([1] * 65)
|
TestIntpConverter
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/demos/boring_classes.py
|
{
"start": 1598,
"end": 1979
}
|
class ____(Dataset):
"""
.. warning:: This is meant for testing/debugging and is experimental.
"""
def __init__(self, size: int, length: int):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index: int) -> Tensor:
return self.data[index]
def __len__(self) -> int:
return self.len
|
RandomDataset
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/meetup/views.py
|
{
"start": 181,
"end": 923
}
|
class ____(OAuth2Adapter):
provider_id = "meetup"
access_token_url = "https://secure.meetup.com/oauth2/access" # nosec
authorize_url = "https://secure.meetup.com/oauth2/authorize"
profile_url = "https://api.meetup.com/2/member/self"
def complete_login(self, request, app, token, **kwargs):
resp = (
get_adapter()
.get_requests_session()
.get(self.profile_url, params={"access_token": token.token})
)
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(MeetupOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(MeetupOAuth2Adapter)
|
MeetupOAuth2Adapter
|
python
|
realpython__materials
|
python-serialize/python-objects/customize-pickle/models.py
|
{
"start": 59,
"end": 465
}
|
class ____:
name: str
password: str
def __getstate__(self):
state = self.__dict__.copy()
state["timestamp"] = int(time.time())
del state["password"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
with open("/dev/random", mode="rb") as file:
self.password = file.read(8).decode("ascii", errors="ignore")
|
User
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/compiler.py
|
{
"start": 13725,
"end": 17483
}
|
class ____(NamedTuple):
"""represents state to use for executing an "insertmanyvalues" statement.
The primary consumers of this object are the
:meth:`.SQLCompiler._deliver_insertmanyvalues_batches` and
:meth:`.DefaultDialect._deliver_insertmanyvalues_batches` methods.
.. versionadded:: 2.0
"""
is_default_expr: bool
"""if True, the statement is of the form
``INSERT INTO TABLE DEFAULT VALUES``, and can't be rewritten as a "batch"
"""
single_values_expr: str
"""The rendered "values" clause of the INSERT statement.
This is typically the parenthesized section e.g. "(?, ?, ?)" or similar.
The insertmanyvalues logic uses this string as a search and replace
target.
"""
insert_crud_params: List[crud._CrudParamElementStr]
"""List of Column / bind names etc. used while rewriting the statement"""
num_positional_params_counted: int
"""the number of bound parameters in a single-row statement.
This count may be larger or smaller than the actual number of columns
targeted in the INSERT, as it accommodates for SQL expressions
in the values list that may have zero or more parameters embedded
within them.
This count is part of what's used to organize rewritten parameter lists
when batching.
"""
sort_by_parameter_order: bool = False
"""if the deterministic_returnined_order parameter were used on the
insert.
All of the attributes following this will only be used if this is True.
"""
includes_upsert_behaviors: bool = False
"""if True, we have to accommodate for upsert behaviors.
This will in some cases downgrade "insertmanyvalues" that requests
deterministic ordering.
"""
sentinel_columns: Optional[Sequence[Column[Any]]] = None
"""List of sentinel columns that were located.
This list is only here if the INSERT asked for
sort_by_parameter_order=True,
and dialect-appropriate sentinel columns were located.
.. versionadded:: 2.0.10
"""
num_sentinel_columns: int = 0
"""how many sentinel columns are in the above list, if any.
This is the same as
``len(sentinel_columns) if sentinel_columns is not None else 0``
"""
sentinel_param_keys: Optional[Sequence[str]] = None
"""parameter str keys in each param dictionary / tuple
that would link to the client side "sentinel" values for that row, which
we can use to match up parameter sets to result rows.
This is only present if sentinel_columns is present and the INSERT
statement actually refers to client side values for these sentinel
columns.
.. versionadded:: 2.0.10
.. versionchanged:: 2.0.29 - the sequence is now string dictionary keys
    only, used against the "compiled parameters" collection before
the parameters were converted by bound parameter processors
"""
implicit_sentinel: bool = False
"""if True, we have exactly one sentinel column and it uses a server side
value, currently has to generate an incrementing integer value.
The dialect in question would have asserted that it supports receiving
these values back and sorting on that value as a means of guaranteeing
correlation with the incoming parameter list.
.. versionadded:: 2.0.10
"""
embed_values_counter: bool = False
"""Whether to embed an incrementing integer counter in each parameter
set within the VALUES clause as parameters are batched over.
This is only used for a specific INSERT..SELECT..VALUES..RETURNING syntax
where a subquery is used to produce value tuples. Current support
includes PostgreSQL, Microsoft SQL Server.
.. versionadded:: 2.0.10
"""
|
_InsertManyValues
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/inheritance/test_polymorphic_rel.py
|
{
"start": 74816,
"end": 79454
}
|
class ____(_PolymorphicTestBase, _PolymorphicUnions):
@testing.skip_if(
lambda: True, "join condition doesn't work w/ this mapping"
)
def test_lazyload_related_w_cache_check(self):
pass
def test_with_polymorphic_two_future_default_wp(self):
"""test #7262
compare to
test_with_polymorphic_two_future_adhoc_wp
"""
sess = fixture_session()
def go():
wp = with_polymorphic(Person, "*")
eq_(
sess.query(wp).order_by(wp.person_id).all(),
self._emps_wo_relationships_fixture(),
)
self.assert_sql_count(testing.db, go, 2)
def test_subqueryload_on_subclass_uses_path_correctly(self):
sess = fixture_session()
expected = [
Engineer(
name="dilbert",
engineer_name="dilbert",
primary_language="java",
status="regular engineer",
machines=[
Machine(name="IBM ThinkPad"),
Machine(name="IPhone"),
],
)
]
with self.sql_execution_asserter(testing.db) as asserter:
wp = with_polymorphic(Person, "*")
eq_(
sess.query(wp)
.options(subqueryload(wp.Engineer.machines))
.filter(wp.name == "dilbert")
.all(),
expected,
)
asserter.assert_(
CompiledSQL(
"SELECT pjoin.person_id AS pjoin_person_id, "
"pjoin.company_id AS pjoin_company_id, "
"pjoin.name AS pjoin_name, pjoin.type AS pjoin_type, "
"pjoin.status AS pjoin_status, "
"pjoin.engineer_name AS pjoin_engineer_name, "
"pjoin.primary_language AS pjoin_primary_language, "
"pjoin.manager_name AS pjoin_manager_name "
"FROM (SELECT engineers.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"CAST(NULL AS VARCHAR(50)) AS manager_name "
"FROM people JOIN engineers ON people.person_id = "
"engineers.person_id UNION ALL SELECT managers.person_id "
"AS person_id, people.company_id AS company_id, people.name "
"AS name, people.type AS type, managers.status AS status, "
"CAST(NULL AS VARCHAR(50)) AS engineer_name, "
"CAST(NULL AS VARCHAR(50)) AS primary_language, "
"managers.manager_name AS manager_name FROM people "
"JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin WHERE pjoin.name = :name_1",
params=[{"name_1": "dilbert"}],
),
CompiledSQL(
"SELECT machines.machine_id AS machines_machine_id, "
"machines.name AS machines_name, machines.engineer_id "
"AS machines_engineer_id, anon_1.pjoin_person_id AS "
"anon_1_pjoin_person_id FROM "
"(SELECT pjoin.person_id AS pjoin_person_id FROM "
"(SELECT engineers.person_id AS person_id, people.company_id "
"AS company_id, people.name AS name, "
"people.type AS type, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"CAST(NULL AS VARCHAR(50)) AS manager_name FROM people "
"JOIN engineers ON people.person_id = engineers.person_id "
"UNION ALL SELECT managers.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, managers.status AS status, "
"CAST(NULL AS VARCHAR(50)) AS engineer_name, "
"CAST(NULL AS VARCHAR(50)) AS primary_language, "
"managers.manager_name AS manager_name FROM people "
"JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin WHERE pjoin.name = :name_1) AS anon_1 JOIN "
"machines ON anon_1.pjoin_person_id = machines.engineer_id "
"ORDER BY machines.machine_id",
params=[{"name_1": "dilbert"}],
),
)
|
PolymorphicUnionsTest
|
python
|
geekcomputers__Python
|
Test-Case-Generator/test_case.py
|
{
"start": 24475,
"end": 25468
}
|
class ____(Case):
def __init__(self, master):
super(Type5, self).__init__(master)
self.forget_home()
self.take_input()
def take_input(self): # Type 5
try:
self.try_forget()
except AttributeError:
pass
self.get_t(0)
self.get_n(1)
self.get_m(2)
self.get_k(3)
self.show_button(4)
def generate(self): # Type 5
self.output.delete("1.0", END)
self.output.insert(END, self.t)
self.output.insert(END, "\n")
for i in range(self.t):
self.n = randint(self.n_min, self.n_max)
self.m = randint(self.m_min, self.m_max)
self.k = randint(self.k_min, self.k_max)
self.output.insert(END, self.n)
self.output.insert(END, " ")
self.output.insert(END, self.m)
self.output.insert(END, " ")
self.output.insert(END, self.k)
self.output.insert(END, "\n")
|
Type5
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1alpha1_server_storage_version.py
|
{
"start": 383,
"end": 7146
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_server_id': 'str',
'decodable_versions': 'list[str]',
'encoding_version': 'str',
'served_versions': 'list[str]'
}
attribute_map = {
'api_server_id': 'apiServerID',
'decodable_versions': 'decodableVersions',
'encoding_version': 'encodingVersion',
'served_versions': 'servedVersions'
}
def __init__(self, api_server_id=None, decodable_versions=None, encoding_version=None, served_versions=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1ServerStorageVersion - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_server_id = None
self._decodable_versions = None
self._encoding_version = None
self._served_versions = None
self.discriminator = None
if api_server_id is not None:
self.api_server_id = api_server_id
if decodable_versions is not None:
self.decodable_versions = decodable_versions
if encoding_version is not None:
self.encoding_version = encoding_version
if served_versions is not None:
self.served_versions = served_versions
@property
def api_server_id(self):
"""Gets the api_server_id of this V1alpha1ServerStorageVersion. # noqa: E501
The ID of the reporting API server. # noqa: E501
:return: The api_server_id of this V1alpha1ServerStorageVersion. # noqa: E501
:rtype: str
"""
return self._api_server_id
@api_server_id.setter
def api_server_id(self, api_server_id):
"""Sets the api_server_id of this V1alpha1ServerStorageVersion.
The ID of the reporting API server. # noqa: E501
:param api_server_id: The api_server_id of this V1alpha1ServerStorageVersion. # noqa: E501
:type: str
"""
self._api_server_id = api_server_id
@property
def decodable_versions(self):
"""Gets the decodable_versions of this V1alpha1ServerStorageVersion. # noqa: E501
The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions. # noqa: E501
:return: The decodable_versions of this V1alpha1ServerStorageVersion. # noqa: E501
:rtype: list[str]
"""
return self._decodable_versions
@decodable_versions.setter
def decodable_versions(self, decodable_versions):
"""Sets the decodable_versions of this V1alpha1ServerStorageVersion.
The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions. # noqa: E501
:param decodable_versions: The decodable_versions of this V1alpha1ServerStorageVersion. # noqa: E501
:type: list[str]
"""
self._decodable_versions = decodable_versions
@property
def encoding_version(self):
"""Gets the encoding_version of this V1alpha1ServerStorageVersion. # noqa: E501
The API server encodes the object to this version when persisting it in the backend (e.g., etcd). # noqa: E501
:return: The encoding_version of this V1alpha1ServerStorageVersion. # noqa: E501
:rtype: str
"""
return self._encoding_version
@encoding_version.setter
def encoding_version(self, encoding_version):
"""Sets the encoding_version of this V1alpha1ServerStorageVersion.
The API server encodes the object to this version when persisting it in the backend (e.g., etcd). # noqa: E501
:param encoding_version: The encoding_version of this V1alpha1ServerStorageVersion. # noqa: E501
:type: str
"""
self._encoding_version = encoding_version
@property
def served_versions(self):
"""Gets the served_versions of this V1alpha1ServerStorageVersion. # noqa: E501
The API server can serve these versions. DecodableVersions must include all ServedVersions. # noqa: E501
:return: The served_versions of this V1alpha1ServerStorageVersion. # noqa: E501
:rtype: list[str]
"""
return self._served_versions
@served_versions.setter
def served_versions(self, served_versions):
"""Sets the served_versions of this V1alpha1ServerStorageVersion.
The API server can serve these versions. DecodableVersions must include all ServedVersions. # noqa: E501
:param served_versions: The served_versions of this V1alpha1ServerStorageVersion. # noqa: E501
:type: list[str]
"""
self._served_versions = served_versions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1ServerStorageVersion):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1ServerStorageVersion):
return True
return self.to_dict() != other.to_dict()
|
V1alpha1ServerStorageVersion
|
python
|
keras-team__keras
|
keras/src/ops/numpy.py
|
{
"start": 180422,
"end": 181216
}
|
class ____(Operation):
def __init__(self, decimals=0, *, name=None):
super().__init__(name=name)
self.decimals = decimals
def call(self, x):
return backend.numpy.round(x, self.decimals)
def compute_output_spec(self, x):
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse)
@keras_export(["keras.ops.round", "keras.ops.numpy.round"])
def round(x, decimals=0):
"""Evenly round to the given number of decimals.
Args:
x: Input tensor.
decimals: Number of decimal places to round to. Defaults to `0`.
Returns:
Output tensor.
"""
if any_symbolic_tensors((x,)):
return Round(decimals).symbolic_call(x)
return backend.numpy.round(x, decimals)
|
Round
|
python
|
scipy__scipy
|
scipy/stats/_continuous_distns.py
|
{
"start": 179505,
"end": 180966
}
|
class ____(rv_continuous):
r"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is:
.. math::
f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} )
where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`
and :math:`x \in [0,1]`. :math:`\phi` is the pdf of the normal
distribution.
`johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _shape_info(self):
ia = _ShapeInfo("a", False, (-np.inf, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ia, ib]
def _pdf(self, x, a, b):
# johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
trm = _norm_pdf(a + b*sc.logit(x))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*sc.logit(x))
def _ppf(self, q, a, b):
return sc.expit(1.0 / b * (_norm_ppf(q) - a))
def _sf(self, x, a, b):
return _norm_sf(a + b*sc.logit(x))
def _isf(self, q, a, b):
return sc.expit(1.0 / b * (_norm_isf(q) - a))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
|
johnsonsb_gen
|
python
|
ray-project__ray
|
doc/source/serve/doc_code/model_composition/streaming_example.py
|
{
"start": 384,
"end": 1196
}
|
class ____:
def __init__(self, streamer: DeploymentHandle):
self._streamer = streamer.options(
# Must set `stream=True` on the handle, then the output will be a
# response generator.
stream=True,
)
async def __call__(self, limit: int) -> AsyncGenerator[int, None]:
# Response generator can be used in an `async for` block.
r: DeploymentResponseGenerator = self._streamer.remote(limit)
async for i in r:
yield i
app = Caller.bind(Streamer.bind())
handle: DeploymentHandle = serve.run(app).options(
stream=True,
)
# Response generator can also be used as a regular generator in a sync context.
r: DeploymentResponseGenerator = handle.remote(10)
assert list(r) == list(range(10))
# __streaming_example_end__
|
Caller
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/smith/imaginaryaxis/_tickfont.py
|
{
"start": 235,
"end": 9955
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.smith.imaginaryaxis"
_path_str = "layout.smith.imaginaryaxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.smith.i
maginaryaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.smith.imaginaryaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.smith.imaginaryaxis.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Tickfont
|
python
|
tornadoweb__tornado
|
tornado/test/simple_httpclient_test.py
|
{
"start": 20330,
"end": 20671
}
|
class ____(AsyncHTTPTestCase, SimpleHTTPClientTestMixin):
def setUp(self):
super().setUp()
self.http_client = self.create_client()
def get_app(self):
return self.mixin_get_app()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(force_instance=True, **kwargs)
|
SimpleHTTPClientTestCase
|
python
|
plotly__plotly.py
|
plotly/graph_objs/isosurface/caps/_x.py
|
{
"start": 233,
"end": 4043
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface.caps"
_path_str = "isosurface.caps.x"
_valid_props = {"fill", "show"}
@property
def fill(self):
"""
Sets the fill ratio of the `caps`. The default fill value of
the `caps` is 1 meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def show(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the x `slices` is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the x `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
"""
def __init__(self, arg=None, fill=None, show=None, **kwargs):
"""
Construct a new X object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.caps.X`
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the x `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
Returns
-------
X
"""
super().__init__("x")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.caps.X
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.caps.X`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("fill", arg, fill)
self._set_property("show", arg, show)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
X
|
python
|
pytorch__pytorch
|
torch/distributed/elastic/metrics/api.py
|
{
"start": 952,
"end": 1066
}
|
class ____(abc.ABC):
@abc.abstractmethod
def emit(self, metric_data: MetricData):
pass
|
MetricHandler
|
python
|
ashishps1__awesome-system-design-resources
|
implementations/python/rate_limiting/sliding_window_log.py
|
{
"start": 43,
"end": 1147
}
|
class ____:
def __init__(self, window_size, max_requests):
self.window_size = window_size # Size of the sliding window in seconds
self.max_requests = max_requests # Maximum number of requests per window
self.request_log = deque() # Log to keep track of request timestamps
def allow_request(self):
now = time.time()
# Remove timestamps that are outside the current window
while self.request_log and now - self.request_log[0] >= self.window_size:
self.request_log.popleft()
# Check if we're still within the limit
if len(self.request_log) < self.max_requests:
self.request_log.append(now)
return True
return False
# Usage example
limiter = SlidingWindowLog(window_size=60, max_requests=5) # 5 requests per minute
for _ in range(10):
print(limiter.allow_request()) # Will print True for the first 5 requests, then False
time.sleep(0.1) # Wait a bit between requests
time.sleep(60) # Wait for the window to slide
print(limiter.allow_request()) # True
|
SlidingWindowLog
|
python
|
numba__numba
|
numba/core/typeinfer.py
|
{
"start": 26728,
"end": 27138
}
|
class ____(CallConstraint):
def __call__(self, typeinfer):
with new_error_context("typing of intrinsic-call at {loc}",
loc=self.loc):
fnty = self.func
if fnty in utils.OPERATORS_TO_BUILTINS:
fnty = typeinfer.resolve_value_type(None, fnty)
self.resolve(typeinfer, typeinfer.typevars, fnty=fnty)
|
IntrinsicCallConstraint
|
python
|
plotly__plotly.py
|
plotly/graph_objs/densitymapbox/legendgrouptitle/_font.py
|
{
"start": 233,
"end": 9957
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "densitymapbox.legendgrouptitle"
_path_str = "densitymapbox.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.densitymapbox.
legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.densitymapbox.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymapbox.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Font
|
python
|
pypa__warehouse
|
tests/unit/packaging/test_models.py
|
{
"start": 1715,
"end": 2444
}
|
class ____:
@pytest.mark.parametrize(("name", "normalized"), [("foo", "foo"), ("Bar", "bar")])
def test_traversal_finds(self, db_request, name, normalized):
project = DBProjectFactory.create(name=name)
root = ProjectFactory(db_request)
assert root[normalized] == project
def test_travel_cant_find(self, db_request):
project = DBProjectFactory.create()
root = ProjectFactory(db_request)
with pytest.raises(KeyError):
root[project.name + "invalid"]
def test_contains(self, db_request):
DBProjectFactory.create(name="foo")
root = ProjectFactory(db_request)
assert "foo" in root
assert "bar" not in root
|
TestProjectFactory
|
python
|
celery__celery
|
celery/contrib/migrate.py
|
{
"start": 8427,
"end": 14361
}
|
class ____:
def __init__(self, app, conn, filter,
limit=None, timeout=1.0,
ack_messages=False, tasks=None, queues=None,
callback=None, forever=False, on_declare_queue=None,
consume_from=None, state=None, accept=None, **kwargs):
self.app = app
self.conn = conn
self.filter = filter
self.limit = limit
self.timeout = timeout
self.ack_messages = ack_messages
self.tasks = set(str_to_list(tasks) or [])
self.queues = prepare_queues(queues)
self.callback = callback
self.forever = forever
self.on_declare_queue = on_declare_queue
self.consume_from = [
_maybe_queue(self.app, q)
for q in consume_from or list(self.queues)
]
self.state = state or State()
self.accept = accept
def start(self):
# start migrating messages.
with self.prepare_consumer(self.create_consumer()):
try:
for _ in eventloop(self.conn, # pragma: no cover
timeout=self.timeout,
ignore_timeouts=self.forever):
pass
except socket.timeout:
pass
except StopFiltering:
pass
return self.state
def update_state(self, body, message):
self.state.count += 1
if self.limit and self.state.count >= self.limit:
raise StopFiltering()
def ack_message(self, body, message):
message.ack()
def create_consumer(self):
return self.app.amqp.TaskConsumer(
self.conn,
queues=self.consume_from,
accept=self.accept,
)
def prepare_consumer(self, consumer):
filter = self.filter
update_state = self.update_state
ack_message = self.ack_message
if self.tasks:
filter = filter_callback(filter, self.tasks)
update_state = filter_callback(update_state, self.tasks)
ack_message = filter_callback(ack_message, self.tasks)
consumer.register_callback(filter)
consumer.register_callback(update_state)
if self.ack_messages:
consumer.register_callback(self.ack_message)
if self.callback is not None:
callback = partial(self.callback, self.state)
if self.tasks:
callback = filter_callback(callback, self.tasks)
consumer.register_callback(callback)
self.declare_queues(consumer)
return consumer
def declare_queues(self, consumer):
# declare all queues on the new broker.
for queue in consumer.queues:
if self.queues and queue.name not in self.queues:
continue
if self.on_declare_queue is not None:
self.on_declare_queue(queue)
try:
_, mcount, _ = queue(
consumer.channel).queue_declare(passive=True)
if mcount:
self.state.total_apx += mcount
except self.conn.channel_errors:
pass
def start_filter(app, conn, filter, limit=None, timeout=1.0,
ack_messages=False, tasks=None, queues=None,
callback=None, forever=False, on_declare_queue=None,
consume_from=None, state=None, accept=None, **kwargs):
"""Filter tasks."""
return Filterer(
app, conn, filter,
limit=limit,
timeout=timeout,
ack_messages=ack_messages,
tasks=tasks,
queues=queues,
callback=callback,
forever=forever,
on_declare_queue=on_declare_queue,
consume_from=consume_from,
state=state,
accept=accept,
**kwargs).start()
def move_task_by_id(task_id, dest, **kwargs):
"""Find a task by id and move it to another queue.
Arguments:
task_id (str): Id of task to find and move.
dest: (str, kombu.Queue): Destination queue.
transform (Callable): Optional function to transform the return
value (destination) of the filter function.
**kwargs (Any): Also supports the same keyword
arguments as :func:`move`.
"""
return move_by_idmap({task_id: dest}, **kwargs)
def move_by_idmap(map, **kwargs):
"""Move tasks by matching from a ``task_id: queue`` mapping.
Where ``queue`` is a queue to move the task to.
Example:
>>> move_by_idmap({
... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'),
... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'),
... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')},
... queues=['hipri'])
"""
def task_id_in_map(body, message):
return map.get(message.properties['correlation_id'])
# adding the limit means that we don't have to consume any more
# when we've found everything.
return move(task_id_in_map, limit=len(map), **kwargs)
def move_by_taskmap(map, **kwargs):
"""Move tasks by matching from a ``task_name: queue`` mapping.
``queue`` is the queue to move the task to.
Example:
>>> move_by_taskmap({
... 'tasks.add': Queue('name'),
... 'tasks.mul': Queue('name'),
... })
"""
def task_name_in_map(body, message):
return map.get(body['task']) # <- name of task
return move(task_name_in_map, **kwargs)
def filter_status(state, body, message, **kwargs):
print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs))
move_direct = partial(move, transform=worker_direct)
move_direct_by_id = partial(move_task_by_id, transform=worker_direct)
move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct)
move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct)
|
Filterer
|
python
|
spack__spack
|
lib/spack/spack/binary_distribution.py
|
{
"start": 100632,
"end": 102924
}
|
class ____(IndexFetcher):
def __init__(self, url_and_version: MirrorURLAndVersion, local_hash, urlopen=None) -> None:
self.local_hash = local_hash
self.ref = spack.oci.image.ImageReference.from_url(url_and_version.url)
self.urlopen = urlopen or spack.oci.opener.urlopen
def conditional_fetch(self) -> FetchIndexResult:
"""Download an index from an OCI registry type mirror."""
url_manifest = self.ref.with_tag(default_index_tag).manifest_url()
try:
response = self.urlopen(
urllib.request.Request(
url=url_manifest,
headers={"Accept": "application/vnd.oci.image.manifest.v1+json"},
)
)
except OSError as e:
raise FetchIndexError(f"Could not fetch manifest from {url_manifest}", e) from e
try:
manifest = json.load(response)
except Exception as e:
raise FetchIndexError(f"Remote index {url_manifest} is invalid", e) from e
# Get first blob hash, which should be the index.json
try:
index_digest = spack.oci.image.Digest.from_string(manifest["layers"][0]["digest"])
except Exception as e:
raise FetchIndexError(f"Remote index {url_manifest} is invalid", e) from e
# Fresh?
if index_digest.digest == self.local_hash:
return FetchIndexResult(etag=None, hash=None, data=None, fresh=True)
# Otherwise fetch the blob / index.json
try:
response = self.urlopen(
urllib.request.Request(
url=self.ref.blob_url(index_digest),
headers={"Accept": "application/vnd.oci.image.layer.v1.tar+gzip"},
)
)
result = codecs.getreader("utf-8")(response).read()
except (OSError, ValueError) as e:
raise FetchIndexError(f"Remote index {url_manifest} is invalid", e) from e
# Make sure the blob we download has the advertised hash
if compute_hash(result) != index_digest.digest:
raise FetchIndexError(f"Remote index {url_manifest} is invalid")
return FetchIndexResult(etag=None, hash=index_digest.digest, data=result, fresh=False)
|
OCIIndexFetcher
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_bar06.py
|
{
"start": 315,
"end": 1472
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_bar06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [64053248, 64446464]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis({"name": "Apple"})
chart.set_y_axis({"name": "Pear"})
chart.set_title({"name": "Title"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/mediawiki/views.py
|
{
"start": 228,
"end": 1300
}
|
class ____(OAuth2Adapter):
provider_id = "mediawiki"
settings = app_settings.PROVIDERS.get(provider_id, {})
REST_API = settings.get("REST_API", "https://meta.wikimedia.org/w/rest.php")
access_token_url = REST_API + "/oauth2/access_token"
authorize_url = REST_API + "/oauth2/authorize"
profile_url = REST_API + "/oauth2/resource/profile"
# Allow custom User-Agent per Wikimedia policy.
headers = {"User-Agent": settings.get("USER_AGENT", "django-allauth")}
def complete_login(self, request, app, token, **kwargs):
headers = {"Authorization": f"Bearer {token.token}"}
headers.update(self.headers)
resp = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
resp.raise_for_status()
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(MediaWikiOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(MediaWikiOAuth2Adapter)
|
MediaWikiOAuth2Adapter
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_axis18.py
|
{
"start": 315,
"end": 1395
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis18.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [43813504, 45705472]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5", "invert_if_negative": 1})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5", "invert_if_negative": 0})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|