| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
| python | pytorch__pytorch | torch/distributed/checkpoint/_async_thread_executor.py | {"start": 1333, "end": 2476} |
class ____(_AsyncCheckpointExecutor):
    def __init__(self) -> None:
        self._executor = ThreadPoolExecutor(
            max_workers=1, thread_name_prefix="AsyncCheckpointExecutor"
        )

    def execute_save(
        self,
        staging_future_or_state_dict: Union[Future[STATE_DICT_TYPE], STATE_DICT_TYPE],
        *,
        checkpoint_id: Union[str, os.PathLike, None] = None,
        storage_writer: Optional[StorageWriter] = None,
        planner: Optional[SavePlanner] = None,
        process_group: Optional[dist.ProcessGroup] = None,
        no_dist: bool = False,
        use_collectives: bool = True,
    ) -> Future:
        f: Future = self._executor.submit(
            save_wrapper,
            staging_future_or_state_dict=staging_future_or_state_dict,
            checkpoint_id=checkpoint_id,
            storage_writer=storage_writer,
            planner=planner,
            process_group=process_group,
            no_dist=no_dist,
            use_collectives=use_collectives,
        )
        f.add_done_callback(lambda f: self._executor.shutdown(wait=False))
        return f
| _ThreadBasedAsyncCheckpointExecutor |
| python | tornadoweb__tornado | tornado/httputil.py | {"start": 23387, "end": 23518} |
class ____(Exception):
    """Exception class for errors in HTTP output.

    .. versionadded:: 4.0
    """

    pass
| HTTPOutputError |
| python | tensorflow__tensorflow | tensorflow/python/distribute/mirrored_variable_test.py | {"start": 3166, "end": 26313} |
class ____(test.TestCase):
  """Base class that tests mirrored variable creator.

  Currently it assumes all strategy objects have two replicas.
  """

  @classmethod
  def setUpClass(cls):
    _mimic_two_cpus()

  def assertAllDifferent(self, objs):
    for i in range(len(objs)):
      for j in range(len(objs)):
        if i == j:
          continue
        self.assertIsNot(objs[i], objs[j])

  # TODO(priyag): Modify more tests to use this helper and check more
  # properties.
  def _test_mv_properties(self, var, name, strategy):
    self.assertTrue(distribute_utils.is_mirrored(var))
    self.assertEqual(name, var.name)
    self.assertIs(strategy, var.distribute_strategy)
    for i, d in enumerate(var._devices):
      self.assertEqual(d, strategy.experimental_local_results(var)[i].device)
      self.assertIs(
          strategy,
          strategy.experimental_local_results(var)[i]._distribute_strategy)  # pylint: disable=protected-access

  def testVariableInFuncGraph(self, distribution):

    def model_fn():
      v = variable_v1.VariableV1(2.0, name="bar")
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return v

    with func_graph.FuncGraph("fg").as_default(), distribution.scope():
      v1 = variable_v1.VariableV1(1.0, name="foo")
      v2 = distribution.extended.call_for_each_replica(model_fn)

    self._test_mv_properties(v1, "foo:0", distribution)
    self._test_mv_properties(v2, "bar:0", distribution)

  def testVariableWithTensorInitialValueInFunction(self, distribution):
    if not context.executing_eagerly():
      self.skipTest("`tf.function` is an eager-only feature")

    v = [None]

    def model_fn():
      if v[0] is None:
        init_val = array_ops.zeros([])
        v[0] = variables.Variable(init_val)
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return v[0]

    @def_function.function(autograph=False)
    def make_v1():
      return distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn))

    self.assertAllEqual([0, 0], make_v1())

  def testVariableWithTensorInitialValueInFunctionXLA(self, distribution):
    if not context.executing_eagerly():
      self.skipTest("`tf.function` is an eager-only feature")

    v = [None]

    def model_fn():
      if v[0] is None:
        init_val = array_ops.zeros([])
        v[0] = variables.Variable(init_val)
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return v[0]

    @def_function.function(autograph=False, jit_compile=True)
    def make_v1():
      return distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn))

    with self.assertRaisesRegex(
        errors.UnimplementedError,
        "We failed to lift variable creations out of this tf.function, "
        "so this tf.function cannot be run on XLA."):
      _ = make_v1()

  def testSingleVariable(self, distribution):

    def model_fn():
      # This variable should be created only once across the threads because of
      # special variable_creator functions used by
      # `distribution.extended.call_for_each_replica`.
      v = variable_v1.VariableV1(1.0, name="foo")
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self._test_mv_properties(result, "foo:0", distribution)

  def testUnnamedVariable(self, distribution):

    def model_fn():
      v = variable_v1.VariableV1(1.0)
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self._test_mv_properties(result, "Variable:0", distribution)

  def testMultipleVariables(self, distribution):

    def model_fn():
      vs = []
      for i in range(5):
        vs.append(variable_v1.VariableV1(1.0, name="foo" + str(i)))
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return vs

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      for i, v in enumerate(result):
        self._test_mv_properties(v, "foo" + str(i) + ":0", distribution)

  def testMultipleVariablesWithSameCanonicalName(self, distribution):

    def model_fn():
      vs = []
      vs.append(variable_v1.VariableV1(1.0, name="foo/bar"))
      vs.append(variable_v1.VariableV1(1.0, name="foo_1/bar"))
      vs.append(variable_v1.VariableV1(1.0, name="foo_1/bar_1"))
      vs.append(variable_v1.VariableV1(1.0, name="foo/bar_1"))
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return vs

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      for v in result:
        self.assertTrue(distribute_utils.is_mirrored(v))
      self.assertEqual(4, len(result))
      self.assertEqual("foo/bar:0", result[0].name)
      self.assertEqual("foo_1/bar:0", result[1].name)
      self.assertEqual("foo_1/bar_1:0", result[2].name)
      self.assertEqual("foo/bar_1:0", result[3].name)

  def testVariableWithSameCanonicalNameAcrossThreads(self, distribution):

    def model_fn():
      replica_id = self.evaluate(_replica_id())
      v = variable_v1.VariableV1(1.0, name="foo_" + str(replica_id))
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertTrue(distribute_utils.is_mirrored(result))
      # The resulting mirrored variable will use the name from the first device.
      self.assertEqual("foo_0:0", result.name)

  def testWithVariableAndVariableScope(self, distribution):

    def model_fn():
      v0 = variable_v1.VariableV1(1.0, name="var0", aggregation=None)
      with variable_scope.variable_scope("common"):
        v1 = variable_v1.VariableV1(1.0, name="var1")
        # This will pause the current thread, and execute the other thread.
        distribute_lib.get_replica_context().merge_call(lambda _: _)
        v2 = variable_v1.VariableV1(
            1.0,
            name="var2",
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        v3 = variable_v1.VariableV1(
            1.0,
            name="var3",
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation=variable_scope.VariableAggregation.MEAN)
      return v0, v1, v2, v3

    with distribution.scope():
      v = variable_v1.VariableV1(1.0, name="var-main0")
      self.assertEqual("var-main0:0", v.name)

      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual(4, len(result))
      v0, v1, v2, v3 = result
      self.assertTrue(distribute_utils.is_mirrored(v0))
      self.assertEqual("var0:0", v0.name)
      self.assertTrue(distribute_utils.is_mirrored(v1))
      self.assertEqual("common/var1:0", v1.name)
      self.assertTrue(distribute_utils.is_sync_on_read(v2))
      self.assertEqual("common/var2:0", v2.name)
      self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)
      self.assertTrue(distribute_utils.is_mirrored(v3))
      self.assertEqual("common/var3:0", v3.name)
      self.assertEqual(variable_scope.VariableAggregation.MEAN, v3.aggregation)

  def testWithGetVariableAndVariableScope(self, distribution):

    def model_fn():
      v0 = variable_scope.get_variable("var0", [1])
      with variable_scope.variable_scope("common"):
        v1 = variable_scope.get_variable("var1", [1])
        # This will pause the current thread, and execute the other thread.
        distribute_lib.get_replica_context().merge_call(lambda _: _)
        v2 = variable_scope.get_variable(
            "var2", [1],
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        v3 = variable_scope.get_variable(
            "var3", [1],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation=variable_scope.VariableAggregation.MEAN)
      return v0, v1, v2, v3

    with distribution.scope():
      with variable_scope.variable_scope("main"):
        v = variable_scope.get_variable("var-main0", [1])
        self.assertEqual("main/var-main0:0", v.name)

        result = distribution.extended.call_for_each_replica(model_fn)
        self.assertEqual(4, len(result))
        v0, v1, v2, v3 = result
        self.assertTrue(distribute_utils.is_mirrored(v0))
        self.assertEqual("main/var0:0", v0.name)
        self.assertTrue(distribute_utils.is_mirrored(v1))
        self.assertEqual("main/common/var1:0", v1.name)
        self.assertTrue(distribute_utils.is_sync_on_read(v2))
        self.assertEqual("main/common/var2:0", v2.name)
        self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)
        self.assertTrue(distribute_utils.is_mirrored(v3))
        self.assertEqual("main/common/var3:0", v3.name)
        self.assertEqual(variable_scope.VariableAggregation.MEAN,
                         v3.aggregation)

  def testOnlyFirstReplicaUpdatesVariables(self, distribution):

    def create_fn():
      aggregation = variable_scope.VariableAggregation.ONLY_FIRST_REPLICA
      v0 = variable_v1.VariableV1(
          2.0,
          name="on_read",
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=aggregation)
      v1 = variable_v1.VariableV1(
          3.0,
          name="on_write",
          synchronization=variable_scope.VariableSynchronization.ON_WRITE,
          aggregation=aggregation)
      return v0, v1

    with distribution.scope():
      v0, v1 = distribution.extended.call_for_each_replica(create_fn)
      self.evaluate(v0.initializer)
      self.assertEqual(
          2.0, self.evaluate(distribution.experimental_local_results(v0)[0]))
      self.assertEqual(
          2.0, self.evaluate(distribution.experimental_local_results(v0)[1]))
      self.assertEqual(2.0, self.evaluate(distribution.extended.read_var(v0)))
      self.evaluate(v1.initializer)
      self.assertEqual(
          3.0, self.evaluate(distribution.experimental_local_results(v1)[0]))
      self.assertEqual(
          3.0, self.evaluate(distribution.experimental_local_results(v1)[1]))
      self.assertEqual(3.0, self.evaluate(distribution.extended.read_var(v1)))

      def replica_id_plus_one():
        return math_ops.cast(_replica_id() + 1, dtype=dtypes.float32)

      # Update using the assign_add member function.
      def update_member_fn():
        update0 = v0.assign_add(5.0 * replica_id_plus_one())
        update1 = v1.assign_add(7.0 * replica_id_plus_one())
        return update0, update1

      update0a, update1a = distribution.extended.call_for_each_replica(
          update_member_fn)

      # Update "sync on read" variable.
      self.evaluate(distribution.group(update0a))
      local_results = self.evaluate(distribution.experimental_local_results(v0))
      self.assertEqual(2.0 + 5.0, local_results[0])
      # Writes are not synchronized for "sync on read" variables,
      # so device[1] can end up with a different value.
      self.assertEqual(2.0 + 2 * 5.0, local_results[1])
      # Always reads from device 0.
      self.assertEqual(2.0 + 5.0,
                       self.evaluate(distribution.extended.read_var(v0)))

      # Update "sync on write" variable.
      self.evaluate(distribution.group(update1a))
      local_results1 = self.evaluate(
          distribution.experimental_local_results(v1))
      self.assertEqual(3.0 + 7.0, local_results1[0])
      # Writes are synchronized for v1, only the argument to assign_add on
      # device[0] is used.
      self.assertEqual(3.0 + 7.0, local_results1[1])
      self.assertEqual(3.0 + 7.0,
                       self.evaluate(distribution.extended.read_var(v1)))

      # Update using state_ops.assign_add global function.
      def update_state_ops_fn():
        update0 = state_ops.assign_add(v0, 11.0 * replica_id_plus_one())
        update1 = state_ops.assign_add(v1, 13.0 * replica_id_plus_one())
        return update0, update1

      update0b, update1b = distribution.extended.call_for_each_replica(
          update_state_ops_fn)
      self.evaluate(distribution.group(update0b))

      # Update "sync on read" variable.
      local_results = self.evaluate(distribution.experimental_local_results(v0))
      self.assertEqual(2.0 + 5.0 + 11.0, local_results[0])
      self.assertEqual(2.0 + 2 * 5.0 + 2 * 11.0, local_results[1])
      self.assertEqual(2.0 + 5.0 + 11.0,
                       self.evaluate(distribution.extended.read_var(v0)))

      # Update "sync on write" variable.
      self.evaluate(distribution.group(update1b))
      local_results1 = self.evaluate(
          distribution.experimental_local_results(v1))
      self.assertEqual(3.0 + 7.0 + 13.0, local_results1[0])
      self.assertEqual(3.0 + 7.0 + 13.0, local_results1[1])
      self.assertEqual(3.0 + 7.0 + 13.0,
                       self.evaluate(distribution.extended.read_var(v1)))

  def testNoneSynchronizationWithGetVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegex(
          ValueError, "`NONE` variable synchronization mode is not "
          "supported with "):
        variable_scope.get_variable(
            "v", [1],
            synchronization=variable_scope.VariableSynchronization.NONE)

  def testNoneSynchronizationWithVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegex(
          ValueError, "`NONE` variable synchronization mode is not "
          "supported with "):
        variable_v1.VariableV1(
            1.0,
            name="v",
            synchronization=variable_scope.VariableSynchronization.NONE)

  def testInvalidSynchronizationWithVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegex(
          ValueError, "Invalid variable synchronization mode: Invalid for "
          "variable: v"):
        variable_v1.VariableV1(1.0, name="v", synchronization="Invalid")

  def testInvalidAggregationWithGetVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegex(
          ValueError, "Invalid variable aggregation mode: invalid for "
          "variable: v"):
        variable_scope.get_variable(
            "v", [1],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation="invalid")

  def testInvalidAggregationWithVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegex(
          ValueError, "Invalid variable aggregation mode: invalid for "
          "variable: v"):
        variable_v1.VariableV1(
            1.0,
            name="v",
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation="invalid")

  def testNonMatchingVariableCreation(self, distribution):

    def model_fn(name):
      v = variable_v1.VariableV1(1.0, name=name)
      distribute_lib.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      names = values.PerReplica(("foo", "bar"))
      with self.assertRaises(RuntimeError):
        _ = distribution.extended.call_for_each_replica(model_fn, args=(names,))

  def testSyncOnReadVariable(self, distribution):
    all_v_sum = {}
    all_v_mean = {}
    components_sum = {}
    components_mean = {}

    def model_fn():
      replica_id = self.evaluate(_replica_id())
      v_sum = variable_v1.VariableV1(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      v_mean = variable_v1.VariableV1(
          4.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.MEAN)
      self.assertTrue(distribute_utils.is_sync_on_read(v_sum))
      self.assertTrue(distribute_utils.is_sync_on_read(v_mean))
      updates = [
          v_sum.assign_add(2.0 + replica_id),
          v_mean.assign(6.0 * replica_id)
      ]
      all_v_sum[replica_id] = v_sum
      all_v_mean[replica_id] = v_mean
      c_sum = v_sum._get()
      c_mean = v_mean._get()
      components_sum[replica_id] = c_sum
      components_mean[replica_id] = c_mean
      self.assertIsNot(v_sum, c_sum)
      self.assertIsNot(v_mean, c_mean)
      return updates, v_sum, v_mean, c_sum, c_mean

    with distribution.scope():
      # Create "sum" and "mean" versions of SyncOnReadVariables.
      ret_ops, ret_v_sum, ret_v_mean, regrouped_sum, regrouped_mean = (
          distribution.extended.call_for_each_replica(model_fn))
      # Should see the same wrapping instance in all replicas.
      self.assertIs(all_v_sum[0], ret_v_sum)
      self.assertIs(all_v_mean[0], ret_v_mean)
      self.assertIs(all_v_sum[0], all_v_sum[1])
      self.assertIs(all_v_mean[0], all_v_mean[1])
      # Regroup should recover the same wrapper.
      self.assertIs(ret_v_sum, regrouped_sum)
      self.assertIs(ret_v_mean, regrouped_mean)
      self.assertIsNot(components_sum[0], components_sum[1])
      self.assertIsNot(components_mean[0], components_mean[1])

      # Apply updates
      self.evaluate(variables.global_variables_initializer())
      self.evaluate([
          y for x in ret_ops  # pylint: disable=g-complex-comprehension
          for y in distribution.experimental_local_results(x)
      ])
      expected_sum = 0.0
      expected_mean = 0.0
      for i, _ in enumerate(distribution.extended.worker_devices):
        # Should see different values on different devices.
        v_sum_value = self.evaluate(
            distribution.experimental_local_results(ret_v_sum)[i].read_value())
        v_mean_value = self.evaluate(
            distribution.experimental_local_results(ret_v_mean)[i].read_value())
        expected = i + 3.0
        self.assertEqual(expected, v_sum_value)
        expected_sum += expected
        expected = i * 6.0
        self.assertEqual(expected, v_mean_value)
        expected_mean += expected
      expected_mean /= len(distribution.extended.worker_devices)

      # Without get(device), should return the value you get by
      # applying the reduction across all replicas (whether you use
      # read_var(), get(), or nothing).
      self.assertEqual(expected_sum, self.evaluate(
          distribution.extended.read_var(ret_v_sum)))
      self.assertEqual(expected_mean, self.evaluate(
          distribution.extended.read_var(ret_v_mean)))
      self.assertEqual(expected_sum, self.evaluate(ret_v_sum._get()))
      self.assertEqual(expected_mean, self.evaluate(ret_v_mean._get()))
      self.assertEqual(expected_sum, self.evaluate(ret_v_sum))
      self.assertEqual(expected_mean, self.evaluate(ret_v_mean))

  # TODO(priyag): Update this test to work in eager mode as well.
  def testDynamicRnnVariables(self, distribution):

    def model_fn():
      inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])
      cell_fw = rnn_cell_impl.LSTMCell(300)
      cell_bw = rnn_cell_impl.LSTMCell(300)
      (outputs, _) = rnn.bidirectional_dynamic_rnn(
          cell_fw, cell_bw, inputs, dtype=dtypes.float32)
      return outputs

    with context.graph_mode(), distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      # Two variables are created by the RNN layer.
      self.assertEqual(2, len(result))
      for v in result:
        self.assertIsInstance(v, values.DistributedValues)
        _, v1 = distribution.experimental_local_results(v)
        self.assertStartsWith(v1._op.name, "replica_1/")

  def testSyncOnReadVariableUpdate(self, distribution):

    def model_fn():
      v_sum = variable_v1.VariableV1(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      self.assertTrue(distribute_utils.is_sync_on_read(v_sum))
      return v_sum

    def update(var, value):
      return var.assign(value)

    with distribution.scope():
      ret_v_sum = distribution.extended.call_for_each_replica(model_fn)

      # Initialize variables.
      self.evaluate(variables.global_variables_initializer())
      # Assert that the aggregated value of the sync on read var is the sum
      # of the individual values before running the update ops.
      self.assertEqual(
          1.0,
          self.evaluate(
              distribution.experimental_local_results(ret_v_sum)
              [0].read_value()))
      self.assertEqual(2.0, self.evaluate(ret_v_sum))

      # Apply updates.
      update_ops = distribution.extended.update(
          ret_v_sum, update, args=(5.0,), group=False)
      self.evaluate(update_ops)
      # Assert that the aggregated value of the sync on read vars is the sum
      # of the individual values after running the update ops.
      self.assertEqual(
          5.0,
          self.evaluate(
              distribution.experimental_local_results(ret_v_sum)
              [0].read_value()))
      self.assertEqual(10.0, self.evaluate(ret_v_sum))

  def testVarDistributeStrategy(self, distribution):
    with distribution.scope():
      mirrored = variable_v1.VariableV1(1.0)
      sync_on_read = variable_v1.VariableV1(
          1.0, synchronization=variable_scope.VariableSynchronization.ON_READ)
    self.assertIs(distribution, mirrored.distribute_strategy)
    self.assertIs(distribution, sync_on_read.distribute_strategy)

  def testInitializer(self, distribution, mode):
    if mode == "graph":
      self.skipTest("Skip graph mode")

    temp_dir = self.get_temp_dir()

    class Model(tracking_util.Checkpoint):

      def __init__(self):
        self._v = variables.Variable(1.0)

    with distribution.scope():
      m = Model()
      save.save(m, temp_dir)

    g = ops.Graph()
    with g.as_default():
      with distribution.scope():
        load.load(temp_dir)

      for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES):
        self.assertIsNotNone(v.initializer)

  def testCustomGradient(self, distribution):

    class CustomModel:

      def __init__(self):
        self._v = variables.Variable(1.0)

      def __call__(self):

        @custom_gradient.recompute_grad
        def _call():
          return self._v + 1

        return _call()

    with distribution.scope():
      model = CustomModel()

      @def_function.function
      def train_step():

        def replica_step():
          with backprop.GradientTape() as tape:
            result = model()
          return tape.gradient(result, [model._v])

        return distribution.run(replica_step)

    grads = distribution.experimental_local_results(train_step())
    self.assertLen(grads, distribution.num_replicas_in_sync)


if __name__ == "__main__":
  test.main()
| MirroredVariableCreationTest |
| python | google__pytype | pytype/tools/xref/indexer.py | {"start": 14411, "end": 28600} |
class ____(ScopedVisitor, traces.MatchAstVisitor):
  """Visitor that generates indexes."""

  def __init__(self, ast, src, module_name):
    super().__init__(ast=ast, src_code=src, module_name=module_name)
    self.defs = {}
    self.locs = collections.defaultdict(list)
    self.refs = []
    self.modules = {}
    self.aliases = {}
    self.source = src
    self.traces = src.traces
    self.typemap = {}
    self.classmap = {}
    self.calls = []
    self.function_params = []
    # Record childof relationships for nested functions/classes
    self.childof = []
    self.scope_defn = {}

  def _get_location(self, node, args):
    """Get a more accurate node location."""
    loc = None
    if isinstance(node, self._ast.ClassDef):
      # For class and function definitions, search for the string
      #   (class|def) <name>
      # between the start of the AST node and the start of the body. Handles
      # the offset for decorated functions/classes.
      body_start = node.body[0].lineno
      text = f"class {args['name']}"
      loc = self.source.find_first_text(node.lineno, body_start, text)
    elif isinstance(node, self._ast.FunctionDef):
      body_start = node.body[0].lineno
      text = f"def {args['name']}"
      loc = self.source.find_first_text(node.lineno, body_start, text)
    if loc is None:
      loc = get_location(node)
    return loc

  def _get_node_name(self, node):
    if isinstance(node, str):
      # We replace nodes with their names after visiting them.
      return node
    return super()._get_node_name(node)

  def make_def(self, node, **kwargs):
    """Make a definition from a node."""
    if isinstance(node, self._ast.Name):
      t = node_utils.typename(node.ctx)
    elif isinstance(node, self._ast.arg):
      t = "Param"
    else:
      t = node_utils.typename(node)
    args = {
        "name": node_utils.get_name(node, self._ast),
        "scope": self.scope_id(),
        "typ": t,
        "data": None,
        "target": None,
        "doc": None,
    }
    args.update(kwargs)
    defn = Definition(**args)
    line, col = self._get_location(node, args)
    assert line is not None
    defloc = DefLocation(defn.id, source.Location(line, col))
    return (defn, defloc)

  def make_ref(self, node, **kwargs):
    """Make a reference from a node."""
    assert "data" in kwargs  # required kwarg
    args = {
        "name": node_utils.get_name(node, self._ast),
        "scope": self.scope_id(),
        "ref_scope": None,
        "typ": node_utils.typename(node),
        "location": get_location(node),
        "target": None,
    }
    args.update(kwargs)
    return Reference(**args)

  def add_local_def(self, node, **kwargs):
    defn, defloc = self.make_def(node, **kwargs)
    if defn.id not in self.defs:
      self.defs[defn.id] = defn
    self.locs[defn.id].append(defloc)
    self.envs[defn.scope][defn.name] = defn
    return defn

  def add_global_def(self, node, **kwargs):
    kwargs.update({"scope": "module"})
    return self.add_local_def(node, **kwargs)

  def add_local_ref(self, node, **kwargs):
    kwargs.update({"ref_scope": self.scope_id()})
    ref = self.make_ref(node, **kwargs)
    self.refs.append(ref)
    return ref

  def add_closure_ref(self, node, **kwargs):
    """Look for node.name up the chain of scopes."""
    name = node_utils.get_name(node, self._ast)
    env, _ = self.current_env.lookup(name)
    if env:
      kwargs.update({"ref_scope": env.scope})
    else:
      # This should never happen! If python has generated a LOAD_DEREF bytecode
      # then we do have the name defined in a parent scope. However, in the
      # interests of not crashing the indexer, fall back to the current scope.
      # TODO(mdemello): We need error logs.
      pass
    ref = self.make_ref(node, **kwargs)
    self.refs.append(ref)
    return ref

  def add_global_ref(self, node, **kwargs):
    kwargs.update({"ref_scope": "module"})
    return self.add_local_ref(node, **kwargs)

  def add_call(self, node, name, func, arg_varnames, return_type):
    start = get_location(node)
    end = get_end_location(node)
    self.calls.append(
        Funcall(name, self.scope_id(), func, start, end, arg_varnames,
                return_type))

  def add_attr(self, node):
    defn, _ = self.make_def(node)
    self.defs[defn.id] = defn
    env = self.envs[self.scope_id()]
    if env.is_self_attr(node):
      self.envs[self.scope_id()].setattr(node.attr, defn)

  def _has_decorator(self, f, decorator):
    for d in f.decorator_list:
      if isinstance(d, self._ast.Name) and d.id == decorator:
        return True
    return False

  def _record_childof(self, node, defn):
    """Record a childof relationship for nested definitions."""
    parent = self.scope_defn.get(self.scope_id())
    if parent:
      self.childof.append((defn, parent))

  def enter_ClassDef(self, node):
    class_name = node_utils.get_name(node, self._ast)
    last_line = max(node.lineno, node.body[0].lineno - 1)
    ops = match_opcodes_multiline(self.traces, node.lineno, last_line, [
        ("LOAD_BUILD_CLASS", None),
        ("STORE_NAME", class_name),
        # Classes defined within a function generate a STORE_FAST or
        # STORE_DEREF op.
        ("STORE_FAST", class_name),
        ("STORE_DEREF", class_name),
        # A class being declared global anywhere generates a STORE_GLOBAL op.
        ("STORE_GLOBAL", class_name),
    ])
    # pytype sometimes analyses this twice, leading to duplicate opcode
    # traces. We only want the first two in the list.
    d = data = None
    if (len(ops) >= 2 and
        ops[0][0] == "LOAD_BUILD_CLASS" and
        ops[1][0] in (
            "STORE_NAME", "STORE_FAST", "STORE_DEREF", "STORE_GLOBAL")):
      _, _, data = ops[1]
      d = _unwrap(data)
    assert d, "Did not get pytype data for class %s at line %d" % (
        class_name, node.lineno)
    defn = self.add_local_def(node, data=data,
                              doc=DocString.from_node(self._ast, node))
    self._record_childof(node, defn)
    self.classmap[d[0]] = defn
    super().enter_ClassDef(node)
    self.scope_defn[self.scope_id()] = defn

  def enter_FunctionDef(self, node):
    last_line = max(node.lineno, node.body[0].lineno - 1)
    ops = match_opcodes_multiline(self.traces, node.lineno, last_line, [
        ("MAKE_FUNCTION", None),  # py2 has no symbol, py3 has node.name
        ("LOAD_CLOSURE", None)  # Nested functions
    ])
    if ops:
      _, _, data = ops[0]
    else:
      # TODO(mdemello): Add an assert; this should not happen but I would
      # rather not break grok indexing if it does.
      data = None
    fn_def = self.add_local_def(node, data=data,
                                doc=DocString.from_node(self._ast, node))
    self._record_childof(node, fn_def)
    env = self.add_scope(node)
    self.scope_defn[self.scope_id()] = fn_def
    # TODO(mdemello): Get pytype data for params
    args = node.args
    posonlyargs = getattr(args, "posonlyargs", [])
    vararg = [args.vararg] if getattr(args, "vararg", None) else []
    kwarg = [args.kwarg] if getattr(args, "kwarg", None) else []
    all_args = posonlyargs + args.args + vararg + args.kwonlyargs + kwarg
    params = [self.add_local_def(v) for v in all_args]
    for i, param in enumerate(params):
      self.function_params.append(FunctionParam(
          def_id=fn_def.id, param_id=param.id, position=i))
    if env.cls:
      if (not self._has_decorator(node, "classmethod") and
          not self._has_decorator(node, "staticmethod")):
        # Don't crash if we have buggy code like
        #   class A(): def f(): ...
        if params:
          env.self_var = params[0]

  def visit_Name(self, node):
    # We ignore the location returned by match() because we'll recompute the
    # same location anyways.
    # We use pytype trace data to distinguish between local and global
    # variables.
    for unused_loc, trace in self.match(node):
      op = trace.op
      symbol = trace.symbol
      data = trace.types
      d = _unwrap(data)
      ref = None
      if op == "LOAD_GLOBAL":
        ref = self.add_global_ref(node, name=symbol, data=data)
        self.typemap[ref.id] = d
      elif op in ["LOAD_FAST", "LOAD_NAME"]:
        ref = self.add_local_ref(node, name=symbol, data=data)
        self.typemap[ref.id] = d
      elif op in ["LOAD_DEREF"]:
        ref = self.add_closure_ref(node, name=symbol, data=data)
        self.typemap[ref.id] = d
      elif op == "STORE_GLOBAL":
        defn = self.add_global_def(node, name=symbol, data=data)
        self.typemap[defn.id] = d
      elif op in ["STORE_FAST", "STORE_NAME", "STORE_DEREF"]:
        defn = self.add_local_def(node, name=symbol, data=data)
        self.typemap[defn.id] = d
      if ref and self.current_env.ret == _RETURNING_NAME:
        self.current_env.ret = ref
    return node.id

  def visit_Call(self, node):
    name = self._get_node_name(node)
    # We have replaced Name() in args with the corresponding string
    arg_varnames = [x for x in node.args if isinstance(x, str)]
    seen = set()
    for _, trace in self.match(node):
      call, return_type = trace.types
      if call is None:
        continue
      for d in call:
        for f in qualified_method(d):
          if f not in seen:
            self.add_call(node, name, f, arg_varnames, return_type)
            seen.add(f)
    return name

  def visit_Assign(self, node):
    for v in node.targets:
      if isinstance(v, self._ast.Attribute):
        self.add_attr(v)

  def visit_AnnAssign(self, node):
    parent = self.scope_defn.get(self.scope_id())
    if parent and parent.typ == "ClassDef":
      self.add_local_def(node, name=node.target)

  def _add_attr_ref(self, node, node_str, trace):
    ref = self.add_local_ref(
        node,
        target=node.value,
        name=node_str,
        data=trace.types)
    if len(trace.types) == 2:
      _, rhs = trace.types
      self.typemap[ref.id] = rhs

  def visit_Attribute(self, node):
    node_str = self._get_node_name(node)
    # match() returns the location of the attribute, whereas the indexer needs
    # the location of the value on which the attribute is accessed, in order
    # to link function calls. We'll manually adjust the location later.
    for unused_loc, trace in self.match(node):
      if trace.op in ("LOAD_ATTR", "LOAD_METHOD"):
        self._add_attr_ref(node, node_str, trace)
      elif trace.op == "STORE_ATTR":
        env = self.envs[self.scope_id()]
        if env.is_self_attr(node):
          # Add a new definition for `self.x = ...`
          defn = self.add_local_def(node)
          if self.current_class:
            # We only support attr definitions within a class definition.
            self.current_env.setattr(node.attr, defn)
        else:
          # Otherwise just add a reference
          self._add_attr_ref(node, node_str, trace)
    return node_str

  def visit_Subscript(self, node):
    return node.value

  def visit_DictComp(self, _node):
    return "<expr>"

  def visit_ListComp(self, _node):
    return "<expr>"

  def process_import(self, node):
    """Common code for Import and ImportFrom."""
    for alias, (loc, trace) in zip(node.names, self.match(node)):
      # If an import is aliased, match() returns only the symbol/loc of
      # the alias, whereas the indexer also needs access to the unaliased
      # name in order to reference the imported module.
      op = trace.op
      symbol = trace.symbol
      data = trace.types
      defn: Definition | None = None
      if alias.asname:
        defn = self.add_local_def(
            node, name=symbol, target=alias.name, data=data)
        defloc = self.locs[defn.id].pop()
        self.locs[defn.id].append(DefLocation(defloc.def_id, loc))
        # Shift symbol/loc back to the unaliased name.
        symbol = alias.name
        m = re.search("[ ,]" + symbol + r"\b", self.source.line(loc.line))
        if m is None:
          # TODO(slebedev): Support multi-line from-imports.
          continue
        c, _ = m.span()
        loc = source.Location(loc.line, c + 1)
      imported = None
      try:
        [imported] = _unwrap(data)
      except (TypeError, ValueError):
        resolved = False
      else:
        resolved = not isinstance(imported, abstract.Unsolvable)
      if not resolved:
        continue
      if op == "STORE_NAME":
        # for |import x.y as z| or |from x import y as z| we want {z: x.y}
        self.add_local_ref(node, name=symbol, data=data, location=loc)
        if not isinstance(imported, abstract.Module):
          # Make the from-imported symbol available in the current namespace.
          remote = Remote(imported.module, name=symbol, resolved=True)
          if defn:
            self.aliases[defn.id] = remote
          self.current_env[symbol] = remote
          self.typemap[remote.id] = [imported]
          continue
        if defn:
          remote = Remote(imported.full_name, IMPORT_FILE_MARKER, resolved=True)
          self.aliases[defn.id] = remote
          self.modules[defn.id] = imported.full_name
        else:
          self.modules[self.scope_id() + "." + symbol] = imported.full_name
      elif op == "IMPORT_NAME":
        # |import x.y| puts both {x: x} and {x.y: x.y} in modules
        self.add_local_ref(node, name=symbol, data=data, location=loc)
        # TODO(slebedev): Reference every import path component.
        # For example here
        #
        #   from foo.bar import boo
        #   import foo.bar.boo
        #
        # we should reference both foo and foo.bar (in addition to
        # foo.bar.boo).
        for mod in module_utils.get_all_prefixes(symbol):
          self.modules[self.scope_id() + "." + mod] = mod

  def visit_Import(self, node):
    self.process_import(node)

  def visit_ImportFrom(self, node):
    self.process_import(node)

  def enter_Return(self, node):
    if isinstance(node.value, self._ast.Name):
      self.current_env.ret = _RETURNING_NAME

  def leave_Return(self, node):
    if self.current_env.ret == _RETURNING_NAME:
      self.current_env.ret = None


# pylint: enable=invalid-name
# pylint: enable=missing-docstring
| IndexVisitor |
| python | django__django | django/core/exceptions.py | {"start": 1936, "end": 2037} |
class ____(Exception):
    """The user did not have permission to do that"""
    pass
| PermissionDenied |
| python | tensorflow__tensorflow | tensorflow/python/ops/sparse_bincount_ops_test.py | {"start": 24240, "end": 27303} |
class ____(test_util.TensorFlowTestCase):

  def test_dense_input_sparse_weights_fails(self):
    x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
    weights = sparse_ops.from_dense(
        np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
    with self.assertRaisesRegex(ValueError, "must be a tf.Tensor"):
      self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))

  def test_dense_input_wrong_shape_fails(self):
    x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
    weights = np.array([[3, 2], [5, 4], [4, 3]])
    # Note: Eager mode and graph mode throw different errors here. Graph mode
    # will fail with a ValueError from the shape checking logic, while Eager
    # will fail with an InvalidArgumentError from the kernel itself.
    if context.executing_eagerly():
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  "must have the same shape"):
        self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))
    else:
      with self.assertRaisesRegex(ValueError, "both shapes must be equal"):
        self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))

  def test_sparse_input_dense_weights_fails(self):
    x = sparse_ops.from_dense(
        np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
    weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
    with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
      self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))

  def test_sparse_input_wrong_indices_fails(self):
    x = sparse_ops.from_dense(
        np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
    weights = sparse_ops.from_dense(
        np.array([[3, 1, 0, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "must have the same indices"):
      self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))

  def test_sparse_input_too_many_indices_fails(self):
    x = sparse_ops.from_dense(
        np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
    weights = sparse_ops.from_dense(
        np.array([[3, 1, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
    with self.assertRaisesIncompatibleShapesError():
      self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))

  def test_sparse_input_wrong_shape_fails(self):
    x = sparse_ops.from_dense(
        np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
    weights = sparse_ops.from_dense(
        np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4], [0, 0, 0, 0]],
                 dtype=np.int32))
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "must have the same dense shape"):
      self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))


if __name__ == "__main__":
  v2_compat.enable_v2_behavior()
  test.main()
| TestSparseCountFailureModes |
| python | patrick-kidger__equinox | equinox/_vmap_pmap.py | {"start": 1538, "end": 3423} |
class ____:
    """Returns a callable that returns the specified integer if evaluated on an array.
    Otherwise, it returns `None`.

    !!! Example

        ```python
        fn = if_array(1)
        # Evaluate on an array, return the integer.
        fn(jax.numpy.array([0, 1, 2]))  # 1
        # Evaluate on not-an-array, return None.
        fn(True)  # None
        ```
    """

    axis: int

    def __call__(self, x: Any) -> int | None:
        return self.axis if is_array(x) else None


def _moveaxis(array, axis):
    return jnp.moveaxis(array, 0, axis)


def _named_in_axes(fun, in_axes, args):
    if isinstance(in_axes, dict):
        in_axes = dict(in_axes)
        new_in_axes = []
        default = if_array(0)
        params = inspect.signature(fun).parameters
        # We may have that len(args) < len(params) due to default arguments.
        # Truncate to considering just the arguments that have been passed.
        #
        # (If len(args) > len(params) then we'll get the usual error later when
        # attempting to call with the wrong number of arguments.)
        for _, param_name in zip(args, params):
            new_in_axes.append(in_axes.pop(param_name, default))
        if len(in_axes) != 0:
            raise ValueError(
                "The following `in_axes` did not correspond to any argument: "
                f"{tuple(in_axes.keys())}"
            )
        # Note that this requires all named arguments to be passed; they cannot
        # take default values. That is, we deliberately don't allow something
        # like
        # ```python
        # @eqx.filter_vmap(in_axes=dict(foo=0))
        # def fn(foo=default_value):
        #     ...
        #
        # fn()
        # ```
        # This is because it is ambiguous whether the default value is
        # vectorised or not.
        in_axes = tuple(new_in_axes)
    return in_axes
| if_array |
| python | numpy__numpy | numpy/_core/tests/test_umath.py | {"start": 55876, "end": 58751} |
class ____:
    def test_log_values(self):
        x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
        y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        for dt in ['f', 'd', 'g']:
            log2_ = 0.69314718055994530943
            xf = np.array(x, dtype=dt)
            yf = np.array(y, dtype=dt) * log2_
            assert_almost_equal(np.log(xf), yf)

        # test aliasing (issue #17761)
        x = np.array([2, 0.937500, 3, 0.947500, 1.054697])
        xf = np.log(x)
        assert_almost_equal(np.log(x, out=x), xf)

    def test_log_values_maxofdtype(self):
        # test log() of max for dtype does not raise
        dtypes = [np.float32, np.float64]
        # This is failing at least on linux aarch64 (see gh-25460), and on most
        # other non x86-64 platforms checking `longdouble` isn't too useful as
        # it's an alias for float64.
        if platform.machine() == 'x86_64':
            dtypes += [np.longdouble]

        for dt in dtypes:
            with np.errstate(all='raise'):
                x = np.finfo(dt).max
                np.log(x)

    def test_log_strides(self):
        np.random.seed(42)
        strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4])
        sizes = np.arange(2, 100)
        for ii in sizes:
            x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0, size=ii))
            x_special = x_f64.copy()
            x_special[3:-1:4] = 1.0
            y_true = np.log(x_f64)
            y_special = np.log(x_special)
            for jj in strides:
                assert_array_almost_equal_nulp(
                    np.log(x_f64[::jj]), y_true[::jj], nulp=2)
                assert_array_almost_equal_nulp(
                    np.log(x_special[::jj]), y_special[::jj], nulp=2)

    # Reference values were computed with mpmath, with mp.dps = 200.
    @pytest.mark.parametrize(
        'z, wref',
        [(1 + 1e-12j, 5e-25 + 1e-12j),
         (1.000000000000001 + 3e-08j,
          1.5602230246251546e-15 + 2.999999999999996e-08j),
         (0.9999995000000417 + 0.0009999998333333417j,
          7.831475869017683e-18 + 0.001j),
         (0.9999999999999996 + 2.999999999999999e-08j,
          5.9107901499372034e-18 + 3e-08j),
         (0.99995000042 - 0.009999833j,
          -7.015159763822903e-15 - 0.009999999665816696j)],
    )
    def test_log_precision_float64(self, z, wref):
        w = np.log(z)
        assert_allclose(w, wref, rtol=1e-15)

    # Reference values were computed with mpmath, with mp.dps = 200.
    @pytest.mark.parametrize(
        'z, wref',
        [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12 + 3e-06j)),
         (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)),
         (np.complex64(0.9999999 + 1e-06j),
          np.complex64(-1.192088e-07 + 1.0000001e-06j))],
    )
    def test_log_precision_float32(self, z, wref):
        w = np.log(z)
        assert_allclose(w, wref, rtol=1e-6)
| TestLog |
| python | great-expectations__great_expectations | tests/execution_engine/test_sqlalchemy_execution_engine.py | {"start": 47399, "end": 59826} |
class ____:
    """Tests for SQLAlchemy condition_to_filter_clause methods."""

    @pytest.mark.sqlite
    @pytest.mark.parametrize(
        "condition,expected_sql",
        [
            pytest.param(
                ComparisonCondition(column=Column("age"), operator=Operator.EQUAL, parameter=5),
                "age = 5",
                id="equal_int",
            ),
            pytest.param(
                ComparisonCondition(
                    column=Column("age"), operator=Operator.NOT_EQUAL, parameter=10
                ),
                "age != 10",
                id="not_equal_int",
            ),
            pytest.param(
                ComparisonCondition(
                    column=Column("age"), operator=Operator.LESS_THAN, parameter=18
                ),
                "age < 18",
                id="less_than",
            ),
            pytest.param(
                ComparisonCondition(
                    column=Column("age"), operator=Operator.GREATER_THAN, parameter=65
                ),
                "age > 65",
                id="greater_than",
            ),
            pytest.param(
                ComparisonCondition(
                    column=Column("age"), operator=Operator.LESS_THAN_OR_EQUAL, parameter=100
                ),
                "age <= 100",
                id="less_than_or_equal",
            ),
            pytest.param(
                ComparisonCondition(
                    column=Column("age"), operator=Operator.GREATER_THAN_OR_EQUAL, parameter=0
                ),
                "age >= 0",
                id="greater_than_or_equal",
            ),
            pytest.param(
                ComparisonCondition(
                    column=Column("name"), operator=Operator.EQUAL, parameter="John"
                ),
                "name = 'John'",
                id="equal_string",
            ),
        ],
    )
    def test_comparison_conditions(
        self, sa, condition: ComparisonCondition, expected_sql: str
    ) -> None:
        """Test that comparison conditions compile to correct SQL."""
        engine = SqlAlchemyExecutionEngine(connection_string="sqlite://")
        result = engine.condition_to_filter_clause(condition)
        compiled = str(result.compile(compile_kwargs={"literal_binds": True}))
        assert compiled == expected_sql

    @pytest.mark.sqlite
    @pytest.mark.parametrize(
        "condition,expected_sql",
        [
            pytest.param(
                ComparisonCondition(
                    column=Column("status"), operator=Operator.IN, parameter=[1, 2, 3]
                ),
                "status IN (1, 2, 3)",
                id="in_integers",
            ),
            pytest.param(
                ComparisonCondition(
                    column=Column("status"), operator=Operator.NOT_IN, parameter=[1, 2, 3]
                ),
                "(status NOT IN (1, 2, 3))",
                id="not_in_integers",
            ),
        ],
    )
    def test_in_conditions(self, sa, condition: ComparisonCondition, expected_sql: str) -> None:
        """Test that IN/NOT IN conditions compile to correct SQL."""
        engine = SqlAlchemyExecutionEngine(connection_string="sqlite://")
        result = engine.condition_to_filter_clause(condition)
        compiled = str(result.compile(compile_kwargs={"literal_binds": True}))
        assert compiled == expected_sql

    @pytest.mark.sqlite
    @pytest.mark.parametrize(
        "condition,expected_sql",
        [
            pytest.param(
                NullityCondition(column=Column("email"), is_null=True),
                "email IS NULL",
                id="is_null",
            ),
            pytest.param(
                NullityCondition(column=Column("email"), is_null=False),
                "email IS NOT NULL",
                id="is_not_null",
            ),
        ],
    )
    def test_nullity_conditions(self, sa, condition: NullityCondition, expected_sql: str) -> None:
        """Test that nullity conditions compile to correct SQL."""
        engine = SqlAlchemyExecutionEngine(connection_string="sqlite://")
        result = engine.condition_to_filter_clause(condition)
        compiled = str(result.compile(compile_kwargs={"literal_binds": True}))
        assert compiled == expected_sql

    @pytest.mark.sqlite
    def test_and_condition_simple(self, sa) -> None:
        engine = SqlAlchemyExecutionEngine(connection_string="sqlite://")
        and_condition = AndCondition(
            conditions=[
                ComparisonCondition(
                    column=Column("age"), operator=Operator.GREATER_THAN, parameter=18
                ),
                ComparisonCondition(
                    column=Column("age"), operator=Operator.LESS_THAN, parameter=65
                ),
            ]
        )
        result = engine.condition_to_filter_clause(and_condition)
        compiled = str(result.compile(compile_kwargs={"literal_binds": True}))
        assert compiled == "age > 18 AND age < 65"

    @pytest.mark.sqlite
    def test_or_condition_simple(self, sa) -> None:
        engine = SqlAlchemyExecutionEngine(connection_string="sqlite://")
        or_condition = OrCondition(
            conditions=[
                ComparisonCondition(
                    column=Column("status"), operator=Operator.EQUAL, parameter="active"
                ),
                ComparisonCondition(
                    column=Column("status"), operator=Operator.EQUAL, parameter="pending"
                ),
            ]
        )
        result = engine.condition_to_filter_clause(or_condition)
        compiled = str(result.compile(compile_kwargs={"literal_binds": True}))
        assert compiled == "status = 'active' OR status = 'pending'"

    @pytest.mark.sqlite
    def test_nested_conditions(self, sa) -> None:
        engine = SqlAlchemyExecutionEngine(connection_string="sqlite://")
        or_condition = OrCondition(
            conditions=[
                AndCondition(
                    conditions=[
                        ComparisonCondition(
                            column=Column("age"),
                            operator=Operator.GREATER_THAN_OR_EQUAL,
                            parameter=18,
                        ),
                        ComparisonCondition(
                            column=Column("age"),
                            operator=Operator.LESS_THAN_OR_EQUAL,
                            parameter=65,
                        ),
                    ]
                ),
                ComparisonCondition(
                    column=Column("status"), operator=Operator.EQUAL, parameter="exempt"
                ),
            ]
        )
        result = engine.condition_to_filter_clause(or_condition)
        compiled = str(result.compile(compile_kwargs={"literal_binds": True}))
        assert compiled == "age >= 18 AND age <= 65 OR status = 'exempt'"

    @pytest.mark.sqlite
    def test_comparison_filter_clause_filters_query(self, sa, test_db_connection_string) -> None:
        """Test that comparison conditions work with actual SQL queries."""
        engine = SqlAlchemyExecutionEngine(connection_string=test_db_connection_string)
        # Create test table
        df = pd.DataFrame({"age": [15, 25, 35, 45, 55], "name": ["A", "B", "C", "D", "E"]})
        with engine.get_connection() as conn:
            add_dataframe_to_db(df=df, name="test_table", con=conn, index=False)
        # Create condition
        condition = ComparisonCondition(
            column=Column("age"), operator=Operator.GREATER_THAN, parameter=30
        )
        filter_clause = engine.condition_to_filter_clause(condition)
        # Build and execute query
        query = sa.select(sa.text("*")).select_from(sa.text("test_table")).where(filter_clause)
        with engine.get_connection() as conn:
            result = conn.execute(query).fetchall()
        assert len(result) == 3
        ages = [row[0] for row in result]
        assert ages == [35, 45, 55]

    @pytest.mark.sqlite
    def test_in_filter_clause_filters_query(self, sa, test_db_connection_string) -> None:
        """Test that IN operator works with actual SQL queries."""
        engine = SqlAlchemyExecutionEngine(connection_string=test_db_connection_string)
        # Create test table
        df = pd.DataFrame(
            {
                "status": ["active", "pending", "inactive", "active", "deleted"],
                "id": [1, 2, 3, 4, 5],
            }
        )
        with engine.get_connection() as conn:
            add_dataframe_to_db(df=df, name="test_status_table", con=conn, index=False)
        # Create condition
        condition = ComparisonCondition(
            column=Column("status"), operator=Operator.IN, parameter=["active", "pending"]
        )
        filter_clause = engine.condition_to_filter_clause(condition)
        # Build and execute query
        query = (
            sa.select(sa.text("*")).select_from(sa.text("test_status_table")).where(filter_clause)
        )
        with engine.get_connection() as conn:
            result = conn.execute(query).fetchall()
        assert len(result) == 3
        ids = sorted([row[1] for row in result])
        assert ids == [1, 2, 4]

    @pytest.mark.sqlite
    def test_nullity_filter_clause_filters_query(self, sa, test_db_connection_string) -> None:
        """Test that nullity conditions work with actual SQL queries."""
        engine = SqlAlchemyExecutionEngine(connection_string=test_db_connection_string)
        # Create test table
        df = pd.DataFrame(
            {
                "email": ["a@example.com", None, "c@example.com", None, "e@example.com"],
                "id": [1, 2, 3, 4, 5],
            }
        )
        with engine.get_connection() as conn:
            add_dataframe_to_db(df=df, name="test_email_table", con=conn, index=False)
        # Create condition
        condition = NullityCondition(column=Column("email"), is_null=False)
        filter_clause = engine.condition_to_filter_clause(condition)
        # Build and execute query
        query = (
            sa.select(sa.text("*")).select_from(sa.text("test_email_table")).where(filter_clause)
        )
        with engine.get_connection() as conn:
            result = conn.execute(query).fetchall()
        assert len(result) == 3
        ids = sorted([row[1] for row in result])
        assert ids == [1, 3, 5]

    @pytest.mark.sqlite
    def test_nested_condition_filters_query(self, sa, test_db_connection_string) -> None:
        """Test that nested conditions work with actual SQL queries."""
        engine = SqlAlchemyExecutionEngine(connection_string=test_db_connection_string)
        # Create test table
        df = pd.DataFrame(
            {
                "age": [15, 25, 35, 45, 75],
                "status": ["active", "active", "active", "active", "exempt"],
                "id": [1, 2, 3, 4, 5],
            }
        )
        with engine.get_connection() as conn:
            add_dataframe_to_db(df=df, name="test_nested_table", con=conn, index=False)
        # Create nested condition
        or_condition = OrCondition(
            conditions=[
                AndCondition(
                    conditions=[
                        ComparisonCondition(
                            column=Column("age"),
                            operator=Operator.GREATER_THAN_OR_EQUAL,
                            parameter=18,
                        ),
                        ComparisonCondition(
                            column=Column("age"),
                            operator=Operator.LESS_THAN_OR_EQUAL,
                            parameter=65,
                        ),
                    ]
                ),
                ComparisonCondition(
                    column=Column("status"), operator=Operator.EQUAL, parameter="exempt"
                ),
            ]
        )
        filter_clause = engine.condition_to_filter_clause(or_condition)
        # Build and execute query
        query = (
            sa.select(sa.text("*")).select_from(sa.text("test_nested_table")).where(filter_clause)
        )
        with engine.get_connection() as conn:
            result = conn.execute(query).fetchall()
        assert len(result) == 4
        ids = sorted([row[2] for row in result])
        assert ids == [2, 3, 4, 5]
| TestConditionToFilterClauseSqlAlchemy |
| python | pytest-dev__pytest | testing/test_reports.py | {"start": 20076, "end": 22651} |
class ____:
    """Test that the hooks are working correctly for plugins"""

    def test_test_report(self, pytester: Pytester, pytestconfig: Config) -> None:
        pytester.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
            """
        )
        reprec = pytester.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 6
        for rep in reports:
            data = pytestconfig.hook.pytest_report_to_serializable(
                config=pytestconfig, report=rep
            )
            assert data["$report_type"] == "TestReport"
            new_rep = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
            assert new_rep.nodeid == rep.nodeid
            assert new_rep.when == rep.when
            assert new_rep.outcome == rep.outcome

    def test_collect_report(self, pytester: Pytester, pytestconfig: Config) -> None:
        pytester.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
            """
        )
        reprec = pytester.inline_run()
        reports = reprec.getreports("pytest_collectreport")
        assert len(reports) == 3
        for rep in reports:
            data = pytestconfig.hook.pytest_report_to_serializable(
                config=pytestconfig, report=rep
            )
            assert data["$report_type"] == "CollectReport"
            new_rep = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
            assert new_rep.nodeid == rep.nodeid
            assert new_rep.when == "collect"
            assert new_rep.outcome == rep.outcome

    @pytest.mark.parametrize(
        "hook_name", ["pytest_runtest_logreport", "pytest_collectreport"]
    )
    def test_invalid_report_types(
        self, pytester: Pytester, pytestconfig: Config, hook_name: str
    ) -> None:
        pytester.makepyfile(
            """
            def test_a(): pass
            """
        )
        reprec = pytester.inline_run()
        reports = reprec.getreports(hook_name)
        assert reports
        rep = reports[0]
        data = pytestconfig.hook.pytest_report_to_serializable(
            config=pytestconfig, report=rep
        )
        data["$report_type"] = "Unknown"
        with pytest.raises(AssertionError):
            _ = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
| TestHooks |
| python | tensorflow__tensorflow | tensorflow/python/tpu/tests/tpu_embedding_v2_correctness_dense_lookup_test.py | {"start": 1078, "end": 3789} |
class ____(
    tpu_embedding_v2_correctness_base_test.TPUEmbeddingCorrectnessBaseTest):

  @parameterized.parameters([True, False])
  def test_dense_lookup(self, is_high_dimensional):
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    if is_high_dimensional:
      dataset = self._create_high_dimensional_dense_dataset(strategy)
    else:
      dataset = self._create_dense_dataset(strategy)
    dist = strategy.experimental_distribute_dataset(
        dataset,
        options=distribute_lib.InputOptions(experimental_fetch_to_device=False))
    dist_iter = iter(dist)

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      mid_level_api.enqueue(next(dist_iter), training=False)
      return strategy.run(step)

    # Run model.
    shard_out_val = test_fn()
    shard0 = (self._unpack(strategy, shard_out_val[0]),
              self._unpack(strategy, shard_out_val[1]),
              self._unpack(strategy, shard_out_val[2]))

    # embedding_values is a linear list, so we reshape to match the correct
    # shape of the corresponding table before performing the lookup.
    numpy_videos = np.reshape(self.embedding_values, (8, 4))
    numpy_users = np.reshape(self.embedding_values, (16, 2))
    repeat_batch_num = strategy.num_replicas_in_sync // 2
    golden = (
        (numpy_videos[self.feature_watched_values[:self.data_batch_size] *
                      repeat_batch_num],
         numpy_videos[self.feature_favorited_values[:self.data_batch_size] *
                      repeat_batch_num],
         numpy_users[self.feature_friends_values[:self.data_batch_size] *
                     repeat_batch_num]))
    if is_high_dimensional:
      dense_size = self.data_batch_size * self.data_batch_size
      golden = ((
          numpy_videos[self.feature_watched_values_high_dimensional[:dense_size]
                       * repeat_batch_num].reshape(
                           self.data_batch_size * repeat_batch_num,
                           self.data_batch_size, -1),
          numpy_videos[
              self.feature_favorited_values_high_dimensional[:dense_size] *
              repeat_batch_num].reshape(self.data_batch_size * repeat_batch_num,
                                        self.data_batch_size, -1),
          numpy_users[self.feature_friends_values_high_dimensional[:dense_size]
                      * repeat_batch_num].reshape(
                          self.data_batch_size * repeat_batch_num,
                          self.data_batch_size, -1)))
    self.assertAllClose(shard0, golden)


if __name__ == '__main__':
  v2_compat.enable_v2_behavior()
  test.main()
| TPUEmbeddingCorrectnessTest |
| python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {"start": 21558, "end": 21984} |
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = MegatronBertLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->MegatronBert
| MegatronBertOnlyMLMHead |
| python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox32.py | {"start": 315, "end": 972} |
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("textbox32.xlsx")
        self.ignore_elements = {"xl/drawings/drawing1.xml": ["<a:fld"]}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with textbox(s)."""
        workbook = Workbook(self.got_filename)

        worksheet1 = workbook.add_worksheet()
        worksheet2 = workbook.add_worksheet()

        worksheet1.insert_textbox("E9", "", {"textlink": "=Sheet2!A1"})

        workbook.close()

        self.assertExcelEqual()
| TestCompareXLSXFiles |
| python | langchain-ai__langchain | libs/standard-tests/tests/unit_tests/test_basic_retriever.py | {"start": 198, "end": 475} |
class ____(BaseRetriever):
    parrot_name: str
    k: int = 3

    def _get_relevant_documents(self, query: str, **kwargs: Any) -> list[Document]:
        k = kwargs.get("k", self.k)
        return [Document(page_content=f"{self.parrot_name} says: {query}")] * k
| ParrotRetriever |
| python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {"start": 34159, "end": 34289} |
class ____(_DropBase["Sequence"]):
"""Represent a DROP SEQUENCE statement."""
__visit_name__ = "drop_sequence"
| DropSequence |
| python | airbytehq__airbyte | airbyte-integrations/connectors/destination-google-sheets/destination_google_sheets/spreadsheet.py | {"start": 250, "end": 3521} |
class ____:
def __init__(self, client: pygsheets_client, spreadsheet_id: str):
self.client = client
self.spreadsheet_id = spreadsheet_id
@property
def spreadsheet(self) -> Spreadsheet:
"""
Returns pygsheets.Spreadsheet with opened target spreadsheet by key.
"""
return self.client.open_by_key(self.spreadsheet_id)
def open_worksheet(self, stream_name: str) -> Worksheet:
"""
Opens the connection to target worksheet, if exists. Otherwise, creates one.
"""
try:
stream = self.spreadsheet.worksheet_by_title(stream_name)
except WorksheetNotFound:
stream = self.spreadsheet.add_worksheet(stream_name)
return stream
def clean_worksheet(self, stream_name: str):
"""
Cleans up the existing records inside the worksheet or creates one, if doesn't exist.
"""
try:
stream = self.open_worksheet(stream_name)
stream.clear()
except WorksheetNotFound:
self.spreadsheet.add_worksheet(stream_name)
def set_headers(self, stream_name: str, headers_list: List[str]):
"""
Sets headers belonging to the input stream
"""
stream: Worksheet = self.open_worksheet(stream_name)
if headers_list:
stream.update_row(1, headers_list)
def index_cols(self, stream: Worksheet) -> Mapping[str, int]:
"""
        Finds the index of every column that exists in the worksheet.
        Returns: Mapping of column name to its index.
{"id": 1, "name": 2, ..., "other": 99}
"""
header = stream[1] # get the first row
col_index = {}
for i, col in enumerate(header):
col_index[col] = i + 1
return col_index
def find_duplicates(self, stream: Worksheet, primary_key: str):
"""
        Finds the duplicated records inside the target worksheet.
        Returns: List of row indexes to remove from the target worksheet.
[1, 4, 5, ..., 99]
"""
rows_unique_values, rows_to_delete = {}, []
pk_col_index = self.index_cols(stream)[primary_key]
        # get all column values except the first, which is the header
pk_col_values = stream.get_col(pk_col_index, include_tailing_empty=False)[1:]
for i, row_value in enumerate(pk_col_values, 2):
if row_value not in rows_unique_values:
rows_unique_values[row_value] = None
else:
rows_to_delete.append(i)
        # reverse so rows are deleted bottom-up and earlier indexes stay valid
rows_to_delete.reverse()
return rows_to_delete
def remove_duplicates(self, stream: Worksheet, rows_list: list):
"""
        Removes the duplicated rows given by `rows_list` (a list of row indexes).
        Deletions are performed in offline mode to decrease the number of API calls:
1) Unlink the spreadsheet (make it for offline use)
2) Perform delete operation and update the actual row index
3) Link the spreadsheet (sync with online version) using batch_update method.
"""
stream.unlink()
        for row in rows_list:
            stream.delete_rows(row, 1)
stream.link()
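# Hedged usage sketch (illustrative, not from the source): chaining the dedup
# helpers for one stream; `client` and `spreadsheet_id` are assumed to come
# from the destination's config.
#
# sheets = GoogleSheets(client, spreadsheet_id)
# ws = sheets.open_worksheet("users")
# stale_rows = sheets.find_duplicates(ws, primary_key="id")
# if stale_rows:
#     sheets.remove_duplicates(ws, stale_rows)  # deletes bottom-up, one batch_update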
|
GoogleSheets
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/other/test_keda.py
|
{
"start": 914,
"end": 14377
}
|
class ____:
"""Tests keda."""
def test_keda_disabled_by_default(self):
"""Disabled by default."""
docs = render_chart(
values={},
show_only=["templates/workers/worker-kedaautoscaler.yaml"],
)
assert docs == []
@pytest.mark.parametrize(
("executor", "is_created"),
[
("CeleryExecutor", True),
("CeleryKubernetesExecutor", True),
("CeleryExecutor,KubernetesExecutor", True),
],
)
def test_keda_enabled(self, executor, is_created):
"""ScaledObject should only be created when enabled and executor is Celery or CeleryKubernetes."""
docs = render_chart(
values={
"workers": {"keda": {"enabled": True}, "persistence": {"enabled": False}},
"executor": executor,
},
show_only=["templates/workers/worker-kedaautoscaler.yaml"],
)
if is_created:
assert jmespath.search("metadata.name", docs[0]) == "release-name-worker"
else:
assert docs == []
@pytest.mark.parametrize(
"executor", ["CeleryExecutor", "CeleryKubernetesExecutor", "CeleryExecutor,KubernetesExecutor"]
)
def test_include_event_source_container_name_in_scaled_object(self, executor):
docs = render_chart(
values={
"workers": {"keda": {"enabled": True}, "persistence": {"enabled": False}},
"executor": executor,
},
show_only=["templates/workers/worker-kedaautoscaler.yaml"],
)
assert jmespath.search("spec.scaleTargetRef.envSourceContainerName", docs[0]) == "worker"
@pytest.mark.parametrize(
"executor", ["CeleryExecutor", "CeleryKubernetesExecutor", "CeleryExecutor,KubernetesExecutor"]
)
def test_keda_advanced(self, executor):
"""Verify keda advanced config."""
expected_advanced = {
"horizontalPodAutoscalerConfig": {
"behavior": {
"scaleDown": {
"stabilizationWindowSeconds": 300,
"policies": [{"type": "Percent", "value": 100, "periodSeconds": 15}],
}
}
}
}
docs = render_chart(
values={
"workers": {
"keda": {
"enabled": True,
"advanced": expected_advanced,
},
},
"executor": executor,
},
show_only=["templates/workers/worker-kedaautoscaler.yaml"],
)
assert jmespath.search("spec.advanced", docs[0]) == expected_advanced
@staticmethod
def build_query(executor, concurrency=16, queue=None):
"""Build the query used by KEDA autoscaler to determine how many workers there should be."""
query = (
f"SELECT ceil(COUNT(*)::decimal / {concurrency}) "
"FROM task_instance WHERE (state='running' OR state='queued')"
)
if "CeleryKubernetesExecutor" in executor:
queue_value = queue or "kubernetes"
query += f" AND queue != '{queue_value}'"
elif "KubernetesExecutor" in executor:
query += " AND executor IS DISTINCT FROM 'KubernetesExecutor'"
elif "airflow.providers.edge3.executors.EdgeExecutor" in executor:
query += " AND executor IS DISTINCT FROM 'EdgeExecutor'"
return query
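    # Illustrative note (not in the original tests): for
    # executor="CeleryKubernetesExecutor" with the default concurrency of 16,
    # build_query returns:
    #   SELECT ceil(COUNT(*)::decimal / 16) FROM task_instance
    #   WHERE (state='running' OR state='queued') AND queue != 'kubernetes'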
@pytest.mark.parametrize(
("executor", "concurrency"),
[
("CeleryExecutor", 8),
("CeleryExecutor", 16),
("CeleryKubernetesExecutor", 8),
("CeleryKubernetesExecutor", 16),
("CeleryExecutor,KubernetesExecutor", 8),
("CeleryExecutor,KubernetesExecutor", 16),
("CeleryExecutor,airflow.providers.edge3.executors.EdgeExecutor", 8),
("CeleryExecutor,airflow.providers.edge3.executors.EdgeExecutor", 16),
],
)
def test_keda_concurrency(self, executor, concurrency):
"""Verify keda sql query uses configured concurrency."""
docs = render_chart(
values={
"workers": {"keda": {"enabled": True}, "persistence": {"enabled": False}},
"executor": executor,
"config": {"celery": {"worker_concurrency": concurrency}},
},
show_only=["templates/workers/worker-kedaautoscaler.yaml"],
)
expected_query = self.build_query(executor=executor, concurrency=concurrency)
assert jmespath.search("spec.triggers[0].metadata.query", docs[0]) == expected_query
@pytest.mark.parametrize(
("executor", "queue", "should_filter"),
[
("CeleryExecutor", None, False),
("CeleryExecutor", "my_queue", False),
("CeleryKubernetesExecutor", None, True),
("CeleryKubernetesExecutor", "my_queue", True),
("CeleryExecutor,KubernetesExecutor", "None", False),
("CeleryExecutor,KubernetesExecutor", "my_queue", True),
],
)
def test_keda_query_kubernetes_queue(self, executor, queue, should_filter):
"""
Verify keda sql query ignores kubernetes queue when CKE is used.
Sometimes a user might want to use a different queue name for k8s executor tasks,
and we also verify here that we use the configured queue name in that case.
"""
values = {
"workers": {"keda": {"enabled": True}, "persistence": {"enabled": False}},
"executor": executor,
}
if queue:
values.update({"config": {"celery_kubernetes_executor": {"kubernetes_queue": queue}}})
docs = render_chart(
values=values,
show_only=["templates/workers/worker-kedaautoscaler.yaml"],
)
expected_query = self.build_query(executor=executor, queue=queue)
assert jmespath.search("spec.triggers[0].metadata.query", docs[0]) == expected_query
@pytest.mark.parametrize(
("enabled", "kind"),
[
("enabled", "StatefulSet"),
("not_enabled", "Deployment"),
],
)
def test_persistence(self, enabled, kind):
"""If worker persistence is enabled, scaleTargetRef should be StatefulSet else Deployment."""
is_enabled = enabled == "enabled"
docs = render_chart(
values={
"workers": {"keda": {"enabled": True}, "persistence": {"enabled": is_enabled}},
"executor": "CeleryExecutor",
},
show_only=["templates/workers/worker-kedaautoscaler.yaml"],
)
assert jmespath.search("spec.scaleTargetRef.kind", docs[0]) == kind
def test_default_keda_db_connection(self):
"""Verify default keda db connection."""
import base64
docs = render_chart(
values={
"workers": {"keda": {"enabled": True}},
"executor": "CeleryExecutor",
},
show_only=[
"templates/workers/worker-deployment.yaml",
"templates/workers/worker-kedaautoscaler.yaml",
"templates/secrets/metadata-connection-secret.yaml",
],
)
worker_deployment = docs[0]
keda_autoscaler = docs[1]
metadata_connection_secret = docs[2]
worker_container_env_vars = jmespath.search(
"spec.template.spec.containers[?name=='worker'].env[].name", worker_deployment
)
assert "AIRFLOW_CONN_AIRFLOW_DB" in worker_container_env_vars
assert "KEDA_DB_CONN" not in worker_container_env_vars
secret_data = jmespath.search("data", metadata_connection_secret)
assert "connection" in secret_data.keys()
assert "@release-name-postgresql" in base64.b64decode(secret_data["connection"]).decode()
assert "kedaConnection" not in secret_data.keys()
autoscaler_connection_env_var = jmespath.search(
"spec.triggers[0].metadata.connectionFromEnv", keda_autoscaler
)
assert autoscaler_connection_env_var == "AIRFLOW_CONN_AIRFLOW_DB"
def test_default_keda_db_connection_pgbouncer_enabled(self):
"""Verify keda db connection when pgbouncer is enabled."""
import base64
docs = render_chart(
values={
"workers": {"keda": {"enabled": True}},
"executor": "CeleryExecutor",
"pgbouncer": {"enabled": True},
},
show_only=[
"templates/workers/worker-deployment.yaml",
"templates/workers/worker-kedaautoscaler.yaml",
"templates/secrets/metadata-connection-secret.yaml",
],
)
worker_deployment = docs[0]
keda_autoscaler = docs[1]
metadata_connection_secret = docs[2]
worker_container_env_vars = jmespath.search(
"spec.template.spec.containers[?name=='worker'].env[].name", worker_deployment
)
assert "AIRFLOW_CONN_AIRFLOW_DB" in worker_container_env_vars
assert "KEDA_DB_CONN" not in worker_container_env_vars
secret_data = jmespath.search("data", metadata_connection_secret)
assert "connection" in secret_data.keys()
assert "@release-name-pgbouncer" in base64.b64decode(secret_data["connection"]).decode()
assert "kedaConnection" not in secret_data.keys()
autoscaler_connection_env_var = jmespath.search(
"spec.triggers[0].metadata.connectionFromEnv", keda_autoscaler
)
assert autoscaler_connection_env_var == "AIRFLOW_CONN_AIRFLOW_DB"
def test_default_keda_db_connection_pgbouncer_enabled_usePgbouncer_false(self):
"""Verify keda db connection when pgbouncer is enabled and usePgbouncer is false."""
import base64
docs = render_chart(
values={
"workers": {"keda": {"enabled": True, "usePgbouncer": False}},
"executor": "CeleryExecutor",
"pgbouncer": {"enabled": True},
},
show_only=[
"templates/workers/worker-deployment.yaml",
"templates/workers/worker-kedaautoscaler.yaml",
"templates/secrets/metadata-connection-secret.yaml",
],
)
worker_deployment = docs[0]
keda_autoscaler = docs[1]
metadata_connection_secret = docs[2]
worker_container_env_vars = jmespath.search(
"spec.template.spec.containers[?name=='worker'].env[].name", worker_deployment
)
assert "AIRFLOW_CONN_AIRFLOW_DB" in worker_container_env_vars
assert "KEDA_DB_CONN" in worker_container_env_vars
secret_data = jmespath.search("data", metadata_connection_secret)
connection_secret = base64.b64decode(secret_data["connection"]).decode()
keda_connection_secret = base64.b64decode(secret_data["kedaConnection"]).decode()
assert "connection" in secret_data.keys()
assert "@release-name-pgbouncer" in connection_secret
assert ":6543" in connection_secret
assert "/release-name-metadata" in connection_secret
assert "kedaConnection" in secret_data.keys()
assert "@release-name-postgresql" in keda_connection_secret
assert ":5432" in keda_connection_secret
assert "/postgres" in keda_connection_secret
autoscaler_connection_env_var = jmespath.search(
"spec.triggers[0].metadata.connectionFromEnv", keda_autoscaler
)
assert autoscaler_connection_env_var == "KEDA_DB_CONN"
def test_mysql_keda_db_connection(self):
"""Verify keda db connection when pgbouncer is enabled."""
import base64
docs = render_chart(
values={
"data": {"metadataConnection": {"protocol": "mysql", "port": 3306}},
"workers": {"keda": {"enabled": True}},
"executor": "CeleryExecutor",
},
show_only=[
"templates/workers/worker-deployment.yaml",
"templates/workers/worker-kedaautoscaler.yaml",
"templates/secrets/metadata-connection-secret.yaml",
],
)
worker_deployment = docs[0]
keda_autoscaler = docs[1]
metadata_connection_secret = docs[2]
worker_container_env_vars = jmespath.search(
"spec.template.spec.containers[?name=='worker'].env[].name", worker_deployment
)
assert "AIRFLOW_CONN_AIRFLOW_DB" in worker_container_env_vars
assert "KEDA_DB_CONN" in worker_container_env_vars
keda_autoscaler_metadata = jmespath.search("spec.triggers[0].metadata", keda_autoscaler)
assert "queryValue" in keda_autoscaler_metadata
secret_data = jmespath.search("data", metadata_connection_secret)
keda_connection_secret = base64.b64decode(secret_data["kedaConnection"]).decode()
assert "connection" in secret_data.keys()
assert "kedaConnection" in secret_data.keys()
assert not keda_connection_secret.startswith("//")
autoscaler_connection_env_var = jmespath.search(
"spec.triggers[0].metadata.connectionStringFromEnv", keda_autoscaler
)
assert autoscaler_connection_env_var == "KEDA_DB_CONN"
|
TestKeda
|
python
|
pypa__warehouse
|
warehouse/filters.py
|
{
"start": 491,
"end": 5409
}
|
class ____(enum.Enum):
bdist_dmg = "OSX Disk Image"
bdist_dumb = "Dumb Binary"
bdist_egg = "Egg"
bdist_msi = "Windows MSI Installer"
bdist_rpm = "RPM"
bdist_wheel = "Wheel"
bdist_wininst = "Windows Installer"
sdist = "Source"
def format_package_type(value):
try:
return PackageType[value].value
except KeyError:
return value
def _camo_url(request, url):
camo_url = request.registry.settings["camo.url"].format(request=request)
camo_key = request.registry.settings["camo.key"].encode("utf8")
url = url.encode("utf8")
path = "/".join(
[
hmac.new(camo_key, url, digestmod="sha1").hexdigest(),
binascii.hexlify(url).decode("utf8"),
]
)
return urllib.parse.urljoin(camo_url, path)
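# Worked sketch of the camo path scheme above (hypothetical key and URL, not
# the production settings):
#
# import binascii, hmac
# key, url = b"secret", b"http://example.com/img.png"
# digest = hmac.new(key, url, digestmod="sha1").hexdigest()
# path = f"{digest}/{binascii.hexlify(url).decode('utf8')}"
# # path -> "<40-char hmac hex>/687474703a2f2f6578616d706c652e636f6d2f696d672e706e67"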
@jinja2.pass_context
def camoify(ctx, value):
request = ctx.get("request") or get_current_request()
# Parse the rendered output and replace any inline images that don't point
# to HTTPS with camouflaged images.
tree_builder = html5lib.treebuilders.getTreeBuilder("dom")
parser = html5lib.html5parser.HTMLParser(tree=tree_builder)
dom = parser.parse(value)
for element in dom.getElementsByTagName("img"):
src = element.getAttribute("src")
if src:
element.setAttribute("src", request.camo_url(src))
tree_walker = html5lib.treewalkers.getTreeWalker("dom")
html_serializer = html5lib.serializer.HTMLSerializer()
camoed = "".join(html_serializer.serialize(tree_walker(dom)))
return camoed
_SI_SYMBOLS = ["k", "M", "G", "T", "P", "E", "Z", "Y"]
def shorten_number(value):
for i, symbol in enumerate(_SI_SYMBOLS):
magnitude = value / (1000 ** (i + 1))
        if 1 <= magnitude < 1000:
return f"{magnitude:.3g}{symbol}"
return str(value)
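# Examples (illustrative, not from the source):
#   shorten_number(1234)       -> "1.23k"
#   shorten_number(5_600_000)  -> "5.6M"
#   shorten_number(999)        -> "999"  (below 1k, falls through to str())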
def tojson(value):
return json.dumps(value, sort_keys=True, separators=(",", ":"))
def urlparse(value):
return parse_url(value)
def format_tags(tags):
# split tags
if re.search(r",", tags):
split_tags = re.split(r"\s*,\s*", tags)
elif re.search(r";", tags):
split_tags = re.split(r"\s*;\s*", tags)
else:
split_tags = re.split(r"\s+", tags)
# strip whitespace, quotes, double quotes
stripped_tags = [re.sub(r'^["\'\s]+|["\'\s]+$', "", t) for t in split_tags]
# remove any empty tags
formatted_tags = [t for t in stripped_tags if t]
return formatted_tags
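# Examples (illustrative): commas take precedence over semicolons, which take
# precedence over whitespace:
#   format_tags("web, async,  http")  -> ["web", "async", "http"]
#   format_tags("'web' ; 'async'")    -> ["web", "async"]
#   format_tags("web async")          -> ["web", "async"]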
def format_classifiers(classifiers):
structured: collections.OrderedDict[str, list[str]] = collections.OrderedDict()
# Split up our classifiers into our data structure
for classifier in classifiers:
key, *value = classifier.split(" :: ", 1)
if value:
if key not in structured:
structured[key] = []
structured[key].append(value[0])
# Sort all the values in our data structure
for key, value in structured.items():
structured[key] = natsorted(value)
return structured
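# Example (illustrative): classifiers are grouped on the first " :: ":
#   format_classifiers([
#       "Programming Language :: Python :: 3",
#       "Programming Language :: Python :: 3.12",
#   ])
#   -> {"Programming Language": ["Python :: 3", "Python :: 3.12"]}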
def classifier_id(classifier):
return classifier.replace(" ", "_").replace("::", ".")
def contains_valid_uris(items):
"""Returns boolean representing whether the input list contains any valid
URIs
"""
return any(is_valid_uri(i) for i in items)
def parse_version(version_str):
return packaging_legacy.version.parse(version_str)
def localize_datetime(timestamp):
return pytz.utc.localize(timestamp)
def ctime(timestamp):
return datetime.datetime.fromtimestamp(timestamp)
def is_recent(timestamp):
if timestamp:
return timestamp + datetime.timedelta(days=30) > datetime.datetime.now()
return False
def parse_isoformat(datestring):
return datetime.datetime.fromisoformat(datestring)
def format_email(metadata_email: str) -> tuple[str, str]:
"""
Return the name and email address from a metadata RFC-822 string.
Use Jinja's `first` and `last` to access each part in a template.
TODO: Support more than one email address, per RFC-822.
"""
emails = []
for name, email in getaddresses([metadata_email]):
if "@" not in email:
return name, ""
emails.append((name, email))
return emails[0][0], emails[0][1]
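# Example (illustrative):
#   format_email("Jane Doe <jane@example.com>") -> ("Jane Doe", "jane@example.com")
# An address without an "@" falls back to (name, "").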
def remove_invalid_xml_unicode(value: str | None) -> str | None:
"""
Remove invalid unicode characters from a string.
Useful for XML Templates.
Ref: https://www.w3.org/TR/REC-xml/#NT-Char
"""
return "".join(c for c in value if ord(c) >= 32) if value else value
def _canonical_url(request, **kwargs):
if request.matched_route:
try:
return request.route_url(request.matched_route.name, **kwargs)
except KeyError:
pass
def includeme(config):
config.add_request_method(_camo_url, name="camo_url")
config.add_request_method(_canonical_url, name="canonical_url")
|
PackageType
|
python
|
bokeh__bokeh
|
src/bokeh/models/glyph.py
|
{
"start": 3317,
"end": 3553
}
|
class ____(HasProps):
''' Glyphs with line properties
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@abstract
|
LineGlyph
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/optimization/optimization_test.py
|
{
"start": 3569,
"end": 12095
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testOptimizationStatefulFunction(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda _: random_ops.random_uniform([])).batch(10)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
self.evaluate(get_next())
# TODO(b/123354468)
@combinations.generate(test_base.graph_only_combinations())
def testOptimizationLargeInputFromTensor(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None))
dataset = dataset_ops.Dataset.from_tensors(input_t)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([512, 1024, 1025], np.int32)})
self.evaluate(get_next)
# TODO(b/123354468)
@combinations.generate(test_base.graph_only_combinations())
def testOptimizationLargeInputFromTensorSlices(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None))
dataset = dataset_ops.Dataset.from_tensor_slices(input_t)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([1, 512, 1024, 1025], np.int32)})
self.evaluate(get_next)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationNestedDataset(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(testing.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # Should be removed by noop elimination
dataset = dataset.cache()
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
@combinations.generate(test_base.default_test_combinations())
def testOptimizationNestedDatasetWithModifiedRetval(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(testing.assert_next(["MapAndBatch"]))
# Should be fused by map and batch fusion
dataset = dataset.map(lambda x: x)
dataset = dataset.batch(1)
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[[0]])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(autotune=[True, False, None]),
combinations.combine(map_parallelization=[True, False, None])))
def testOptimizationMapParallelization(self, autotune, map_parallelization):
dataset = dataset_ops.Dataset.range(5)
if autotune is not False and map_parallelization is not False: # pylint: disable=g-bool-id-comparison
dataset = dataset.apply(testing.assert_next(["ParallelMap"]))
else:
dataset = dataset.apply(testing.assert_next(["Map"]))
dataset = dataset.map(lambda x: x + 1)
options = options_lib.Options()
if autotune is not None:
options.autotune.enabled = autotune
if map_parallelization is not None:
options.experimental_optimization.map_parallelization = (
map_parallelization)
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=list(range(1, 6)))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(existing_prefetch=[True, False]),
combinations.combine(autotune=[True, False]),
combinations.combine(inject_prefetch=[True, False])))
def testOptimizationInjectPrefetch(self, existing_prefetch, autotune,
inject_prefetch):
dataset = dataset_ops.Dataset.range(5)
dataset = dataset.map(
lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.batch(1)
if existing_prefetch:
dataset = dataset.prefetch(1)
if autotune and inject_prefetch and not existing_prefetch:
dataset = dataset.apply(testing.assert_next(["Prefetch", "Root"]))
else:
dataset = dataset.apply(testing.assert_next(["Root"]))
options = options_lib.Options()
options.autotune.enabled = autotune
options.experimental_optimization.map_and_batch_fusion = False
if not inject_prefetch:
options.experimental_optimization.inject_prefetch = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[np.array([x]) for x in
range(1, 6)])
# Reference variables are not supported in eager mode.
@combinations.generate(
combinations.times(test_base.graph_only_combinations(),
_captured_refvar_test_combinations()))
def testOptimizationWithCapturedRefVar(self, dataset_fn):
"""Tests that default optimizations are disabled with ref variables."""
variable = variable_scope.get_variable(
"v", initializer=0, use_resource=False)
assign_op = variable.assign_add(1)
unoptimized_dataset = dataset_fn(variable)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.map_and_batch_fusion = True
options.experimental_warm_start = False
optimized_dataset = unoptimized_dataset.with_options(options)
optimized_it = dataset_ops.make_initializable_iterator(optimized_dataset)
# Check that outputs are the same in the optimized and unoptimized cases,
# when the variable value is changing.
unoptimized_it = dataset_ops.make_initializable_iterator(
unoptimized_dataset)
with ops.control_dependencies([assign_op]):
unoptimized_output = unoptimized_it.get_next()
optimized_output = optimized_it.get_next()
self.evaluate(variable.initializer)
self.evaluate((unoptimized_it.initializer, optimized_it.initializer))
while True:
try:
unoptimized, optimized = self.evaluate((unoptimized_output,
optimized_output))
self.assertEqual(unoptimized, optimized)
except errors.OutOfRangeError:
break
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(warm_start=[True, False]),
)
)
def testOptimizationWarmStart(self, warm_start):
dataset = dataset_ops.Dataset.range(10)
counter = variables.Variable(0)
def update_counter(x):
counter.assign_add(1)
return x
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
if warm_start:
options.experimental_warm_start = True
else:
options.experimental_warm_start = False
dataset = dataset.with_options(options)
dataset = dataset.map(update_counter).prefetch(10)
unused_iter = iter(dataset)
if warm_start:
for sleep_time_secs in [0.1, 0.2, 0.5, 2, 5, 10]:
if counter.numpy() == 0:
time.sleep(sleep_time_secs)
else:
break
self.assertGreater(counter.numpy(), 0)
else:
self.assertEqual(counter.numpy(), 0)
if __name__ == "__main__":
test.main()
|
OptimizationTest
|
python
|
scipy__scipy
|
scipy/spatial/tests/test_kdtree.py
|
{
"start": 12368,
"end": 12516
}
|
class ____(_Test_random_ball):
def setup_method(self):
super().setup_method()
self.eps = 0.1
@KDTreeTest
|
_Test_random_ball_approx
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_organization_events_trends.py
|
{
"start": 30325,
"end": 34192
}
|
class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.url = reverse(
"sentry-api-0-organization-events-trends-stats",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
self.prototype = load_data("transaction")
self.features = {"organizations:performance-view": True}
# Make 10 transactions for paging
for i in range(10):
for j in range(2):
data = self.prototype.copy()
data["user"] = {"email": "foo@example.com"}
data["start_timestamp"] = (self.day_ago + timedelta(minutes=30)).isoformat()
data["timestamp"] = (
self.day_ago + timedelta(hours=j, minutes=30, seconds=2)
).isoformat()
if i < 5:
data["transaction"] = f"transaction_1{i}"
else:
data["transaction"] = f"transaction_2{i}"
self.store_event(data, project_id=self.project.id)
def _parse_links(self, header):
# links come in {url: {...attrs}}, but we need {rel: {...attrs}}
links = {}
for url, attrs in parse_link_header(header).items():
links[attrs["rel"]] = attrs
attrs["href"] = url
return links
def test_pagination(self) -> None:
with self.feature(self.features):
response = self.client.get(
self.url,
format="json",
data={
# Set the timeframe to where the second range has no transactions so all the counts/percentile are 0
"end": (self.day_ago + timedelta(hours=2)).isoformat(),
"start": (self.day_ago - timedelta(hours=2)).isoformat(),
"field": ["project", "transaction"],
"query": "event.type:transaction",
"project": [self.project.id],
},
)
assert response.status_code == 200, response.content
links = self._parse_links(response["Link"])
assert links["previous"]["results"] == "false"
assert links["next"]["results"] == "true"
assert len(response.data["events"]["data"]) == 5
response = self.client.get(links["next"]["href"], format="json")
assert response.status_code == 200, response.content
links = self._parse_links(response["Link"])
assert links["previous"]["results"] == "true"
assert links["next"]["results"] == "false"
assert len(response.data["events"]["data"]) == 5
def test_pagination_with_query(self) -> None:
with self.feature(self.features):
response = self.client.get(
self.url,
format="json",
data={
# Set the timeframe to where the second range has no transactions so all the counts/percentile are 0
"end": (self.day_ago + timedelta(hours=2)).isoformat(),
"start": (self.day_ago - timedelta(hours=2)).isoformat(),
"field": ["project", "transaction"],
"query": "event.type:transaction transaction:transaction_1*",
"project": [self.project.id],
},
)
assert response.status_code == 200, response.content
links = self._parse_links(response["Link"])
assert links["previous"]["results"] == "false"
assert links["next"]["results"] == "false"
assert len(response.data["events"]["data"]) == 5
|
OrganizationEventsTrendsPagingTest
|
python
|
keras-team__keras
|
keras/src/optimizers/schedules/learning_rate_schedule_test.py
|
{
"start": 1358,
"end": 2999
}
|
class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.ExponentialDecay(
initial_learning_rate=0.05,
decay_steps=10,
decay_rate=0.96,
staircase=True,
name="my_ed",
)
)
def test_continuous(self):
step = 5
decayed_lr = schedules.ExponentialDecay(0.05, 10, 0.96)
expected = 0.05 * 0.96 ** (5.0 / 10.0)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_staircase(self):
step = backend.Variable(1.0)
decayed_lr = schedules.ExponentialDecay(0.1, 3, 0.96, staircase=True)
# No change to learning rate due to staircase
expected = 0.1
self.assertAllClose(decayed_lr(step), expected, 1e-6)
expected = 0.1
step.assign(2)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
# Decayed learning rate
expected = 0.1 * 0.96 ** (100 // 3)
step.assign(100)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_variables(self):
step = backend.Variable(1.0)
decayed_lr = schedules.ExponentialDecay(0.1, 3, 0.96, staircase=True)
# No change to learning rate
step.assign(1)
self.assertAllClose(decayed_lr(step), 0.1, 1e-6)
step.assign(2)
self.assertAllClose(decayed_lr(step), 0.1, 1e-6)
# Decayed learning rate
step.assign(100)
expected = 0.1 * 0.96 ** (100 // 3)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
|
ExponentialDecayTest
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 43036,
"end": 43151
}
|
class ____(BaseModel, extra="forbid"):
geo_distance: "GeoDistanceParams" = Field(..., description="")
|
GeoDistance
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_groupby.py
|
{
"start": 42327,
"end": 42461
}
|
class ____(GroupByCumulative):
chunk = M.cumcount
aggregate = staticmethod(_cumcount_aggregate)
initial = -1
|
GroupByCumcount
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/worksheet/test_write_row_breaks.py
|
{
"start": 301,
"end": 1327
}
|
class ____(unittest.TestCase):
"""
Test the Worksheet _write_row_breaks() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_row_breaks_1(self):
"""Test the _write_row_breaks() method"""
self.worksheet.hbreaks = [1]
self.worksheet._write_row_breaks()
exp = """<rowBreaks count="1" manualBreakCount="1"><brk id="1" max="16383" man="1"/></rowBreaks>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_row_breaks_2(self):
"""Test the _write_row_breaks() method"""
self.worksheet.hbreaks = [15, 7, 3, 0]
self.worksheet._write_row_breaks()
exp = """<rowBreaks count="3" manualBreakCount="3"><brk id="3" max="16383" man="1"/><brk id="7" max="16383" man="1"/><brk id="15" max="16383" man="1"/></rowBreaks>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestWriteRowBreaks
|
python
|
pypa__hatch
|
src/hatch/project/env.py
|
{
"start": 16072,
"end": 17790
}
|
class ____:
def __init__(self, data_dir: Path, project_path: Path):
self.__data_dir = data_dir
self.__project_path = project_path
def dependency_hash(self, environment: EnvironmentInterface) -> str:
return self._read(environment).get("dependency_hash", "")
def update_dependency_hash(self, environment: EnvironmentInterface, dependency_hash: str) -> None:
metadata = self._read(environment)
metadata["dependency_hash"] = dependency_hash
self._write(environment, metadata)
def reset(self, environment: EnvironmentInterface) -> None:
self._metadata_file(environment).unlink(missing_ok=True)
def _read(self, environment: EnvironmentInterface) -> dict[str, Any]:
import json
metadata_file = self._metadata_file(environment)
if not metadata_file.is_file():
return {}
return json.loads(metadata_file.read_text())
def _write(self, environment: EnvironmentInterface, metadata: dict[str, Any]) -> None:
import json
metadata_file = self._metadata_file(environment)
metadata_file.parent.ensure_dir_exists()
metadata_file.write_text(json.dumps(metadata))
def _metadata_file(self, environment: EnvironmentInterface) -> Path:
from hatch.env.internal import is_isolated_environment
if is_isolated_environment(environment.name, environment.config):
return self.__data_dir / ".internal" / f"{environment.name}.json"
return self._storage_dir / environment.config["type"] / f"{environment.name}.json"
@cached_property
def _storage_dir(self) -> Path:
return self.__data_dir / self.__project_path.id
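# Hedged usage sketch (not from the source): the dependency hash gates
# re-syncing an environment; `env`, `hash_dependencies`, and
# `sync_dependencies` are hypothetical stand-ins.
#
# metadata = EnvironmentMetadata(data_dir, project_path)
# current = hash_dependencies(env)
# if metadata.dependency_hash(env) != current:
#     sync_dependencies(env)
#     metadata.update_dependency_hash(env, current)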
|
EnvironmentMetadata
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 118441,
"end": 118561
}
|
class ____(BaseModel, extra="forbid"):
searches: List["SearchRequest"] = Field(..., description="")
|
SearchRequestBatch
|
python
|
ray-project__ray
|
python/ray/tune/search/search_generator.py
|
{
"start": 914,
"end": 8185
}
|
class ____(SearchAlgorithm):
"""Generates trials to be passed to the TrialRunner.
Uses the provided ``searcher`` object to generate trials. This class
transparently handles repeating trials with score aggregation
without embedding logic into the Searcher.
Args:
searcher: Search object that subclasses the Searcher base class. This
is then used for generating new hyperparameter samples.
"""
CKPT_FILE_TMPL = "search_gen_state-{}.json"
def __init__(self, searcher: Searcher):
assert issubclass(
type(searcher), Searcher
), "Searcher should be subclassing Searcher."
self.searcher = searcher
self._parser = _make_parser()
self._experiment = None
self._counter = 0 # Keeps track of number of trials created.
self._total_samples = 0 # int: total samples to evaluate.
self._finished = False
@property
def metric(self):
return self.searcher.metric
def set_search_properties(
self, metric: Optional[str], mode: Optional[str], config: Dict, **spec
) -> bool:
return _set_search_properties_backwards_compatible(
self.searcher.set_search_properties, metric, mode, config, **spec
)
@property
def total_samples(self):
return self._total_samples
def add_configurations(
self, experiments: Union[Experiment, List[Experiment], Dict[str, Dict]]
):
"""Registers experiment specifications.
Arguments:
experiments: Experiments to run.
"""
assert not self._experiment
logger.debug("added configurations")
experiment_list = _convert_to_experiment_list(experiments)
assert (
len(experiment_list) == 1
), "SearchAlgorithms can only support 1 experiment at a time."
self._experiment = experiment_list[0]
experiment_spec = self._experiment.spec
self._total_samples = self._experiment.spec.get("num_samples", 1)
_warn_on_repeater(self.searcher, self._total_samples)
if "run" not in experiment_spec:
raise TuneError("Must specify `run` in {}".format(experiment_spec))
def next_trial(self):
"""Provides one Trial object to be queued into the TrialRunner.
Returns:
Trial: Returns a single trial.
"""
if not self.is_finished():
return self.create_trial_if_possible(self._experiment.spec)
return None
def create_trial_if_possible(self, experiment_spec: Dict) -> Optional[Trial]:
logger.debug("creating trial")
trial_id = Trial.generate_id()
suggested_config = self.searcher.suggest(trial_id)
if suggested_config == Searcher.FINISHED:
self._finished = True
logger.debug("Searcher has finished.")
return
if suggested_config is None:
return
spec = copy.deepcopy(experiment_spec)
spec["config"] = merge_dicts(spec["config"], copy.deepcopy(suggested_config))
        # Build a unique experiment tag from the flattened config
flattened_config = _resolve_nested_dict(spec["config"])
self._counter += 1
tag = "{0}_{1}".format(str(self._counter), format_vars(flattened_config))
trial = _create_trial_from_spec(
spec,
self._parser,
evaluated_params=flatten_dict(suggested_config),
experiment_tag=tag,
trial_id=trial_id,
)
return trial
def on_trial_result(self, trial_id: str, result: Dict):
"""Notifies the underlying searcher."""
self.searcher.on_trial_result(trial_id, result)
def on_trial_complete(
self, trial_id: str, result: Optional[Dict] = None, error: bool = False
):
self.searcher.on_trial_complete(trial_id=trial_id, result=result, error=error)
def is_finished(self) -> bool:
return self._counter >= self._total_samples or self._finished
def get_state(self) -> Dict:
return {
"counter": self._counter,
"total_samples": self._total_samples,
"finished": self._finished,
"experiment": self._experiment,
}
def set_state(self, state: Dict):
self._counter = state["counter"]
self._total_samples = state["total_samples"]
self._finished = state["finished"]
self._experiment = state["experiment"]
def has_checkpoint(self, dirpath: str):
return bool(_load_newest_checkpoint(dirpath, self.CKPT_FILE_TMPL.format("*")))
def save_to_dir(self, dirpath: str, session_str: str):
"""Saves self + searcher to dir.
Separates the "searcher" from its wrappers (concurrency, repeating).
This allows the user to easily restore a given searcher.
The save operation is atomic (write/swap).
Args:
dirpath: Filepath to experiment dir.
session_str: Unique identifier of the current run
session.
"""
searcher = self.searcher
search_alg_state = self.get_state()
while hasattr(searcher, "searcher"):
searcher_name = type(searcher).__name__
if searcher_name in search_alg_state:
logger.warning(
"There was a duplicate when saving {}. "
"Restore may not work properly.".format(searcher_name)
)
else:
search_alg_state["name:" + searcher_name] = searcher.get_state()
searcher = searcher.searcher
base_searcher = searcher
        # Save the base searcher separately so users can easily
        # restore it on its own.
base_searcher.save_to_dir(dirpath, session_str)
_atomic_save(
state=search_alg_state,
checkpoint_dir=dirpath,
file_name=self.CKPT_FILE_TMPL.format(session_str),
tmp_file_name=".tmp_search_generator_ckpt",
)
def restore_from_dir(self, dirpath: str):
"""Restores self + searcher + search wrappers from dirpath."""
searcher = self.searcher
search_alg_state = _load_newest_checkpoint(
dirpath, self.CKPT_FILE_TMPL.format("*")
)
if not search_alg_state:
raise RuntimeError("Unable to find checkpoint in {}.".format(dirpath))
while hasattr(searcher, "searcher"):
searcher_name = "name:" + type(searcher).__name__
if searcher_name not in search_alg_state:
names = [
key.split("name:")[1]
for key in search_alg_state
if key.startswith("name:")
]
logger.warning(
"{} was not found in the experiment "
"state when restoring. Found {}.".format(searcher_name, names)
)
else:
searcher.set_state(search_alg_state.pop(searcher_name))
searcher = searcher.searcher
base_searcher = searcher
logger.debug(f"searching base {base_searcher}")
base_searcher.restore_from_dir(dirpath)
self.set_state(search_alg_state)
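# Hedged usage sketch (names assumed): checkpointing the generator around an
# interrupted run, per the save/restore methods above.
#
# gen = SearchGenerator(searcher)
# gen.save_to_dir("/tmp/exp", session_str="run_1")
# ...
# if gen.has_checkpoint("/tmp/exp"):
#     gen.restore_from_dir("/tmp/exp")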
|
SearchGenerator
|
python
|
psf__black
|
tests/test_black.py
|
{
"start": 85333,
"end": 100819
}
|
class ____:
def test_get_cache_dir(
self,
tmp_path: Path,
monkeypatch: pytest.MonkeyPatch,
) -> None:
# Create multiple cache directories
workspace1 = tmp_path / "ws1"
workspace1.mkdir()
workspace2 = tmp_path / "ws2"
workspace2.mkdir()
# Force user_cache_dir to use the temporary directory for easier assertions
patch_user_cache_dir = patch(
target="black.cache.user_cache_dir",
autospec=True,
return_value=str(workspace1),
)
# If BLACK_CACHE_DIR is not set, use user_cache_dir
monkeypatch.delenv("BLACK_CACHE_DIR", raising=False)
with patch_user_cache_dir:
assert get_cache_dir().parent == workspace1
# If it is set, use the path provided in the env var.
monkeypatch.setenv("BLACK_CACHE_DIR", str(workspace2))
assert get_cache_dir().parent == workspace2
def test_cache_file_length(self) -> None:
cases = [
DEFAULT_MODE,
# all of the target versions
Mode(target_versions=set(TargetVersion)),
# all of the features
Mode(enabled_features=set(Preview)),
# all of the magics
Mode(python_cell_magics={f"magic{i}" for i in range(500)}),
# all of the things
Mode(
target_versions=set(TargetVersion),
enabled_features=set(Preview),
python_cell_magics={f"magic{i}" for i in range(500)},
),
]
for case in cases:
cache_file = get_cache_file(case)
# Some common file systems enforce a maximum path length
# of 143 (issue #4174). We can't do anything if the directory
# path is too long, but ensure the name of the cache file itself
# doesn't get too crazy.
assert len(cache_file.name) <= 96
def test_cache_broken_file(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
cache_file = get_cache_file(mode)
cache_file.write_text("this is not a pickle", encoding="utf-8")
assert black.Cache.read(mode).file_data == {}
src = (workspace / "test.py").resolve()
src.write_text("print('hello')", encoding="utf-8")
invokeBlack([str(src)])
cache = black.Cache.read(mode)
assert not cache.is_changed(src)
def test_cache_single_file_already_cached(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
src.write_text("print('hello')", encoding="utf-8")
cache = black.Cache.read(mode)
cache.write([src])
invokeBlack([str(src)])
assert src.read_text(encoding="utf-8") == "print('hello')"
@event_loop()
def test_cache_multiple_files(self) -> None:
mode = DEFAULT_MODE
with (
cache_dir() as workspace,
patch("concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor),
):
one = (workspace / "one.py").resolve()
one.write_text("print('hello')", encoding="utf-8")
two = (workspace / "two.py").resolve()
two.write_text("print('hello')", encoding="utf-8")
cache = black.Cache.read(mode)
cache.write([one])
invokeBlack([str(workspace)])
assert one.read_text(encoding="utf-8") == "print('hello')"
assert two.read_text(encoding="utf-8") == 'print("hello")\n'
cache = black.Cache.read(mode)
assert not cache.is_changed(one)
assert not cache.is_changed(two)
@pytest.mark.incompatible_with_mypyc
@pytest.mark.parametrize("color", [False, True], ids=["no-color", "with-color"])
def test_no_cache_when_writeback_diff(self, color: bool) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
src.write_text("print('hello')", encoding="utf-8")
with (
patch.object(black.Cache, "read") as read_cache,
patch.object(black.Cache, "write") as write_cache,
):
cmd = [str(src), "--diff"]
if color:
cmd.append("--color")
invokeBlack(cmd)
cache_file = get_cache_file(mode)
assert cache_file.exists() is False
read_cache.assert_called_once()
write_cache.assert_not_called()
@pytest.mark.parametrize("color", [False, True], ids=["no-color", "with-color"])
@event_loop()
def test_output_locking_when_writeback_diff(self, color: bool) -> None:
with cache_dir() as workspace:
for tag in range(0, 4):
src = (workspace / f"test{tag}.py").resolve()
src.write_text("print('hello')", encoding="utf-8")
with patch(
"black.concurrency.Manager", wraps=multiprocessing.Manager
) as mgr:
cmd = ["--diff", str(workspace)]
if color:
cmd.append("--color")
invokeBlack(cmd, exit_code=0)
# this isn't quite doing what we want, but if it _isn't_
# called then we cannot be using the lock it provides
mgr.assert_called()
def test_no_cache_when_stdin(self) -> None:
mode = DEFAULT_MODE
with cache_dir():
result = CliRunner().invoke(
black.main, ["-"], input=BytesIO(b"print('hello')")
)
assert not result.exit_code
cache_file = get_cache_file(mode)
assert not cache_file.exists()
def test_no_cache_flag_prevents_writes(self) -> None:
"""--no-cache should neither read nor write the cache"""
mode = DEFAULT_MODE
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
src.write_text("print('hello')", encoding="utf-8")
cache = black.Cache.read(mode)
# Pre-populate cache so the file is considered cached
cache.write([src])
with (
patch.object(black.Cache, "read") as read_cache,
patch.object(black.Cache, "write") as write_cache,
):
# Pass --no-cache; it should neither read nor write
invokeBlack([str(src), "--no-cache"])
read_cache.assert_not_called()
write_cache.assert_not_called()
def test_no_cache_with_multiple_files(self) -> None:
"""Formatting multiple files with --no-cache should not read or write cache
and should format files normally."""
mode = DEFAULT_MODE
        with cache_dir() as workspace:
one = (workspace / "one.py").resolve()
one.write_text("print('hello')", encoding="utf-8")
two = (workspace / "two.py").resolve()
two.write_text("print('hello')", encoding="utf-8")
# Pre-populate cache for `one` so it would normally be skipped
cache = black.Cache.read(mode)
cache.write([one])
with (
patch.object(black.Cache, "read") as read_cache,
patch.object(black.Cache, "write") as write_cache,
):
# Run Black over the directory with --no-cache
invokeBlack([str(workspace), "--no-cache"])
# Cache should not be consulted or updated
read_cache.assert_not_called()
write_cache.assert_not_called()
# Both files should have been formatted (double quotes + newline)
assert one.read_text(encoding="utf-8") == 'print("hello")\n'
assert two.read_text(encoding="utf-8") == 'print("hello")\n'
def test_read_cache_no_cachefile(self) -> None:
mode = DEFAULT_MODE
with cache_dir():
assert black.Cache.read(mode).file_data == {}
def test_write_cache_read_cache(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
src.touch()
write_cache = black.Cache.read(mode)
write_cache.write([src])
read_cache = black.Cache.read(mode)
assert not read_cache.is_changed(src)
@pytest.mark.incompatible_with_mypyc
def test_filter_cached(self) -> None:
with TemporaryDirectory() as workspace:
path = Path(workspace)
uncached = (path / "uncached").resolve()
cached = (path / "cached").resolve()
cached_but_changed = (path / "changed").resolve()
uncached.touch()
cached.touch()
cached_but_changed.touch()
cache = black.Cache.read(DEFAULT_MODE)
orig_func = black.Cache.get_file_data
def wrapped_func(path: Path) -> FileData:
if path == cached:
return orig_func(path)
if path == cached_but_changed:
return FileData(0.0, 0, "")
raise AssertionError
with patch.object(black.Cache, "get_file_data", side_effect=wrapped_func):
cache.write([cached, cached_but_changed])
todo, done = cache.filtered_cached({uncached, cached, cached_but_changed})
assert todo == {uncached, cached_but_changed}
assert done == {cached}
def test_filter_cached_hash(self) -> None:
with TemporaryDirectory() as workspace:
path = Path(workspace)
src = (path / "test.py").resolve()
src.write_text("print('hello')", encoding="utf-8")
st = src.stat()
cache = black.Cache.read(DEFAULT_MODE)
cache.write([src])
cached_file_data = cache.file_data[str(src)]
todo, done = cache.filtered_cached([src])
assert todo == set()
assert done == {src}
assert cached_file_data.st_mtime == st.st_mtime
# Modify st_mtime
cached_file_data = cache.file_data[str(src)] = FileData(
cached_file_data.st_mtime - 1,
cached_file_data.st_size,
cached_file_data.hash,
)
todo, done = cache.filtered_cached([src])
assert todo == set()
assert done == {src}
assert cached_file_data.st_mtime < st.st_mtime
assert cached_file_data.st_size == st.st_size
assert cached_file_data.hash == black.Cache.hash_digest(src)
# Modify contents
src.write_text("print('hello world')", encoding="utf-8")
new_st = src.stat()
todo, done = cache.filtered_cached([src])
assert todo == {src}
assert done == set()
assert cached_file_data.st_mtime < new_st.st_mtime
assert cached_file_data.st_size != new_st.st_size
assert cached_file_data.hash != black.Cache.hash_digest(src)
def test_write_cache_creates_directory_if_needed(self) -> None:
mode = DEFAULT_MODE
with cache_dir(exists=False) as workspace:
assert not workspace.exists()
cache = black.Cache.read(mode)
cache.write([])
assert workspace.exists()
@event_loop()
def test_failed_formatting_does_not_get_cached(self) -> None:
mode = DEFAULT_MODE
with (
cache_dir() as workspace,
patch("concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor),
):
failing = (workspace / "failing.py").resolve()
failing.write_text("not actually python", encoding="utf-8")
clean = (workspace / "clean.py").resolve()
clean.write_text('print("hello")\n', encoding="utf-8")
invokeBlack([str(workspace)], exit_code=123)
cache = black.Cache.read(mode)
assert cache.is_changed(failing)
assert not cache.is_changed(clean)
def test_write_cache_write_fail(self) -> None:
mode = DEFAULT_MODE
with cache_dir():
cache = black.Cache.read(mode)
with patch.object(Path, "open") as mock:
mock.side_effect = OSError
cache.write([])
def test_read_cache_line_lengths(self) -> None:
mode = DEFAULT_MODE
short_mode = replace(DEFAULT_MODE, line_length=1)
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
path.touch()
cache = black.Cache.read(mode)
cache.write([path])
one = black.Cache.read(mode)
assert not one.is_changed(path)
two = black.Cache.read(short_mode)
assert two.is_changed(path)
def test_cache_key(self) -> None:
# Test that all members of the mode enum affect the cache key.
for field in fields(Mode):
values: list[Any]
if field.name == "target_versions":
values = [
{TargetVersion.PY312},
{TargetVersion.PY313},
]
elif field.name == "python_cell_magics":
values = [{"magic1"}, {"magic2"}]
elif field.name == "enabled_features":
# If you are looking to remove one of these features, just
# replace it with any other feature.
values = [
{Preview.multiline_string_handling},
{Preview.string_processing},
]
elif field.type is bool:
values = [True, False]
elif field.type is int:
values = [1, 2]
else:
raise AssertionError(
f"Unhandled field type: {field.type} for field {field.name}"
)
modes = [replace(DEFAULT_MODE, **{field.name: value}) for value in values]
keys = [mode.get_cache_key() for mode in modes]
assert len(set(keys)) == len(modes)
def assert_collected_sources(
src: Sequence[str | Path],
expected: Sequence[str | Path],
*,
root: Path | None = None,
exclude: str | None = None,
include: str | None = None,
extend_exclude: str | None = None,
force_exclude: str | None = None,
stdin_filename: str | None = None,
) -> None:
gs_src = tuple(str(Path(s)) for s in src)
gs_expected = [Path(s) for s in expected]
gs_exclude = None if exclude is None else compile_pattern(exclude)
gs_include = DEFAULT_INCLUDE if include is None else compile_pattern(include)
gs_extend_exclude = (
None if extend_exclude is None else compile_pattern(extend_exclude)
)
gs_force_exclude = None if force_exclude is None else compile_pattern(force_exclude)
collected = black.get_sources(
root=root or THIS_DIR,
src=gs_src,
quiet=False,
verbose=False,
include=gs_include,
exclude=gs_exclude,
extend_exclude=gs_extend_exclude,
force_exclude=gs_force_exclude,
report=black.Report(),
stdin_filename=stdin_filename,
)
assert sorted(collected) == sorted(gs_expected)
|
TestCaching
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/projects.py
|
{
"start": 89575,
"end": 93926
}
|
class ____(Request):
"""
Get a list of distinct values for the chosen hyperparameter
:param projects: Project IDs
:type projects: Sequence[str]
:param section: Hyperparameter section name
:type section: str
:param name: Hyperparameter name
:type name: str
:param allow_public: If set to 'true' then collect values from both company and
public tasks otherwise company tasks only. The default is 'true'
:type allow_public: bool
:param include_subprojects: If set to 'true' and the project field is set then
the result includes hyper parameters values from the subproject tasks
:type include_subprojects: bool
"""
_service = "projects"
_action = "get_hyperparam_values"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"allow_public": {
"description": "If set to 'true' then collect values from both company and public tasks otherwise company tasks only. The default is 'true'",
"type": "boolean",
},
"include_subprojects": {
"default": True,
"description": "If set to 'true' and the project field is set then the result includes hyper parameters values from the subproject tasks",
"type": "boolean",
},
"name": {"description": "Hyperparameter name", "type": "string"},
"projects": {
"description": "Project IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"section": {"description": "Hyperparameter section name", "type": "string"},
},
"required": ["section", "name"],
"type": "object",
}
def __init__(
self,
section: str,
name: str,
projects: Optional[List[str]] = None,
allow_public: Optional[bool] = None,
include_subprojects: Optional[bool] = True,
**kwargs: Any
) -> None:
super(GetHyperparamValuesRequest, self).__init__(**kwargs)
self.projects = projects
self.section = section
self.name = name
self.allow_public = allow_public
self.include_subprojects = include_subprojects
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
@schema_property("section")
def section(self) -> str:
return self._property_section
@section.setter
def section(self, value: str) -> None:
if value is None:
self._property_section = None
return
self.assert_isinstance(value, "section", six.string_types)
self._property_section = value
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("allow_public")
def allow_public(self) -> Optional[bool]:
return self._property_allow_public
@allow_public.setter
def allow_public(self, value: Optional[bool]) -> None:
if value is None:
self._property_allow_public = None
return
self.assert_isinstance(value, "allow_public", (bool,))
self._property_allow_public = value
@schema_property("include_subprojects")
def include_subprojects(self) -> Optional[bool]:
return self._property_include_subprojects
@include_subprojects.setter
def include_subprojects(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_subprojects = None
return
self.assert_isinstance(value, "include_subprojects", (bool,))
self._property_include_subprojects = value
|
GetHyperparamValuesRequest
|
python
|
getsentry__sentry
|
src/sentry/models/organizationonboardingtask.py
|
{
"start": 3344,
"end": 4636
}
|
class ____(Model):
"""
    An abstract onboarding task that can be subclassed. This abstract model exists so
    that the Sandbox can create a subclass that allows tasks unique to users rather
    than organizations.
"""
__relocation_scope__ = RelocationScope.Excluded
STATUS_CHOICES = (
(OnboardingTaskStatus.COMPLETE, "complete"),
(OnboardingTaskStatus.SKIPPED, "skipped"),
)
STATUS_KEY_MAP = dict(STATUS_CHOICES)
STATUS_LOOKUP_BY_KEY = {v: k for k, v in STATUS_CHOICES}
organization = FlexibleForeignKey("sentry.Organization")
user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL")
status = BoundedPositiveIntegerField(choices=[(k, str(v)) for k, v in STATUS_CHOICES])
completion_seen = models.DateTimeField(null=True)
date_completed = models.DateTimeField(default=timezone.now)
project = FlexibleForeignKey(
"sentry.Project", db_constraint=False, null=True, on_delete=SET_NULL
)
# INVITE_MEMBER { invited_member: user.id }
data = LegacyTextJSONField(default=dict)
# abstract
TASK_LOOKUP_BY_KEY: dict[str, int]
SKIPPABLE_TASKS: frozenset[int]
class Meta:
abstract = True
@region_silo_model
|
AbstractOnboardingTask
|
python
|
scrapy__scrapy
|
tests/test_downloadermiddleware_retry.py
|
{
"start": 793,
"end": 4934
}
|
class ____:
def setup_method(self):
self.crawler = get_crawler(DefaultSpider)
self.crawler.spider = self.crawler._create_spider()
self.mw = RetryMiddleware.from_crawler(self.crawler)
self.mw.max_retry_times = 2
def test_priority_adjust(self):
req = Request("http://www.scrapytest.org/503")
rsp = Response("http://www.scrapytest.org/503", body=b"", status=503)
req2 = self.mw.process_response(req, rsp)
assert req2.priority < req.priority
def test_404(self):
req = Request("http://www.scrapytest.org/404")
rsp = Response("http://www.scrapytest.org/404", body=b"", status=404)
        # don't retry 404s
assert self.mw.process_response(req, rsp) is rsp
def test_dont_retry(self):
req = Request("http://www.scrapytest.org/503", meta={"dont_retry": True})
rsp = Response("http://www.scrapytest.org/503", body=b"", status=503)
# first retry
r = self.mw.process_response(req, rsp)
assert r is rsp
# Test retry when dont_retry set to False
req = Request("http://www.scrapytest.org/503", meta={"dont_retry": False})
rsp = Response("http://www.scrapytest.org/503")
# first retry
r = self.mw.process_response(req, rsp)
assert r is rsp
def test_dont_retry_exc(self):
req = Request("http://www.scrapytest.org/503", meta={"dont_retry": True})
r = self.mw.process_exception(req, DNSLookupError())
assert r is None
def test_503(self):
req = Request("http://www.scrapytest.org/503")
rsp = Response("http://www.scrapytest.org/503", body=b"", status=503)
# first retry
req = self.mw.process_response(req, rsp)
assert isinstance(req, Request)
assert req.meta["retry_times"] == 1
# second retry
req = self.mw.process_response(req, rsp)
assert isinstance(req, Request)
assert req.meta["retry_times"] == 2
# discard it
assert self.mw.process_response(req, rsp) is rsp
assert self.crawler.stats.get_value("retry/max_reached") == 1
assert (
self.crawler.stats.get_value("retry/reason_count/503 Service Unavailable")
== 2
)
assert self.crawler.stats.get_value("retry/count") == 2
def test_twistederrors(self):
exceptions = [
ConnectError,
ConnectionDone,
ConnectionLost,
TxConnectionRefusedError,
defer.TimeoutError,
DNSLookupError,
ResponseFailed,
TCPTimedOutError,
TxTimeoutError,
]
for exc in exceptions:
req = Request(f"http://www.scrapytest.org/{exc.__name__}")
self._test_retry_exception(req, exc("foo"))
stats = self.crawler.stats
assert stats.get_value("retry/max_reached") == len(exceptions)
assert stats.get_value("retry/count") == len(exceptions) * 2
assert (
stats.get_value("retry/reason_count/twisted.internet.defer.TimeoutError")
== 2
)
def test_exception_to_retry_added(self):
exc = ValueError
settings_dict = {
"RETRY_EXCEPTIONS": [*RETRY_EXCEPTIONS, exc],
}
crawler = get_crawler(DefaultSpider, settings_dict=settings_dict)
crawler.spider = crawler._create_spider()
mw = RetryMiddleware.from_crawler(crawler)
req = Request(f"http://www.scrapytest.org/{exc.__name__}")
self._test_retry_exception(req, exc("foo"), mw)
def _test_retry_exception(self, req, exception, mw=None):
if mw is None:
mw = self.mw
# first retry
req = mw.process_exception(req, exception)
assert isinstance(req, Request)
assert req.meta["retry_times"] == 1
# second retry
req = mw.process_exception(req, exception)
assert isinstance(req, Request)
assert req.meta["retry_times"] == 2
# discard it
req = mw.process_exception(req, exception)
assert req is None
|
TestRetry
|
python
|
pytorch__pytorch
|
torch/distributed/_tools/fsdp2_mem_tracker.py
|
{
"start": 1027,
"end": 2360
}
|
class ____(_RefType):
"""
Enumerates categories of memory usage in FSDP modules, including parameters, gradients, activations,
and optimizer states.
Attributes:
SHARDED_PARAM (str): Memory usage of sharded parameters.
        UNSHARDED_PARAM (str): Memory usage of unsharded parameters.
        BUFFER (str): Memory usage of module buffers.
SHARDED_GRAD (str): Memory usage of sharded gradients corresponding to the sharded parameters.
UNSHARDED_GRAD (str): Memory usage of unsharded gradients corresponding to the unsharded parameters.
ACT (str): Memory usage of activations and tensors from forward and AC recomputation.
TEMP (str): Memory usage of temporary tensors during the backward pass including gradients of activations.
ALL_GATHER (str): Memory usage of all_gather output tensor.
REDUCE_SCATTER (str): Memory usage of reduce_scatter input tensor.
OPT (str): Memory usage of tensors storing optimizer states.
INP (str): Memory usage of input tensors.
"""
SHARDED_PARAM = "Sharded Param"
UNSHARDED_PARAM = "Unsharded Param"
BUFFER = "Buffer"
SHARDED_GRAD = "Sharded Grad"
UNSHARDED_GRAD = "Unsharded Grad"
ACT = "Activation"
TEMP = "Temp"
ALL_GATHER = "All Gather"
REDUCE_SCATTER = "Reduce Scatter"
OPT = "OptState"
INP = "Inputs"
|
_FSDPRefType
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_cloud_memorystore.py
|
{
"start": 14135,
"end": 15891
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.cloud_memorystore.CloudMemorystoreHook")
def test_assert_valid_hook_call(self, mock_hook):
task = CloudMemorystoreCreateInstanceAndImportOperator(
task_id=TEST_TASK_ID,
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=TEST_INSTANCE,
input_config=TEST_INPUT_CONFIG,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(mock.MagicMock())
mock_hook.assert_has_calls(
[
mock.call(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
),
mock.call().create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=TEST_INSTANCE,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
),
mock.call().import_instance(
input_config=TEST_INPUT_CONFIG,
instance=TEST_INSTANCE_ID,
location=TEST_LOCATION,
metadata=TEST_METADATA,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
),
]
)
|
TestCloudMemorystoreCreateInstanceAndImportOperatorOperator
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/materialize_assets_operator.py
|
{
"start": 261,
"end": 2040
}
|
class ____(BaseDagsterAssetsOperator):
"""An operator base class that proxies execution to a user-provided list of Dagster assets.
Will throw an error at runtime if not all assets can be found on the corresponding Dagster instance.
Args:
asset_key_paths (Sequence[Union[str, Sequence[str]]]): A sequence of asset key paths to materialize.
Each path in the sequence can be a string, which is treated as an asset key path with a single
component per "/" key, or a sequence of strings representing a path with multiple components. For more,
see the docs on asset keys: https://docs.dagster.io/guides/build/assets#multi-part-asset-keys
"""
def __init__(self, asset_key_paths: Sequence[Union[str, Sequence[str]]], *args, **kwargs):
self.asset_key_paths = [
_get_path_from_str(path) if isinstance(path, str) else tuple(path)
for path in asset_key_paths
]
super().__init__(*args, **kwargs)
def filter_asset_nodes(
self, context: Context, asset_nodes: Sequence[Mapping[str, Any]]
) -> Iterable[Mapping[str, Any]]:
hashable_path_to_node = {tuple(node["assetKey"]["path"]): node for node in asset_nodes}
if not all(path in hashable_path_to_node for path in self.asset_key_paths):
raise ValueError(
f"Could not find all asset key paths {self.asset_key_paths} in the asset nodes. Found: {list(hashable_path_to_node.keys())}"
)
yield from [hashable_path_to_node[path] for path in self.asset_key_paths]
def _get_path_from_str(path_str: str) -> tuple:
parts = re.split(UNESCAPED_SLASH_RE, path_str)
return tuple(part.replace(ESCAPED_SLASH, "/") for part in parts)
|
BaseMaterializeAssetsOperator
|
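The `_get_path_from_str` helper in the record above depends on two module-level constants outside the extracted span. A self-contained sketch of the escaping behavior, with hypothetical stand-ins for `ESCAPED_SLASH` and `UNESCAPED_SLASH_RE` (the real definitions live elsewhere in dagster_airlift):
import re

ESCAPED_SLASH = "\\/"                         # assumed: literal backslash-slash
UNESCAPED_SLASH_RE = re.compile(r"(?<!\\)/")  # assumed: split only on a bare "/"

def _get_path_from_str(path_str: str) -> tuple:
    parts = re.split(UNESCAPED_SLASH_RE, path_str)
    return tuple(part.replace(ESCAPED_SLASH, "/") for part in parts)

assert _get_path_from_str("my/asset") == ("my", "asset")    # two components
assert _get_path_from_str("my\\/asset") == ("my/asset",)    # escaped slash, one component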
python
|
doocs__leetcode
|
solution/0400-0499/0404.Sum of Left Leaves/Solution.py
|
{
"start": 192,
"end": 566
}
|
class ____:
def sumOfLeftLeaves(self, root: Optional[TreeNode]) -> int:
if root is None:
return 0
ans = self.sumOfLeftLeaves(root.right)
if root.left:
            if root.left.left == root.left.right:  # both children None, so root.left is a leaf
ans += root.left.val
else:
ans += self.sumOfLeftLeaves(root.left)
return ans
|
Solution
|
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/_typing.py
|
{
"start": 12268,
"end": 16437
}
|
class ____(_DataPipeMeta):
r"""
Metaclass for `IterDataPipe` and inherits from `_DataPipeMeta`.
Add various functions for behaviors specific to `IterDataPipe`.
"""
def __new__(cls, name, bases, namespace, **kwargs):
if "reset" in namespace:
reset_func = namespace["reset"]
@functools.wraps(reset_func)
def conditional_reset(*args, **kwargs) -> None:
r"""
Only execute DataPipe's `reset()` method if `_SnapshotState` is `Iterating` or `NotStarted`.
                This allows a recently restored DataPipe to preserve its restored state during the initial `__iter__` call.
"""
datapipe = args[0]
if datapipe._snapshot_state in (
_SnapshotState.Iterating,
_SnapshotState.NotStarted,
):
                # Resetting in the `NotStarted` state is necessary because the `source_datapipe` of a
                # DataPipe might have already begun iterating.
datapipe._number_of_samples_yielded = 0
datapipe._fast_forward_iterator = None
reset_func(*args, **kwargs)
datapipe._snapshot_state = _SnapshotState.Iterating
namespace["reset"] = conditional_reset
if "__iter__" in namespace:
hook_iterator(namespace)
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
def _dp_init_subclass(sub_cls, *args, **kwargs) -> None:
# Add function for datapipe instance to reinforce the type
sub_cls.reinforce_type = reinforce_type
# TODO:
# - add global switch for type checking at compile-time
# Ignore internal type class
if getattr(sub_cls, "__type_class__", False):
return
# Check if the string type is valid
if isinstance(sub_cls.type.param, ForwardRef):
base_globals = sys.modules[sub_cls.__module__].__dict__
try:
param = _eval_type(sub_cls.type.param, base_globals, locals())
sub_cls.type.param = param
except TypeError as e:
raise TypeError(
f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing"
) from e
if "__iter__" in sub_cls.__dict__:
iter_fn = sub_cls.__dict__["__iter__"]
hints = get_type_hints(iter_fn)
if "return" in hints:
return_hint = hints["return"]
# Plain Return Hint for Python 3.6
if return_hint == Iterator:
return
if not (
hasattr(return_hint, "__origin__")
and (
return_hint.__origin__ == Iterator
or return_hint.__origin__ == collections.abc.Iterator
)
):
raise TypeError(
"Expected 'Iterator' as the return annotation for `__iter__` of {}"
", but found {}".format(
sub_cls.__name__, _type_repr(hints["return"])
)
)
data_type = return_hint.__args__[0]
if not issubtype(data_type, sub_cls.type.param):
raise TypeError(
f"Expected return type of '__iter__' as a subtype of {sub_cls.type},"
f" but found {_type_repr(data_type)} for {sub_cls.__name__}"
)
def reinforce_type(self, expected_type):
r"""
        Reinforce the type for a DataPipe instance.
        The 'expected_type' is required to be a subtype of the original type
        hint, restricting the type requirement of the DataPipe instance.
"""
if isinstance(expected_type, tuple):
expected_type = tuple[expected_type] # type: ignore[valid-type]
_type_check(expected_type, msg="'expected_type' must be a type")
if not issubtype(expected_type, self.type.param):
raise TypeError(
f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}"
)
self.type = _DataPipeType(expected_type)
return self
|
_IterDataPipeMeta
|
python
|
jd__tenacity
|
tenacity/retry.py
|
{
"start": 8440,
"end": 8738
}
|
class ____(retry_base):
"""Retries if any of the retries condition is valid."""
def __init__(self, *retries: retry_base) -> None:
self.retries = retries
def __call__(self, retry_state: "RetryCallState") -> bool:
return any(r(retry_state) for r in self.retries)
|
retry_any
|
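A minimal usage sketch for `retry_any` above, combining two documented tenacity predicates so a call is retried on a ValueError or on a falsy result; `fetch_flag` is a hypothetical flaky call:
from tenacity import (retry, retry_any, retry_if_exception_type,
                      retry_if_result, stop_after_attempt)

@retry(
    retry=retry_any(
        retry_if_exception_type(ValueError),        # retry on this exception...
        retry_if_result(lambda result: not result), # ...or on a falsy result
    ),
    stop=stop_after_attempt(3),
)
def fetch_flag() -> bool:
    ...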
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/constant_op_eager_test.py
|
{
"start": 20083,
"end": 22114
}
|
class ____(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
ctx = context.context()
device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
with ops.device(device):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.numpy()
self.assertAllClose(np_ans, out)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillComplex128(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill(shape, 7)
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([3, 2], [1.0, 2.0])
if __name__ == "__main__":
test.main()
|
FillTest
|
python
|
pytorch__pytorch
|
torch/multiprocessing/spawn.py
|
{
"start": 973,
"end": 1274
}
|
class ____(ProcessException):
"""Exception raised when a process failed due to an exception raised by the code."""
def __init__(
self,
msg: str,
error_index: int,
error_pid: int,
):
super().__init__(msg, error_index, error_pid)
|
ProcessRaisedException
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tools/api/generator2/shared/exported_api.py
|
{
"start": 848,
"end": 1244
}
|
class ____(NamedTuple):
"""Information about a single tf_export instance."""
file_name: str
line_no: int
symbol_name: str
v1_apis: tuple[str, ...]
v2_apis: tuple[str, ...]
@classmethod
def create(
cls, *, v1_apis: Sequence[str], v2_apis: Sequence[str], **kwargs
) -> "ExportedSymbol":
return cls(v1_apis=tuple(v1_apis), v2_apis=tuple(v2_apis), **kwargs)
|
ExportedSymbol
|
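A small construction sketch for the NamedTuple above; `create` coerces the API sequences to tuples. Field values are illustrative only:
sym = ExportedSymbol.create(
    file_name="tensorflow/python/ops/array_ops.py",  # illustrative
    line_no=42,
    symbol_name="fill",
    v1_apis=["tf.fill"],
    v2_apis=["tf.fill"],
)
assert sym.v1_apis == ("tf.fill",)  # lists were coerced to tuples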
python
|
urllib3__urllib3
|
test/contrib/test_pyopenssl_dependencies.py
|
{
"start": 611,
"end": 1988
}
|
class ____:
"""
    Tests for error handling in pyopenssl's 'inject_into_urllib3'
"""
def test_inject_validate_fail_cryptography(self) -> None:
"""
Injection should not be supported if cryptography is too old.
"""
try:
with patch("cryptography.x509.extensions.Extensions") as mock:
del mock.get_extension_for_class
with pytest.raises(ImportError):
inject_into_urllib3()
finally:
# `inject_into_urllib3` is not supposed to succeed.
# If it does, this test should fail, but we need to
# clean up so that subsequent tests are unaffected.
extract_from_urllib3()
def test_inject_validate_fail_pyopenssl(self) -> None:
"""
Injection should not be supported if pyOpenSSL is too old.
"""
try:
return_val = Mock()
del return_val._x509
with patch("OpenSSL.crypto.X509", return_value=return_val):
with pytest.raises(ImportError):
inject_into_urllib3()
finally:
# `inject_into_urllib3` is not supposed to succeed.
# If it does, this test should fail, but we need to
# clean up so that subsequent tests are unaffected.
extract_from_urllib3()
|
TestPyOpenSSLInjection
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/path_registry.py
|
{
"start": 24423,
"end": 24580
}
|
class ____(_AbstractEntityRegistry):
    # for aliased classes, return a lightweight version
    # that creates no reference cycles
inherit_cache = True
|
_SlotsEntityRegistry
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_naturalpks.py
|
{
"start": 50349,
"end": 52468
}
|
class ____(fixtures.MappedTest):
"""Test integration with TypeEngine.sort_key_function"""
class HashableDict(dict):
def __hash__(self):
return hash((self["x"], self["y"]))
@classmethod
def define_tables(cls, metadata):
class MyUnsortable(TypeDecorator):
impl = String(10)
cache_ok = True
def process_bind_param(self, value, dialect):
return "%s,%s" % (value["x"], value["y"])
def process_result_value(self, value, dialect):
rec = value.split(",")
return cls.HashableDict({"x": rec[0], "y": rec[1]})
def sort_key_function(self, value):
return (value["x"], value["y"])
Table(
"data",
metadata,
Column("info", MyUnsortable(), primary_key=True),
Column("int_value", Integer),
)
@classmethod
def setup_classes(cls):
class Data(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
cls.mapper_registry.map_imperatively(cls.classes.Data, cls.tables.data)
def test_updates_sorted(self):
Data = self.classes.Data
s = fixture_session()
s.add_all(
[
Data(info=self.HashableDict(x="a", y="b")),
Data(info=self.HashableDict(x="a", y="a")),
Data(info=self.HashableDict(x="b", y="b")),
Data(info=self.HashableDict(x="b", y="a")),
]
)
s.commit()
aa, ab, ba, bb = s.query(Data).order_by(Data.info).all()
counter = itertools.count()
ab.int_value = bindparam(key=None, callable_=lambda: next(counter))
ba.int_value = bindparam(key=None, callable_=lambda: next(counter))
bb.int_value = bindparam(key=None, callable_=lambda: next(counter))
aa.int_value = bindparam(key=None, callable_=lambda: next(counter))
s.commit()
eq_(
s.query(Data.int_value).order_by(Data.info).all(),
[(0,), (1,), (2,), (3,)],
)
|
UnsortablePKTest
|
python
|
getsentry__sentry
|
tests/sentry/data_export/processors/test_discover.py
|
{
"start": 5832,
"end": 6590
}
|
class ____(TestCase, PerformanceIssueTestCase):
def test_handle_dataset(self) -> None:
query = {
"statsPeriod": "14d",
"project": [self.project.id],
"field": ["count(id)", "fake(field)", "issue"],
"query": "",
}
query["field"] = ["title", "count()"]
query["dataset"] = "issuePlatform"
self.create_performance_issue()
processor = DiscoverProcessor(organization=self.organization, discover_query=query)
assert processor.header_fields == [
"title",
"count",
]
result = processor.data_fn(0, 1)
assert len(result["data"]) == 1
assert result["data"][0]["title"] == "N+1 Query"
|
DiscoverIssuesProcessorTest
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker.py
|
{
"start": 80520,
"end": 85391
}
|
class ____(AwsBaseOperator[SageMakerHook]):
"""
Create a SageMaker notebook.
More information regarding parameters of this operator can be found here
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker/client/create_notebook_instance.html.
    .. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerCreateNotebookOperator`
:param instance_name: The name of the notebook instance.
:param instance_type: The type of instance to create.
    :param role_arn: The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access resources on your behalf.
    :param volume_size_in_gb: Size in GB of the EBS root device volume of the notebook instance.
    :param volume_kms_key_id: The KMS key ID for the EBS root device volume.
    :param lifecycle_config_name: The name of the lifecycle configuration to associate with the notebook instance.
    :param direct_internet_access: Whether to enable direct internet access for the notebook instance.
    :param root_access: Whether to give users of the notebook instance root access to it.
    :param wait_for_completion: Whether or not to wait for the notebook to be InService before returning.
:param create_instance_kwargs: Additional configuration options for the create call.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:return: The ARN of the created notebook.
"""
aws_hook_class = SageMakerHook
template_fields: Sequence[str] = aws_template_fields(
"instance_name",
"instance_type",
"role_arn",
"volume_size_in_gb",
"volume_kms_key_id",
"lifecycle_config_name",
"direct_internet_access",
"root_access",
"wait_for_completion",
"create_instance_kwargs",
)
ui_color = "#ff7300"
def __init__(
self,
*,
instance_name: str,
instance_type: str,
role_arn: str,
volume_size_in_gb: int | None = None,
volume_kms_key_id: str | None = None,
lifecycle_config_name: str | None = None,
direct_internet_access: str | None = None,
root_access: str | None = None,
create_instance_kwargs: dict[str, Any] | None = None,
wait_for_completion: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.instance_name = instance_name
self.instance_type = instance_type
self.role_arn = role_arn
self.volume_size_in_gb = volume_size_in_gb
self.volume_kms_key_id = volume_kms_key_id
self.lifecycle_config_name = lifecycle_config_name
self.direct_internet_access = direct_internet_access
self.root_access = root_access
self.wait_for_completion = wait_for_completion
self.create_instance_kwargs = create_instance_kwargs or {}
if self.create_instance_kwargs.get("tags") is not None:
self.create_instance_kwargs["tags"] = format_tags(self.create_instance_kwargs["tags"])
def execute(self, context: Context):
create_notebook_instance_kwargs = {
"NotebookInstanceName": self.instance_name,
"InstanceType": self.instance_type,
"RoleArn": self.role_arn,
"VolumeSizeInGB": self.volume_size_in_gb,
"KmsKeyId": self.volume_kms_key_id,
"LifecycleConfigName": self.lifecycle_config_name,
"DirectInternetAccess": self.direct_internet_access,
"RootAccess": self.root_access,
}
if self.create_instance_kwargs:
create_notebook_instance_kwargs.update(self.create_instance_kwargs)
self.log.info("Creating SageMaker notebook %s.", self.instance_name)
response = self.hook.conn.create_notebook_instance(**prune_dict(create_notebook_instance_kwargs))
self.log.info("SageMaker notebook created: %s", response["NotebookInstanceArn"])
if self.wait_for_completion:
self.log.info("Waiting for SageMaker notebook %s to be in service", self.instance_name)
waiter = self.hook.conn.get_waiter("notebook_instance_in_service")
waiter.wait(NotebookInstanceName=self.instance_name)
return response["NotebookInstanceArn"]
|
SageMakerCreateNotebookOperator
|
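A hedged usage sketch for the operator above inside a DAG; all parameter values are illustrative, and the import path matches the record's module:
from airflow.providers.amazon.aws.operators.sagemaker import (
    SageMakerCreateNotebookOperator,
)

create_notebook = SageMakerCreateNotebookOperator(
    task_id="create_notebook",
    instance_name="ml-scratchpad",                             # illustrative
    instance_type="ml.t3.medium",
    role_arn="arn:aws:iam::123456789012:role/sagemaker-role",  # illustrative
    volume_size_in_gb=10,
    wait_for_completion=True,  # block until the instance is InService
)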
python
|
ray-project__ray
|
rllib/models/torch/misc.py
|
{
"start": 10924,
"end": 11397
}
|
class ____(nn.Module):
"""Simple bias appending layer for free_log_std."""
def __init__(self, num_bias_vars: int):
super().__init__()
self.log_std = torch.nn.Parameter(torch.as_tensor([0.0] * num_bias_vars))
self.register_parameter("log_std", self.log_std)
def forward(self, x: TensorType) -> TensorType:
out = torch.cat([x, self.log_std.unsqueeze(0).repeat([len(x), 1])], axis=1)
return out
@DeveloperAPI
|
AppendBiasLayer
|
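A minimal shape-check sketch for the layer above (class name taken from the record's target field): two learned log-std entries are appended to every row of a batch of action means.
import torch

layer = AppendBiasLayer(num_bias_vars=2)
x = torch.randn(4, 2)   # batch of 4 action means, 2 dims each
out = layer(x)          # each row gains the 2 shared log_std entries
assert out.shape == (4, 4)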
python
|
doocs__leetcode
|
solution/0600-0699/0639.Decode Ways II/Solution.py
|
{
"start": 0,
"end": 1182
}
|
class ____:
def numDecodings(self, s: str) -> int:
mod = int(1e9 + 7)
n = len(s)
# dp[i - 2], dp[i - 1], dp[i]
a, b, c = 0, 1, 0
for i in range(1, n + 1):
# 1 digit
if s[i - 1] == "*":
c = 9 * b % mod
elif s[i - 1] != "0":
c = b
else:
c = 0
# 2 digits
if i > 1:
                if s[i - 2] == "*" and s[i - 1] == "*":
                    # "**" forms 11..19 and 21..26 -> 9 + 6 = 15 valid pairs
                    c = (c + 15 * a) % mod
elif s[i - 2] == "*":
if s[i - 1] > "6":
c = (c + a) % mod
else:
c = (c + 2 * a) % mod
elif s[i - 1] == "*":
if s[i - 2] == "1":
c = (c + 9 * a) % mod
elif s[i - 2] == "2":
c = (c + 6 * a) % mod
elif (
s[i - 2] != "0"
and (ord(s[i - 2]) - ord("0")) * 10 + ord(s[i - 1]) - ord("0") <= 26
):
c = (c + a) % mod
a, b = b, c
return c
|
Solution
|
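Two quick sanity checks of the rolling DP above against known cases: a lone "*" stands for the nine digits 1-9, and "1*" adds the nine paired decodings "11".."19" on top of the nine split ones.
assert Solution().numDecodings("*") == 9    # "*" -> 1..9
assert Solution().numDecodings("1*") == 18  # 9 split + 9 paired decodings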
python
|
ray-project__ray
|
python/ray/util/scheduling_strategies.py
|
{
"start": 4768,
"end": 4944
}
|
class ____:
def __init__(self, *values):
_validate_label_match_operator_values(values, "NotIn")
self.values = list(values)
@PublicAPI(stability="alpha")
|
NotIn
|
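A hedged usage sketch: `NotIn` is one of the label-match operators consumed by ray's node-label scheduling strategy (an alpha API at the time of this snippet); the label name and values are illustrative.
import ray
from ray.util.scheduling_strategies import NodeLabelSchedulingStrategy, NotIn

@ray.remote(
    scheduling_strategy=NodeLabelSchedulingStrategy(
        hard={"accelerator": NotIn("A100", "H100")}  # avoid nodes with these labels
    )
)
def cpu_only_task():
    ...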
python
|
davidhalter__jedi
|
test/completion/pep0484_typing.py
|
{
"start": 5903,
"end": 8684
}
|
class ____(typing.Generic[TYPE_VARX]):
def lala(self) -> TYPE_VARX:
...
def maaan(p: WithTypeVar[int]):
#? int()
p.lala()
def in_out1(x: TYPE_VARX) -> TYPE_VARX: ...
#? int()
in_out1(1)
#? str()
in_out1("")
#? str()
in_out1(str())
#?
in_out1()
def type_in_out1(x: typing.Type[TYPE_VARX]) -> TYPE_VARX: ...
#? int()
type_in_out1(int)
#? str()
type_in_out1(str)
#? float()
type_in_out1(float)
#?
type_in_out1()
def in_out2(x: TYPE_VAR_CONSTRAINTSX) -> TYPE_VAR_CONSTRAINTSX: ...
#? int()
in_out2(1)
#? str()
in_out2("")
#? str()
in_out2(str())
#? str() int()
in_out2()
# TODO this should actually be str() int(), because of the constraints.
#? float()
in_out2(1.0)
def type_in_out2(x: typing.Type[TYPE_VAR_CONSTRAINTSX]) -> TYPE_VAR_CONSTRAINTSX: ...
#? int()
type_in_out2(int)
#? str()
type_in_out2(str)
#? str() int()
type_in_out2()
# TODO this should actually be str() int(), because of the constraints.
#? float()
type_in_out2(float)
def ma(a: typing.Callable[[str], TYPE_VARX]) -> typing.Callable[[str], TYPE_VARX]:
#? typing.Callable()
return a
def mf(s: str) -> int:
return int(s)
#? int()
ma(mf)('2')
def xxx(x: typing.Iterable[TYPE_VARX]) -> typing.Tuple[str, TYPE_VARX]: ...
#? str()
xxx([0])[0]
#? int()
xxx([0])[1]
#?
xxx([0])[2]
def call_pls() -> typing.Callable[[TYPE_VARX], TYPE_VARX]: ...
#? int()
call_pls()(1)
def call2_pls() -> typing.Callable[[str, typing.Callable[[int], TYPE_VARX]], TYPE_VARX]: ...
#? float()
call2_pls('')(1, lambda x: 3.0)
def call3_pls() -> typing.Callable[[typing.Callable[[int], TYPE_VARX]], typing.List[TYPE_VARX]]: ...
def the_callable() -> float: ...
#? float()
call3_pls()(the_callable)[0]
def call4_pls(fn: typing.Callable[..., TYPE_VARX]) -> typing.Callable[..., TYPE_VARX]:
return ""
#? int()
call4_pls(lambda x: 1)()
# -------------------------
# TYPE_CHECKING
# -------------------------
if typing.TYPE_CHECKING:
with_type_checking = 1
else:
without_type_checking = 1.0
#? int()
with_type_checking
#?
without_type_checking
def foo(a: typing.List, b: typing.Dict, c: typing.MutableMapping) -> typing.Type[int]:
#? ['append']
a.appen
#? list()
a
#?
a[0]
#? ['setdefault']
b.setd
#? ['setdefault']
c.setd
#? typing.MutableMapping()
c
#?
c['asdf']
#? int
foo()
# -------------------------
# cast
# -------------------------
def cast_tests():
x = 3.0
y = typing.cast(int, x)
#? int()
y
return typing.cast(str, x)
#? str()
cast_tests()
# -------------------------
# dynamic
# -------------------------
def dynamic_annotation(x: int):
#? int()
return x
#? int()
dynamic_annotation('')
# -------------------------
# TypeDict
# -------------------------
# python >= 3.8
|
WithTypeVar
|
python
|
dask__distributed
|
distributed/tests/test_core.py
|
{
"start": 40230,
"end": 43644
}
|
class ____(TCPBackend):
_listener_class = AsyncStopTCPListener
@gen_test()
async def test_async_listener_stop(monkeypatch):
monkeypatch.setitem(backends, "tcp", TCPAsyncListenerBackend())
with pytest.warns(DeprecationWarning):
async with Server({}) as s:
await s.listen(0)
assert s.listeners
@gen_test()
async def test_messages_are_ordered_bsend():
ledger = []
async def async_handler(val):
await asyncio.sleep(0.01 * random.random())
ledger.append(val)
def sync_handler(val):
ledger.append(val)
async with Server(
{},
stream_handlers={
"sync_handler": sync_handler,
"async_handler": async_handler,
},
) as s:
await s.listen()
comm = await connect(s.address)
try:
b = BatchedSend(interval=10)
try:
await comm.write({"op": "connection_stream"})
b.start(comm)
n = 100
for ix in range(n):
if ix % 2:
b.send({"op": "sync_handler", "val": ix})
else:
b.send({"op": "async_handler", "val": ix})
while not len(ledger) == n:
await asyncio.sleep(0.01)
assert ledger == list(range(n))
finally:
await b.close()
finally:
await comm.close()
@gen_test()
async def test_messages_are_ordered_raw():
ledger = []
async def async_handler(val):
await asyncio.sleep(0.01 * random.random())
ledger.append(val)
def sync_handler(val):
ledger.append(val)
async with Server(
{},
stream_handlers={
"sync_handler": sync_handler,
"async_handler": async_handler,
},
) as s:
await s.listen()
comm = await connect(s.address)
try:
await comm.write({"op": "connection_stream"})
n = 100
for ix in range(n):
if ix % 2:
await comm.write({"op": "sync_handler", "val": ix})
else:
await comm.write({"op": "async_handler", "val": ix})
while not len(ledger) == n:
await asyncio.sleep(0.01)
assert ledger == list(range(n))
finally:
await comm.close()
@pytest.mark.slow
@gen_test(timeout=180)
async def test_large_payload(caplog):
"""See also: protocol/tests/test_protocol.py::test_large_payload"""
critical_size = 2**31 + 1 # >2 GiB
data = b"0" * critical_size
async with Server({"echo": echo_serialize}) as server:
await server.listen(0)
comm = await connect(server.address)
# FIXME https://github.com/dask/distributed/issues/8465
# At debug level, messages are dumped into the log. By default, pytest captures
# all logs, which would make this test extremely expensive to run.
with caplog.at_level(logging.INFO, logger="distributed.core"):
# Note: if we wrap data in to_serialize, it will be sent as a buffer, which
# is not encoded by msgpack.
await comm.write({"op": "echo", "x": data})
response = await comm.read()
assert response["result"] == data
await comm.close()
|
TCPAsyncListenerBackend
|
python
|
rapidsai__cudf
|
python/cudf/cudf/tests/general_functions/test_register_accessor.py
|
{
"start": 1597,
"end": 2247
}
|
class ____:
def __init__(self, obj):
self._obj = obj
def __getitem__(self, i):
return self._obj[2 * i - 1]
@pytest.mark.parametrize("klass", [cudf.Index, cudf.Series])
def test_index_series_accessor(klass):
obj = klass([1, 2, 3])
pobj = obj.to_pandas()
assert_eq(obj.odd[1], pobj.odd[1])
def test_accessor_space_separate():
data = [1, 2, 3]
gdf = cudf.DataFrame(data)
gidx = cudf.Index(data)
gs = cudf.Series(data)
assert not id(gdf._accessors) == id(gidx._accessors)
assert not id(gidx._accessors) == id(gs._accessors)
assert not id(gdf._accessors) == id(gs._accessors)
|
OddRowAccessor
|
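The test above implies `OddRowAccessor` was registered under the name "odd" earlier in the module, presumably via cudf's pandas-style extension decorators. A hedged sketch of that registration plus a usage check:
import cudf
from cudf.api.extensions import register_index_accessor, register_series_accessor

# presumably applied as decorators on the class in the test module
OddRowAccessor = register_series_accessor("odd")(OddRowAccessor)
OddRowAccessor = register_index_accessor("odd")(OddRowAccessor)

s = cudf.Series([10, 20, 30])
assert s.odd[1] == 20  # delegates to s[2 * 1 - 1]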
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 1060760,
"end": 1060986
}
|
class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (ProjectV2Field, ProjectV2IterationField, ProjectV2SingleSelectField)
|
ProjectV2FieldConfiguration
|
python
|
numpy__numpy
|
numpy/_core/tests/test_numeric.py
|
{
"start": 59046,
"end": 63222
}
|
class ____:
def makegen(self):
return (x**2 for x in range(24))
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
assert_(ai32.dtype == np.dtype(np.int32))
assert_(ai64.dtype == np.dtype(np.int64))
assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(len(a) == len(expected))
assert_(len(a20) == 20)
assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(np.all(a == expected, axis=0))
assert_(np.all(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
# Raise an exception at the desired index in the iterator.
for e in range(n):
if e == eindex:
raise NIterError(f'error at index {eindex}')
yield e
@pytest.mark.parametrize("dtype", [int, object])
@pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)])
def test_2592(self, count, error_index, dtype):
# Test iteration exceptions are correctly raised. The data/generator
# has `count` elements but errors at `error_index`
iterable = self.load_data(count, error_index)
with pytest.raises(NIterError):
np.fromiter(iterable, dtype=dtype, count=count)
@pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"])
def test_empty_not_structured(self, dtype):
# Note, "S0" could be allowed at some point, so long "S" (without
# any length) is rejected.
with pytest.raises(ValueError, match="Must specify length"):
np.fromiter([], dtype=dtype)
@pytest.mark.parametrize(["dtype", "data"],
[("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
# subarray dtypes (important because their dimensions end up
# in the result arrays dimension:
("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
(np.dtype(("O", (2, 3))),
[((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])])
@pytest.mark.parametrize("length_hint", [0, 1])
def test_growth_and_complicated_dtypes(self, dtype, data, length_hint):
dtype = np.dtype(dtype)
data = data * 100 # make sure we realloc a bit
class MyIter:
# Class/example from gh-15789
def __length_hint__(self):
# only required to be an estimate, this is legal
return length_hint # 0 or 1
def __iter__(self):
return iter(data)
res = np.fromiter(MyIter(), dtype=dtype)
expected = np.array(data, dtype=dtype)
assert_array_equal(res, expected)
def test_empty_result(self):
class MyIter:
def __length_hint__(self):
return 10
def __iter__(self):
return iter([]) # actual iterator is empty.
res = np.fromiter(MyIter(), dtype="d")
assert res.shape == (0,)
assert res.dtype == "d"
def test_too_few_items(self):
msg = "iterator too short: Expected 10 but iterator had only 3 items."
with pytest.raises(ValueError, match=msg):
np.fromiter([1, 2, 3], count=10, dtype=int)
def test_failed_itemsetting(self):
with pytest.raises(TypeError):
np.fromiter([1, None, 3], dtype=int)
# The following manages to hit somewhat trickier code paths:
iterable = ((2, 3, 4) for i in range(5))
with pytest.raises(ValueError):
np.fromiter(iterable, dtype=np.dtype((int, 2)))
|
TestFromiter
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/smartsymbols.py
|
{
"start": 3960,
"end": 5427
}
|
class ____(Extension):
"""Smart Symbols extension."""
def __init__(self, *args, **kwargs):
"""Setup config of which symbols are enabled."""
self.config = {
'trademark': [True, 'Trademark'],
'copyright': [True, 'Copyright'],
'registered': [True, 'Registered'],
'plusminus': [True, 'Plus/Minus'],
'arrows': [True, 'Arrows'],
'notequal': [True, 'Not Equal'],
'fractions': [True, 'Fractions'],
'ordinal_numbers': [True, 'Ordinal Numbers'],
'care_of': [True, 'Care/of']
}
super().__init__(*args, **kwargs)
def add_pattern(self, patterns, md):
"""Construct the inline symbol pattern."""
self.patterns.register(SmartSymbolsPattern(patterns[1], patterns[2], md), patterns[0], 30)
def extendMarkdown(self, md):
"""Create a dict of inline replace patterns and add to the tree processor."""
configs = self.getConfigs()
self.patterns = Registry()
for k, v in REPL.items():
if configs[k]:
self.add_pattern(v, md)
inline_processor = treeprocessors.InlineProcessor(md)
inline_processor.inlinePatterns = self.patterns
md.treeprocessors.register(inline_processor, "smart-symbols", 6.1)
def makeExtension(*args, **kwargs):
"""Return extension."""
return SmartSymbolsExtension(*args, **kwargs)
|
SmartSymbolsExtension
|
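A minimal usage sketch: the extension registers under the name "pymdownx.smartsymbols", so enabling it through python-markdown looks roughly like this (output shown is indicative):
import markdown

html = markdown.markdown("(tm) (c) +/- 1/2",
                         extensions=["pymdownx.smartsymbols"])
print(html)  # e.g. '<p>™ © ± ½</p>'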
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/project_user_issue.py
|
{
"start": 2477,
"end": 5253
}
|
class ____(BaseUserIssueFormatter):
def get_issue_type(self) -> type[GroupType]:
return WebVitalsGroup
def get_issue_title(self) -> str:
vital = self.data.get("vital", "")
return f"{vital.upper()} score needs improvement"
def get_issue_subtitle(self) -> str:
vital = self.data.get("vital", "")
transaction = self.data.get("transaction", "")
a_or_an = "an" if vital in ["lcp", "fcp", "inp"] else "a"
return f"{transaction} has {a_or_an} {vital.upper()} score of {self.data.get("score")}"
def create_fingerprint(self) -> list[str]:
vital = self.data.get("vital", "")
transaction = self.data.get("transaction", "")
# We add a uuid to force uniqueness on the fingerprint
# This is because we do not want historic autofix runs to be connected to new issue events
uuid = uuid4().hex
return [f"insights-web-vitals-{vital}-{transaction}-{uuid}"]
def get_tags(self) -> dict:
vital = self.data.get("vital", "")
transaction = self.data.get("transaction", "")
return {
"transaction": transaction,
"web_vital": vital,
"score": str(self.data.get("score")),
vital: str(self.data.get("value", "")),
}
def get_evidence(self) -> tuple[dict, list[IssueEvidence]]:
vital = self.data.get("vital", "")
score = self.data.get("score")
transaction = self.data.get("transaction", "")
trace_id = self.data.get("traceId")
vital_value = self.data.get("value")
evidence_data = {
"transaction": transaction,
"vital": vital,
"score": score,
vital: vital_value,
}
evidence_display = [
IssueEvidence(
name="Transaction",
value=transaction,
important=False,
),
IssueEvidence(
name="Web Vital",
value=vital.upper(),
important=True,
),
IssueEvidence(
name="Score",
value=str(score),
important=True,
),
IssueEvidence(
name=vital.upper(),
value=str(vital_value),
important=True,
),
]
if trace_id:
evidence_data["trace_id"] = trace_id
evidence_display.append(
IssueEvidence(
name="Trace ID",
value=trace_id,
important=False,
)
)
return (evidence_data, evidence_display)
ISSUE_TYPE_CHOICES = [
WebVitalsGroup.slug,
]
|
WebVitalsUserIssueFormatter
|
python
|
getsentry__sentry
|
tests/tools/test_flake8_plugin.py
|
{
"start": 1683,
"end": 5145
}
|
class ____(unittest.TestCase):
def test(self) -> None:
with self.assertRaises(ValueError):
func()
"""
errors = _run(S004_py)
assert errors == [
"t.py:7:13: S004 Use `pytest.raises` instead for better debuggability.",
]
def test_S005() -> None:
S005_py = """\
from sentry.models import User
"""
errors = _run(S005_py)
assert errors == [
"t.py:1:0: S005 Do not import models from sentry.models but the actual module",
]
def test_S006() -> None:
src = """\
from django.utils.encoding import force_bytes
from django.utils.encoding import force_str
"""
# only error in tests until we can fix the rest
assert _run(src, filename="src/sentry/whatever.py") == []
errors = _run(src, filename="tests/test_foo.py")
assert errors == [
"t.py:1:0: S006 Do not use force_bytes / force_str -- test the types directly",
"t.py:2:0: S006 Do not use force_bytes / force_str -- test the types directly",
]
def test_S007() -> None:
src = """\
from sentry.testutils.outbox import outbox_runner
"""
# no errors in tests/
assert _run(src, filename="tests/test_foo.py") == []
# no errors in src/sentry/testutils/
assert _run(src, filename="src/sentry/testutils/silo.py") == []
# errors in other paths
errors = _run(src, filename="src/sentry/api/endpoints/organization_details.py")
assert errors == [
"t.py:1:0: S007 Do not import sentry.testutils into production code.",
]
# Module imports should have errors too.
src = """\
import sentry.testutils.outbox as outbox_utils
"""
assert _run(src, filename="tests/test_foo.py") == []
errors = _run(src, filename="src/sentry/api/endpoints/organization_details.py")
assert errors == [
"t.py:1:0: S007 Do not import sentry.testutils into production code.",
]
def test_s008() -> None:
src = """\
from dateutil.parser import parse
"""
# no errors in source
assert _run(src, filename="src/sentry/example.py") == []
# errors in tests
tests1 = _run(src, filename="tests/test_example.py")
tests2 = _run(src, filename="src/sentry/testutils/example.py")
assert (
tests1
== tests2
== ["t.py:1:0: S008 Use datetime.fromisoformat rather than guessing at date formats"]
)
def test_S009() -> None:
src = """\
try:
...
except OSError:
raise # ok: what we want people to do!
except TypeError as e:
raise RuntimeError() # ok: reraising a different exception
except ValueError as e:
raise e # bad!
"""
expected = ["t.py:8:4: S009 Use `raise` with no arguments to reraise exceptions"]
assert _run(src) == expected
def test_S010() -> None:
src = """\
try:
...
except ValueError:
... # ok: not a reraise body
except Exception:
raise # bad!
try:
...
except Exception:
...
    raise # ok: not just a reraise body
"""
expected = ["t.py:5:0: S010 Except handler does nothing and should be removed"]
assert _run(src) == expected
def test_S011() -> None:
src = """\
from sentry.testutils.cases import APITestCase
from django.test import override_settings
def test() -> None:
with override_settings(SENTRY_OPTIONS={"foo": "bar"}): # bad
...
with override_settings(
SENTRY_OPTIONS={"foo": "bar"}, # bad
OTHER_SETTING=2, # ok
):
...
with override_settings(OTHER_SETTING=2): # ok
...
|
Test
|
python
|
getsentry__sentry
|
src/sentry/dynamic_sampling/rules/biases/boost_low_volume_transactions_bias.py
|
{
"start": 320,
"end": 3916
}
|
class ____(Bias):
def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]:
proj_id = project.id
org_id = project.organization.id
transaction_map, base_implicit_rate = get_transactions_resampling_rates(
org_id=org_id, proj_id=proj_id, default_rate=base_sample_rate
)
ret_val: list[Rule] = []
        if len(transaction_map) == 0:
            return ret_val  # no point returning any rules; the project rule should take over
        if base_sample_rate == 0:
            return ret_val  # we can't compute anything without a base_sample_rate
if base_implicit_rate == 0.0:
base_implicit_rate = 1.0
# The implicit rate that we compute is transformed to a factor, so that when the rate is multiplied by the last
# sample rate rule, the value will be `base_implicit_rate`.
implicit_rate = base_implicit_rate / base_sample_rate
idx = 0
for name, base_transaction_rate in transaction_map.items():
# Here we apply a similar logic to above and since we expect that the resulting multiplication on the Relay
# end will multiply transaction_rate * implicit_rate * base_sample_rate and the resulting value that we want
# is the actual base_transaction_rate.
#
# This operation has been designed to be minimal since through some math we can reduce the number of
# operations. Given:
# s = base_sample_rate
# i = base_implicit_rate
# t = base_transaction_rate
# we start with the base case for the implicit_rate, which will result in the following expression being
# computed by Relay -> s * (i / s) = i. Now we want to extend this expression to perform a similar logic
# but with an added term t. This would result in ((s * (i / s)) * (t / ((i / s) * s))) which can be
# simplified to ((s * (i / s)) * (t / i))).
transaction_rate = base_transaction_rate / base_implicit_rate
if transaction_rate != 1.0:
ret_val.append(
{
"samplingValue": {
"type": "factor",
"value": transaction_rate,
},
"type": "trace",
"condition": {
"op": "or",
"inner": [
{
"op": "eq",
"name": "trace.transaction",
"value": [name],
"options": {"ignoreCase": True},
}
],
},
"id": RESERVED_IDS[RuleType.BOOST_LOW_VOLUME_TRANSACTIONS_RULE] + idx,
}
)
idx += 1
if implicit_rate != 1.0:
ret_val.append(
{
"samplingValue": {
"type": "factor",
"value": implicit_rate,
},
"type": "trace",
"condition": {
"op": "and",
"inner": [],
},
"id": RESERVED_IDS[RuleType.BOOST_LOW_VOLUME_TRANSACTIONS_RULE] + idx,
}
)
return ret_val
|
BoostLowVolumeTransactionsBias
|
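A small numeric check of the factor algebra described in the comments above: Relay multiplies base_sample_rate by the implicit factor and the per-transaction factor, and the product should recover the target per-transaction rate.
s = 0.25  # base_sample_rate
i = 0.5   # base_implicit_rate
t = 0.1   # base_transaction_rate

implicit_factor = i / s
transaction_factor = t / i

# s * (i / s) * (t / i) simplifies to t
assert abs(s * implicit_factor * transaction_factor - t) < 1e-12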
python
|
pytest-dev__pytest
|
src/_pytest/fixtures.py
|
{
"start": 36293,
"end": 43827
}
|
class ____(Generic[FixtureValue]):
"""A container for a fixture definition.
Note: At this time, only explicitly documented fields and methods are
considered public stable API.
"""
def __init__(
self,
config: Config,
baseid: str | None,
argname: str,
func: _FixtureFunc[FixtureValue],
scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] | None,
params: Sequence[object] | None,
ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None,
*,
_ispytest: bool = False,
# only used in a deprecationwarning msg, can be removed in pytest9
_autouse: bool = False,
) -> None:
check_ispytest(_ispytest)
# The "base" node ID for the fixture.
#
# This is a node ID prefix. A fixture is only available to a node (e.g.
# a `Function` item) if the fixture's baseid is a nodeid of a parent of
# node.
#
# For a fixture found in a Collector's object (e.g. a `Module`s module,
# a `Class`'s class), the baseid is the Collector's nodeid.
#
# For a fixture found in a conftest plugin, the baseid is the conftest's
# directory path relative to the rootdir.
#
# For other plugins, the baseid is the empty string (always matches).
self.baseid: Final = baseid or ""
# Whether the fixture was found from a node or a conftest in the
# collection tree. Will be false for fixtures defined in non-conftest
# plugins.
self.has_location: Final = baseid is not None
# The fixture factory function.
self.func: Final = func
# The name by which the fixture may be requested.
self.argname: Final = argname
if scope is None:
scope = Scope.Function
elif callable(scope):
scope = _eval_scope_callable(scope, argname, config)
if isinstance(scope, str):
scope = Scope.from_user(
scope, descr=f"Fixture '{func.__name__}'", where=baseid
)
self._scope: Final = scope
# If the fixture is directly parametrized, the parameter values.
self.params: Final = params
# If the fixture is directly parametrized, a tuple of explicit IDs to
# assign to the parameter values, or a callable to generate an ID given
# a parameter value.
self.ids: Final = ids
# The names requested by the fixtures.
self.argnames: Final = getfuncargnames(func, name=argname)
# If the fixture was executed, the current value of the fixture.
# Can change if the fixture is executed with different parameters.
self.cached_result: _FixtureCachedResult[FixtureValue] | None = None
self._finalizers: Final[list[Callable[[], object]]] = []
# only used to emit a deprecationwarning, can be removed in pytest9
self._autouse = _autouse
@property
def scope(self) -> _ScopeName:
"""Scope string, one of "function", "class", "module", "package", "session"."""
return self._scope.value
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
self._finalizers.append(finalizer)
def finish(self, request: SubRequest) -> None:
exceptions: list[BaseException] = []
while self._finalizers:
fin = self._finalizers.pop()
try:
fin()
except BaseException as e:
exceptions.append(e)
node = request.node
node.ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
# Even if finalization fails, we invalidate the cached fixture
# value and remove all finalizers because they may be bound methods
# which will keep instances alive.
self.cached_result = None
self._finalizers.clear()
if len(exceptions) == 1:
raise exceptions[0]
elif len(exceptions) > 1:
msg = f'errors while tearing down fixture "{self.argname}" of {node}'
raise BaseExceptionGroup(msg, exceptions[::-1])
def execute(self, request: SubRequest) -> FixtureValue:
"""Return the value of this fixture, executing it if not cached."""
# Ensure that the dependent fixtures requested by this fixture are loaded.
# This needs to be done before checking if we have a cached value, since
# if a dependent fixture has their cache invalidated, e.g. due to
# parametrization, they finalize themselves and fixtures depending on it
# (which will likely include this fixture) setting `self.cached_result = None`.
# See #4871
requested_fixtures_that_should_finalize_us = []
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
# Saves requested fixtures in a list so we later can add our finalizer
# to them, ensuring that if a requested fixture gets torn down we get torn
# down first. This is generally handled by SetupState, but still currently
# needed when this fixture is not parametrized but depends on a parametrized
# fixture.
requested_fixtures_that_should_finalize_us.append(fixturedef)
# Check for (and return) cached value/exception.
if self.cached_result is not None:
request_cache_key = self.cache_key(request)
cache_key = self.cached_result[1]
try:
# Attempt to make a normal == check: this might fail for objects
# which do not implement the standard comparison (like numpy arrays -- #6497).
cache_hit = bool(request_cache_key == cache_key)
except (ValueError, RuntimeError):
# If the comparison raises, use 'is' as fallback.
cache_hit = request_cache_key is cache_key
if cache_hit:
if self.cached_result[2] is not None:
exc, exc_tb = self.cached_result[2]
raise exc.with_traceback(exc_tb)
else:
return self.cached_result[0]
# We have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one.
self.finish(request)
assert self.cached_result is None
# Add finalizer to requested fixtures we saved previously.
# We make sure to do this after checking for cached value to avoid
# adding our finalizer multiple times. (#12135)
finalizer = functools.partial(self.finish, request=request)
for parent_fixture in requested_fixtures_that_should_finalize_us:
parent_fixture.addfinalizer(finalizer)
ihook = request.node.ihook
try:
# Setup the fixture, run the code in it, and cache the value
# in self.cached_result.
result: FixtureValue = ihook.pytest_fixture_setup(
fixturedef=self, request=request
)
finally:
# Schedule our finalizer, even if the setup failed.
request.node.addfinalizer(finalizer)
return result
def cache_key(self, request: SubRequest) -> object:
return getattr(request, "param", None)
def __repr__(self) -> str:
return f"<FixtureDef argname={self.argname!r} scope={self.scope!r} baseid={self.baseid!r}>"
|
FixtureDef
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-aws/prefect_aws/credentials.py
|
{
"start": 494,
"end": 1437
}
|
class ____(Enum):
"""The supported boto3 clients."""
S3 = "s3"
ECS = "ecs"
BATCH = "batch"
SECRETS_MANAGER = "secretsmanager"
@lru_cache(maxsize=8, typed=True)
def _get_client_cached(ctx, client_type: Union[str, ClientType]) -> Any:
"""
Helper method to cache and dynamically get a client type.
Args:
client_type: The client's service name.
Returns:
An authenticated client.
Raises:
ValueError: if the client is not supported.
"""
with _LOCK:
if isinstance(client_type, ClientType):
client_type = client_type.value
params_override = ctx.aws_client_parameters.get_params_override()
if ctx.region_name is not None:
params_override["region_name"] = ctx.region_name
client = ctx.get_boto3_session().client(
service_name=client_type,
**params_override,
)
return client
|
ClientType
|
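A hedged usage sketch for `_get_client_cached` above: `ctx` is presumably an AwsCredentials block exposing `get_boto3_session()`, `region_name`, and `aws_client_parameters`; the region is illustrative.
from prefect_aws import AwsCredentials

creds = AwsCredentials(region_name="us-east-1")  # illustrative
s3 = _get_client_cached(creds, ClientType.S3)    # builds and caches the client
assert s3 is _get_client_cached(creds, ClientType.S3)  # second call hits the cache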
python
|
doocs__leetcode
|
solution/2100-2199/2106.Maximum Fruits Harvested After at Most K Steps/Solution.py
|
{
"start": 0,
"end": 511
}
|
class ____:
def maxTotalFruits(self, fruits: List[List[int]], startPos: int, k: int) -> int:
ans = i = s = 0
for j, (pj, fj) in enumerate(fruits):
s += fj
while (
i <= j
and pj
- fruits[i][0]
+ min(abs(startPos - fruits[i][0]), abs(startPos - fruits[j][0]))
> k
):
s -= fruits[i][1]
i += 1
ans = max(ans, s)
return ans
|
Solution
|
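A quick sanity check of the sliding-window solution above on LeetCode's first sample (assumes `from typing import List` as in the full solution file): walking right from position 5 to position 8 costs 3 <= k steps and collects 3 + 6 = 9 fruits.
assert Solution().maxTotalFruits([[2, 8], [6, 3], [8, 6]], 5, 4) == 9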
python
|
patrick-kidger__equinox
|
equinox/_tree.py
|
{
"start": 407,
"end": 685
}
|
class ____:
def __init__(self, value: Any):
self.value = value
def _remove_leaf_wrapper(x: _LeafWrapper) -> Any:
if not isinstance(x, _LeafWrapper):
raise TypeError(f"Operation undefined, {x} is not a leaf of the pytree.")
return x.value
|
_LeafWrapper
|
python
|
django__django
|
tests/model_fields/test_autofield.py
|
{
"start": 602,
"end": 1359
}
|
class ____(SimpleTestCase):
def test_isinstance_of_autofield(self):
for field in (models.BigAutoField, models.SmallAutoField):
with self.subTest(field.__name__):
self.assertIsInstance(field(), models.AutoField)
def test_issubclass_of_autofield(self):
class MyBigAutoField(models.BigAutoField):
pass
class MySmallAutoField(models.SmallAutoField):
pass
tests = [
MyBigAutoField,
MySmallAutoField,
models.BigAutoField,
models.SmallAutoField,
]
for field in tests:
with self.subTest(field.__name__):
self.assertTrue(issubclass(field, models.AutoField))
|
AutoFieldInheritanceTests
|
python
|
ray-project__ray
|
python/ray/serve/tests/test_target_capacity.py
|
{
"start": 30955,
"end": 31688
}
|
class ____:
async def __call__(self, *args):
await asyncio.sleep(10000)
def create_hang_app(config: Dict) -> Application:
name: str = config["name"]
min_replicas: int = config["min_replicas"]
initial_replicas: int = config["initial_replicas"]
max_replicas: int = config["max_replicas"]
return HangDeployment.options(
name=name,
autoscaling_config={
"min_replicas": min_replicas,
"initial_replicas": initial_replicas,
"max_replicas": max_replicas,
"target_ongoing_requests": 1,
"metrics_interval_s": 0.01,
"downscale_delay_s": 0.01,
},
graceful_shutdown_timeout_s=0.01,
).bind()
|
HangDeployment
|
python
|
mkdocs__mkdocs
|
mkdocs/tests/config/config_options_legacy_tests.py
|
{
"start": 3788,
"end": 5647
}
|
class ____(TestCase):
def test_required(self):
class Schema:
option = c.Choice(('python', 'node'), required=True)
conf = self.get_config(Schema, {'option': 'python'})
self.assertEqual(conf['option'], 'python')
def test_optional(self):
class Schema:
option = c.Choice(('python', 'node'))
conf = self.get_config(Schema, {'option': 'python'})
self.assertEqual(conf['option'], 'python')
conf = self.get_config(Schema, {})
self.assertEqual(conf['option'], None)
conf = self.get_config(Schema, {'option': None})
self.assertEqual(conf['option'], None)
def test_default(self):
class Schema:
option = c.Choice(('a', 'b', 'c'), default='b')
conf = self.get_config(Schema, {})
self.assertEqual(conf['option'], 'b')
conf = self.get_config(Schema, {'option': None})
self.assertEqual(conf['option'], 'b')
with self.expect_error(option="Expected one of: ('a', 'b', 'c') but received: 'go'"):
self.get_config(Schema, {'option': 'go'})
def test_invalid_default(self):
with self.assertRaises(ValueError):
c.Choice(('a', 'b'), default='c')
with self.assertRaises(ValueError):
c.Choice(('a', 'b'), default='c', required=True)
def test_invalid_choice(self):
class Schema:
option = c.Choice(('python', 'node'))
with self.expect_error(option="Expected one of: ('python', 'node') but received: 'go'"):
self.get_config(Schema, {'option': 'go'})
def test_invalid_choices(self):
with self.assertRaises(ValueError):
c.Choice('')
with self.assertRaises(ValueError):
c.Choice([])
with self.assertRaises(ValueError):
c.Choice(5)
|
ChoiceTest
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/index/collector.py
|
{
"start": 12821,
"end": 12953
}
|
class ____(NamedTuple):
find_links: Sequence[Optional[LinkSource]]
index_urls: Sequence[Optional[LinkSource]]
|
CollectedSources
|
python
|
huggingface__transformers
|
src/transformers/models/mt5/modeling_mt5.py
|
{
"start": 23058,
"end": 23882
}
|
class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config: MT5Config):
super().__init__()
self.dense = nn.Linear(config.d_model, config.d_model)
self.dropout = nn.Dropout(p=config.classifier_dropout)
self.out_proj = nn.Linear(config.d_model, config.num_labels)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
@auto_docstring
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel with T5->MT5, t5->mt5
|
MT5ClassificationHead
|
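A minimal shape-check sketch for the head above, assuming the record's class plus MT5Config from transformers; num_labels is illustrative.
import torch
from transformers import MT5Config

config = MT5Config(num_labels=3)         # d_model defaults to 512
head = MT5ClassificationHead(config)
hidden = torch.randn(2, config.d_model)  # pooled sentence representations
logits = head(hidden)
assert logits.shape == (2, 3)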
python
|
sympy__sympy
|
sympy/tensor/array/dense_ndim_array.py
|
{
"start": 390,
"end": 3658
}
|
class ____(NDimArray):
_array: List[Basic]
def __new__(self, *args, **kwargs):
return ImmutableDenseNDimArray(*args, **kwargs)
@property
def kind(self) -> ArrayKind:
return ArrayKind._union(self._array)
def __getitem__(self, index):
"""
Allows to get items from N-dim array.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([0, 1, 2, 3], (2, 2))
>>> a
[[0, 1], [2, 3]]
>>> a[0, 0]
0
>>> a[1, 1]
3
>>> a[0]
[0, 1]
>>> a[1]
[2, 3]
Symbolic index:
>>> from sympy.abc import i, j
>>> a[i, j]
[[0, 1], [2, 3]][i, j]
Replace `i` and `j` to get element `(1, 1)`:
>>> a[i, j].subs({i: 1, j: 1})
3
"""
syindex = self._check_symbolic_index(index)
if syindex is not None:
return syindex
index = self._check_index_for_getitem(index)
if isinstance(index, tuple) and any(isinstance(i, slice) for i in index):
sl_factors, eindices = self._get_slice_data_for_array_access(index)
array = [self._array[self._parse_index(i)] for i in eindices]
nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)]
return type(self)(array, nshape)
else:
index = self._parse_index(index)
return self._array[index]
@classmethod
def zeros(cls, *shape):
list_length = functools.reduce(lambda x, y: x*y, shape, S.One)
return cls._new(([0]*list_length,), shape)
def tomatrix(self):
"""
        Converts a MutableDenseNDimArray to a Matrix. Only 2-dimensional arrays can be converted; otherwise a ValueError is raised.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([1 for i in range(9)], (3, 3))
>>> b = a.tomatrix()
>>> b
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
from sympy.matrices import Matrix
if self.rank() != 2:
raise ValueError('Dimensions must be of size of 2')
return Matrix(self.shape[0], self.shape[1], self._array)
def reshape(self, *newshape):
"""
        Returns a MutableDenseNDimArray instance with a new shape. The number of
        elements must be compatible with the new shape; the method's only
        arguments set the new shape.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3))
>>> a.shape
(2, 3)
>>> a
[[1, 2, 3], [4, 5, 6]]
>>> b = a.reshape(3, 2)
>>> b.shape
(3, 2)
>>> b
[[1, 2], [3, 4], [5, 6]]
"""
new_total_size = functools.reduce(lambda x,y: x*y, newshape)
if new_total_size != self._loop_size:
raise ValueError(f"Expecting reshape size to {self._loop_size} but got prod({newshape}) = {new_total_size}")
# there is no `.func` as this class does not subtype `Basic`:
return type(self)(self._array, newshape)
|
DenseNDimArray
|
python
|
uqfoundation__dill
|
dill/_dill.py
|
{
"start": 12208,
"end": 12261
}
|
class ____(Warning, PickleError):
pass
|
PickleWarning
|
python
|
huggingface__transformers
|
src/transformers/pipelines/base.py
|
{
"start": 21636,
"end": 23212
}
|
class ____(PipelineDataFormat):
"""
    Read data from piped input to the python process. For multi-column data, columns should be separated by \t.
    If columns are provided, then the output will be a dictionary with {column_x: value_x}.
Args:
output_path (`str`): Where to save the outgoing data.
input_path (`str`): Where to look for the input data.
column (`str`): The column to read.
overwrite (`bool`, *optional*, defaults to `False`):
Whether or not to overwrite the `output_path`.
"""
def __iter__(self):
for line in sys.stdin:
# Split for multi-columns
if "\t" in line:
line = line.split("\t")
if self.column:
# Dictionary to map arguments
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
# No dictionary to map arguments
else:
yield line
def save(self, data: dict):
"""
Print the data.
Args:
data (`dict`): The data to store.
"""
print(data)
def save_binary(self, data: dict | list[dict]) -> str:
if self.output_path is None:
raise KeyError(
"When using piped input on pipeline outputting large object requires an output file path. "
"Please provide such output path through --output argument."
)
return super().save_binary(data)
|
PipedPipelineDataFormat
|
python
|
mwaskom__seaborn
|
tests/_stats/test_aggregation.py
|
{
"start": 194,
"end": 661
}
|
class ____:
@pytest.fixture
def df(self, rng):
n = 30
return pd.DataFrame(dict(
x=rng.uniform(0, 7, n).round(),
y=rng.normal(size=n),
color=rng.choice(["a", "b", "c"], n),
group=rng.choice(["x", "y"], n),
))
def get_groupby(self, df, orient):
other = {"x": "y", "y": "x"}[orient]
cols = [c for c in df if c != other]
return GroupBy(cols)
|
AggregationFixtures
|
python
|
miyuchina__mistletoe
|
test/test_contrib/test_toc_renderer.py
|
{
"start": 173,
"end": 2520
}
|
class ____(TestCase):
def test_parse_rendered_heading(self):
rendered_heading = '<h3>some <em>text</em></h3>'
content = TocRenderer.parse_rendered_heading(rendered_heading)
self.assertEqual(content, 'some text')
def test_render_heading(self):
renderer = TocRenderer()
Heading.start('### some *text*\n')
token = Heading(Heading.read(iter(['foo'])))
renderer.render_heading(token)
self.assertEqual(renderer._headings[0], (3, 'some text'))
def test_depth(self):
renderer = TocRenderer(depth=3)
token = Document(['# title\n', '## heading\n', '#### heading\n'])
renderer.render(token)
self.assertEqual(renderer._headings, [(2, 'heading')])
def test_omit_title(self):
renderer = TocRenderer(omit_title=True)
token = Document(['# title\n', '\n', '## heading\n'])
renderer.render(token)
self.assertEqual(renderer._headings, [(2, 'heading')])
def test_filter_conditions(self):
import re
filter_conds = [lambda x: re.match(r'heading', x),
lambda x: re.match(r'foo', x)]
renderer = TocRenderer(filter_conds=filter_conds)
token = Document(['# title\n',
'\n',
'## heading\n',
'\n',
'#### not heading\n'])
renderer.render(token)
self.assertEqual(renderer._headings, [(4, 'not heading')])
def test_get_toc(self):
headings = [(1, 'heading 1'),
(2, 'subheading 1'),
(2, 'subheading 2'),
(3, 'subsubheading 1'),
(2, 'subheading 3'),
(1, 'heading 2')]
renderer = TocRenderer(omit_title=False)
renderer._headings = headings
toc = renderer.toc
self.assertIsInstance(toc, block_token.List)
# for now, we check at least the most nested heading
# (hierarchy: `List -> ListItem -> {Paragraph -> RawText.content | List -> ...}`):
heading_item = toc.children[0].children[1].children[1].children[1].children[0]
self.assertIsInstance(heading_item, block_token.ListItem)
self.assertEqual(heading_item.children[0].children[0].content, 'subsubheading 1')
|
TestTocRenderer
|
python
|
PyCQA__pylint
|
tests/functional/a/attribute_defined_outside_init.py
|
{
"start": 774,
"end": 1014
}
|
class ____:
def test_mixin(self):
"""Don't emit attribute-defined-outside-init for mixin classes."""
if self.defined_already: # pylint: disable=access-member-before-definition
self.defined_already = None
|
Mixin
|
python
|
walkccc__LeetCode
|
solutions/2808. Minimum Seconds to Equalize a Circular Array/2808.py
|
{
"start": 0,
"end": 622
}
|
class ____:
def minimumSeconds(self, nums: list[int]) -> int:
n = len(nums)
ans = n
numToIndices = collections.defaultdict(list)
for i, num in enumerate(nums):
numToIndices[num].append(i)
def getSeconds(i: int, j: int) -> int:
"""Returns the number of seconds required to make nums[i..j] the same."""
return (i - j) // 2
for indices in numToIndices.values():
seconds = getSeconds(indices[0] + n, indices[-1])
for i in range(1, len(indices)):
seconds = max(seconds, getSeconds(indices[i], indices[i - 1]))
ans = min(ans, seconds)
return ans
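# A hedged worked example (added for illustration, not part of the original
# solution): in nums = [1, 2, 1, 2], each value's occurrences are adjacent on
# the circle (gap of 2), so a single second of simultaneous neighbor-copying
# equalizes the array:
#   Solution().minimumSeconds([1, 2, 1, 2])  # -> 1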
|
Solution
|
python
|
lepture__authlib
|
authlib/oauth1/client.py
|
{
"start": 300,
"end": 6737
}
|
class ____:
auth_class = ClientAuth
def __init__(
self,
session,
client_id,
client_secret=None,
token=None,
token_secret=None,
redirect_uri=None,
rsa_key=None,
verifier=None,
signature_method=SIGNATURE_HMAC_SHA1,
signature_type=SIGNATURE_TYPE_HEADER,
force_include_body=False,
realm=None,
**kwargs,
):
if not client_id:
raise ValueError('Missing "client_id"')
self.session = session
self.auth = self.auth_class(
client_id,
client_secret=client_secret,
token=token,
token_secret=token_secret,
redirect_uri=redirect_uri,
signature_method=signature_method,
signature_type=signature_type,
rsa_key=rsa_key,
verifier=verifier,
realm=realm,
force_include_body=force_include_body,
)
self._kwargs = kwargs
@property
def redirect_uri(self):
return self.auth.redirect_uri
@redirect_uri.setter
def redirect_uri(self, uri):
self.auth.redirect_uri = uri
@property
def token(self):
return dict(
oauth_token=self.auth.token,
oauth_token_secret=self.auth.token_secret,
oauth_verifier=self.auth.verifier,
)
@token.setter
def token(self, token):
"""This token setter is designed for an easy integration for
OAuthClient. Make sure both OAuth1Session and OAuth2Session
have token setters.
"""
if token is None:
self.auth.token = None
self.auth.token_secret = None
self.auth.verifier = None
elif "oauth_token" in token:
self.auth.token = token["oauth_token"]
if "oauth_token_secret" in token:
self.auth.token_secret = token["oauth_token_secret"]
if "oauth_verifier" in token:
self.auth.verifier = token["oauth_verifier"]
else:
message = f"oauth_token is missing: {token!r}"
self.handle_error("missing_token", message)
def create_authorization_url(self, url, request_token=None, **kwargs):
"""Create an authorization URL by appending request_token and optional
kwargs to url.
This is the second step in the OAuth 1 workflow. The user should be
redirected to this authorization URL, grant access to you, and then
be redirected back to you. The redirection back can either be specified
during client registration or by supplying a callback URI per request.
:param url: The authorization endpoint URL.
:param request_token: The previously obtained request token.
:param kwargs: Optional parameters to append to the URL.
:returns: The authorization URL with new parameters embedded.
"""
kwargs["oauth_token"] = request_token or self.auth.token
if self.auth.redirect_uri:
kwargs["oauth_callback"] = self.auth.redirect_uri
return add_params_to_uri(url, kwargs.items())
def fetch_request_token(self, url, **kwargs):
"""Method for fetching an access token from the token endpoint.
This is the first step in the OAuth 1 workflow. A request token is
obtained by making a signed post request to url. The token is then
parsed from the application/x-www-form-urlencoded response and ready
to be used to construct an authorization url.
:param url: Request Token endpoint.
:param kwargs: Extra parameters to include for fetching token.
:return: A Request Token dict.
"""
return self._fetch_token(url, **kwargs)
def fetch_access_token(self, url, verifier=None, **kwargs):
"""Method for fetching an access token from the token endpoint.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
:param url: Access Token endpoint.
:param verifier: A verifier string to prove authorization was granted.
:param kwargs: Extra parameters to include for fetching access token.
:return: A token dict.
"""
if verifier:
self.auth.verifier = verifier
if not self.auth.verifier:
self.handle_error("missing_verifier", 'Missing "verifier" value')
return self._fetch_token(url, **kwargs)
def parse_authorization_response(self, url):
"""Extract parameters from the post authorization redirect
response URL.
:param url: The full URL that resulted from the user being redirected
back from the OAuth provider to you, the client.
:returns: A dict of parameters extracted from the URL.
"""
token = dict(url_decode(urlparse.urlparse(url).query))
self.token = token
return token
def _fetch_token(self, url, **kwargs):
resp = self.session.post(url, auth=self.auth, **kwargs)
token = self.parse_response_token(resp.status_code, resp.text)
self.token = token
self.auth.verifier = None
return token
def parse_response_token(self, status_code, text):
if status_code >= 400:
message = (
f"Token request failed with code {status_code}, response was '{text}'."
)
self.handle_error("fetch_token_denied", message)
try:
text = text.strip()
if text.startswith("{"):
token = json_loads(text)
else:
token = dict(url_decode(text))
except (TypeError, ValueError) as e:
error = (
"Unable to decode token from token response. "
"This is commonly caused by an unsuccessful request where"
" a non urlencoded error message is returned. "
f"The decoding error was {e}"
)
raise ValueError(error) from e
return token
@staticmethod
def handle_error(error_type, error_description):
raise ValueError(f"{error_type}: {error_description}")
def __del__(self):
try:
del self.session
except AttributeError:
pass
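# A minimal sketch of the three-step flow documented above; the endpoint URLs
# and the `requests.Session()` transport are illustrative assumptions, not part
# of this module:
#
#   import requests
#   client = OAuth1Client(requests.Session(), client_id="key", client_secret="secret")
#   client.fetch_request_token("https://provider.example/oauth/request_token")
#   url = client.create_authorization_url("https://provider.example/oauth/authorize")
#   # ...user grants access and is redirected back with a verifier...
#   client.fetch_access_token("https://provider.example/oauth/access_token", verifier="v")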
|
OAuth1Client
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/elements.py
|
{
"start": 126755,
"end": 130128
}
|
class ____(UnaryExpression[_T]):
"""Forms the basis for right-hand collection operator modifiers
ANY and ALL.
The ANY and ALL keywords are available in different ways on different
backends. On PostgreSQL, they only work for an ARRAY type. On
MySQL, they only work for subqueries.
"""
inherit_cache = True
_is_collection_aggregate = True
@classmethod
def _create_any(
cls, expr: _ColumnExpressionArgument[_T]
) -> CollectionAggregate[bool]:
"""create CollectionAggregate for the legacy
ARRAY.Comparator.any() method"""
col_expr: ColumnElement[_T] = coercions.expect(
roles.ExpressionElementRole,
expr,
)
col_expr = col_expr.self_group()
return CollectionAggregate(
col_expr,
operator=operators.any_op,
type_=type_api.BOOLEANTYPE,
)
@classmethod
def _create_all(
cls, expr: _ColumnExpressionArgument[_T]
) -> CollectionAggregate[bool]:
"""create CollectionAggregate for the legacy
ARRAY.Comparator.all() method"""
col_expr: ColumnElement[_T] = coercions.expect(
roles.ExpressionElementRole,
expr,
)
col_expr = col_expr.self_group()
return CollectionAggregate(
col_expr,
operator=operators.all_op,
type_=type_api.BOOLEANTYPE,
)
@util.preload_module("sqlalchemy.sql.sqltypes")
def _bind_param(
self,
operator: operators.OperatorType,
obj: Any,
type_: Optional[TypeEngine[_T]] = None,
expanding: bool = False,
) -> BindParameter[_T]:
"""For new style any_(), all_(), ensure compared literal value
receives appropriate bound parameter type."""
# a CollectionAggregate is specific to ARRAY or int
# only. So for ARRAY case, make sure we use correct element type
sqltypes = util.preloaded.sql_sqltypes
if self.element.type._type_affinity is sqltypes.ARRAY:
compared_to_type = cast(
sqltypes.ARRAY[Any], self.element.type
).item_type
else:
compared_to_type = self.element.type
return BindParameter(
None,
obj,
_compared_to_operator=operator,
type_=type_,
_compared_to_type=compared_to_type,
unique=True,
expanding=expanding,
)
# operate and reverse_operate are hardwired to
# dispatch onto the type comparator directly, so that we can
# ensure "reversed" behavior.
def operate(
self, op: OperatorType, *other: Any, **kwargs: Any
) -> ColumnElement[_T]:
if not operators.is_comparison(op):
raise exc.ArgumentError(
"Only comparison operators may be used with ANY/ALL"
)
kwargs["reverse"] = True
return self.comparator.operate(operators.mirror(op), *other, **kwargs)
def reverse_operate(
self, op: OperatorType, other: Any, **kwargs: Any
) -> ColumnElement[_T]:
# comparison operators should never call reverse_operate
assert not operators.is_comparison(op)
raise exc.ArgumentError(
"Only comparison operators may be used with ANY/ALL"
)
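# A hedged usage sketch (standard SQLAlchemy `any_()` API, assumed rather than
# taken from this file):
#
#   from sqlalchemy import any_, select
#   stmt = select(tbl).where(tbl.c.value == any_(tbl.c.tags))
#
# On PostgreSQL this renders `value = ANY (tags)` for an ARRAY column; the
# comparison dispatches through `operate()` above with reverse=True.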
|
CollectionAggregate
|
python
|
arrow-py__arrow
|
tests/test_locales.py
|
{
"start": 96057,
"end": 97220
}
|
class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("now", 0) == "nunc"
assert self.locale._format_timeframe("second", 1) == "secundum"
assert self.locale._format_timeframe("seconds", 3) == "3 secundis"
assert self.locale._format_timeframe("minute", 1) == "minutam"
assert self.locale._format_timeframe("minutes", 4) == "4 minutis"
assert self.locale._format_timeframe("hour", 1) == "horam"
assert self.locale._format_timeframe("hours", 23) == "23 horas"
assert self.locale._format_timeframe("day", 1) == "diem"
assert self.locale._format_timeframe("days", 12) == "12 dies"
assert self.locale._format_timeframe("month", 1) == "mensem"
assert self.locale._format_timeframe("months", 11) == "11 mensis"
assert self.locale._format_timeframe("year", 1) == "annum"
assert self.locale._format_timeframe("years", 2) == "2 annos"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "dies Saturni"
@pytest.mark.usefixtures("lang_locale")
|
TestLatinLocale
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 857902,
"end": 858098
}
|
class ____(VegaLiteSchema):
"""Padding schema wrapper."""
_schema = {"$ref": "#/definitions/Padding"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
Padding
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-maximum-sequence-value-of-array.py
|
{
"start": 66,
"end": 1244
}
|
class ____(object):
def maxValue(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
INF = float("inf")
MAX_MASK = 127
def is_submask(a, b):
return (a|b) == b
def dp(direction, npos):
result = [npos]*(MAX_MASK+1)
dp = [INF]*(MAX_MASK+1)
cnt = [0]*(MAX_MASK+1)
for i in direction(xrange(len(nums))):
dp[nums[i]] = 1
for mask in xrange(MAX_MASK+1):
if is_submask(nums[i], mask):
cnt[mask] += 1
dp[mask|nums[i]] = min(dp[mask|nums[i]], dp[mask]+1)
for mask in xrange(MAX_MASK+1):
if cnt[mask] >= k and dp[mask] <= k and result[mask] == npos:
result[mask] = i
return result
left = dp(lambda x: x, len(nums))
right = dp(reversed, -1)
return next(result for result in reversed(xrange(MAX_MASK+1)) for l in xrange(1, MAX_MASK+1) if left[l] < right[result^l])
# Time: O(n * k * r + n * r^2)
# Space: O(n * k * r)
# prefix sum, dp
|
Solution
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_types04.py
|
{
"start": 315,
"end": 1892
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("types04.xlsx")
def test_write_url_default(self):
"""Test writing hyperlinks with strings_to_urls on."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
red = workbook.add_format({"font_color": "red"})
worksheet.write(0, 0, "http://www.google.com/", red)
worksheet.write_string(1, 0, "http://www.google.com/", red)
workbook.close()
self.assertExcelEqual()
def test_write_url_implicit(self):
"""Test writing hyperlinks with strings_to_urls on."""
workbook = Workbook(self.got_filename, {"strings_to_urls": True})
worksheet = workbook.add_worksheet()
red = workbook.add_format({"font_color": "red"})
worksheet.write(0, 0, "http://www.google.com/", red)
worksheet.write_string(1, 0, "http://www.google.com/", red)
workbook.close()
self.assertExcelEqual()
def test_write_url_explicit(self):
"""Test writing hyperlinks with strings_to_urls off."""
workbook = Workbook(self.got_filename, {"strings_to_urls": False})
worksheet = workbook.add_worksheet()
red = workbook.add_format({"font_color": "red"})
worksheet.write_url(0, 0, "http://www.google.com/", red)
worksheet.write(1, 0, "http://www.google.com/", red)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
django__django
|
tests/admin_docs/test_views.py
|
{
"start": 23893,
"end": 24907
}
|
class ____(unittest.TestCase):
def test_field_name(self):
with self.assertRaises(AttributeError):
views.get_readable_field_data_type("NotAField")
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
"Boolean (Either True or False)",
)
def test_char_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.CharField(max_length=255)),
"String (up to 255)",
)
self.assertEqual(
views.get_readable_field_data_type(fields.CharField()),
"String (unlimited)",
)
def test_custom_fields(self):
self.assertEqual(
views.get_readable_field_data_type(CustomField()), "A custom field type"
)
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
"Field of type: DescriptionLackingField",
)
|
TestFieldType
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py
|
{
"start": 91551,
"end": 92576
}
|
class ____:
@mock.patch(VERTEX_AI_PATH.format("batch_prediction_job.BatchPredictionJobHook"))
def test_execute(self, mock_hook):
op = DeleteBatchPredictionJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
batch_prediction_job_id=TEST_BATCH_PREDICTION_JOB_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.delete_batch_prediction_job.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
batch_prediction_job=TEST_BATCH_PREDICTION_JOB_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
|
TestVertexAIDeleteBatchPredictionJobOperator
|
python
|
encode__starlette
|
tests/test_formparsers.py
|
{
"start": 747,
"end": 11142
}
|
class ____(dict[Any, Any]):
def __bool__(self) -> bool:
return True
# FORCE_MULTIPART is an empty dict that boolean-evaluates as `True`.
FORCE_MULTIPART = ForceMultipartDict()
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request(scope, receive)
data = await request.form()
output: dict[str, Any] = {}
for key, value in data.items():
if isinstance(value, UploadFile):
content = await value.read()
output[key] = {
"filename": value.filename,
"size": value.size,
"content": content.decode(),
"content_type": value.content_type,
}
else:
output[key] = value
await request.close()
response = JSONResponse(output)
await response(scope, receive, send)
async def multi_items_app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request(scope, receive)
data = await request.form()
output: dict[str, list[Any]] = {}
for key, value in data.multi_items():
if key not in output:
output[key] = []
if isinstance(value, UploadFile):
content = await value.read()
output[key].append(
{
"filename": value.filename,
"size": value.size,
"content": content.decode(),
"content_type": value.content_type,
}
)
else:
output[key].append(value)
await request.close()
response = JSONResponse(output)
await response(scope, receive, send)
async def app_with_headers(scope: Scope, receive: Receive, send: Send) -> None:
request = Request(scope, receive)
data = await request.form()
output: dict[str, Any] = {}
for key, value in data.items():
if isinstance(value, UploadFile):
content = await value.read()
output[key] = {
"filename": value.filename,
"size": value.size,
"content": content.decode(),
"content_type": value.content_type,
"headers": list(value.headers.items()),
}
else:
output[key] = value
await request.close()
response = JSONResponse(output)
await response(scope, receive, send)
async def app_read_body(scope: Scope, receive: Receive, send: Send) -> None:
request = Request(scope, receive)
# Read bytes, to force request.stream() to return the already parsed body
await request.body()
data = await request.form()
output = {}
for key, value in data.items():
output[key] = value
await request.close()
response = JSONResponse(output)
await response(scope, receive, send)
async def app_monitor_thread(scope: Scope, receive: Receive, send: Send) -> None:
"""Helper app to monitor what thread the app was called on.
This can later be used to validate thread/event loop operations.
"""
request = Request(scope, receive)
# Make sure we parse the form
await request.form()
await request.close()
# Send back the current thread id
response = JSONResponse({"thread_ident": threading.current_thread().ident})
await response(scope, receive, send)
def make_app_max_parts(max_files: int = 1000, max_fields: int = 1000, max_part_size: int = 1024 * 1024) -> ASGIApp:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request(scope, receive)
data = await request.form(max_files=max_files, max_fields=max_fields, max_part_size=max_part_size)
output: dict[str, Any] = {}
for key, value in data.items():
if isinstance(value, UploadFile):
content = await value.read()
output[key] = {
"filename": value.filename,
"size": value.size,
"content": content.decode(),
"content_type": value.content_type,
}
else:
output[key] = value
await request.close()
response = JSONResponse(output)
await response(scope, receive, send)
return app
def test_multipart_request_data(tmpdir: Path, test_client_factory: TestClientFactory) -> None:
client = test_client_factory(app)
response = client.post("/", data={"some": "data"}, files=FORCE_MULTIPART)
assert response.json() == {"some": "data"}
def test_multipart_request_files(tmpdir: Path, test_client_factory: TestClientFactory) -> None:
path = os.path.join(tmpdir, "test.txt")
with open(path, "wb") as file:
file.write(b"<file content>")
client = test_client_factory(app)
with open(path, "rb") as f:
response = client.post("/", files={"test": f})
assert response.json() == {
"test": {
"filename": "test.txt",
"size": 14,
"content": "<file content>",
"content_type": "text/plain",
}
}
def test_multipart_request_files_with_content_type(tmpdir: Path, test_client_factory: TestClientFactory) -> None:
path = os.path.join(tmpdir, "test.txt")
with open(path, "wb") as file:
file.write(b"<file content>")
client = test_client_factory(app)
with open(path, "rb") as f:
response = client.post("/", files={"test": ("test.txt", f, "text/plain")})
assert response.json() == {
"test": {
"filename": "test.txt",
"size": 14,
"content": "<file content>",
"content_type": "text/plain",
}
}
def test_multipart_request_multiple_files(tmpdir: Path, test_client_factory: TestClientFactory) -> None:
path1 = os.path.join(tmpdir, "test1.txt")
with open(path1, "wb") as file:
file.write(b"<file1 content>")
path2 = os.path.join(tmpdir, "test2.txt")
with open(path2, "wb") as file:
file.write(b"<file2 content>")
client = test_client_factory(app)
with open(path1, "rb") as f1, open(path2, "rb") as f2:
response = client.post("/", files={"test1": f1, "test2": ("test2.txt", f2, "text/plain")})
assert response.json() == {
"test1": {
"filename": "test1.txt",
"size": 15,
"content": "<file1 content>",
"content_type": "text/plain",
},
"test2": {
"filename": "test2.txt",
"size": 15,
"content": "<file2 content>",
"content_type": "text/plain",
},
}
def test_multipart_request_multiple_files_with_headers(tmpdir: Path, test_client_factory: TestClientFactory) -> None:
path1 = os.path.join(tmpdir, "test1.txt")
with open(path1, "wb") as file:
file.write(b"<file1 content>")
path2 = os.path.join(tmpdir, "test2.txt")
with open(path2, "wb") as file:
file.write(b"<file2 content>")
client = test_client_factory(app_with_headers)
with open(path1, "rb") as f1, open(path2, "rb") as f2:
response = client.post(
"/",
files=[
("test1", (None, f1)),
("test2", ("test2.txt", f2, "text/plain", {"x-custom": "f2"})),
],
)
assert response.json() == {
"test1": "<file1 content>",
"test2": {
"filename": "test2.txt",
"size": 15,
"content": "<file2 content>",
"content_type": "text/plain",
"headers": [
[
"content-disposition",
'form-data; name="test2"; filename="test2.txt"',
],
["x-custom", "f2"],
["content-type", "text/plain"],
],
},
}
def test_multi_items(tmpdir: Path, test_client_factory: TestClientFactory) -> None:
path1 = os.path.join(tmpdir, "test1.txt")
with open(path1, "wb") as file:
file.write(b"<file1 content>")
path2 = os.path.join(tmpdir, "test2.txt")
with open(path2, "wb") as file:
file.write(b"<file2 content>")
client = test_client_factory(multi_items_app)
with open(path1, "rb") as f1, open(path2, "rb") as f2:
response = client.post(
"/",
data={"test1": "abc"},
files=[("test1", f1), ("test1", ("test2.txt", f2, "text/plain"))],
)
assert response.json() == {
"test1": [
"abc",
{
"filename": "test1.txt",
"size": 15,
"content": "<file1 content>",
"content_type": "text/plain",
},
{
"filename": "test2.txt",
"size": 15,
"content": "<file2 content>",
"content_type": "text/plain",
},
]
}
def test_multipart_request_mixed_files_and_data(tmpdir: Path, test_client_factory: TestClientFactory) -> None:
client = test_client_factory(app)
response = client.post(
"/",
data=(
# data
b"--a7f7ac8d4e2e437c877bb7b8d7cc549c\r\n" # type: ignore
b'Content-Disposition: form-data; name="field0"\r\n\r\n'
b"value0\r\n"
# file
b"--a7f7ac8d4e2e437c877bb7b8d7cc549c\r\n"
b'Content-Disposition: form-data; name="file"; filename="file.txt"\r\n'
b"Content-Type: text/plain\r\n\r\n"
b"<file content>\r\n"
# data
b"--a7f7ac8d4e2e437c877bb7b8d7cc549c\r\n"
b'Content-Disposition: form-data; name="field1"\r\n\r\n'
b"value1\r\n"
b"--a7f7ac8d4e2e437c877bb7b8d7cc549c--\r\n"
),
headers={"Content-Type": ("multipart/form-data; boundary=a7f7ac8d4e2e437c877bb7b8d7cc549c")},
)
assert response.json() == {
"file": {
"filename": "file.txt",
"size": 14,
"content": "<file content>",
"content_type": "text/plain",
},
"field0": "value0",
"field1": "value1",
}
|
ForceMultipartDict
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/rocm/rocm_kernel.py
|
{
"start": 1028,
"end": 1166
}
|
class ____(Kernel):
"""
Baseclass for ROCm based Kernels
"""
overrides = OpOverrides # type: ignore[assignment]
|
ROCmKernel
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/distribute/worker_training_state.py
|
{
"start": 1463,
"end": 6091
}
|
class ____(object):
"""Training state management class.
This class provides APIs for backing up and restoring the training state.
This allows model and epoch information to be saved periodically and restored
for fault tolerance, also known as preemption recovery.
"""
def __init__(self, model, checkpoint_dir):
self._model = model
# The epoch at which the checkpoint is saved. Used for fault-tolerance.
# GPU device only has int64 dtype registered VarHandleOp.
self._ckpt_saved_epoch = variables.Variable(
initial_value=constant_op.constant(
CKPT_SAVED_EPOCH_UNUSED_VALUE, dtype=dtypes.int64),
name='ckpt_saved_epoch')
# Variable initialization.
backend.set_value(self._ckpt_saved_epoch, CKPT_SAVED_EPOCH_UNUSED_VALUE)
# _ckpt_saved_epoch gets tracked and is included in the checkpoint file
# when backing up.
checkpoint = trackable_util.Checkpoint(
model=self._model, ckpt_saved_epoch=self._ckpt_saved_epoch)
# If this is single-worker training, checkpoint_dir are the same for
# write_checkpoint_manager and read_checkpoint_manager.
#
# If this is multi-worker training, and this worker should not
# save checkpoint, we replace the write_checkpoint_manager's checkpoint_dir
with a temp filepath, so it writes to a file that will be removed at the
end of the back_up() call. This is necessary because the SyncOnReadVariable
# needs to be synced across all the workers in order to be read, and all
# workers need to perform `save()`.
# But all workers should restore from the same checkpoint_dir as passed in
# read_checkpoint_manager.
self.read_checkpoint_manager = checkpoint_management.CheckpointManager(
checkpoint,
directory=os.path.join(checkpoint_dir, 'chief'),
max_to_keep=1)
write_checkpoint_dir = distributed_file_utils.write_dirpath(
checkpoint_dir, self._model.distribute_strategy)
if self._model.distribute_strategy.extended.should_checkpoint:
self.write_checkpoint_manager = self.read_checkpoint_manager
else:
self.write_checkpoint_manager = checkpoint_management.CheckpointManager(
checkpoint, directory=write_checkpoint_dir, max_to_keep=1)
def back_up(self, epoch):
"""Back up the current state of training into a checkpoint file.
Args:
epoch: The current epoch information to be saved.
"""
backend.set_value(self._ckpt_saved_epoch, epoch)
# Save the model plus CKPT_SAVED_EPOCH variable.
if self.write_checkpoint_manager.save():
distributed_file_utils.remove_temp_dirpath(
self.write_checkpoint_manager.directory,
self._model.distribute_strategy)
def restore(self):
"""Restore the training state from the backed up checkpoint file.
Returns:
True if the training state is successfully restored. False if the training
state doesn't need to be restored, or an error occurred so it can't be.
"""
self.read_checkpoint_manager.restore_or_initialize()
def delete_backup(self):
"""Delete the backup directories.
Delete the backup directories which should not exist after `fit()`
successfully finishes.
"""
if self.write_checkpoint_manager is self.read_checkpoint_manager:
try:
file_io.delete_recursively_v2(self.write_checkpoint_manager.directory)
except errors.NotFoundError:
pass
def maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):
"""Maybe load initial epoch from ckpt considering possible worker recovery.
When `_ckpt_saved_epoch` attribute exists and is not
`CKPT_SAVED_EPOCH_UNUSED_VALUE`, this is under multi-worker training setting
and indicates the worker is recovering from previous failure. In this case,
infer `initial_epoch` from `self._ckpt_saved_epoch` to continue previous
unfinished training from certain epoch.
Args:
initial_epoch: The original initial_epoch the user passes in `fit()`.
mode: The mode for running `model.fit()`.
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the epoch the training is supposed to continue
at. Otherwise, return the `initial_epoch` the user passes in.
"""
epoch = backend.eval(self._ckpt_saved_epoch)
if mode == mode_keys.ModeKeys.TRAIN and epoch >= 0:
# The most recently saved epoch is one epoch prior to the epoch it
# failed at, so return the value of 'self._ckpt_saved_epoch' plus one.
return epoch + 1
return initial_epoch
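# Illustrative note (not in the original source): if a worker failed during
# epoch 5, `_ckpt_saved_epoch` holds 4 (the last completed epoch), so training
# resumes at epoch 4 + 1 = 5 instead of the user-supplied initial_epoch.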
|
WorkerTrainingState
|
python
|
huggingface__transformers
|
tests/models/m2m_100/test_modeling_m2m_100.py
|
{
"start": 8735,
"end": 13251
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
M2M100Model,
M2M100ForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": M2M100Model,
"summarization": M2M100ForConditionalGeneration,
"text2text-generation": M2M100ForConditionalGeneration,
"translation": M2M100ForConditionalGeneration,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = False
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def setUp(self):
self.model_tester = M2M100ModelTester(self)
self.config_tester = ConfigTester(self, config_class=M2M100Config)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (M2M100Model, M2M100ForConditionalGeneration):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
@unittest.skip(
reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
)
def test_load_save_without_tied_weights(self):
pass
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
|
M2M100ModelTest
|
python
|
ray-project__ray
|
release/train_tests/benchmark/config.py
|
{
"start": 428,
"end": 497
}
|
class ____(BaseModel):
TASK_NAME: ClassVar[str] = "base"
|
TaskConfig
|
python
|
django__django
|
django/views/generic/dates.py
|
{
"start": 20423,
"end": 20594
}
|
class ____(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
"""List of objects published today."""
template_name_suffix = "_archive_day"
|
TodayArchiveView
|
python
|
scrapy__scrapy
|
tests/test_command_parse.py
|
{
"start": 2130,
"end": 2432
}
|
class ____(BaseSpider):
name = "asyncdef_asyncio_gen_exc"
async def parse(self, response):
for i in range(10):
await asyncio.sleep(0.1)
yield {{'foo': i}}
if i > 5:
raise ValueError("Stopping the processing")
|
AsyncDefAsyncioGenExcSpider
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/sparse.py
|
{
"start": 2626,
"end": 2962
}
|
class ____:
def setup(self):
N = 10000
k = 10
arr = np.zeros((N, k), dtype=float)
arr[0, 0] = 3.0
arr[12, 7] = -1.0
arr[0, 9] = 11.2
self.df = pd.DataFrame(arr, dtype=pd.SparseDtype("float", fill_value=0.0))
def time_to_coo(self):
self.df.sparse.to_coo()
|
ToCooFrame
|
python
|
kubernetes-client__python
|
kubernetes/client/api/apis_api.py
|
{
"start": 543,
"end": 5205
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_versions(self, **kwargs): # noqa: E501
"""get_api_versions # noqa: E501
get available API versions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_versions(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroupList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_versions_with_http_info(**kwargs) # noqa: E501
def get_api_versions_with_http_info(self, **kwargs): # noqa: E501
"""get_api_versions # noqa: E501
get available API versions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_versions_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroupList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_versions" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroupList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
ApisApi
|
python
|
jina-ai__jina
|
tests/unit/serve/runtimes/worker/test_worker_request_handler.py
|
{
"start": 309,
"end": 455
}
|
class ____(Executor):
@requests
def foo(self, docs, **kwargs):
return DocumentArray([Document(text='new document')])
|
NewDocsExecutor
|
python
|
PyCQA__pylint
|
pylint/config/callback_actions.py
|
{
"start": 8731,
"end": 9767
}
|
class ____(_CallbackAction):
"""Action that has access to the Linter object."""
def __init__(
self,
option_strings: Sequence[str],
dest: str,
nargs: None = None,
const: None = None,
default: None = None,
type: None = None,
choices: None = None,
required: bool = False,
help: str = "",
metavar: str = "",
**kwargs: PyLinter,
) -> None:
self.linter = kwargs["linter"]
super().__init__(
option_strings,
dest,
1,
const,
default,
type,
choices,
required,
help,
metavar,
)
@abc.abstractmethod
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
raise NotImplementedError # pragma: no cover
|
_AccessLinterObjectAction
|
python
|
getsentry__sentry
|
src/sentry/migrations/0992_latestrepoerelease_indexes.py
|
{
"start": 186,
"end": 1987
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0991_projectownership_json_field"),
]
operations = [
migrations.AlterField(
model_name="latestreporeleaseenvironment",
name="commit_id",
field=sentry.db.models.fields.bounded.BoundedBigIntegerField(db_index=True, null=True),
),
migrations.AlterField(
model_name="latestreporeleaseenvironment",
name="environment_id",
field=sentry.db.models.fields.bounded.BoundedBigIntegerField(db_index=True),
),
migrations.AlterField(
model_name="latestreporeleaseenvironment",
name="release_id",
field=sentry.db.models.fields.bounded.BoundedBigIntegerField(db_index=True),
),
]
|
Migration
|
python
|
django-haystack__django-haystack
|
test_haystack/elasticsearch5_tests/test_backend.py
|
{
"start": 26547,
"end": 28777
}
|
class ____(TestCase):
fixtures = ["base_data.json"]
def setUp(self):
super().setUp()
# Wipe it clean.
clear_elasticsearch_index()
# Stow.
self.old_ui = connections["elasticsearch"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = Elasticsearch5MockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["elasticsearch"]._index = self.ui
self.sb = connections["elasticsearch"].get_backend()
self.sq = connections["elasticsearch"].get_query()
# Force indexing of the content.
self.smmi.update(using="elasticsearch")
def tearDown(self):
connections["elasticsearch"]._index = self.old_ui
super().tearDown()
def test_log_query(self):
reset_search_queries()
self.assertEqual(len(connections["elasticsearch"].queries), 0)
with self.settings(DEBUG=False):
len(self.sq.get_results())
self.assertEqual(len(connections["elasticsearch"].queries), 0)
with self.settings(DEBUG=True):
# Redefine it to clear out the cached results.
self.sq = connections["elasticsearch"].query(using="elasticsearch")
self.sq.add_filter(SQ(name="bar"))
len(self.sq.get_results())
self.assertEqual(len(connections["elasticsearch"].queries), 1)
self.assertEqual(
connections["elasticsearch"].queries[0]["query_string"], "name:(bar)"
)
# And again, for good measure.
self.sq = connections["elasticsearch"].query("elasticsearch")
self.sq.add_filter(SQ(name="bar"))
self.sq.add_filter(SQ(text="moof"))
len(self.sq.get_results())
self.assertEqual(len(connections["elasticsearch"].queries), 2)
self.assertEqual(
connections["elasticsearch"].queries[0]["query_string"], "name:(bar)"
)
self.assertEqual(
connections["elasticsearch"].queries[1]["query_string"],
"(name:(bar) AND text:(moof))",
)
lssqstc_all_loaded = None
@override_settings(DEBUG=True)
|
LiveElasticsearch5SearchQueryTestCase
|
python
|
ray-project__ray
|
python/ray/util/client/server/proxier.py
|
{
"start": 1894,
"end": 3749
}
|
class ____:
port: int
process_handle_future: futures.Future
channel: "grpc._channel.Channel"
def is_ready(self) -> bool:
"""Check if the server is ready or not (doesn't block)."""
return self.process_handle_future.done()
def wait_ready(self, timeout: Optional[float] = None) -> None:
"""
Wait for the server to actually start up.
"""
res = self.process_handle_future.result(timeout=timeout)
if res is None:
# This is only set to none when server creation specifically fails.
raise RuntimeError("Server startup failed.")
def poll(self) -> Optional[int]:
"""Check if the process has exited."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
return proc.process.poll()
except futures.TimeoutError:
return
def kill(self) -> None:
"""Try to send a KILL signal to the process."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
proc.process.kill()
except futures.TimeoutError:
# Server has not been started yet.
pass
def set_result(self, proc: Optional[ProcessInfo]) -> None:
"""Set the result of the internal future if it is currently unset."""
if not self.is_ready():
self.process_handle_future.set_result(proc)
def _match_running_client_server(command: List[str]) -> bool:
"""
Detects if the main process in the given command is the RayClient Server.
This works by ensuring that the command is of the form:
<py_executable> -m ray.util.client.server <args>
"""
flattened = " ".join(command)
return "-m ray.util.client.server" in flattened
|
SpecificServer
|
python
|
PrefectHQ__prefect
|
tests/runner/test_runner.py
|
{
"start": 3435,
"end": 3765
}
|
class ____:
@flow(on_cancellation=[instance_on_cancellation], log_prints=True)
def cancellable_flow(self, sleep_time: int = 100):
sleep(sleep_time)
def instance_on_crashed(flow, flow_run, state):
logger = flow_run_logger(flow_run, flow)
logger.info("Instance method flow crashed!")
|
ClassWithCancellableFlow
|
python
|
huggingface__transformers
|
src/transformers/models/emu3/modular_emu3.py
|
{
"start": 1504,
"end": 1612
}
|
class ____(LlamaAttention):
pass
# Has extra dropout which no other model in the library has
|
Emu3Attention
|
python
|
facebookresearch__faiss
|
tests/test_rowwise_minmax.py
|
{
"start": 277,
"end": 1545
}
|
class ____(unittest.TestCase):
def compare_train_vs_train_inplace(self, factory_key):
d = 96
nb = 1000
nq = 0
nt = 2000
xt, x, _ = get_dataset_2(d, nt, nb, nq)
assert x.size > 0
codec = faiss.index_factory(d, factory_key)
# use the regular .train()
codec.train(xt)
codes_train = codec.sa_encode(x)
decoded = codec.sa_decode(codes_train)
# use .train_inplace()
xt_cloned = np.copy(xt)
codec.train_inplace(xt_cloned)
codes_train_inplace = codec.sa_encode(x)
# compare .train and .train_inplace codes
n_diff = (codes_train != codes_train_inplace).sum()
self.assertEqual(n_diff, 0)
# make sure that the array used for .train_inplace got affected
n_diff_xt = (xt_cloned != xt).sum()
self.assertNotEqual(n_diff_xt, 0)
# make sure that the reconstruction error is not crazy
reconstruction_err = ((x - decoded) ** 2).sum()
self.assertLess(reconstruction_err, 0.6)
def test_fp32(self) -> None:
self.compare_train_vs_train_inplace("MinMax,SQ8")
def test_fp16(self) -> None:
self.compare_train_vs_train_inplace("MinMaxFP16,SQ8")
|
TestIndexRowwiseMinmax
|