| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
| python | getsentry__sentry | tests/sentry/notifications/api/endpoints/test_user_notification_settings_options_details.py | {"start": 363, "end": 515} |
class ____(APITestCase):
endpoint = "sentry-api-0-user-notification-options-details"
@control_silo_test
| UserNotificationSettingsOptionsDetailsBaseTest |
| python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {"start": 16162, "end": 17257} |
class ____(nn.Module):
"""
The `PatchTSMixer` layer that does all three kinds of mixing.
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.patch_mixer = PatchMixerBlock(config=config)
self.feature_mixer = FeatureMixerBlock(config=config)
self.mode = config.mode
if config.mode == "mix_channel":
self.channel_feature_mixer = PatchTSMixerChannelFeatureMixerBlock(config=config)
def forward(self, hidden: torch.Tensor):
"""
Args:
hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):
Input tensor to the layer.
Returns:
`torch.Tensor`: Transformed tensor.
"""
if self.mode == "mix_channel":
hidden = self.channel_feature_mixer(hidden)
hidden = self.patch_mixer(hidden)
hidden = self.feature_mixer(hidden) # hidden: (batch_size x num_patches x d_model)
return hidden
| PatchTSMixerLayer |
| python | astropy__astropy | astropy/table/tests/conftest.py | {"start": 1285, "end": 1343} |
class ____(pprint.TableFormatter):
pass
| MyTableFormatter |
| python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/make_csv_dataset_test.py | {"start": 1301, "end": 27096} |
class ____(test_base.DatasetTestBase, parameterized.TestCase):
def _make_csv_dataset(self, filenames, batch_size, num_epochs=1, **kwargs):
return readers.make_csv_dataset(
filenames, batch_size=batch_size, num_epochs=num_epochs, **kwargs)
def _setup_files(self,
inputs,
linebreak="\n",
compression_type=None,
encoding="utf-8"):
filenames = []
for i, ip in enumerate(inputs):
fn = os.path.join(self.get_temp_dir(), "temp_%d.csv" % i)
contents = linebreak.join(ip).encode(encoding)
if compression_type is None:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
filenames.append(fn)
return filenames
def _next_expected_batch(self, expected_output, expected_keys, batch_size,
num_epochs):
features = {k: [] for k in expected_keys}
for _ in range(num_epochs):
for values in expected_output:
for n, key in enumerate(expected_keys):
features[key].append(values[n])
if len(features[expected_keys[0]]) == batch_size:
yield features
features = {k: [] for k in expected_keys}
if features[expected_keys[0]]: # Leftover from the last batch
yield features
def _verify_output(
self,
dataset,
batch_size,
num_epochs,
label_name,
expected_output,
expected_keys,
):
get_next = self.getNext(dataset)
for expected_features in self._next_expected_batch(
expected_output,
expected_keys,
batch_size,
num_epochs,
):
actual_features = self.evaluate(get_next())
if label_name is not None:
expected_labels = expected_features.pop(label_name)
self.assertAllEqual(expected_labels, actual_features[1])
actual_features = actual_features[0]
for k in expected_features.keys():
# Compare features
self.assertAllEqual(expected_features[k], actual_features[k])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def _test_dataset(self,
inputs,
expected_output,
expected_keys,
batch_size=1,
num_epochs=1,
label_name=None,
encoding="utf-8",
**kwargs):
"""Checks that elements produced by CsvDataset match expected output."""
# Convert str type because py3 tf strings are bytestrings
filenames = self._setup_files(
inputs,
compression_type=kwargs.get("compression_type", None),
encoding=encoding)
dataset = self._make_csv_dataset(
filenames,
batch_size=batch_size,
num_epochs=num_epochs,
label_name=label_name,
encoding=encoding,
**kwargs)
self._verify_output(dataset, batch_size, num_epochs, label_name,
expected_output, expected_keys)
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
"""Tests making a CSV dataset with keys and defaults provided."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
@combinations.generate(test_base.default_test_combinations())
def testEncoding(self):
"""Tests making a CSV dataset with an encoding except for utf-8."""
record_defaults = [
constant_op.constant([], dtypes.string),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(2)]
inputs = [[",".join(x for x in column_names), "さる,猿", "とり,鳥"],
[",".join(x for x in column_names), "いぬ,犬", "ねこ,猫"]]
expected_output = [["さる".encode("shift-jis"), "猿".encode("shift-jis")],
["とり".encode("shift-jis"), "鳥".encode("shift-jis")],
["いぬ".encode("shift-jis"), "犬".encode("shift-jis")],
["ねこ".encode("shift-jis"), "猫".encode("shift-jis")]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
encoding="shift-jis",
)
@combinations.generate(test_base.default_test_combinations())
def testWithBatchSizeAndEpochs(self):
"""Tests making a CSV dataset with keys and defaults provided."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=3,
num_epochs=10,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
@combinations.generate(test_base.default_test_combinations())
def testWithCompressionType(self):
"""Tests `compression_type` argument."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
for compression_type in ("GZIP", "ZLIB"):
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
compression_type=compression_type,
)
@combinations.generate(test_base.default_test_combinations())
def testWithCompressionTypeAndNoColumnNames(self):
"""Tests `compression_type` argument."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"],
[
",".join(x for x in column_names), "10,11,12,13,14",
"15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
compression_type="GZIP",
)
with self.assertRaisesRegex(ValueError,
"`compression_type` ZLIB is not supported"):
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
compression_type="ZLIB",
)
@combinations.generate(test_base.default_test_combinations())
def testWithBadInputs(self):
"""Tests that exception is raised when input is malformed.
"""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
filenames = self._setup_files(inputs)
# Duplicate column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
filenames,
batch_size=1,
column_defaults=record_defaults,
label_name="col0",
column_names=column_names * 2)
# Label key not one of column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
filenames,
batch_size=1,
column_defaults=record_defaults,
label_name="not_a_real_label",
column_names=column_names)
@combinations.generate(test_base.default_test_combinations())
def testWithNoLabel(self):
"""Tests making a CSV dataset with no label provided."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
@combinations.generate(test_base.default_test_combinations())
def testWithNoHeader(self):
"""Tests that datasets can be created from CSV files with no header line.
"""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [["0,1,2,3,4", "5,6,7,8,9"], ["10,11,12,13,14", "15,16,17,18,19"]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=False,
column_defaults=record_defaults,
)
@combinations.generate(test_base.default_test_combinations())
def testWithTypes(self):
"""Tests that defaults can be a dtype instead of a Tensor for required vals.
"""
record_defaults = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.string
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x[0] for x in column_names), "0,1,2,3,4", "5,6,7,8,9"],
[
",".join(x[0] for x in column_names), "10,11,12,13,14",
"15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
@combinations.generate(test_base.default_test_combinations())
def testWithNoColNames(self):
"""Tests that datasets can be created when column names are not specified.
In that case, we should infer the column names from the header lines.
"""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
@combinations.generate(test_base.default_test_combinations())
def testWithTypeInferenceMismatch(self):
# Test that error is thrown when num fields doesn't match columns
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
filenames = self._setup_files(inputs)
with self.assertRaises(ValueError):
self._make_csv_dataset(
filenames,
column_names=column_names + ["extra_name"],
column_defaults=None,
batch_size=2,
num_epochs=10)
@combinations.generate(test_base.default_test_combinations())
def testWithTypeInference(self):
"""Tests that datasets can be created when no defaults are specified.
In that case, we should infer the types from the first N records.
"""
column_names = ["col%d" % i for i in range(5)]
str_int32_max = str(2**33)
inputs = [[
",".join(x for x in column_names),
"0,%s,2.0,3e50,rabbit" % str_int32_max
]]
expected_output = [[0, 2**33, 2.0, 3e50, b"rabbit"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
)
@combinations.generate(test_base.default_test_combinations())
def testWithTypeInferenceFallthrough(self):
"""Tests that datasets can be created when no defaults are specified.
Tests on a deliberately tricky file.
"""
column_names = ["col%d" % i for i in range(5)]
str_int32_max = str(2**33)
inputs = [[
",".join(x for x in column_names),
",,,,",
"0,0,0.0,0.0,0.0",
"0,%s,2.0,3e50,rabbit" % str_int32_max,
",,,,",
]]
expected_output = [[0, 0, 0, 0, b""], [0, 0, 0, 0, b"0.0"],
[0, 2**33, 2.0, 3e50, b"rabbit"], [0, 0, 0, 0, b""]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
)
@combinations.generate(test_base.default_test_combinations())
def testWithNAValuesAndFieldDelim(self):
"""Tests that datasets can be created from different delim and na_value."""
column_names = ["col%d" % i for i in range(5)]
inputs = [["0 1 2 3 4", "5 6 7 8 9"], ["10 11 12 13 14", "15 16 17 ? 19"]]
expected_output = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14],
[15, 16, 17, 0, 19]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=False,
na_value="?",
field_delim=" ",
)
@combinations.generate(test_base.default_test_combinations())
def testWithSelectCols(self):
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
str_int32_max = str(2**33)
inputs = [[
",".join(x for x in column_names),
"0,%s,2.0,3e50,rabbit" % str_int32_max
]]
expected_output = [[0, 2**33, 2.0, 3e50, b"rabbit"]]
select_cols = [1, 3, 4]
self._test_dataset(
inputs,
expected_output=[[x[i] for i in select_cols] for x in expected_output],
expected_keys=[column_names[i] for i in select_cols],
column_names=column_names,
column_defaults=[record_defaults[i] for i in select_cols],
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
select_columns=select_cols,
)
# Can still do inference without provided defaults
self._test_dataset(
inputs,
expected_output=[[x[i] for i in select_cols] for x in expected_output],
expected_keys=[column_names[i] for i in select_cols],
column_names=column_names,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
select_columns=select_cols,
)
# Can still do column name inference
self._test_dataset(
inputs,
expected_output=[[x[i] for i in select_cols] for x in expected_output],
expected_keys=[column_names[i] for i in select_cols],
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
select_columns=select_cols,
)
# Can specify column names instead of indices
self._test_dataset(
inputs,
expected_output=[[x[i] for i in select_cols] for x in expected_output],
expected_keys=[column_names[i] for i in select_cols],
column_names=column_names,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
select_columns=[column_names[i] for i in select_cols],
)
@combinations.generate(test_base.default_test_combinations())
def testWithSelectColsError(self):
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
str_int32_max = str(2**33)
inputs = [[
",".join(x for x in column_names),
"0,%s,2.0,3e50,rabbit" % str_int32_max
]]
select_cols = [1, 3, 4]
filenames = self._setup_files(inputs)
with self.assertRaises(ValueError):
# Mismatch in number of defaults and number of columns selected,
# should raise an error
self._make_csv_dataset(
filenames,
batch_size=1,
column_defaults=record_defaults,
column_names=column_names,
select_columns=select_cols)
with self.assertRaises(ValueError):
# Invalid column name should raise an error
self._make_csv_dataset(
filenames,
batch_size=1,
column_defaults=[[0]],
column_names=column_names,
label_name=None,
select_columns=["invalid_col_name"])
@combinations.generate(test_base.default_test_combinations())
def testWithShuffle(self):
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
def str_series(st):
return ",".join(str(i) for i in range(st, st + 5))
column_names = ["col%d" % i for i in range(5)]
inputs = [
[",".join(x for x in column_names)
] + [str_series(5 * i) for i in range(15)],
[",".join(x for x in column_names)] +
[str_series(5 * i) for i in range(15, 20)],
]
filenames = self._setup_files(inputs)
total_records = 20
for batch_size in [1, 2]:
# Test that shuffling with the same seed produces the same result
dataset1 = self._make_csv_dataset(
filenames,
column_defaults=record_defaults,
column_names=column_names,
batch_size=batch_size,
header=True,
shuffle=True,
shuffle_seed=5,
num_epochs=2,
)
dataset2 = self._make_csv_dataset(
filenames,
column_defaults=record_defaults,
column_names=column_names,
batch_size=batch_size,
header=True,
shuffle=True,
shuffle_seed=5,
num_epochs=2,
)
next1 = self.getNext(dataset1)
next2 = self.getNext(dataset2)
for _ in range(total_records // batch_size):
batch1 = nest.flatten(self.evaluate(next1()))
batch2 = nest.flatten(self.evaluate(next2()))
for i in range(len(batch1)):
self.assertAllEqual(batch1[i], batch2[i])
# Test that shuffling with a different seed produces different results
dataset1 = self._make_csv_dataset(
filenames,
column_defaults=record_defaults,
column_names=column_names,
batch_size=batch_size,
header=True,
shuffle=True,
shuffle_seed=5,
num_epochs=2,
)
dataset2 = self._make_csv_dataset(
filenames,
column_defaults=record_defaults,
column_names=column_names,
batch_size=batch_size,
header=True,
shuffle=True,
shuffle_seed=6,
num_epochs=2,
)
next1 = self.getNext(dataset1)
next2 = self.getNext(dataset2)
all_equal = False
for _ in range(total_records // batch_size):
batch1 = nest.flatten(self.evaluate(next1()))
batch2 = nest.flatten(self.evaluate(next2()))
for i in range(len(batch1)):
all_equal = all_equal and np.array_equal(batch1[i], batch2[i])
self.assertFalse(all_equal)
@combinations.generate(test_base.default_test_combinations())
def testIndefiniteRepeatShapeInference(self):
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
filenames = self._setup_files(inputs)
dataset = self._make_csv_dataset(filenames, batch_size=32, num_epochs=None)
for shape in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)):
self.assertEqual(32, shape[0])
@combinations.generate(test_base.default_test_combinations())
def testFieldOrder(self):
data = [[
"1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19",
"1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19"
]]
file_path = self._setup_files(data)
ds = readers.make_csv_dataset(
file_path, batch_size=1, shuffle=False, num_epochs=1)
nxt = self.getNext(ds)
result = list(self.evaluate(nxt()).values())
self.assertEqual(result, sorted(result))
if __name__ == "__main__":
test.main()
| MakeCsvDatasetTest |
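The test class above exercises the public `tf.data.experimental.make_csv_dataset` API. Below is a minimal, hedged usage sketch; the temporary CSV file and its column values are hypothetical, and TensorFlow 2.x eager execution is assumed.

```python
# Hedged sketch of tf.data.experimental.make_csv_dataset; file and values are made up.
import tensorflow as tf

# Write a tiny CSV with a header row, mirroring the tests' _setup_files helper.
with open("/tmp/example.csv", "w") as f:
    f.write("col0,col1,col2\n0,1,2\n3,4,5\n")

dataset = tf.data.experimental.make_csv_dataset(
    "/tmp/example.csv",
    batch_size=2,
    label_name="col0",  # popped out of the feature dict, as in the tests above
    num_epochs=1,
    shuffle=False,
)

for features, labels in dataset.take(1):
    print({k: v.numpy() for k, v in features.items()}, labels.numpy())
```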
| python | pypa__warehouse | warehouse/admin/views/organizations.py | {"start": 42120, "end": 47500} |
class ____(wtforms.Form):
issuer_type = wtforms.SelectField(
choices=[(issuer.value, issuer.name) for issuer in OIDCIssuerType],
coerce=OIDCIssuerType,
validators=[
wtforms.validators.InputRequired(message="Select an issuer type"),
],
)
issuer_url = wtforms.URLField(
validators=[
wtforms.validators.InputRequired(message="Specify issuer URL"),
wtforms.validators.Length(max=400),
wtforms.validators.Regexp(
r"^https://",
message="Issuer URL must start with https://",
),
],
)
@view_config(
route_name="admin.organization.add_oidc_issuer",
permission=Permissions.AdminOrganizationsWrite,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def add_oidc_issuer(request):
organization_service = request.find_service(IOrganizationService, context=None)
user_service = request.find_service(IUserService)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
form = OrganizationOIDCIssuerForm(request.POST)
if not form.validate():
for field, errors in form.errors.items():
for error in errors:
request.session.flash(f"{field}: {error}", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Check if this issuer already exists for this organization
existing_issuer = (
request.db.query(OrganizationOIDCIssuer)
.filter(
OrganizationOIDCIssuer.organization_id == organization.id,
OrganizationOIDCIssuer.issuer_type == form.issuer_type.data,
OrganizationOIDCIssuer.issuer_url == form.issuer_url.data,
)
.first()
)
if existing_issuer:
request.session.flash(
f"Issuer '{form.issuer_url.data}' already exists for organization "
f"'{organization.name}'",
queue="error",
)
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Create OIDC issuer
oidc_issuer = OrganizationOIDCIssuer(
organization_id=organization.id,
issuer_type=form.issuer_type.data,
issuer_url=form.issuer_url.data,
created_by_id=request.user.id,
)
request.db.add(oidc_issuer)
# Record the event
organization.record_event(
request=request,
tag=EventTag.Organization.OIDCPublisherAdded,
additional={
"issuer_type": form.issuer_type.data.value,
"issuer_url": form.issuer_url.data,
"submitted_by_user_id": str(user_service.get_admin_user().id),
"redact_ip": True,
},
)
request.session.flash(
f"OIDC issuer '{form.issuer_url.data}' ({form.issuer_type.data.value}) "
f"added to '{organization.name}'",
queue="success",
)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization.delete_oidc_issuer",
permission=Permissions.AdminOrganizationsWrite,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def delete_oidc_issuer(request):
organization_service = request.find_service(IOrganizationService, context=None)
user_service = request.find_service(IUserService)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
issuer_id = request.matchdict.get("issuer_id")
issuer = request.db.get(OrganizationOIDCIssuer, issuer_id)
if not issuer or issuer.organization_id != organization.id:
request.session.flash("This issuer does not exist", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
confirm = request.POST.get("confirm")
if not confirm or confirm != issuer.issuer_url:
request.session.flash("Confirm the request", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Record the event before deleting
organization.record_event(
request=request,
tag=EventTag.Organization.OIDCPublisherRemoved,
additional={
"issuer_type": issuer.issuer_type.value,
"issuer_url": issuer.issuer_url,
"deleted_by_user_id": str(user_service.get_admin_user().id),
"redact_ip": True,
},
)
request.session.flash(
f"OIDC issuer '{issuer.issuer_url}' removed from '{organization.name}'",
queue="success",
)
request.db.delete(issuer)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
| OrganizationOIDCIssuerForm |
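For orientation, a hedged, standalone sketch of the wtforms validation pattern the form above relies on; the simplified `IssuerURLForm` here is hypothetical, not Warehouse's actual class.

```python
# Hypothetical, trimmed-down form demonstrating the same https:// Regexp check.
import wtforms


class IssuerURLForm(wtforms.Form):
    issuer_url = wtforms.URLField(
        validators=[
            wtforms.validators.Regexp(
                r"^https://", message="Issuer URL must start with https://"
            ),
        ],
    )


form = IssuerURLForm(issuer_url="http://example.com")  # field data via kwargs
print(form.validate())  # False
print(form.errors)      # {'issuer_url': ['Issuer URL must start with https://']}
```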
| python | huggingface__transformers | src/transformers/models/llava_next/configuration_llava_next.py | {"start": 831, "end": 6776} |
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LlavaNextForConditionalGeneration`]. It is used to instantiate an
Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [llava-hf/llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf)
model.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 32000):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
Example:
```python
>>> from transformers import LlavaNextForConditionalGeneration, LlavaNextConfig, CLIPVisionConfig, LlamaConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = CLIPVisionConfig()
>>> # Initializing a Llama config
>>> text_config = LlamaConfig()
>>> # Initializing a Llava-Next llava-hf/llava-v1.6-mistral-7b-hf style configuration
>>> configuration = LlavaNextConfig(vision_config, text_config)
>>> # Initializing a model from the llava-hf/llava-v1.6-mistral-7b-hf style configuration
>>> model = LlavaNextForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llava_next"
attribute_map = {
"image_token_id": "image_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
image_token_index=32000,
projector_hidden_act="gelu",
vision_feature_select_strategy="default",
vision_feature_layer=-2,
image_grid_pinpoints=None,
tie_word_embeddings=False,
image_seq_length=576,
multimodal_projector_bias=True,
**kwargs,
):
self.image_token_index = image_token_index
self.projector_hidden_act = projector_hidden_act
self.image_seq_length = image_seq_length
self.multimodal_projector_bias = multimodal_projector_bias
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
image_grid_pinpoints = (
image_grid_pinpoints
if image_grid_pinpoints is not None
else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
)
self.image_grid_pinpoints = image_grid_pinpoints
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "clip_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["clip_vision_model"](
intermediate_size=4096,
hidden_size=1024,
patch_size=14,
image_size=336,
num_hidden_layers=24,
num_attention_heads=16,
vocab_size=32000,
projection_dim=768,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "llama")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["llama"]()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
__all__ = ["LlavaNextConfig"]
| LlavaNextConfig |
| python | PyCQA__pylint | pylint/checkers/non_ascii_names.py | {"start": 1077, "end": 7243} |
class ____(base_checker.BaseChecker):
"""A strict name checker only allowing ASCII.
Note: This check only checks Names, so it ignores the content of
docstrings and comments!
"""
msgs = {
"C2401": (
'%s name "%s" contains a non-ASCII character, consider renaming it.',
"non-ascii-name",
NON_ASCII_HELP,
{"old_names": [("C0144", "old-non-ascii-name")]},
),
# First %s will always be "file"
"W2402": (
'%s name "%s" contains a non-ASCII character.',
"non-ascii-file-name",
(
# Some = PyCharm at the time of writing didn't display the non_ascii_name_loł
# files. That's also why this is a warning and not only a convention!
"Under python 3.5, PEP 3131 allows non-ascii identifiers, but not non-ascii file names."
"Since Python 3.5, even though Python supports UTF-8 files, some editors or tools "
"don't."
),
),
# First %s will always be "module"
"C2403": (
'%s name "%s" contains a non-ASCII character, use an ASCII-only alias for import.',
"non-ascii-module-import",
NON_ASCII_HELP,
),
}
name = "NonASCII-Checker"
def _check_name(self, node_type: str, name: str | None, node: nodes.NodeNG) -> None:
"""Check whether a name is using non-ASCII characters."""
if name is None:
# For some nodes i.e. *kwargs from a dict, the name will be empty
return
if not str(name).isascii():
type_label = constants.HUMAN_READABLE_TYPES[node_type]
args = (type_label.capitalize(), name)
# Some node types have customized messages
match node_type:
case "file":
msg = "non-ascii-file-name"
case "module":
msg = "non-ascii-module-import"
case _:
msg = "non-ascii-name"
self.add_message(msg, node=node, args=args, confidence=interfaces.HIGH)
@utils.only_required_for_messages("non-ascii-name", "non-ascii-file-name")
def visit_module(self, node: nodes.Module) -> None:
self._check_name("file", node.name.split(".")[-1], node)
@utils.only_required_for_messages("non-ascii-name")
def visit_functiondef(
self, node: nodes.FunctionDef | nodes.AsyncFunctionDef
) -> None:
self._check_name("function", node.name, node)
# Check argument names
arguments = node.args
# Check position only arguments
if arguments.posonlyargs:
for pos_only_arg in arguments.posonlyargs:
self._check_name("argument", pos_only_arg.name, pos_only_arg)
# Check "normal" arguments
if arguments.args:
for arg in arguments.args:
self._check_name("argument", arg.name, arg)
# Check key word only arguments
if arguments.kwonlyargs:
for kwarg in arguments.kwonlyargs:
self._check_name("argument", kwarg.name, kwarg)
visit_asyncfunctiondef = visit_functiondef
@utils.only_required_for_messages("non-ascii-name")
def visit_global(self, node: nodes.Global) -> None:
for name in node.names:
self._check_name("const", name, node)
@utils.only_required_for_messages("non-ascii-name")
def visit_assignname(self, node: nodes.AssignName) -> None:
"""Check module level assigned names."""
# The NameChecker from which this Checker originates knows a lot of different
# versions of variables, i.e. constants, inline variables etc.
# To simplify we use only `variable` here, as we don't need to apply different
# rules to different types of variables.
match frame := node.frame():
case nodes.FunctionDef():
if node.parent in frame.body:
# Only perform the check if the assignment was done in within the body
# of the function (and not the function parameter definition
# (will be handled in visit_functiondef)
# or within a decorator (handled in visit_call)
self._check_name("variable", node.name, node)
case nodes.ClassDef():
self._check_name("attr", node.name, node)
case _:
# Possibilities here:
# - isinstance(node.assign_type(), nodes.Comprehension) == inlinevar
# - isinstance(frame, nodes.Module) == variable (constant?)
# - some other kind of assignment missed but still most likely a variable
self._check_name("variable", node.name, node)
@utils.only_required_for_messages("non-ascii-name")
def visit_classdef(self, node: nodes.ClassDef) -> None:
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
def _check_module_import(self, node: nodes.ImportFrom | nodes.Import) -> None:
for module_name, alias in node.names:
name = alias or module_name
self._check_name("module", name, node)
@utils.only_required_for_messages("non-ascii-name", "non-ascii-module-import")
def visit_import(self, node: nodes.Import) -> None:
self._check_module_import(node)
@utils.only_required_for_messages("non-ascii-name", "non-ascii-module-import")
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
self._check_module_import(node)
@utils.only_required_for_messages("non-ascii-name")
def visit_call(self, node: nodes.Call) -> None:
"""Check if the used keyword args are correct."""
for keyword in node.keywords:
self._check_name("argument", keyword.arg, keyword)
def register(linter: lint.PyLinter) -> None:
linter.register_checker(NonAsciiNameChecker(linter))
| NonAsciiNameChecker |
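To show what the checker above reports, a small hedged example; the module and names are made up, and linting it should produce C2401 (non-ascii-name) messages.

```python
# Hypothetical module: pylint with the checker above flags these identifiers.
łength = 10  # C2401: variable name contains a non-ASCII character


def scale(fäctor=2):  # C2401: argument name contains a non-ASCII character
    return łength * fäctor


print(scale())
```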
| python | tensorflow__tensorflow | tensorflow/python/ops/unconnected_gradients.py | {"start": 838, "end": 1677} |
class ____(enum.Enum):
"""Controls how gradient computation behaves when y does not depend on x.
The gradient of y with respect to x can be zero in two different ways: there
could be no differentiable path in the graph connecting x to y (and so we can
statically prove that the gradient is zero) or it could be that runtime values
of tensors in a particular execution lead to a gradient of zero (say, if a
relu unit happens to not be activated). To allow you to distinguish between
these two cases you can choose what value gets returned for the gradient when
there is no path in the graph from x to y:
* `NONE`: Indicates that [None] will be returned if there is no path from x
to y
* `ZERO`: Indicates that a zero tensor will be returned in the shape of x.
"""
NONE = "none"
ZERO = "zero"
| UnconnectedGradients |
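The enum above is exposed as `tf.UnconnectedGradients`; a minimal sketch of the two behaviors it selects, assuming TensorFlow 2.x eager execution.

```python
import tensorflow as tf

x = tf.constant(1.0)
with tf.GradientTape(persistent=True) as tape:
    tape.watch(x)
    y = tf.constant(2.0) * 3.0  # y has no differentiable path back to x

# Default (UnconnectedGradients.NONE): the gradient is None.
print(tape.gradient(y, x))

# UnconnectedGradients.ZERO: a zero tensor shaped like x is returned instead.
print(tape.gradient(y, x, unconnected_gradients=tf.UnconnectedGradients.ZERO))
```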
| python | dagster-io__dagster | python_modules/libraries/dagster-postgres/dagster_postgres/schedule_storage/schedule_storage.py | {"start": 1377, "end": 8826} |
class ____(SqlScheduleStorage, ConfigurableClass):
"""Postgres-backed run storage.
Users should not directly instantiate this class; it is instantiated by internal machinery when
``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.
To use Postgres for all of the components of your instance storage, you can add the following
block to your ``dagster.yaml``:
.. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml
:caption: dagster.yaml
:lines: 1-8
:language: YAML
If you are configuring the different storage components separately and are specifically
configuring your schedule storage to use Postgres, you can add a block such as the following
to your ``dagster.yaml``:
.. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml
:caption: dagster.yaml
:lines: 23-32
:language: YAML
Note that the fields in this config are :py:class:`~dagster.StringSource` and
:py:class:`~dagster.IntSource` and can be configured from environment variables.
"""
def __init__(
self,
postgres_url: str,
should_autocreate_tables: bool = True,
inst_data: Optional[ConfigurableClassData] = None,
):
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
self.postgres_url = postgres_url
self.should_autocreate_tables = check.bool_param(
should_autocreate_tables, "should_autocreate_tables"
)
# Default to not holding any connections open to prevent accumulating connections per DagsterInstance
self._engine = create_engine(
self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool
)
# Stamp and create tables if the main table does not exist (we can't check alembic
# revision because alembic config may be shared with other storage classes)
if self.should_autocreate_tables:
table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())
missing_main_table = "schedules" not in table_names and "jobs" not in table_names
if missing_main_table:
retry_pg_creation_fn(self._init_db)
super().__init__()
def _init_db(self) -> None:
with self.connect() as conn:
with conn.begin():
ScheduleStorageSqlMetadata.create_all(conn)
stamp_alembic_rev(pg_alembic_config(__file__), conn)
# mark all the data migrations as applied
self.migrate()
self.optimize()
def optimize_for_webserver(
self, statement_timeout: int, pool_recycle: int, max_overflow: int
) -> None:
# When running in dagster-webserver, hold an open connection and set statement_timeout
kwargs = {
"isolation_level": "AUTOCOMMIT",
"pool_size": 1,
"pool_recycle": pool_recycle,
"max_overflow": max_overflow,
}
existing_options = self._engine.url.query.get("options")
if existing_options:
kwargs["connect_args"] = {"options": existing_options}
self._engine = create_engine(self.postgres_url, **kwargs)
event.listen(
self._engine,
"connect",
lambda connection, _: set_pg_statement_timeout(connection, statement_timeout),
)
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> UserConfigSchema:
return pg_config()
@classmethod
def from_config_value( # pyright: ignore[reportIncompatibleMethodOverride]
cls, inst_data: Optional[ConfigurableClassData], config_value: PostgresStorageConfig
) -> "PostgresScheduleStorage":
return PostgresScheduleStorage(
inst_data=inst_data,
postgres_url=pg_url_from_config(config_value),
should_autocreate_tables=config_value.get("should_autocreate_tables", True),
)
@staticmethod
def create_clean_storage(
postgres_url: str, should_autocreate_tables: bool = True
) -> "PostgresScheduleStorage":
engine = create_engine(
postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool
)
try:
ScheduleStorageSqlMetadata.drop_all(engine)
finally:
engine.dispose()
return PostgresScheduleStorage(postgres_url, should_autocreate_tables)
def connect(self, run_id: Optional[str] = None) -> ContextManager[Connection]:
return create_pg_connection(self._engine)
def upgrade(self) -> None:
alembic_config = pg_alembic_config(__file__)
with self.connect() as conn:
run_alembic_upgrade(alembic_config, conn)
def _add_or_update_instigators_table(self, conn: Connection, state: InstigatorState) -> None:
selector_id = state.selector_id
conn.execute(
db_dialects.postgresql.insert(InstigatorsTable)
.values(
selector_id=selector_id,
repository_selector_id=state.repository_selector_id,
status=state.status.value,
instigator_type=state.instigator_type.value,
instigator_body=serialize_value(state),
)
.on_conflict_do_update(
index_elements=[InstigatorsTable.c.selector_id],
set_={
"status": state.status.value,
"instigator_type": state.instigator_type.value,
"instigator_body": serialize_value(state),
"update_timestamp": get_current_datetime(),
},
)
)
def add_auto_materialize_asset_evaluations(
self,
evaluation_id: int,
asset_evaluations: Sequence[AutomationConditionEvaluationWithRunIds[EntityKey]],
):
if not asset_evaluations:
return
insert_stmt = db_dialects.postgresql.insert(AssetDaemonAssetEvaluationsTable).values(
[
{
"evaluation_id": evaluation_id,
"asset_key": evaluation.key.to_db_string(),
"asset_evaluation_body": serialize_value(evaluation),
"num_requested": evaluation.num_requested,
}
for evaluation in asset_evaluations
]
)
upsert_stmt = insert_stmt.on_conflict_do_update(
index_elements=[
AssetDaemonAssetEvaluationsTable.c.evaluation_id,
AssetDaemonAssetEvaluationsTable.c.asset_key,
],
set_={
"asset_evaluation_body": insert_stmt.excluded.asset_evaluation_body,
"num_requested": insert_stmt.excluded.num_requested,
},
)
with self.connect() as conn:
conn.execute(upsert_stmt)
def alembic_version(self) -> AlembicVersion:
alembic_config = pg_alembic_config(__file__)
with self.connect() as conn:
return check_alembic_revision(alembic_config, conn)
| PostgresScheduleStorage |
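A hedged sketch of constructing this storage directly against a throwaway database; the import location and connection URL are assumptions, and in normal use the class is wired up from `dagster.yaml` as the docstring describes.

```python
# Assumed import path and a hypothetical URL; create_clean_storage drops and
# recreates the schedule tables, so point it only at a disposable database.
from dagster_postgres import PostgresScheduleStorage

storage = PostgresScheduleStorage.create_clean_storage(
    "postgresql://user:password@localhost:5432/dagster_test"
)
print(storage.alembic_version())
```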
| python | django__django | tests/expressions/tests.py | {"start": 54911, "end": 58263} |
class ____(TestCase):
def test_F_reuse(self):
f = F("id")
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith"),
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
r"""
Special characters (e.g. %, _ and \) stored in database are
properly escaped when using a pattern lookup with an expression
refs #16731
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="Claude")
john = Employee.objects.create(firstname="Johnny", lastname="John")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%Joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__contains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__startswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__endswith=F("lastname")),
[claude],
)
def test_insensitive_patterns_escape(self):
r"""
Special characters (e.g. %, _ and \) stored in database are
properly escaped when using a case insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="claude")
john = Employee.objects.create(firstname="Johnny", lastname="john")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__icontains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__istartswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__iendswith=F("lastname")),
[claude],
)
@isolate_apps("expressions")
| ExpressionsTests |
| python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/bigquery.py | {"start": 61920, "end": 62828} |
class ____:
"""
BigQuery connection.
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs) -> None:
self._args = args
self._kwargs = kwargs
def close(self) -> None:
"""Do nothing. Not needed for BigQueryConnection."""
def commit(self) -> None:
"""Do nothing. BigQueryConnection does not support transactions."""
def cursor(self) -> BigQueryCursor:
"""Return a new :py:class:`Cursor` object using the connection."""
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self) -> NoReturn:
"""Do nothing. BigQueryConnection does not support transactions."""
raise NotImplementedError("BigQueryConnection does not have transactions")
| BigQueryConnection |
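For context, a hedged sketch of how this connection/cursor pair is usually reached through the provider's hook; it assumes a configured `google_cloud_default` Airflow connection with valid GCP credentials, so it will not run without them.

```python
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook

hook = BigQueryHook(gcp_conn_id="google_cloud_default")  # assumes credentials
conn = hook.get_conn()  # BigQueryConnection: a stateless cursor factory
cursor = conn.cursor()  # the BigQueryCursor does the real work
cursor.execute("SELECT 1")
print(cursor.fetchone())
conn.close()  # no-op, kept for DB-API compatibility
```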
| python | tensorflow__tensorflow | tensorflow/python/debug/cli/debugger_cli_common_test.py | {"start": 10488, "end": 22465} |
class ____(test_util.TensorFlowTestCase):
def setUp(self):
self._intentional_error_msg = "Intentionally raised exception"
def _noop_handler(self, argv, screen_info=None):
# A handler that does nothing other than returning "Done."
return debugger_cli_common.RichTextLines(["Done."])
def _handler_raising_exception(self, argv, screen_info=None):
# A handler that intentionally raises an exception.
raise RuntimeError(self._intentional_error_msg)
def _handler_returning_wrong_type(self, argv, screen_info=None):
# A handler that returns a wrong type, instead of the correct type
# (RichTextLines).
return "Hello"
def _echo_screen_cols(self, argv, screen_info=None):
# A handler that uses screen_info.
return debugger_cli_common.RichTextLines(
["cols = %d" % screen_info["cols"]])
def _exiting_handler(self, argv, screen_info=None):
"""A handler that exits with an exit token."""
if argv:
exit_token = argv[0]
else:
exit_token = None
raise debugger_cli_common.CommandLineExit(exit_token=exit_token)
def testRegisterEmptyCommandPrefix(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Attempt to register an empty-string as a command prefix should trigger
# an exception.
with self.assertRaisesRegex(ValueError, "Empty command prefix"):
registry.register_command_handler("", self._noop_handler, "")
def testRegisterAndInvokeHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
self.assertTrue(registry.is_registered("noop"))
self.assertFalse(registry.is_registered("beep"))
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
# Attempt to invoke an unregistered command prefix should trigger an
# exception.
with self.assertRaisesRegex(ValueError, "No handler is registered"):
registry.dispatch_command("beep", [])
# Empty command prefix should trigger an exception.
with self.assertRaisesRegex(ValueError, "Prefix is empty"):
registry.dispatch_command("", [])
def testExitingHandler(self):
"""Test that exit exception is correctly raised."""
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("exit", self._exiting_handler, "")
self.assertTrue(registry.is_registered("exit"))
exit_token = None
try:
registry.dispatch_command("exit", ["foo"])
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
self.assertEqual("foo", exit_token)
def testInvokeHandlerWithScreenInfo(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Register and invoke a command handler that uses screen_info.
registry.register_command_handler("cols", self._echo_screen_cols, "")
cmd_output = registry.dispatch_command(
"cols", [], screen_info={"cols": 100})
self.assertEqual(["cols = 100"], cmd_output.lines)
def testRegisterAndInvokeHandlerWithAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n", "NOOP"])
# is_registered() should work for full prefix and aliases.
self.assertTrue(registry.is_registered("noop"))
self.assertTrue(registry.is_registered("n"))
self.assertTrue(registry.is_registered("NOOP"))
cmd_output = registry.dispatch_command("n", [])
self.assertEqual(["Done."], cmd_output.lines)
cmd_output = registry.dispatch_command("NOOP", [])
self.assertEqual(["Done."], cmd_output.lines)
def testHandlerWithWrongReturnType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("wrong_return",
self._handler_returning_wrong_type, "")
# If the command handler fails to return a RichTextLines instance, an error
# should be triggered.
with self.assertRaisesRegex(
ValueError,
"Return value from command handler.*is not None or a RichTextLines "
"instance"):
registry.dispatch_command("wrong_return", [])
def testRegisterDuplicateHandlers(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
# Registering the same command prefix more than once should trigger an
# exception.
with self.assertRaisesRegex(
ValueError, "A handler is already registered for command prefix"):
registry.register_command_handler("noop", self._noop_handler, "")
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
def testRegisterDuplicateAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n"])
# Clash with existing alias.
with self.assertRaisesRegex(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["n"])
# The name clash should have prevent the handler from being registered.
self.assertFalse(registry.is_registered("cols"))
# Aliases can also clash with command prefixes.
with self.assertRaisesRegex(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["noop"])
self.assertFalse(registry.is_registered("cols"))
def testDispatchHandlerRaisingException(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("raise_exception",
self._handler_raising_exception, "")
# The registry should catch and wrap exceptions that occur during command
# handling.
cmd_output = registry.dispatch_command("raise_exception", [])
# The error output contains a stack trace.
# So the line count should be >= 2.
self.assertGreater(len(cmd_output.lines), 2)
self.assertTrue(cmd_output.lines[0].startswith(
"Error occurred during handling of command"))
self.assertTrue(cmd_output.lines[1].endswith(self._intentional_error_msg))
def testRegisterNonCallableHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Attempt to register a non-callable handler should fail.
with self.assertRaisesRegex(ValueError, "handler is not callable"):
registry.register_command_handler("non_callable", 1, "")
def testRegisterHandlerWithInvalidHelpInfoType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
with self.assertRaisesRegex(ValueError, "help_info is not a str"):
registry.register_command_handler("noop", self._noop_handler, ["foo"])
def testGetHelpFull(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
help_lines = registry.get_help().lines
# The help info should list commands in alphabetically sorted order,
# regardless of order in which the commands are registered.
self.assertEqual("cols", help_lines[0])
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
self.assertFalse(help_lines[4])
self.assertFalse(help_lines[5])
# The default help command should appear in the help output.
self.assertEqual("help", help_lines[6])
self.assertEqual("noop", help_lines[12])
self.assertTrue(help_lines[13].endswith("Aliases: n, NOOP"))
self.assertFalse(help_lines[14])
self.assertTrue(help_lines[15].endswith("No operation."))
self.assertTrue(help_lines[16].endswith("I.e., do nothing."))
def testGetHelpSingleCommand(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help info for one of the two commands, using full prefix.
help_lines = registry.get_help("cols").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for one of the two commands, using alias.
help_lines = registry.get_help("c").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for a nonexistent command.
help_lines = registry.get_help("foo").lines
self.assertEqual("Invalid command prefix: \"foo\"", help_lines[0])
def testHelpCommandWithoutIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help for all commands.
output = registry.dispatch_command("help", [])
self.assertEqual(["cols", " Aliases: c", "",
" Show screen width in number of columns.", "", "",
"help", " Aliases: h", "", " Print this help message.",
"", "", "noop", " Aliases: n, NOOP", "",
" No operation.", " I.e., do nothing.", "", "",
"version", " Aliases: ver", "",
" Print the versions of TensorFlow and its key "
"dependencies.", "", ""],
output.lines)
# Get help for one specific command prefix.
output = registry.dispatch_command("help", ["noop"])
self.assertEqual(["noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing."], output.lines)
# Get help for a nonexistent command prefix.
output = registry.dispatch_command("help", ["foo"])
self.assertEqual(["Invalid command prefix: \"foo\""], output.lines)
def testHelpCommandWithIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
help_intro = debugger_cli_common.RichTextLines(
["Introductory comments.", ""])
registry.set_help_intro(help_intro)
output = registry.dispatch_command("help", [])
self.assertEqual(help_intro.lines + [
"help", " Aliases: h", "", " Print this help message.", "", "",
"noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing.", "", "",
"version", " Aliases: ver", "",
" Print the versions of TensorFlow and its key dependencies.", "", ""
], output.lines)
|
CommandHandlerRegistryTest
|
python
|
miyuchina__mistletoe
|
mistletoe/block_token.py
|
{
"start": 12241,
"end": 13917
}
|
class ____(BlockToken):
"""
Indented code block token.
This is a leaf block token with a single child of type span_token.RawText.
Attributes:
language (str): always the empty string.
"""
repr_attributes = BlockToken.repr_attributes + ("language",)
def __init__(self, lines):
self.language = ''
self.children = (span_token.RawText(''.join(lines).strip('\n') + '\n'),)
@property
def content(self):
"""Returns the code block content."""
return self.children[0].content
@staticmethod
def start(line):
        return line.replace('\t', '    ', 1).startswith('    ')
@classmethod
def read(cls, lines):
line_buffer = []
trailing_blanks = 0
for line in lines:
if line.strip() == '':
line_buffer.append(line.lstrip(' ') if len(line) < 5 else line[4:])
trailing_blanks = trailing_blanks + 1 if line == '\n' else 0
continue
            if not line.replace('\t', '    ', 1).startswith('    '):
lines.backstep()
break
line_buffer.append(cls.strip(line))
trailing_blanks = 0
for _ in range(trailing_blanks):
line_buffer.pop()
lines.backstep()
return line_buffer
@staticmethod
def strip(string):
count = 0
for i, c in enumerate(string):
if c == '\t':
return string[i + 1:]
elif c == ' ':
count += 1
else:
break
if count == 4:
return string[i + 1:]
return string
|
BlockCode
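A minimal standalone sketch of the indentation rule the BlockCode.strip method above implements: one leading tab, or at most four leading spaces, is removed and anything beyond that is kept. The helper name strip_indent is hypothetical and independent of mistletoe.

# Hedged re-implementation for illustration; mirrors strip() above without
# depending on mistletoe.
def strip_indent(line: str) -> str:
    count = 0
    for i, ch in enumerate(line):
        if ch == '\t':
            return line[i + 1:]          # a single tab counts as the full indent
        elif ch == ' ':
            count += 1
        else:
            break
        if count == 4:
            return line[i + 1:]          # at most four leading spaces are removed
    return line

assert strip_indent('    code\n') == 'code\n'
assert strip_indent('\tcode\n') == 'code\n'
assert strip_indent('  text\n') == '  text\n'   # under four spaces: unchanged
assert strip_indent('     x\n') == ' x\n'       # only four spaces are stripped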
|
python
|
pytorch__pytorch
|
torch/nn/modules/container.py
|
{
"start": 1397,
"end": 11326
}
|
class ____(Module):
r"""A sequential container.
Modules will be added to it in the order they are passed in the
constructor. Alternatively, an ``OrderedDict`` of modules can be
passed in. The ``forward()`` method of ``Sequential`` accepts any
input and forwards it to the first module it contains. It then
"chains" outputs to inputs sequentially for each subsequent module,
finally returning the output of the last module.
The value a ``Sequential`` provides over manually calling a sequence
of modules is that it allows treating the whole container as a
single module, such that performing a transformation on the
``Sequential`` applies to each of the modules it stores (which are
each a registered submodule of the ``Sequential``).
What's the difference between a ``Sequential`` and a
:class:`torch.nn.ModuleList`? A ``ModuleList`` is exactly what it
sounds like--a list for storing ``Module`` s! On the other hand,
the layers in a ``Sequential`` are connected in a cascading way.
Example::
# Using Sequential to create a small model. When `model` is run,
# input will first be passed to `Conv2d(1,20,5)`. The output of
# `Conv2d(1,20,5)` will be used as the input to the first
# `ReLU`; the output of the first `ReLU` will become the input
# for `Conv2d(20,64,5)`. Finally, the output of
# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
model = nn.Sequential(
nn.Conv2d(1, 20, 5), nn.ReLU(), nn.Conv2d(20, 64, 5), nn.ReLU()
)
# Using Sequential with OrderedDict. This is functionally the
# same as the above code
model = nn.Sequential(
OrderedDict(
[
("conv1", nn.Conv2d(1, 20, 5)),
("relu1", nn.ReLU()),
("conv2", nn.Conv2d(20, 64, 5)),
("relu2", nn.ReLU()),
]
)
)
"""
_modules: dict[str, Module] # type: ignore[assignment]
@overload
def __init__(self, *args: Module) -> None: ...
@overload
# pyrefly: ignore [inconsistent-overload]
def __init__(self, arg: OrderedDict[str, Module]) -> None: ...
def __init__(self, *args):
super().__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def _get_item_by_idx(self, iterator: Iterable[_V], idx: int) -> _V:
"""Get the idx-th item of the iterator."""
size = len(self)
idx = operator.index(idx)
if not -size <= idx < size:
raise IndexError(f"index {idx} is out of range")
idx %= size
return next(islice(iterator, idx, None))
@_copy_to_script_wrapper
def __getitem__(self, idx: slice | int) -> Sequential | Module:
if isinstance(idx, slice):
return self.__class__(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx: int, module: Module) -> None:
key: str = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx: slice | int) -> None:
if isinstance(idx, slice):
for key in list(self._modules.keys())[idx]:
delattr(self, key)
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
# To preserve numbering
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(
zip(str_indices, self._modules.values(), strict=True)
)
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
def __add__(self, other) -> Sequential:
if isinstance(other, Sequential):
ret = Sequential()
for layer in self:
ret.append(layer)
for layer in other:
ret.append(layer)
return ret
else:
raise ValueError(
"add operator supports only objects "
f"of Sequential class, but {str(type(other))} is given."
)
def pop(self, key: int | slice) -> Module:
"""
Pop ``key`` from self.
"""
v = self[key]
del self[key]
return v
def __iadd__(self, other) -> Self:
if isinstance(other, Sequential):
offset = len(self)
for i, module in enumerate(other):
self.add_module(str(i + offset), module)
return self
else:
raise ValueError(
"add operator supports only objects "
f"of Sequential class, but {str(type(other))} is given."
)
def __mul__(self, other: int) -> Sequential:
if not isinstance(other, int):
raise TypeError(
f"unsupported operand type(s) for *: {type(self)} and {type(other)}"
)
elif other <= 0:
raise ValueError(
f"Non-positive multiplication factor {other} for {type(self)}"
)
else:
combined = Sequential()
offset = 0
for _ in range(other):
for module in self:
combined.add_module(str(offset), module)
offset += 1
return combined
def __rmul__(self, other: int) -> Sequential:
return self.__mul__(other)
def __imul__(self, other: int) -> Self:
if not isinstance(other, int):
raise TypeError(
f"unsupported operand type(s) for *: {type(self)} and {type(other)}"
)
elif other <= 0:
raise ValueError(
f"Non-positive multiplication factor {other} for {type(self)}"
)
else:
len_original = len(self)
offset = len(self)
for _ in range(other - 1):
for i in range(len_original):
self.add_module(str(i + offset), self._modules[str(i)])
offset += len_original
return self
@_copy_to_script_wrapper
def __dir__(self) -> list[str]:
keys = super().__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
# NB: We can't really type check this function as the type of input
# may change dynamically (as is tested in
# TestScript.test_sequential_intermediary_types). Cannot annotate
# with Any as TorchScript expects a more precise type
def forward(self, input):
"""
Runs the forward pass.
"""
for module in self:
input = module(input)
return input
def append(self, module: Module) -> Self:
r"""Append a given module to the end.
Args:
module (nn.Module): module to append
Example::
>>> import torch.nn as nn
>>> n = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3))
>>> n.append(nn.Linear(3, 4))
Sequential(
(0): Linear(in_features=1, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=3, bias=True)
(2): Linear(in_features=3, out_features=4, bias=True)
)
"""
self.add_module(str(len(self)), module)
return self
def insert(self, index: int, module: Module) -> Self:
"""
Inserts a module into the Sequential container at the specified index.
Args:
index (int): The index to insert the module.
module (Module): The module to be inserted.
Example::
>>> import torch.nn as nn
>>> n = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3))
>>> n.insert(0, nn.Linear(3, 4))
Sequential(
(0): Linear(in_features=3, out_features=4, bias=True)
(1): Linear(in_features=1, out_features=2, bias=True)
(2): Linear(in_features=2, out_features=3, bias=True)
)
"""
if not isinstance(module, Module):
raise AssertionError(f"module should be of type: {Module}")
n = len(self._modules)
if not (-n <= index <= n):
raise IndexError(f"Index out of range: {index}")
if index < 0:
index += n
for i in range(n, index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
return self
def extend(self, sequential: Iterable[Module]) -> Self:
"""
Extends the current Sequential container with layers from another Sequential container.
Args:
sequential (Sequential): A Sequential container whose layers will be added to the current container.
Example::
>>> import torch.nn as nn
>>> n = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3))
>>> other = nn.Sequential(nn.Linear(3, 4), nn.Linear(4, 5))
>>> n.extend(other) # or `n + other`
Sequential(
(0): Linear(in_features=1, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=3, bias=True)
(2): Linear(in_features=3, out_features=4, bias=True)
(3): Linear(in_features=4, out_features=5, bias=True)
)
"""
for layer in sequential:
self.append(layer)
return self
|
Sequential
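A short usage sketch (assuming torch is installed) of the chaining behaviour the docstring above describes: running the container is the same as calling its modules in order, and slicing yields a new Sequential.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
x = torch.randn(3, 4)

# forward() feeds the output of each module into the next one
manual = model[2](model[1](model[0](x)))
assert torch.allclose(model(x), manual)

# slicing returns a new Sequential holding the selected layers
head = model[:2]
assert isinstance(head, nn.Sequential) and len(head) == 2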
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/markdown_test.py
|
{
"start": 8217,
"end": 8707
}
|
class ____(DeltaGeneratorTestCase):
"""Test st.latex APIs."""
def test_st_latex_with_help(self):
"""Test st.latex with help."""
st.latex(
r"""
a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} =
\sum_{k=0}^{n-1} ar^k =
a \left(\frac{1-r^{n}}{1-r}\right)
""",
help="help text",
)
el = self.get_delta_from_queue().new_element
assert el.markdown.help == "help text"
|
StLatexAPITest
|
python
|
lazyprogrammer__machine_learning_examples
|
rl2/a3c/nets.py
|
{
"start": 2786,
"end": 4367
}
|
class ____:
def __init__(self):
# Placeholders for our input
# After resizing we have 4 consecutive frames of size 84 x 84
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
# The TD target value
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="y")
# Since we set reuse=True here, that means we MUST
# create the PolicyNetwork before creating the ValueNetwork
    # PolicyNetwork will use reuse=False
with tf.variable_scope("shared", reuse=True):
fc1 = build_feature_extractor(self.states)
# Use a separate scope for output and loss
with tf.variable_scope("value_network"):
self.vhat = tf.contrib.layers.fully_connected(
inputs=fc1,
num_outputs=1,
activation_fn=None)
self.vhat = tf.squeeze(self.vhat, squeeze_dims=[1], name="vhat")
self.loss = tf.squared_difference(self.vhat, self.targets)
self.loss = tf.reduce_sum(self.loss, name="loss")
# training
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
# we'll need these later for running gradient descent steps
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]
# Should use this to create networks
# to ensure they're created in the correct order
def create_networks(num_outputs):
policy_network = PolicyNetwork(num_outputs=num_outputs)
value_network = ValueNetwork()
return policy_network, value_network
|
ValueNetwork
|
python
|
eventlet__eventlet
|
tests/db_pool_test.py
|
{
"start": 1417,
"end": 9984
}
|
class ____(DBTester):
__test__ = False # so that nose doesn't try to execute this directly
def setUp(self):
super().setUp()
self.pool = self.create_pool()
self.connection = self.pool.get()
def tearDown(self):
if self.connection:
self.pool.put(self.connection)
self.pool.clear()
super().tearDown()
def assert_cursor_works(self, cursor):
cursor.execute("select 1")
rows = cursor.fetchall()
assert rows
def test_connecting(self):
assert self.connection is not None
def test_create_cursor(self):
cursor = self.connection.cursor()
cursor.close()
def test_run_query(self):
cursor = self.connection.cursor()
self.assert_cursor_works(cursor)
cursor.close()
def test_run_bad_query(self):
cursor = self.connection.cursor()
try:
cursor.execute("garbage blah blah")
assert False
except AssertionError:
raise
except Exception:
pass
cursor.close()
def test_put_none(self):
# the pool is of size 1, and its only connection is out
assert self.pool.free() == 0
self.pool.put(None)
# ha ha we fooled it into thinking that we had a dead process
assert self.pool.free() == 1
conn2 = self.pool.get()
assert conn2 is not None
assert conn2.cursor
self.pool.put(conn2)
def test_close_does_a_put(self):
assert self.pool.free() == 0
self.connection.close()
assert self.pool.free() == 1
self.assertRaises(AttributeError, self.connection.cursor)
def test_put_doesnt_double_wrap(self):
self.pool.put(self.connection)
conn = self.pool.get()
assert not isinstance(conn._base, db_pool.PooledConnectionWrapper)
self.pool.put(conn)
def test_bool(self):
assert self.connection
self.connection.close()
assert not self.connection
def fill_up_table(self, conn):
curs = conn.cursor()
for i in range(1000):
curs.execute('insert into test_table (value_int) values (%s)' % i)
conn.commit()
def test_returns_immediately(self):
self.pool = self.create_pool()
conn = self.pool.get()
self.set_up_dummy_table(conn)
self.fill_up_table(conn)
curs = conn.cursor()
results = []
SHORT_QUERY = "select * from test_table"
evt = eventlet.Event()
def a_query():
self.assert_cursor_works(curs)
curs.execute(SHORT_QUERY)
results.append(2)
evt.send()
eventlet.spawn(a_query)
results.append(1)
self.assertEqual([1], results)
evt.wait()
self.assertEqual([1, 2], results)
self.pool.put(conn)
def test_connection_is_clean_after_put(self):
self.pool = self.create_pool()
conn = self.pool.get()
self.set_up_dummy_table(conn)
curs = conn.cursor()
for i in range(10):
curs.execute('insert into test_table (value_int) values (%s)' % i)
# do not commit :-)
self.pool.put(conn)
del conn
conn2 = self.pool.get()
curs2 = conn2.cursor()
for i in range(10):
curs2.execute('insert into test_table (value_int) values (%s)' % i)
conn2.commit()
curs2.execute("select * from test_table")
# we should have only inserted them once
self.assertEqual(10, curs2.rowcount)
self.pool.put(conn2)
def test_visibility_from_other_connections(self):
self.pool = self.create_pool(max_size=3)
conn = self.pool.get()
conn2 = self.pool.get()
curs = conn.cursor()
try:
curs2 = conn2.cursor()
curs2.execute("insert into gargleblatz (a) values (%s)" % (314159))
self.assertEqual(curs2.rowcount, 1)
conn2.commit()
selection_query = "select * from gargleblatz"
curs2.execute(selection_query)
self.assertEqual(curs2.rowcount, 1)
del curs2
self.pool.put(conn2)
# create a new connection, it should see the addition
conn3 = self.pool.get()
curs3 = conn3.cursor()
curs3.execute(selection_query)
self.assertEqual(curs3.rowcount, 1)
# now, does the already-open connection see it?
curs.execute(selection_query)
self.assertEqual(curs.rowcount, 1)
self.pool.put(conn3)
finally:
# clean up my litter
curs.execute("delete from gargleblatz where a=314159")
conn.commit()
self.pool.put(conn)
def test_clear(self):
self.pool = self.create_pool()
self.pool.put(self.connection)
self.pool.clear()
self.assertEqual(len(self.pool.free_items), 0)
def test_clear_warmup(self):
"""Clear implicitly created connections (min_size > 0)"""
self.pool = self.create_pool(min_size=1)
self.pool.clear()
self.assertEqual(len(self.pool.free_items), 0)
def test_unwrap_connection(self):
self.assertTrue(isinstance(self.connection,
db_pool.GenericConnectionWrapper))
conn = self.pool._unwrap_connection(self.connection)
assert not isinstance(conn, db_pool.GenericConnectionWrapper)
self.assertEqual(None, self.pool._unwrap_connection(None))
self.assertEqual(None, self.pool._unwrap_connection(1))
# testing duck typing here -- as long as the connection has a
# _base attribute, it should be unwrappable
x = Mock()
x._base = 'hi'
self.assertEqual('hi', self.pool._unwrap_connection(x))
conn.close()
def test_safe_close(self):
self.pool._safe_close(self.connection, quiet=True)
self.assertEqual(len(self.pool.free_items), 1)
self.pool._safe_close(None)
self.pool._safe_close(1)
# now we're really going for 100% coverage
x = Mock()
def fail():
raise KeyboardInterrupt()
x.close = fail
self.assertRaises(KeyboardInterrupt, self.pool._safe_close, x)
x = Mock()
def fail2():
raise RuntimeError("if this line has been printed, the test succeeded")
x.close = fail2
self.pool._safe_close(x, quiet=False)
def test_zero_max_idle(self):
self.pool.put(self.connection)
self.pool.clear()
self.pool = self.create_pool(max_size=2, max_idle=0)
self.connection = self.pool.get()
self.connection.close()
self.assertEqual(len(self.pool.free_items), 0)
def test_zero_max_age(self):
self.pool.put(self.connection)
self.pool.clear()
self.pool = self.create_pool(max_size=2, max_age=0)
self.connection = self.pool.get()
self.connection.close()
self.assertEqual(len(self.pool.free_items), 0)
def test_waiters_get_woken(self):
        # verify that when there's someone waiting on an empty pool
        # and someone puts an immediately-closed connection back in
        # the pool, the waiter gets woken
self.pool.put(self.connection)
self.pool.clear()
self.pool = self.create_pool(max_size=1, max_age=0)
self.connection = self.pool.get()
self.assertEqual(self.pool.free(), 0)
self.assertEqual(self.pool.waiting(), 0)
e = eventlet.Event()
def retrieve(pool, ev):
c = pool.get()
ev.send(c)
eventlet.spawn(retrieve, self.pool, e)
eventlet.sleep(0) # these two sleeps should advance the retrieve
eventlet.sleep(0) # coroutine until it's waiting in get()
self.assertEqual(self.pool.free(), 0)
self.assertEqual(self.pool.waiting(), 1)
self.pool.put(self.connection)
timer = eventlet.Timeout(1)
conn = e.wait()
timer.cancel()
self.assertEqual(self.pool.free(), 0)
self.assertEqual(self.pool.waiting(), 0)
self.pool.put(conn)
def test_raising_create(self):
# if the create() method raises an exception the pool should
# not lose any connections
self.pool = self.create_pool(max_size=1, module=RaisingDBModule())
self.assertRaises(RuntimeError, self.pool.get)
self.assertEqual(self.pool.free(), 1)
|
DBConnectionPool
|
python
|
aimacode__aima-python
|
search.py
|
{
"start": 25944,
"end": 27226
}
|
class ____(Problem):
"""Problem of finding the highest peak in a limited grid"""
def __init__(self, initial, grid, defined_actions=directions4):
"""The grid is a 2 dimensional array/list whose state is specified by tuple of indices"""
super().__init__(initial)
self.grid = grid
self.defined_actions = defined_actions
self.n = len(grid)
assert self.n > 0
self.m = len(grid[0])
assert self.m > 0
def actions(self, state):
"""Returns the list of actions which are allowed to be taken from the given state"""
allowed_actions = []
for action in self.defined_actions:
next_state = vector_add(state, self.defined_actions[action])
if 0 <= next_state[0] <= self.n - 1 and 0 <= next_state[1] <= self.m - 1:
allowed_actions.append(action)
return allowed_actions
def result(self, state, action):
"""Moves in the direction specified by action"""
return vector_add(state, self.defined_actions[action])
def value(self, state):
"""Value of a state is the value it is the index to"""
x, y = state
assert 0 <= x < self.n
assert 0 <= y < self.m
return self.grid[x][y]
|
PeakFindingProblem
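A hedged usage sketch, assuming aima-python's search.py (the module shown above) is importable as `search` together with its hill_climbing helper and directions4 action set:

from search import PeakFindingProblem, hill_climbing, directions4

grid = [[0, 1, 2],
        [3, 9, 4],
        [5, 6, 7]]
problem = PeakFindingProblem(initial=(0, 0), grid=grid, defined_actions=directions4)

peak = hill_climbing(problem)            # returns a (row, col) state
print(problem.value(peak))               # 9: the global peak is reachable from (0, 0)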
|
python
|
google__python-fire
|
fire/console/console_attr.py
|
{
"start": 15921,
"end": 23495
}
|
class ____(object):
"""Resource string colorizer.
Attributes:
_con: ConsoleAttr object.
_color: Color name.
_string: The string to colorize.
_justify: The justification function, no justification if None. For example,
justify=lambda s: s.center(10)
"""
def __init__(self, string, color, justify=None):
"""Constructor.
Args:
string: The string to colorize.
color: Color name used to index ConsoleAttr._ANSI_COLOR.
justify: The justification function, no justification if None. For
example, justify=lambda s: s.center(10)
"""
self._con = GetConsoleAttr()
self._color = color
self._string = string
self._justify = justify
def __eq__(self, other):
return self._string == str(other)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
return self._string > str(other)
def __lt__(self, other):
return self._string < str(other)
def __ge__(self, other):
return not self < other
def __le__(self, other):
return not self > other
def __len__(self):
return self._con.DisplayWidth(self._string)
def __str__(self):
return self._string
def Render(self, stream, justify=None):
"""Renders the string as self._color on the console.
Args:
stream: The stream to render the string to. The stream given here *must*
have the same encoding as sys.stdout for this to work properly.
justify: The justification function, self._justify if None.
"""
stream.write(
self._con.Colorize(self._string, self._color, justify or self._justify))
def GetConsoleAttr(encoding=None, reset=False):
"""Gets the console attribute state.
  If this is the first call, or reset is True, or encoding is not None and does
  not match the current encoding, then the state is (re)initialized. Otherwise
  the current state is returned.
This call associates the out file stream with the console. All console related
output should go to the same stream.
Args:
encoding: Encoding override.
ascii -- ASCII. This is the default.
utf8 -- UTF-8 unicode.
win -- Windows code page 437.
reset: Force re-initialization if True.
Returns:
The global ConsoleAttr state object.
"""
attr = ConsoleAttr._CONSOLE_ATTR_STATE # pylint: disable=protected-access
if not reset:
if not attr:
reset = True
elif encoding and encoding != attr.GetEncoding():
reset = True
if reset:
attr = ConsoleAttr(encoding=encoding)
ConsoleAttr._CONSOLE_ATTR_STATE = attr # pylint: disable=protected-access
return attr
def ResetConsoleAttr(encoding=None):
"""Resets the console attribute state to the console default.
Args:
encoding: Reset to this encoding instead of the default.
ascii -- ASCII. This is the default.
utf8 -- UTF-8 unicode.
win -- Windows code page 437.
Returns:
The global ConsoleAttr state object.
"""
return GetConsoleAttr(encoding=encoding, reset=True)
def GetCharacterDisplayWidth(char):
"""Returns the monospaced terminal display width of char.
Assumptions:
- monospaced display
- ambiguous or unknown chars default to width 1
- ASCII control char width is 1 => don't use this for control chars
Args:
char: The character to determine the display width of.
Returns:
The monospaced terminal display width of char: either 0, 1, or 2.
"""
if not isinstance(char, str):
# Non-unicode chars have width 1. Don't use this function on control chars.
return 1
# Normalize to avoid special cases.
char = unicodedata.normalize('NFC', char)
if unicodedata.combining(char) != 0:
# Modifies the previous character and does not move the cursor.
return 0
elif unicodedata.category(char) == 'Cf':
# Unprintable formatting char.
return 0
elif unicodedata.east_asian_width(char) in 'FW':
# Fullwidth or Wide chars take 2 character positions.
return 2
else:
# Don't use this function on control chars.
return 1
def SafeText(data, encoding=None, escape=True):
br"""Converts the data to a text string compatible with the given encoding.
This works the same way as Decode() below except it guarantees that any
characters in the resulting text string can be re-encoded using the given
encoding (or GetConsoleAttr().GetEncoding() if None is given). This means
that the string will be safe to print to sys.stdout (for example) without
getting codec exceptions if the user's terminal doesn't support the encoding
used by the source of the text.
Args:
data: Any bytes, string, or object that has str() or unicode() methods.
encoding: The encoding name to ensure compatibility with. Defaults to
GetConsoleAttr().GetEncoding().
escape: Replace unencodable characters with a \uXXXX or \xXX equivalent if
True. Otherwise replace unencodable characters with an appropriate unknown
character, '?' for ASCII, and the unicode unknown replacement character
\uFFFE for unicode.
Returns:
A text string representation of the data, but modified to remove any
characters that would result in an encoding exception with the target
encoding. In the worst case, with escape=False, it will contain only ?
characters.
"""
if data is None:
return 'None'
encoding = encoding or GetConsoleAttr().GetEncoding()
string = encoding_util.Decode(data, encoding=encoding)
try:
# No change needed if the string encodes to the output encoding.
string.encode(encoding)
return string
except UnicodeError:
# The string does not encode to the output encoding. Encode it with error
    # handling then convert it back into a text string (which will be
    # guaranteed to only contain characters that can be encoded later).
return (string
.encode(encoding, 'backslashreplace' if escape else 'replace')
.decode(encoding))
def EncodeToBytes(data):
r"""Encode data to bytes.
The primary use case is for base64/mime style 7-bit ascii encoding where the
encoder input must be bytes. "safe" means that the conversion always returns
bytes and will not raise codec exceptions.
If data is text then an 8-bit ascii encoding is attempted, then the console
encoding, and finally utf-8.
Args:
data: Any bytes, string, or object that has str() or unicode() methods.
Returns:
A bytes string representation of the data.
"""
if data is None:
return b''
if isinstance(data, bytes):
# Already bytes - our work is done.
return data
# Coerce to text that will be converted to bytes.
s = str(data)
try:
# Assume the text can be directly converted to bytes (8-bit ascii).
return s.encode('iso-8859-1')
except UnicodeEncodeError:
pass
try:
# Try the output encoding.
return s.encode(GetConsoleAttr().GetEncoding())
except UnicodeEncodeError:
pass
# Punt to utf-8.
return s.encode('utf-8')
def Decode(data, encoding=None):
"""Converts the given string, bytes, or object to a text string.
Args:
data: Any bytes, string, or object that has str() or unicode() methods.
encoding: A suggesting encoding used to decode. If this encoding doesn't
work, other defaults are tried. Defaults to
GetConsoleAttr().GetEncoding().
Returns:
A text string representation of the data.
"""
encoding = encoding or GetConsoleAttr().GetEncoding()
return encoding_util.Decode(data, encoding=encoding)
|
Colorizer
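A stdlib-only check of the display-width rules GetCharacterDisplayWidth encodes above: combining and formatting characters occupy no column, East Asian Fullwidth/Wide characters occupy two, everything else one. char_width here is a hypothetical stand-in, not the gcloud implementation.

import unicodedata

def char_width(char: str) -> int:
    char = unicodedata.normalize('NFC', char)
    if unicodedata.combining(char):
        return 0                         # combining mark: modifies the previous char
    if unicodedata.category(char) == 'Cf':
        return 0                         # unprintable formatting char
    if unicodedata.east_asian_width(char) in 'FW':
        return 2                         # Fullwidth or Wide
    return 1

assert char_width('a') == 1
assert char_width('\u4e2d') == 2         # CJK ideograph, Wide
assert char_width('\u0301') == 0         # combining acute accent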
|
python
|
huggingface__transformers
|
src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py
|
{
"start": 2488,
"end": 3962
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
def simple_eager_attention_forward(
module: nn.Module,
query_states: torch.Tensor,
key_states: torch.Tensor,
value_states: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
|
Phi4MultimodalVisionMLP
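A small numerical sanity check (assuming torch >= 2.0 is installed) that the eager pattern in simple_eager_attention_forward above -- matmul, scale, softmax, matmul -- agrees with PyTorch's fused scaled dot-product attention when there is no mask and no dropout:

import math
import torch
import torch.nn.functional as F

torch.manual_seed(0)
q = torch.randn(2, 4, 6, 8)              # (batch, heads, seq_len, head_dim)
k = torch.randn(2, 4, 6, 8)
v = torch.randn(2, 4, 6, 8)
scaling = 1.0 / math.sqrt(q.shape[-1])   # same default scale as the fused kernel

weights = torch.softmax(torch.matmul(q, k.transpose(2, 3)) * scaling, dim=-1)
eager_out = torch.matmul(weights, v)

ref_out = F.scaled_dot_product_attention(q, k, v)
assert torch.allclose(eager_out, ref_out, atol=1e-5)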
|
python
|
ApeWorX__ape
|
src/ape/cli/choices.py
|
{
"start": 14175,
"end": 14627
}
|
class ____(Choice):
"""
A simple lazy-choice where choices are evaluated lazily.
"""
def __init__(self, get_choices: Callable[[], Sequence[str]], case_sensitive: bool = False):
self._get_choices = get_choices
self.case_sensitive = case_sensitive
# Note: Purposely avoid super init.
@cached_property
def choices(self) -> Sequence[str]: # type: ignore[override]
return self._get_choices()
|
LazyChoice
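A standalone illustration (not ape- or click-specific) of why the cached_property above matters: the choices callable runs only on first access, and only once.

from functools import cached_property
from typing import Callable, Sequence

calls = []

def load_networks() -> Sequence[str]:
    calls.append(1)                      # stand-in for an expensive lookup
    return ["mainnet", "sepolia"]        # hypothetical choice values

class LazyChoices:
    def __init__(self, get_choices: Callable[[], Sequence[str]]):
        self._get_choices = get_choices

    @cached_property
    def choices(self) -> Sequence[str]:
        return self._get_choices()

lazy = LazyChoices(load_networks)
assert calls == []                       # nothing evaluated yet
assert lazy.choices == ["mainnet", "sepolia"]
assert lazy.choices == ["mainnet", "sepolia"]
assert len(calls) == 1                   # evaluated exactly once, then cached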
|
python
|
spack__spack
|
lib/spack/spack/repo.py
|
{
"start": 21756,
"end": 35730
}
|
class ____:
"""A RepoPath is a list of Repo instances that function as one.
It functions exactly like a Repo, but it operates on the combined
results of the Repos in its list instead of on a single package
repository.
"""
def __init__(self, *repos: "Repo") -> None:
self.repos: List[Repo] = []
self.by_namespace = nm.NamespaceTrie()
self._provider_index: Optional[spack.provider_index.ProviderIndex] = None
self._patch_index: Optional[spack.patch.PatchCache] = None
self._tag_index: Optional[spack.tag.TagIndex] = None
for repo in repos:
self.put_last(repo)
@staticmethod
def from_descriptors(
descriptors: "RepoDescriptors",
cache: spack.util.file_cache.FileCache,
overrides: Optional[Dict[str, Any]] = None,
) -> "RepoPath":
repo_path, errors = descriptors.construct(cache=cache, fetch=True, overrides=overrides)
# Merely warn if package repositories from config could not be constructed.
if errors:
for path, error in errors.items():
tty.warn(f"Error constructing repository '{path}': {error}")
return repo_path
@staticmethod
def from_config(config: spack.config.Configuration) -> "RepoPath":
"""Create a RepoPath from a configuration object."""
overrides = {
pkg_name: data["package_attributes"]
for pkg_name, data in config.get_config("packages").items()
if pkg_name != "all" and "package_attributes" in data
}
return RepoPath.from_descriptors(
descriptors=RepoDescriptors.from_config(lock=package_repository_lock(), config=config),
cache=spack.caches.MISC_CACHE,
overrides=overrides,
)
def enable(self) -> None:
"""Set the relevant search paths for package module loading"""
REPOS_FINDER.repo_path = self
for p in reversed(self.python_paths()):
if p not in sys.path:
sys.path.insert(0, p)
def disable(self) -> None:
"""Disable the search paths for package module loading"""
if hasattr(REPOS_FINDER, "repo_path"):
del REPOS_FINDER.repo_path
for p in self.python_paths():
if p in sys.path:
sys.path.remove(p)
def ensure_unwrapped(self) -> "RepoPath":
"""Ensure we unwrap this object from any dynamic wrapper (like Singleton)"""
return self
def put_first(self, repo: Union["Repo", "RepoPath"]) -> None:
"""Add repo first in the search path."""
if isinstance(repo, RepoPath):
for r in reversed(repo.repos):
self.put_first(r)
return
self.repos.insert(0, repo)
self.by_namespace[repo.namespace] = repo
def put_last(self, repo):
"""Add repo last in the search path."""
if isinstance(repo, RepoPath):
for r in repo.repos:
self.put_last(r)
return
self.repos.append(repo)
# don't mask any higher-precedence repos with same namespace
if repo.namespace not in self.by_namespace:
self.by_namespace[repo.namespace] = repo
def remove(self, repo):
"""Remove a repo from the search path."""
if repo in self.repos:
self.repos.remove(repo)
def get_repo(self, namespace: str) -> "Repo":
"""Get a repository by namespace."""
if namespace not in self.by_namespace:
raise UnknownNamespaceError(namespace)
return self.by_namespace[namespace]
def first_repo(self) -> Optional["Repo"]:
"""Get the first repo in precedence order."""
return self.repos[0] if self.repos else None
@spack.llnl.util.lang.memoized
def _all_package_names_set(self, include_virtuals) -> Set[str]:
return {name for repo in self.repos for name in repo.all_package_names(include_virtuals)}
@spack.llnl.util.lang.memoized
def _all_package_names(self, include_virtuals: bool) -> List[str]:
"""Return all unique package names in all repositories."""
return sorted(self._all_package_names_set(include_virtuals), key=lambda n: n.lower())
def all_package_names(self, include_virtuals: bool = False) -> List[str]:
return self._all_package_names(include_virtuals)
def package_path(self, name: str) -> str:
"""Get path to package.py file for this repo."""
return self.repo_for_pkg(name).package_path(name)
def all_package_paths(self) -> Generator[str, None, None]:
for name in self.all_package_names():
yield self.package_path(name)
def packages_with_tags(self, *tags: str, full: bool = False) -> Set[str]:
"""Returns a set of packages matching any of the tags in input.
Args:
full: if True the package names in the output are fully-qualified
"""
return {
f"{repo.namespace}.{pkg}" if full else pkg
for repo in self.repos
for pkg in repo.packages_with_tags(*tags)
}
def all_package_classes(self) -> Generator[Type["spack.package_base.PackageBase"], None, None]:
for name in self.all_package_names():
yield self.get_pkg_class(name)
@property
def provider_index(self) -> spack.provider_index.ProviderIndex:
"""Merged ProviderIndex from all Repos in the RepoPath."""
if self._provider_index is None:
self._provider_index = spack.provider_index.ProviderIndex(repository=self)
for repo in reversed(self.repos):
self._provider_index.merge(repo.provider_index)
return self._provider_index
@property
def tag_index(self) -> spack.tag.TagIndex:
"""Merged TagIndex from all Repos in the RepoPath."""
if self._tag_index is None:
self._tag_index = spack.tag.TagIndex()
for repo in reversed(self.repos):
self._tag_index.merge(repo.tag_index)
return self._tag_index
@property
def patch_index(self) -> spack.patch.PatchCache:
"""Merged PatchIndex from all Repos in the RepoPath."""
if self._patch_index is None:
from spack.patch import PatchCache
self._patch_index = PatchCache(repository=self)
for repo in reversed(self.repos):
self._patch_index.update(repo.patch_index)
return self._patch_index
def providers_for(self, virtual: Union[str, "spack.spec.Spec"]) -> List["spack.spec.Spec"]:
all_packages = self._all_package_names_set(include_virtuals=False)
providers = [
spec
for spec in self.provider_index.providers_for(virtual)
if spec.name in all_packages
]
if not providers:
raise UnknownPackageError(virtual if isinstance(virtual, str) else virtual.fullname)
return providers
@autospec
def extensions_for(
self, extendee_spec: "spack.spec.Spec"
) -> List["spack.package_base.PackageBase"]:
from spack.spec import Spec
return [
pkg_cls(Spec(pkg_cls.name))
for pkg_cls in self.all_package_classes()
if pkg_cls(Spec(pkg_cls.name)).extends(extendee_spec)
]
def last_mtime(self):
"""Time a package file in this repo was last updated."""
return max(repo.last_mtime() for repo in self.repos)
def repo_for_pkg(self, spec: Union[str, "spack.spec.Spec"]) -> "Repo":
"""Given a spec, get the repository for its package."""
# We don't @_autospec this function b/c it's called very frequently
# and we want to avoid parsing str's into Specs unnecessarily.
from spack.spec import Spec
if isinstance(spec, Spec):
namespace = spec.namespace
name = spec.name
else:
# handle strings directly for speed instead of @_autospec'ing
namespace, _, name = spec.rpartition(".")
# If the spec already has a namespace, then return the
# corresponding repo if we know about it.
if namespace:
if namespace not in self.by_namespace:
raise UnknownNamespaceError(namespace, name=name)
return self.by_namespace[namespace]
# If there's no namespace, search in the RepoPath.
for repo in self.repos:
if name in repo:
return repo
# If the package isn't in any repo, return the one with
# highest precedence. This is for commands like `spack edit`
# that can operate on packages that don't exist yet.
selected = self.first_repo()
if selected is None:
raise UnknownPackageError(name)
return selected
def get(self, spec: "spack.spec.Spec") -> "spack.package_base.PackageBase":
"""Returns the package associated with the supplied spec."""
from spack.spec import Spec
msg = "RepoPath.get can only be called on concrete specs"
assert isinstance(spec, Spec) and spec.concrete, msg
return self.repo_for_pkg(spec).get(spec)
def python_paths(self) -> List[str]:
"""Return a list of all the Python paths in the repos."""
return [repo.python_path for repo in self.repos if repo.python_path]
def get_pkg_class(self, pkg_name: str) -> Type["spack.package_base.PackageBase"]:
"""Find a class for the spec's package and return the class object."""
return self.repo_for_pkg(pkg_name).get_pkg_class(pkg_name)
@autospec
def dump_provenance(self, spec, path):
"""Dump provenance information for a spec to a particular path.
This dumps the package file and any associated patch files.
Raises UnknownPackageError if not found.
"""
return self.repo_for_pkg(spec).dump_provenance(spec, path)
def dirname_for_package_name(self, pkg_name: str) -> str:
return self.repo_for_pkg(pkg_name).dirname_for_package_name(pkg_name)
def filename_for_package_name(self, pkg_name: str) -> str:
return self.repo_for_pkg(pkg_name).filename_for_package_name(pkg_name)
def exists(self, pkg_name: str) -> bool:
"""Whether package with the give name exists in the path's repos.
Note that virtual packages do not "exist".
"""
return any(repo.exists(pkg_name) for repo in self.repos)
def _have_name(self, pkg_name: str) -> bool:
have_name = pkg_name is not None
if have_name and not isinstance(pkg_name, str):
raise ValueError(f"is_virtual(): expected package name, got {type(pkg_name)}")
return have_name
def is_virtual(self, pkg_name: str) -> bool:
"""Return True if the package with this name is virtual, False otherwise.
        This function uses the provider index. If calling from a code block that
        is used to construct the provider index, use the ``is_virtual_safe`` function.
Args:
pkg_name (str): name of the package we want to check
"""
have_name = self._have_name(pkg_name)
return have_name and pkg_name in self.provider_index
def is_virtual_safe(self, pkg_name: str) -> bool:
"""Return True if the package with this name is virtual, False otherwise.
This function doesn't use the provider index.
Args:
pkg_name (str): name of the package we want to check
"""
have_name = self._have_name(pkg_name)
return have_name and (not self.exists(pkg_name) or self.get_pkg_class(pkg_name).virtual)
def __contains__(self, pkg_name):
return self.exists(pkg_name)
def marshal(self):
return (self.repos,)
@staticmethod
def unmarshal(repos):
return RepoPath(*repos)
def __reduce__(self):
return RepoPath.unmarshal, self.marshal()
def _parse_package_api_version(
config: Dict[str, Any],
min_api: Tuple[int, int] = spack.min_package_api_version,
max_api: Tuple[int, int] = spack.package_api_version,
) -> Tuple[int, int]:
api = config.get("api")
if api is None:
package_api = (1, 0)
else:
if not isinstance(api, str):
raise BadRepoError(f"Invalid Package API version '{api}'. Must be of the form vX.Y")
api_match = _API_REGEX.match(api)
if api_match is None:
raise BadRepoError(f"Invalid Package API version '{api}'. Must be of the form vX.Y")
package_api = (int(api_match.group(1)), int(api_match.group(2)))
if min_api <= package_api <= max_api:
return package_api
min_str = ".".join(str(i) for i in min_api)
max_str = ".".join(str(i) for i in max_api)
curr_str = ".".join(str(i) for i in package_api)
raise BadRepoVersionError(
api,
f"Package API v{curr_str} is not supported by this version of Spack ("
f"must be between v{min_str} and v{max_str})",
)
def _validate_and_normalize_subdir(subdir: Any, root: str, package_api: Tuple[int, int]) -> str:
if not isinstance(subdir, str):
raise BadRepoError(f"Invalid subdirectory '{subdir}' in '{root}'. Must be a string")
if package_api < (2, 0):
return subdir # In v1.x we did not validate subdir names
if subdir in (".", ""):
raise BadRepoError(
f"Invalid subdirectory '{subdir}' in '{root}'. Use a symlink packages -> . instead"
)
# Otherwise we expect a directory name (not path) that can be used as a Python module.
if os.sep in subdir:
raise BadRepoError(
f"Invalid subdirectory '{subdir}' in '{root}'. Expected a directory name, not a path"
)
if not nm.valid_module_name(subdir, package_api):
raise BadRepoError(
f"Invalid subdirectory '{subdir}' in '{root}'. Must be a valid Python module name"
)
return subdir
|
RepoPath
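A simplified, Spack-independent sketch of the lookup rule repo_for_pkg implements above: a namespace-qualified name goes straight to that namespace, while an unqualified name is resolved against the repos in precedence order. The dict-based repos here are a toy model, not real Repo objects.

repos = [
    {"namespace": "site", "packages": {"zlib"}},              # highest precedence
    {"namespace": "builtin", "packages": {"zlib", "cmake"}},
]
by_namespace = {r["namespace"]: r for r in repos}

def repo_for_pkg(spec: str) -> dict:
    namespace, _, name = spec.rpartition(".")
    if namespace:                        # e.g. "builtin.zlib"
        return by_namespace[namespace]
    for repo in repos:                   # unqualified: first repo that has it wins
        if name in repo["packages"]:
            return repo
    return repos[0]                      # fall back to the highest-precedence repo

assert repo_for_pkg("zlib")["namespace"] == "site"
assert repo_for_pkg("builtin.zlib")["namespace"] == "builtin"
assert repo_for_pkg("cmake")["namespace"] == "builtin"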
|
python
|
huggingface__transformers
|
src/transformers/models/openai/modeling_openai.py
|
{
"start": 10348,
"end": 10613
}
|
class ____(PreTrainedModel):
config: OpenAIGPTConfig
base_model_prefix = "transformer"
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of models predicting if two sentences are consecutive or not.
"""
)
|
OpenAIGPTPreTrainedModel
|
python
|
joke2k__faker
|
tests/providers/test_date_time.py
|
{
"start": 27161,
"end": 30564
}
|
class ____(unittest.TestCase):
"""
Test Dates of Birth
"""
def setUp(self):
self.fake = Faker()
Faker.seed(0)
def test_date_of_birth(self):
dob = self.fake.date_of_birth()
assert isinstance(dob, date)
@freezegun.freeze_time("2020-02-29")
def test_date_of_birth_on_leap_day(self):
"""
Freeze the date to a leap day to verify that the date_of_birth method does not
raise an error
"""
dob = self.fake.date_of_birth()
assert isinstance(dob, date)
def test_value_errors(self):
with self.assertRaises(ValueError):
self.fake.date_of_birth(minimum_age=-1)
with self.assertRaises(ValueError):
self.fake.date_of_birth(maximum_age=-1)
with self.assertRaises(ValueError):
self.fake.date_of_birth(minimum_age=-2, maximum_age=-1)
with self.assertRaises(ValueError):
self.fake.date_of_birth(minimum_age=5, maximum_age=4)
def test_type_errors(self):
with self.assertRaises(TypeError):
self.fake.date_of_birth(minimum_age=0.5)
with self.assertRaises(TypeError):
self.fake.date_of_birth(maximum_age="hello")
def test_bad_age_range(self):
with self.assertRaises(ValueError):
self.fake.date_of_birth(minimum_age=5, maximum_age=0)
def test_acceptable_age_range_five_years(self):
for _ in range(100):
now = datetime.now(utc).date()
days_since_now = now - now
days_since_six_years_ago = now - change_year(now, -6)
dob = self.fake.date_of_birth(tzinfo=utc, minimum_age=0, maximum_age=5)
days_since_dob = now - dob
assert isinstance(dob, date)
assert days_since_six_years_ago > days_since_dob >= days_since_now
def test_acceptable_age_range_eighteen_years(self):
for _ in range(100):
now = datetime.now(utc).date()
days_since_now = now - now
days_since_nineteen_years_ago = now - change_year(now, -19)
dob = self.fake.date_of_birth(tzinfo=utc, minimum_age=0, maximum_age=18)
days_since_dob = now - dob
assert isinstance(dob, date)
assert days_since_nineteen_years_ago > days_since_dob >= days_since_now
def test_identical_age_range(self):
for _ in range(100):
now = datetime.now(utc).date()
days_since_five_years_ago = now - change_year(now, -5)
days_since_six_years_ago = now - change_year(now, -6)
dob = self.fake.date_of_birth(tzinfo=utc, minimum_age=5, maximum_age=5)
days_since_dob = now - dob
assert isinstance(dob, date)
assert days_since_six_years_ago > days_since_dob >= days_since_five_years_ago
def test_distant_age_range(self):
for _ in range(100):
now = datetime.now(utc).date()
days_since_one_hundred_years_ago = now - change_year(now, -100)
days_since_one_hundred_eleven_years_ago = now - change_year(now, -111)
dob = self.fake.date_of_birth(minimum_age=100, maximum_age=110)
days_since_dob = now - dob
assert isinstance(dob, date)
assert days_since_one_hundred_eleven_years_ago > days_since_dob >= days_since_one_hundred_years_ago
|
DatesOfBirth
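A quick usage sketch of the API under test (assuming the faker package is installed); minimum_age and maximum_age are inclusive bounds in years:

from datetime import date
from faker import Faker

fake = Faker()
Faker.seed(0)

dob = fake.date_of_birth(minimum_age=18, maximum_age=65)
assert isinstance(dob, date)

today = date.today()
age = today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))
assert 18 <= age <= 65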
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/weak_tensor_special_math_ops_test.py
|
{
"start": 7501,
"end": 9574
}
|
class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_fresnel_sin_boundary(self):
self.assertAllClose(0., special_math_ops.fresnel_sin(0.))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.fresnel_sin(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_sin_odd(self, dtype):
x = _get_weak_tensor(
np.random.uniform(-100.0, 100.0, size=int(1e4)).astype(dtype)
)
y = special_math_ops.fresnel_sin(x)
neg_y = -special_math_ops.fresnel_sin(-x)
self.assertIsInstance(y, WeakTensor)
self.assertIsInstance(neg_y, WeakTensor)
self.assertAllClose(y, neg_y)
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_sin_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
x_wt = _get_weak_tensor(x)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.fresnel(x)[0],
self.evaluate(special_math_ops.fresnel_sin(x_wt)),
)
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_sin_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
x_wt = _get_weak_tensor(x)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.fresnel(x)[0],
self.evaluate(special_math_ops.fresnel_sin(x_wt)),
rtol=1e-5,
)
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_fresnel_sin_gradient(self):
inputs = [_get_weak_tensor(np.random.uniform(1.0, 50.0, size=int(1e2)))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.fresnel_sin, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 5e-3)
@test_util.run_all_in_graph_and_eager_modes
|
FresnelSinTest
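For context, the quantity under test is the Fresnel sine integral S(x) = integral from 0 to x of sin(pi*t**2/2) dt. A small check (assuming scipy is installed, as the test itself does) against direct quadrature:

import numpy as np
from scipy import integrate, special

def fresnel_sin_quad(x: float) -> float:
    # direct numerical integral of sin(pi * t**2 / 2) from 0 to x
    val, _err = integrate.quad(lambda t: np.sin(np.pi * t * t / 2.0), 0.0, x)
    return val

for x in (0.5, 1.0, 2.5):
    s_ref, _c_ref = special.fresnel(x)   # scipy returns the pair (S(x), C(x))
    assert np.isclose(s_ref, fresnel_sin_quad(x), rtol=1e-6)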
|
python
|
run-llama__llama_index
|
llama-index-integrations/tools/llama-index-tools-scrapegraph/tests/test_integration.py
|
{
"start": 12023,
"end": 16573
}
|
class ____:
"""Test scenarios that simulate real-world usage."""
@pytest.fixture
def mock_tool_spec(self):
"""Create a mocked tool spec for real-world testing."""
with patch("llama_index.tools.scrapegraph.base.Client") as mock_client_class:
mock_client = Mock()
mock_client_class.from_env.return_value = mock_client
tool_spec = ScrapegraphToolSpec()
tool_spec.client = mock_client
return tool_spec, mock_client
def test_e_commerce_product_extraction(self, mock_tool_spec):
"""Test extracting product information from e-commerce sites."""
tool_spec, mock_client = mock_tool_spec
# Mock e-commerce product data
mock_response = {
"products": [
{"name": "Laptop", "price": "$999", "rating": "4.5/5"},
{"name": "Mouse", "price": "$29", "rating": "4.2/5"},
]
}
mock_client.smartscraper.return_value = mock_response
result = tool_spec.scrapegraph_smartscraper(
prompt="Extract product names, prices, and ratings from this e-commerce page",
url="https://shop.example.com/laptops",
)
assert result == mock_response
mock_client.smartscraper.assert_called_once()
def test_news_article_summarization(self, mock_tool_spec):
"""Test extracting and summarizing news articles."""
tool_spec, mock_client = mock_tool_spec
# Mock news article markdown
mock_markdown = """# Breaking News: AI Advances
## Summary
Artificial Intelligence has made significant breakthroughs...
## Key Points
- New neural network architecture
- 30% improvement in efficiency
- Applications in healthcare
"""
mock_client.markdownify.return_value = mock_markdown
result = tool_spec.scrapegraph_markdownify(
url="https://news.example.com/ai-breakthrough"
)
assert result == mock_markdown
assert "# Breaking News" in result
assert "Key Points" in result
def test_complex_site_navigation(self, mock_tool_spec):
"""Test complex site navigation with agentic scraper."""
tool_spec, mock_client = mock_tool_spec
# Mock complex navigation result
mock_response = {
"contact_info": {
"email": "contact@company.com",
"phone": "+1-555-0123",
"address": "123 Tech Street, Silicon Valley",
},
"navigation_path": ["Home", "About", "Contact", "Support"],
}
mock_client.agentic_scraper.return_value = mock_response
result = tool_spec.scrapegraph_agentic_scraper(
prompt="Navigate through the website to find comprehensive contact information",
url="https://company.example.com",
)
assert result == mock_response
assert "contact_info" in result
assert "navigation_path" in result
def test_multi_step_workflow(self, mock_tool_spec):
"""Test a multi-step workflow combining different tools."""
tool_spec, mock_client = mock_tool_spec
# Step 1: Search for relevant pages
mock_client.search.return_value = "Found relevant pages about Python tutorials"
search_result = tool_spec.scrapegraph_search(
query="Python programming tutorials beginner", max_results=5
)
# Step 2: Scrape the found page
mock_client.scrape.return_value = {
"html": "<html><head><title>Python Tutorial</title></head><body>Learn Python...</body></html>",
"request_id": "req-123",
}
scrape_result = tool_spec.scrapegraph_scrape(
url="https://python-tutorial.example.com"
)
# Step 3: Convert to markdown for analysis
mock_client.markdownify.return_value = (
"# Python Tutorial\n\nLearn Python programming..."
)
markdown_result = tool_spec.scrapegraph_markdownify(
url="https://python-tutorial.example.com"
)
# Verify all steps executed correctly
assert "Python tutorials" in search_result
assert "html" in scrape_result
assert "# Python Tutorial" in markdown_result
# Verify all client methods were called
mock_client.search.assert_called_once()
mock_client.scrape.assert_called_once()
mock_client.markdownify.assert_called_once()
|
TestRealWorldScenarios
|
python
|
encode__django-rest-framework
|
tests/test_fields.py
|
{
"start": 61021,
"end": 61319
}
|
class ____(FieldValues):
"""
    Values for `DurationField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.timedelta(1): datetime.timedelta(1)
}
field = serializers.DurationField(format=None)
|
TestNoOutputFormatDurationField
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/chunk_key_encodings.py
|
{
"start": 717,
"end": 864
}
|
class ____(TypedDict):
name: Literal["v2", "default"]
separator: NotRequired[SeparatorLiteral]
@dataclass(frozen=True)
|
ChunkKeyEncodingParams
|
python
|
hynek__structlog
|
tests/test_native.py
|
{
"start": 620,
"end": 11043
}
|
class ____:
def test_is_enabled_for(self, bl):
"""
is_enabled_for returns True if the log level is enabled.
"""
assert bl.is_enabled_for(20)
assert bl.is_enabled_for(logging.INFO)
assert not bl.is_enabled_for(19)
assert not bl.is_enabled_for(logging.DEBUG)
def test_get_effective_level(self, bl):
"""
get_effective_level returns the log level.
"""
assert 20 == logging.INFO == bl.get_effective_level()
def test_exact_level(self, bl, cl):
"""
if log level is exactly the min_level, log.
"""
bl.info("yep")
assert [("info", (), {"event": "yep"})] == cl.calls
async def test_async_exact_level(self, bl, cl):
"""
if log level is exactly the min_level, log.
"""
await bl.ainfo("yep")
assert [("info", (), {"event": "yep"})] == cl.calls
def test_one_below(self, bl, cl):
"""
if log level is below the min_level, don't log.
"""
bl.debug("nope")
assert [] == cl.calls
async def test_async_one_below(self, bl, cl):
"""
if log level is below the min_level, don't log.
"""
await bl.adebug("nope")
assert [] == cl.calls
def test_filtered_interp(self, bl, cl):
"""
Passing interpolation args works if the log entry is filtered out.
"""
bl.debug("hello %s!", "world")
assert [] == cl.calls
async def test_async_filtered_interp(self, bl, cl):
"""
Passing interpolation args works if the log entry is filtered out.
"""
await bl.adebug("hello %s!", "world")
assert [] == cl.calls
def test_no_args(self, bl, cl):
"""
If no args are passed, don't attempt interpolation.
See also #473
"""
bl.info(42)
assert 42 == cl.calls[0][2]["event"]
async def test_async_no_args(self, bl, cl):
"""
If no args are passed, don't attempt interpolation.
See also #473
"""
await bl.ainfo(42)
assert 42 == cl.calls[0][2]["event"]
def test_log_exact_level(self, bl, cl):
"""
if log level is exactly the min_level, log.
"""
bl.log(logging.INFO, "yep")
assert [("info", (), {"event": "yep"})] == cl.calls
async def test_alog_exact_level(self, bl, cl):
"""
if log level is exactly the min_level, log.
"""
await bl.alog(logging.INFO, "yep")
assert [("info", (), {"event": "yep"})] == cl.calls
def test_log_one_below(self, bl, cl):
"""
if log level is below the min_level, don't log.
"""
bl.log(logging.DEBUG, "nope")
assert [] == cl.calls
async def test_alog_one_below(self, bl, cl):
"""
if log level is below the min_level, don't log.
"""
await bl.alog(logging.DEBUG, "nope")
assert [] == cl.calls
async def test_alog_no_args(self, bl, cl):
"""
If no args are passed, interpolation is not attempted.
See also #473
"""
await bl.alog(logging.INFO, 42)
assert 42 == cl.calls[0][2]["event"]
def test_log_interp(self, bl, cl):
"""
Interpolation happens if args are passed.
"""
bl.log(logging.INFO, "answer is %d.", 42)
assert "answer is 42." == cl.calls[0][2]["event"]
def test_log_interp_dict(self, bl, cl):
"""
Dict-based interpolation happens if a mapping is passed.
"""
bl.log(logging.INFO, "answer is %(answer)d.", {"answer": 42})
assert "answer is 42." == cl.calls[0][2]["event"]
async def test_alog_interp(self, bl, cl):
"""
Interpolation happens if args are passed.
"""
await bl.alog(logging.INFO, "answer is %d.", 42)
assert "answer is 42." == cl.calls[0][2]["event"]
async def test_alog_interp_dict(self, bl, cl):
"""
Dict-based interpolation happens if a mapping is passed.
"""
await bl.alog(logging.INFO, "answer is %(answer)d.", {"answer": 42})
assert "answer is 42." == cl.calls[0][2]["event"]
def test_filter_bound_below_missing_event_string(self, bl):
"""
Missing event arg causes exception below min_level.
"""
with pytest.raises(TypeError) as exc_info:
bl.debug(missing="event string!")
assert exc_info.type is TypeError
message = "missing 1 required positional argument: 'event'"
assert message in exc_info.value.args[0]
def test_filter_bound_exact_missing_event_string(self, bl):
"""
Missing event arg causes exception even at min_level.
"""
with pytest.raises(TypeError) as exc_info:
bl.info(missing="event string!")
assert exc_info.type is TypeError
message = "missing 1 required positional argument: 'event'"
assert message in exc_info.value.args[0]
def test_exception(self, bl, cl):
"""
exception ensures that exc_info is set to True, unless it's already
set.
"""
bl.exception("boom")
assert [("error", (), {"event": "boom", "exc_info": True})] == cl.calls
async def test_async_exception(self, bl, cl):
"""
aexception sets exc_info to current exception info, if it's not already
set.
"""
try:
raise Exception("boom")
except Exception as e:
await bl.aexception("foo")
exc = e
assert 1 == len(cl.calls)
assert isinstance(cl.calls[0][2]["exc_info"], tuple)
assert exc == cl.calls[0][2]["exc_info"][1]
def test_exception_positional_args(self, bl, cl):
"""
exception allows for positional args
"""
bl.exception("%s %s", "boom", "bastic")
assert [
("error", (), {"event": "boom bastic", "exc_info": True})
] == cl.calls
def test_exception_dict_args(self, bl, cl):
"""
exception allows for dict-based args
"""
bl.exception(
"%(action)s %(what)s", {"action": "boom", "what": "bastic"}
)
assert [
("error", (), {"event": "boom bastic", "exc_info": True})
] == cl.calls
async def test_aexception_positional_args(self, bl, cl):
"""
aexception allows for positional args
"""
await bl.aexception("%s %s", "boom", "bastic")
assert 1 == len(cl.calls)
assert "boom bastic" == cl.calls[0][2]["event"]
async def test_aexception_dict_args(self, bl, cl):
"""
aexception allows for dict-based args
"""
await bl.aexception(
"%(action)s %(what)s", {"action": "boom", "what": "bastic"}
)
assert 1 == len(cl.calls)
assert "boom bastic" == cl.calls[0][2]["event"]
async def test_async_exception_true(self, bl, cl):
"""
aexception replaces exc_info with current exception info, if exc_info
is True.
"""
try:
raise Exception("boom")
except Exception as e:
await bl.aexception("foo", exc_info=True)
exc = e
assert 1 == len(cl.calls)
assert isinstance(cl.calls[0][2]["exc_info"], tuple)
assert exc is cl.calls[0][2]["exc_info"][1]
def test_exception_passed(self, bl, cl):
"""
If exc_info already has a value, exception doesn't tamper with it.
"""
bl.exception("boom", exc_info=42)
assert [("error", (), {"event": "boom", "exc_info": 42})] == cl.calls
async def test_async_exception_passed(self, bl, cl):
"""
If exc_info already has a value (other than True), aexception doesn't
tamper with it.
"""
await bl.aexception("boom", exc_info=42)
assert [("error", (), {"event": "boom", "exc_info": 42})] == cl.calls
def test_exception_pass_exception(self, bl, cl):
"""
If an Exception is passed for the event, don't explode.
Not a documented feature, but a regression for some people. See #473.
"""
try:
raise Exception("foo")
except Exception as e:
bl.exception(e)
exc = e
assert exc is cl.calls[0][2]["event"]
@pytest.mark.parametrize("level", tuple(LEVEL_TO_NAME.keys()))
def test_pickle(self, level):
"""
FilteringBoundLogger are pickleable.
"""
bl = make_filtering_bound_logger(level)
assert bl == pickle.loads(pickle.dumps(bl))
def test_pos_args(self, bl, cl):
"""
Positional arguments are used for string interpolation.
"""
bl.info("hello %s -- %d!", "world", 42)
assert [("info", (), {"event": "hello world -- 42!"})] == cl.calls
async def test_async_pos_args(self, bl, cl):
"""
Positional arguments are used for string interpolation.
"""
await bl.ainfo("hello %s -- %d!", "world", 42)
assert [("info", (), {"event": "hello world -- 42!"})] == cl.calls
@pytest.mark.parametrize(
("meth", "args"),
[
("aexception", ("ev",)),
("ainfo", ("ev",)),
("alog", (logging.INFO, "ev")),
],
)
async def test_async_contextvars_merged(self, meth, args, cl):
"""
Contextvars are merged into the event dict.
"""
clear_contextvars()
bl = make_filtering_bound_logger(logging.INFO)(
cl, [merge_contextvars], {}
)
bind_contextvars(context_included="yep")
await getattr(bl, meth)(*args)
assert len(cl.calls) == 1
assert "context_included" in cl.calls[0].kwargs
def test_log_percent(self, bl, cl):
"""
As long as there's no positional args passed, logging % is possible.
"""
bl.info("hey %! %%!")
assert [("info", (), {"event": "hey %! %%!"})] == cl.calls
def test_log_level_str(self):
"""
*min_level* can be a string and the case doesn't matter.
"""
bl = make_filtering_bound_logger("wArNiNg")
assert bl.warning is not _nop
assert bl.info is _nop
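For context, a minimal usage sketch of the behaviour these tests exercise, assuming structlog is installed and configured with a filtering bound logger at INFO level (the messages below are illustrative only):
import logging

import structlog

structlog.configure(
    wrapper_class=structlog.make_filtering_bound_logger(logging.INFO),
)
log = structlog.get_logger()
log.info("answer is %d.", 42)  # lazily %-interpolated to "answer is 42."
log.debug("filtered out")      # below min_level, so this call is a no-op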
|
TestNativeFilteringLogger
|
python
|
sympy__sympy
|
sympy/tensor/array/expressions/array_expressions.py
|
{
"start": 60626,
"end": 62284
}
|
class ____(_CodegenArrayAbstract):
"""
Reshape the dimensions of an array expression.
Examples
========
>>> from sympy.tensor.array.expressions import ArraySymbol, Reshape
>>> A = ArraySymbol("A", (6,))
>>> A.shape
(6,)
>>> Reshape(A, (3, 2)).shape
(3, 2)
Check the component-explicit forms:
>>> A.as_explicit()
[A[0], A[1], A[2], A[3], A[4], A[5]]
>>> Reshape(A, (3, 2)).as_explicit()
[[A[0], A[1]], [A[2], A[3]], [A[4], A[5]]]
"""
def __new__(cls, expr, shape):
expr = _sympify(expr)
if not isinstance(shape, Tuple):
shape = Tuple(*shape)
if Equality(Mul.fromiter(expr.shape), Mul.fromiter(shape)) == False:
raise ValueError("shape mismatch")
obj = Expr.__new__(cls, expr, shape)
obj._shape = tuple(shape)
obj._expr = expr
return obj
@property
def shape(self):
return self._shape
@property
def expr(self):
return self._expr
def doit(self, *args, **kwargs):
if kwargs.get("deep", True):
expr = self.expr.doit(*args, **kwargs)
else:
expr = self.expr
if isinstance(expr, (MatrixBase, NDimArray)):
return expr.reshape(*self.shape)
return Reshape(expr, self.shape)
def as_explicit(self):
ee = self.expr
if hasattr(ee, "as_explicit"):
ee = ee.as_explicit()
if isinstance(ee, MatrixBase):
from sympy import Array
ee = Array(ee)
elif isinstance(ee, MatrixExpr):
return self
return ee.reshape(*self.shape)
|
Reshape
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataprep.py
|
{
"start": 1374,
"end": 2527
}
|
class ____(GoogleCloudBaseOperator):
"""
Get information about the batch jobs within a Cloud Dataprep job.
API documentation: https://clouddataprep.com/documentation/api#section/Overview.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataprepGetJobsForJobGroupOperator`
:param job_group_id: The ID of the job group that will be requested
"""
template_fields: Sequence[str] = ("job_group_id",)
def __init__(
self,
*,
dataprep_conn_id: str = "dataprep_default",
job_group_id: int | str,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataprep_conn_id = dataprep_conn_id
self.job_group_id = job_group_id
def execute(self, context: Context) -> dict:
self.log.info("Fetching data for job with id: %d ...", self.job_group_id)
hook = GoogleDataprepHook(
dataprep_conn_id=self.dataprep_conn_id,
)
response = hook.get_jobs_for_job_group(job_id=int(self.job_group_id))
return response
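A hedged usage sketch of wiring this operator into a DAG; the DAG id, task id, and job group id are placeholders, and Airflow 2.4+ is assumed for the ``schedule`` argument:
from datetime import datetime

from airflow import DAG

with DAG(
    dag_id="example_dataprep",  # placeholder DAG id
    start_date=datetime(2024, 1, 1),
    schedule=None,
) as dag:
    get_jobs = DataprepGetJobsForJobGroupOperator(
        task_id="get_jobs_for_job_group",
        job_group_id=1312,  # placeholder job group id
    )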
|
DataprepGetJobsForJobGroupOperator
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/workers.py
|
{
"start": 74658,
"end": 83491
}
|
class ____(Request):
"""
Called periodically by the worker daemon to report machine status
:param worker: Worker id.
:type worker: str
:param task: ID of a task currently being run by the worker. If no task is
sent, the worker's task field will be cleared.
:type task: str
:param queue: ID of the queue from which task was received. If no queue is
sent, the worker's queue field will be cleared.
:type queue: str
:param queues: List of queue IDs on which the worker is listening. If null, the
worker's queues list will not be updated.
:type queues: Sequence[str]
:param timestamp: UNIX time in seconds since epoch.
:type timestamp: int
:param machine_stats: The machine statistics.
:type machine_stats: MachineStats
:param tags: New user tags for the worker
:type tags: Sequence[str]
"""
_service = "workers"
_action = "status_report"
_version = "2.13"
_schema = {
"definitions": {
"machine_stats": {
"properties": {
"cpu_temperature": {
"description": "CPU temperature",
"items": {"type": "number"},
"type": ["array", "null"],
},
"cpu_usage": {
"description": "Average CPU usage per core",
"items": {"type": "number"},
"type": ["array", "null"],
},
"disk_free_home": {
"description": "Mbytes free space of /home drive",
"type": ["integer", "null"],
},
"disk_free_temp": {
"description": "Mbytes free space of /tmp drive",
"type": ["integer", "null"],
},
"disk_read": {
"description": "Mbytes read per second",
"type": ["integer", "null"],
},
"disk_write": {
"description": "Mbytes write per second",
"type": ["integer", "null"],
},
"gpu_memory_free": {
"description": "GPU free memory MBs",
"items": {"type": "integer"},
"type": ["array", "null"],
},
"gpu_memory_used": {
"description": "GPU used memory MBs",
"items": {"type": "integer"},
"type": ["array", "null"],
},
"gpu_temperature": {
"description": "GPU temperature",
"items": {"type": "number"},
"type": ["array", "null"],
},
"gpu_usage": {
"description": "Average GPU usage per GPU card",
"items": {"type": "number"},
"type": ["array", "null"],
},
"memory_free": {
"description": "Free memory MBs",
"type": ["integer", "null"],
},
"memory_used": {
"description": "Used memory MBs",
"type": ["integer", "null"],
},
"network_rx": {
"description": "Mbytes per second",
"type": ["integer", "null"],
},
"network_tx": {
"description": "Mbytes per second",
"type": ["integer", "null"],
},
},
"type": "object",
}
},
"properties": {
"machine_stats": {
"$ref": "#/definitions/machine_stats",
"description": "The machine statistics.",
},
"queue": {
"description": "ID of the queue from which task was received. If no queue is sent, the worker's queue field will be cleared.",
"type": "string",
},
"queues": {
"description": "List of queue IDs on which the worker is listening. If null, the worker's queues list will not be updated.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "New user tags for the worker",
"items": {"type": "string"},
"type": "array",
},
"task": {
"description": "ID of a task currently being run by the worker. If no task is sent, the worker's task field will be cleared.",
"type": "string",
},
"timestamp": {
"description": "UNIX time in seconds since epoch.",
"type": "integer",
},
"worker": {"description": "Worker id.", "type": "string"},
},
"required": ["worker", "timestamp"],
"type": "object",
}
def __init__(
self,
worker: str,
timestamp: int,
task: Optional[str] = None,
queue: Optional[str] = None,
queues: Optional[List[str]] = None,
machine_stats: Any = None,
tags: Optional[List[str]] = None,
**kwargs: Any
) -> None:
super(StatusReportRequest, self).__init__(**kwargs)
self.worker = worker
self.task = task
self.queue = queue
self.queues = queues
self.timestamp = timestamp
self.machine_stats = machine_stats
self.tags = tags
@schema_property("worker")
def worker(self) -> str:
return self._property_worker
@worker.setter
def worker(self, value: str) -> None:
if value is None:
self._property_worker = None
return
self.assert_isinstance(value, "worker", six.string_types)
self._property_worker = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("queue")
def queue(self) -> Optional[str]:
return self._property_queue
@queue.setter
def queue(self, value: Optional[str]) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("queues")
def queues(self) -> Optional[List[str]]:
return self._property_queues
@queues.setter
def queues(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
self.assert_isinstance(value, "queues", six.string_types, is_array=True)
self._property_queues = value
@schema_property("timestamp")
def timestamp(self) -> int:
return self._property_timestamp
@timestamp.setter
def timestamp(self, value: int) -> None:
if value is None:
self._property_timestamp = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "timestamp", six.integer_types)
self._property_timestamp = value
@schema_property("machine_stats")
def machine_stats(self) -> Any:
return self._property_machine_stats
@machine_stats.setter
def machine_stats(self, value: Any) -> None:
if value is None:
self._property_machine_stats = None
return
if isinstance(value, dict):
value = MachineStats.from_dict(value)
else:
self.assert_isinstance(value, "machine_stats", MachineStats)
self._property_machine_stats = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
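A minimal construction sketch; the worker id, queue names, and statistics are placeholders, and sending the request is left to the surrounding API client:
import time

req = StatusReportRequest(
    worker="worker-01",
    timestamp=int(time.time()),
    queues=["default", "gpu"],
    machine_stats={"cpu_usage": [12.5, 7.0], "memory_used": 2048},
)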
|
StatusReportRequest
|
python
|
lazyprogrammer__machine_learning_examples
|
rl2/cartpole/pg_theano.py
|
{
"start": 767,
"end": 1234
}
|
class ____:
def __init__(self, M1, M2, f=T.tanh, use_bias=True):
self.W = theano.shared(np.random.randn(M1, M2) * np.sqrt(2 / M1))
self.params = [self.W]
self.use_bias = use_bias
if use_bias:
self.b = theano.shared(np.zeros(M2))
self.params += [self.b]
self.f = f
def forward(self, X):
if self.use_bias:
a = X.dot(self.W) + self.b
else:
a = X.dot(self.W)
return self.f(a)
# approximates pi(a | s)
|
HiddenLayer
|
python
|
pallets__jinja
|
tests/test_lexnparse.py
|
{
"start": 10407,
"end": 19362
}
|
class ____:
def test_call(self, env):
env = Environment()
env.globals["foo"] = lambda a, b, c, e, g: a + b + c + e + g
tmpl = env.from_string("{{ foo('a', c='d', e='f', *['b'], **{'g': 'h'}) }}")
assert tmpl.render() == "abdfh"
def test_slicing(self, env):
tmpl = env.from_string("{{ [1, 2, 3][:] }}|{{ [1, 2, 3][::-1] }}")
assert tmpl.render() == "[1, 2, 3]|[3, 2, 1]"
def test_attr(self, env):
tmpl = env.from_string("{{ foo.bar }}|{{ foo['bar'] }}")
assert tmpl.render(foo={"bar": 42}) == "42|42"
def test_subscript(self, env):
tmpl = env.from_string("{{ foo[0] }}|{{ foo[-1] }}")
assert tmpl.render(foo=[0, 1, 2]) == "0|2"
def test_tuple(self, env):
tmpl = env.from_string("{{ () }}|{{ (1,) }}|{{ (1, 2) }}")
assert tmpl.render() == "()|(1,)|(1, 2)"
def test_math(self, env):
tmpl = env.from_string("{{ (1 + 1 * 2) - 3 / 2 }}|{{ 2**3 }}")
assert tmpl.render() == "1.5|8"
def test_div(self, env):
tmpl = env.from_string("{{ 3 // 2 }}|{{ 3 / 2 }}|{{ 3 % 2 }}")
assert tmpl.render() == "1|1.5|1"
def test_unary(self, env):
tmpl = env.from_string("{{ +3 }}|{{ -3 }}")
assert tmpl.render() == "3|-3"
def test_concat(self, env):
tmpl = env.from_string("{{ [1, 2] ~ 'foo' }}")
assert tmpl.render() == "[1, 2]foo"
@pytest.mark.parametrize(
("a", "op", "b"),
[
(1, ">", 0),
(1, ">=", 1),
(2, "<", 3),
(3, "<=", 4),
(4, "==", 4),
(4, "!=", 5),
],
)
def test_compare(self, env, a, op, b):
t = env.from_string(f"{{{{ {a} {op} {b} }}}}")
assert t.render() == "True"
def test_compare_parens(self, env):
t = env.from_string("{{ i * (j < 5) }}")
assert t.render(i=2, j=3) == "2"
@pytest.mark.parametrize(
("src", "expect"),
[
("{{ 4 < 2 < 3 }}", "False"),
("{{ a < b < c }}", "False"),
("{{ 4 > 2 > 3 }}", "False"),
("{{ a > b > c }}", "False"),
("{{ 4 > 2 < 3 }}", "True"),
("{{ a > b < c }}", "True"),
],
)
def test_compare_compound(self, env, src, expect):
t = env.from_string(src)
assert t.render(a=4, b=2, c=3) == expect
def test_inop(self, env):
tmpl = env.from_string("{{ 1 in [1, 2, 3] }}|{{ 1 not in [1, 2, 3] }}")
assert tmpl.render() == "True|False"
@pytest.mark.parametrize("value", ("[]", "{}", "()"))
def test_collection_literal(self, env, value):
t = env.from_string(f"{{{{ {value} }}}}")
assert t.render() == value
@pytest.mark.parametrize(
("value", "expect"),
(
("1", "1"),
("123", "123"),
("12_34_56", "123456"),
("1.2", "1.2"),
("34.56", "34.56"),
("3_4.5_6", "34.56"),
("1e0", "1.0"),
("10e1", "100.0"),
("2.5e100", "2.5e+100"),
("2.5e+100", "2.5e+100"),
("25.6e-10", "2.56e-09"),
("1_2.3_4e5_6", "1.234e+57"),
("0", "0"),
("0_00", "0"),
("0b1001_1111", "159"),
("0o123", "83"),
("0o1_23", "83"),
("0x123abc", "1194684"),
("0x12_3abc", "1194684"),
),
)
def test_numeric_literal(self, env, value, expect):
t = env.from_string(f"{{{{ {value} }}}}")
assert t.render() == expect
def test_bool(self, env):
tmpl = env.from_string(
"{{ true and false }}|{{ false or true }}|{{ not false }}"
)
assert tmpl.render() == "False|True|True"
def test_grouping(self, env):
tmpl = env.from_string(
"{{ (true and false) or (false and true) and not false }}"
)
assert tmpl.render() == "False"
def test_django_attr(self, env):
tmpl = env.from_string("{{ [1, 2, 3].0 }}|{{ [[1]].0.0 }}")
assert tmpl.render() == "1|1"
def test_conditional_expression(self, env):
tmpl = env.from_string("""{{ 0 if true else 1 }}""")
assert tmpl.render() == "0"
def test_short_conditional_expression(self, env):
tmpl = env.from_string("<{{ 1 if false }}>")
assert tmpl.render() == "<>"
tmpl = env.from_string("<{{ (1 if false).bar }}>")
pytest.raises(UndefinedError, tmpl.render)
def test_filter_priority(self, env):
tmpl = env.from_string('{{ "foo"|upper + "bar"|upper }}')
assert tmpl.render() == "FOOBAR"
def test_function_calls(self, env):
tests = [
(True, "*foo, bar"),
(True, "*foo, *bar"),
(True, "**foo, *bar"),
(True, "**foo, bar"),
(True, "**foo, **bar"),
(True, "**foo, bar=42"),
(False, "foo, bar"),
(False, "foo, bar=42"),
(False, "foo, bar=23, *args"),
(False, "foo, *args, bar=23"),
(False, "a, b=c, *d, **e"),
(False, "*foo, bar=42"),
(False, "*foo, **bar"),
(False, "*foo, bar=42, **baz"),
(False, "foo, *args, bar=23, **baz"),
]
for should_fail, sig in tests:
if should_fail:
with pytest.raises(TemplateSyntaxError):
env.from_string(f"{{{{ foo({sig}) }}}}")
else:
env.from_string(f"foo({sig})")
def test_tuple_expr(self, env):
for tmpl in [
"{{ () }}",
"{{ (1, 2) }}",
"{{ (1, 2,) }}",
"{{ 1, }}",
"{{ 1, 2 }}",
"{% for foo, bar in seq %}...{% endfor %}",
"{% for x in foo, bar %}...{% endfor %}",
"{% for x in foo, %}...{% endfor %}",
]:
assert env.from_string(tmpl)
def test_trailing_comma(self, env):
tmpl = env.from_string("{{ (1, 2,) }}|{{ [1, 2,] }}|{{ {1: 2,} }}")
assert tmpl.render().lower() == "(1, 2)|[1, 2]|{1: 2}"
def test_block_end_name(self, env):
env.from_string("{% block foo %}...{% endblock foo %}")
pytest.raises(
TemplateSyntaxError, env.from_string, "{% block x %}{% endblock y %}"
)
def test_constant_casing(self, env):
for const in True, False, None:
const = str(const)
tmpl = env.from_string(
f"{{{{ {const} }}}}|{{{{ {const.lower()} }}}}|{{{{ {const.upper()} }}}}"
)
assert tmpl.render() == f"{const}|{const}|"
def test_test_chaining(self, env):
pytest.raises(
TemplateSyntaxError, env.from_string, "{{ foo is string is sequence }}"
)
assert env.from_string("{{ 42 is string or 42 is number }}").render() == "True"
def test_string_concatenation(self, env):
tmpl = env.from_string('{{ "foo" "bar" "baz" }}')
assert tmpl.render() == "foobarbaz"
def test_notin(self, env):
bar = range(100)
tmpl = env.from_string("""{{ not 42 in bar }}""")
assert tmpl.render(bar=bar) == "False"
def test_operator_precedence(self, env):
tmpl = env.from_string("""{{ 2 * 3 + 4 % 2 + 1 - 2 }}""")
assert tmpl.render() == "5"
def test_implicit_subscribed_tuple(self, env):
class Foo:
def __getitem__(self, x):
return x
t = env.from_string("{{ foo[1, 2] }}")
assert t.render(foo=Foo()) == "(1, 2)"
def test_raw2(self, env):
tmpl = env.from_string("{% raw %}{{ FOO }} and {% BAR %}{% endraw %}")
assert tmpl.render() == "{{ FOO }} and {% BAR %}"
def test_const(self, env):
tmpl = env.from_string(
"{{ true }}|{{ false }}|{{ none }}|"
"{{ none is defined }}|{{ missing is defined }}"
)
assert tmpl.render() == "True|False|None|True|False"
def test_neg_filter_priority(self, env):
node = env.parse("{{ -1|foo }}")
assert isinstance(node.body[0].nodes[0], nodes.Filter)
assert isinstance(node.body[0].nodes[0].node, nodes.Neg)
def test_const_assign(self, env):
constass1 = """{% set true = 42 %}"""
constass2 = """{% for none in seq %}{% endfor %}"""
for tmpl in constass1, constass2:
pytest.raises(TemplateSyntaxError, env.from_string, tmpl)
def test_localset(self, env):
tmpl = env.from_string(
"""{% set foo = 0 %}\
{% for item in [1, 2] %}{% set foo = 1 %}{% endfor %}\
{{ foo }}"""
)
assert tmpl.render() == "0"
def test_parse_unary(self, env):
tmpl = env.from_string('{{ -foo["bar"] }}')
assert tmpl.render(foo={"bar": 42}) == "-42"
tmpl = env.from_string('{{ -foo["bar"]|abs }}')
assert tmpl.render(foo={"bar": 42}) == "42"
|
TestSyntax
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/sharedstrings/test_write_sst.py
|
{
"start": 328,
"end": 1431
}
|
class ____(unittest.TestCase):
"""
Test the SharedStrings _write_sst() method.
"""
def setUp(self):
self.fh = StringIO()
self.sharedstrings = SharedStrings()
self.sharedstrings._set_filehandle(self.fh)
def test_write_sst(self):
"""Test the _write_sst() method"""
string_table = SharedStringTable()
# Add some strings and check the returned indices.
string_table._get_shared_string_index("neptune")
string_table._get_shared_string_index("neptune")
string_table._get_shared_string_index("neptune")
string_table._get_shared_string_index("mars")
string_table._get_shared_string_index("venus")
string_table._get_shared_string_index("mars")
string_table._get_shared_string_index("venus")
self.sharedstrings.string_table = string_table
self.sharedstrings._write_sst()
exp = """<sst xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" count="7" uniqueCount="3">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestWriteSst
|
python
|
django__django
|
django/template/loader_tags.py
|
{
"start": 2619,
"end": 5780
}
|
class ____(Node):
must_be_first = True
context_key = "extends_context"
def __init__(self, nodelist, parent_name, template_dirs=None):
self.nodelist = nodelist
self.parent_name = parent_name
self.template_dirs = template_dirs
self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)}
def __repr__(self):
return "<%s: extends %s>" % (self.__class__.__name__, self.parent_name.token)
def find_template(self, template_name, context):
"""
This is a wrapper around engine.find_template(). A history is kept in
the render_context attribute between successive extends calls and
passed as the skip argument. This enables extends to work recursively
without extending the same template twice.
"""
history = context.render_context.setdefault(
self.context_key,
[self.origin],
)
template, origin = context.template.engine.find_template(
template_name,
skip=history,
)
history.append(origin)
return template
def get_parent(self, context):
parent = self.parent_name.resolve(context)
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name.filters or isinstance(self.parent_name.var, Variable):
error_msg += (
" Got this from the '%s' variable." % self.parent_name.token
)
raise TemplateSyntaxError(error_msg)
if isinstance(parent, Template):
# parent is a django.template.Template
return parent
if isinstance(getattr(parent, "template", None), Template):
# parent is a django.template.backends.django.Template
return parent.template
return self.find_template(parent, context)
def render(self, context):
compiled_parent = self.get_parent(context)
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
# Add the block nodes from this node to the block context
block_context.add_blocks(self.blocks)
# If this block's parent doesn't have an extends node it is the root,
# and its block nodes also need to be added to the block context.
for node in compiled_parent.nodelist:
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
if not isinstance(node, ExtendsNode):
blocks = {
n.name: n
for n in compiled_parent.nodelist.get_nodes_by_type(BlockNode)
}
block_context.add_blocks(blocks)
break
# Call Template._render explicitly so the parser context stays
# the same.
with context.render_context.push_state(compiled_parent, isolated_context=False):
return compiled_parent._render(context)
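A small standalone sketch of the template inheritance this node implements, using an in-memory loader; the template names and contents are invented for illustration:
from django.template import Context, Engine

engine = Engine(
    loaders=[
        (
            "django.template.loaders.locmem.Loader",
            {
                "base.html": "<title>{% block title %}Base{% endblock %}</title>",
                "child.html": "{% extends 'base.html' %}{% block title %}Child{% endblock %}",
            },
        )
    ]
)
print(engine.get_template("child.html").render(Context()))  # <title>Child</title>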
|
ExtendsNode
|
python
|
kamyu104__LeetCode-Solutions
|
Python/average-of-levels-in-binary-tree.py
|
{
"start": 30,
"end": 600
}
|
class ____(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
result = []
q = [root]
while q:
total, count = 0, 0
next_q = []
for n in q:
total += n.val
count += 1
if n.left:
next_q.append(n.left)
if n.right:
next_q.append(n.right)
q = next_q
result.append(float(total) / count)
return result
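A quick check of the traversal, using a minimal TreeNode stand-in since the original definition is not included in this snippet:
class TreeNode(object):
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
print(Solution().averageOfLevels(root))  # [3.0, 14.5, 11.0]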
|
Solution
|
python
|
pytorch__pytorch
|
test/onnx/model_defs/rnn_model_with_packed_sequence.py
|
{
"start": 67,
"end": 611
}
|
class ____(nn.Module):
def __init__(self, model, batch_first):
super().__init__()
self.model = model
self.batch_first = batch_first
def forward(self, input, *args):
args, seq_lengths = args[:-1], args[-1]
input = rnn_utils.pack_padded_sequence(input, seq_lengths, self.batch_first)
rets = self.model(input, *args)
ret, rets = rets[0], rets[1:]
ret, _ = rnn_utils.pad_packed_sequence(ret, self.batch_first)
return tuple([ret] + list(rets))
|
RnnModelWithPackedSequence
|
python
|
tensorflow__tensorflow
|
tensorflow/python/lib/io/tf_record_test.py
|
{
"start": 10409,
"end": 12875
}
|
class ____(TFCompressionTestCase):
"""TFRecordWriter Zlib test"""
def testZLibFlushRecord(self):
"""test ZLib Flush Record"""
original = [b"small record"]
fn = self._WriteRecordsToFile(original, "small_record")
with open(fn, "rb") as h:
buff = h.read()
# creating more blocks and trailing blocks shouldn't break reads
compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)
output = b""
for c in buff:
if isinstance(c, int):
c = six.int2byte(c)
output += compressor.compress(c)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FINISH)
# overwrite the original file with the compressed data
with open(fn, "wb") as h:
h.write(output)
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
actual = list(tf_record.tf_record_iterator(fn, options=options))
self.assertEqual(actual, original)
def testZlibReadWrite(self):
"""Verify that files produced are zlib compatible."""
original = [b"foo", b"bar"]
fn = self._WriteRecordsToFile(original, "zlib_read_write.tfrecord")
zfn = self._ZlibCompressFile(fn, "zlib_read_write.tfrecord.z")
# read the compressed contents and verify.
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
actual = list(tf_record.tf_record_iterator(zfn, options=options))
self.assertEqual(actual, original)
def testZlibReadWriteLarge(self):
"""Verify that writing large contents also works."""
# Make it large (about 5MB)
original = [_TEXT * 10240]
fn = self._WriteRecordsToFile(original, "zlib_read_write_large.tfrecord")
zfn = self._ZlibCompressFile(fn, "zlib_read_write_large.tfrecord.z")
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
actual = list(tf_record.tf_record_iterator(zfn, options=options))
self.assertEqual(actual, original)
def testGzipReadWrite(self):
"""Verify that files produced are gzip compatible."""
original = [b"foo", b"bar"]
fn = self._WriteRecordsToFile(original, "gzip_read_write.tfrecord")
gzfn = self._GzipCompressFile(fn, "tfrecord.gz")
options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
actual = list(tf_record.tf_record_iterator(gzfn, options=options))
self.assertEqual(actual, original)
|
TFRecordWriterZlibTest
|
python
|
realpython__materials
|
emacs-the-best-python-editor/PyEval/pyeval_operator.py
|
{
"start": 564,
"end": 1431
}
|
class ____:
"""
Common operator class used by the evaluator.
"""
def __init__(self, operator_string):
"""Create a new operator object."""
# String to hold the operator
self._op_string = operator_string
# Integer to hold the precedence
self._op_prec = PRECEDENCE[operator_string]
# Integer to hold the number of operands to use
self._op_cnt = OPERAND_COUNT[operator_string]
@property
def op_string(self):
return self._op_string
@op_string.setter
def op_string(self, new_op_string):
self._op_string = new_op_string
self._op_prec = PRECEDENCE[self._op_string]
self._op_cnt = OPERAND_COUNT[self._op_string]
@property
def count(self):
return self._op_cnt
@property
def precedence(self):
return self._op_prec
|
Operator
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/redshift_cluster.py
|
{
"start": 23231,
"end": 28439
}
|
class ____(AwsBaseOperator[RedshiftHook]):
"""
Resume a paused AWS Redshift Cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftResumeClusterOperator`
:param cluster_identifier: Unique identifier of the AWS Redshift cluster
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check cluster state
:param max_attempts: The maximum number of attempts to check the state of the cluster.
:param wait_for_completion: If True, the operator will wait for the cluster to be in the
`resumed` state. Default is False.
:param deferrable: If True, the operator will run as a deferrable operator.
"""
template_fields: Sequence[str] = aws_template_fields(
"cluster_identifier",
)
ui_color = "#eeaa11"
ui_fgcolor = "#ffffff"
aws_hook_class = RedshiftHook
def __init__(
self,
*,
cluster_identifier: str,
wait_for_completion: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 30,
max_attempts: int = 30,
**kwargs,
):
super().__init__(**kwargs)
self.cluster_identifier = cluster_identifier
self.wait_for_completion = wait_for_completion
self.deferrable = deferrable
self.max_attempts = max_attempts
self.poll_interval = poll_interval
# These parameters are used to address an issue with the boto3 API where the API
# prematurely reports the cluster as available to receive requests. This causes the cluster
# to reject initial attempts to resume the cluster despite reporting the correct state.
self._remaining_attempts = 10
self._attempt_interval = 15
def execute(self, context: Context):
self.log.info("Starting resume cluster")
while self._remaining_attempts:
try:
self.hook.conn.resume_cluster(ClusterIdentifier=self.cluster_identifier)
break
except self.hook.conn.exceptions.InvalidClusterStateFault as error:
self._remaining_attempts -= 1
if self._remaining_attempts:
self.log.error(
"Unable to resume cluster. %d attempts remaining.", self._remaining_attempts
)
time.sleep(self._attempt_interval)
else:
raise error
if self.wait_for_completion:
if self.deferrable:
cluster_state = self.hook.cluster_status(cluster_identifier=self.cluster_identifier)
if cluster_state == "available":
self.log.info("Resumed cluster successfully")
elif cluster_state == "deleting":
raise AirflowException(
    f"Unable to resume cluster since cluster is currently in status: {cluster_state}"
)
else:
self.defer(
trigger=RedshiftResumeClusterTrigger(
cluster_identifier=self.cluster_identifier,
waiter_delay=self.poll_interval,
waiter_max_attempts=self.max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.max_attempts * self.poll_interval + 60),
)
else:
waiter = self.hook.get_waiter("cluster_resumed")
waiter.wait(
ClusterIdentifier=self.cluster_identifier,
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempts,
},
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error resuming cluster: {validated_event}")
self.log.info("Resumed cluster successfully")
|
RedshiftResumeClusterOperator
|
python
|
pytorch__pytorch
|
test/mobile/model_test/nn_ops.py
|
{
"start": 6534,
"end": 7374
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.rnn = nn.ModuleList(
[
nn.RNN(4, 8, 2),
nn.RNNCell(4, 8),
]
)
self.gru = nn.ModuleList([nn.GRU(4, 8, 2), nn.GRUCell(4, 8)])
self.lstm = nn.ModuleList(
[
nn.LSTM(4, 8, 2),
nn.LSTMCell(4, 8),
]
)
def forward(self):
input = torch.randn(5, 3, 4)
h = torch.randn(2, 3, 8)
c = torch.randn(2, 3, 8)
r = self.rnn[0](input, h)
r = self.rnn[1](input[0], h[0])
r = self.gru[0](input, h)
r = self.gru[1](input[0], h[0])
r = self.lstm[0](input, (h, c))
r = self.lstm[1](input[0], (h[0], c[0]))
return len(r)
|
NNRecurrentModule
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py
|
{
"start": 3282,
"end": 4314
}
|
class ____(Benchmark):
r"""
Cigar objective function.
This class defines the Cigar [1]_ global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\text{Cigar}}(x) = x_1^2 + 10^6\sum_{i=2}^{n} x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-100, 100]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return x[0] ** 2 + 1e6 * sum(x[1:] ** 2)
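A quick numerical sanity check of the objective at an arbitrary point (not part of the benchmark suite itself):
import numpy as np

x = np.array([2.0, 1.0, 3.0])
# f(x) = x1**2 + 1e6 * (x2**2 + x3**2) = 4 + 1e6 * 10
print(x[0] ** 2 + 1e6 * np.sum(x[1:] ** 2))  # 10000004.0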
|
Cigar
|
python
|
python-poetry__poetry
|
src/poetry/utils/env/mock_env.py
|
{
"start": 305,
"end": 2633
}
|
class ____(NullEnv):
def __init__(
self,
version_info: tuple[int, int, int] | PythonVersion = (3, 7, 0),
*,
python_implementation: str = "CPython",
platform: str = "darwin",
platform_machine: str = "amd64",
os_name: str = "posix",
is_venv: bool = False,
sys_path: list[str] | None = None,
marker_env: dict[str, Any] | None = None,
supported_tags: list[Tag] | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if len(version_info) == 3:
version_info = (*version_info, "final", 0)
self._version_info = version_info
self._python_implementation = python_implementation
self._platform = platform
self._platform_machine = platform_machine
self._os_name = os_name
self._is_venv = is_venv
self._sys_path = sys_path
self._mock_marker_env = marker_env
self._supported_tags = supported_tags
@property
def platform(self) -> str:
return self._platform
@property
def platform_machine(self) -> str:
return self._platform_machine
@property
def os(self) -> str:
return self._os_name
@property
def sys_path(self) -> list[str]:
if self._sys_path is None:
return super().sys_path
return self._sys_path
def get_marker_env(self) -> MarkerEnv:
marker_env = super().get_marker_env()
marker_env["version_info"] = self._version_info
marker_env["python_version"] = ".".join(str(v) for v in self._version_info[:2])
marker_env["python_full_version"] = ".".join(
str(v) for v in self._version_info[:3]
)
marker_env["sys_platform"] = self._platform
marker_env["platform_machine"] = self._platform_machine
marker_env["interpreter_name"] = self._python_implementation.lower()
marker_env["interpreter_version"] = "cp" + "".join(
str(v) for v in self._version_info[:2]
)
if self._mock_marker_env is not None:
for key, value in self._mock_marker_env.items():
marker_env[key] = value # type: ignore[literal-required]
return marker_env
def is_venv(self) -> bool:
return self._is_venv
|
MockEnv
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/_config.py
|
{
"start": 64676,
"end": 65470
}
|
class ____(TypedDict, total=False):
"""
:class:`altair.BindCheckbox` ``TypedDict`` wrapper.
Parameters
----------
input
debounce
If defined, delays event handling until the specified milliseconds have elapsed
since the last event was fired.
element
An optional CSS selector string indicating the parent element to which the input
element should be added. By default, all input elements are added within the parent
container of the Vega view.
name
By default, the signal name is used to label input elements. This ``name`` property
can be used instead to specify a custom label for the bound signal.
"""
input: Literal["checkbox"]
debounce: float
element: str
name: str
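A minimal sketch of populating this TypedDict; the label and debounce value are arbitrary:
checkbox_binding: BindCheckboxKwds = {
    "input": "checkbox",
    "name": "Show regression line",  # arbitrary label
    "debounce": 100,
}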
|
BindCheckboxKwds
|
python
|
more-itertools__more-itertools
|
tests/test_more.py
|
{
"start": 221048,
"end": 222261
}
|
class ____(TestCase):
def test_concurrent_calls(self):
unique = 10 # Number of distinct counters
repetitions = 5 # Number of times each counter is used
limit = 100 # Calls per counter per repetition
@mi.synchronized
def atomic_counter():
# This is a generator so that non-concurrent calls are detectable.
# To make calls while running more likely, this code uses random
# time delays.
i = 0
while True:
yield i
next_i = i + 1
sleep(random() / 1000)
i = next_i
def consumer(counter):
for i in range(limit):
next(counter)
unique_counters = [atomic_counter() for _ in range(unique)]
counters = unique_counters * repetitions
workers = [
Thread(target=consumer, args=[counter]) for counter in counters
]
for worker in workers:
worker.start()
for worker in workers:
worker.join()
self.assertEqual(
{next(counter) for counter in unique_counters},
{limit * repetitions},
)
|
TestSynchronized
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/control_flow/cond_v2_test.py
|
{
"start": 48258,
"end": 50241
}
|
class ____(test.TestCase):
def testCollectionIntValueAccessInCond(self):
"""Read values from graph collections inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = 2
y = 5
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_const = constant_op.constant(ops.get_collection("x")[0])
y_const = constant_op.constant(ops.get_collection("y")[0])
return math_ops.add(x_const, y_const)
cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
self.assertEqual(self.evaluate(cnd), 7)
def testCollectionTensorValueAccessInCond(self):
"""Read tensors from collections inside of cond_v2 & use them."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_read = ops.get_collection("x")[0]
y_read = ops.get_collection("y")[0]
return math_ops.add(x_read, y_read)
cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
self.assertEqual(self.evaluate(cnd), 7)
def testCollectionIntValueWriteInCond(self):
"""Make sure Int writes to collections work inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
def true_fn():
z = math_ops.add(x, y)
ops.add_to_collection("z", 7)
return math_ops.mul(x, z)
def false_fn():
z = math_ops.add(x, y)
return math_ops.mul(x, z)
cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
self.assertEqual(self.evaluate(cnd), 14)
read_z_collection = ops.get_collection("z")
self.assertEqual(read_z_collection, [7])
|
CondV2CollectionTest
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/forward_multi_value/package.py
|
{
"start": 217,
"end": 860
}
|
class ____(Package):
"""A package that forwards the value of a multi-valued variant to a dependency"""
homepage = "http://www.spack.llnl.gov"
url = "http://www.spack.llnl.gov/mpileaks-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
variant("cuda", default=False, description="Build with CUDA")
variant("cuda_arch", values=any_combination_of("10", "11"), when="+cuda")
depends_on("dependency-mv")
requires("^dependency-mv cuda_arch=10", when="+cuda cuda_arch=10 ^dependency-mv+cuda")
requires("^dependency-mv cuda_arch=11", when="+cuda cuda_arch=11 ^dependency-mv+cuda")
|
ForwardMultiValue
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py
|
{
"start": 8868,
"end": 9423
}
|
class ____(BaseModel):
class Config:
extra = Extra.forbid
suite: Literal["unitTests", "integrationTests", "acceptanceTests", "liveTests"] = (
Field(..., description="Name of the configured test suite")
)
testSecrets: Optional[List[Secret]] = Field(
None, description="List of secrets required to run the test suite"
)
testConnections: Optional[List[TestConnections]] = Field(
None,
description="List of sandbox cloud connections that tests can be run against",
)
|
ConnectorTestSuiteOptions
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/dictionary.py
|
{
"start": 9228,
"end": 12767
}
|
class ____(Dict[Any, Any]):
foo: int = 0
def __setitem__(self, key: Any, value: Any) -> None:
self.foo = value
def setitem_models(d3: Dict[str, Any], x):
# Use the custom model of __setitem__ for MyDict
d1 = MyDict()
d1["a"] = x
# Use the built-in model of __setitem__ for dict
d2 = {}
d2["b"] = x
# Use the built-in model of __setitem__ for any subtype
# of dict. This is incorrect, but can lead to higher SNR.
d3["c"] = x
return d1, d2, d3
def backward_weak_update(d: Dict[Any, Any]):
# This translates to d["x"] = 0; d[**keys] = "x";
# We need to infer that d's keys are a sink, by doing weak updates.
d["x"] = 0
_test_sink(d.keys()) # d[**keys] is a sink
def walrus_operator(y):
d = {}
d[(x := _test_source())] = (x := y)
# We do a weak update on `d.**keys`, which join the results of both
# clearing and not clearing the taint on `d.**keys`
return d, x
def forward_weak_update():
d = {}
d[_test_source()] = 0
d["x"] = 0 # Should not strong update d.**keys
return d
def analyze_getitem_index_issue():
x = _test_source()
d = {}
y = d[_test_sink(x)]
def analyze_getitem_index_backward(x):
d = {}
y = d[_test_sink(x)]
def issue_in_keys():
d = {}
d[_test_source()] = "bar"
backward_weak_update(d) # Issue here
_test_sink(d.keys()) # Issue here
def dictionary_tito_any_index(arg):
return {i: arg for i in range(10)}
def dictionary_int_key():
d = {0: _test_source()}
_test_sink(d[0])
_test_sink(d[1])
def dictionary_bool_key():
d = {True: _test_source()}
_test_sink(d[0])
_test_sink(d[1])
def dictionary_update_keyword():
d = {}
d.update(a={"b": _test_source()})
_test_sink(d["a"]["b"])
# TODO(T136908911): Special case update with keyword arguments.
_test_sink(d["b"])
def dictionary_update_iterable():
d = {"a": 0}
# TODO(T136908911): Special case update with iterable.
d.update([("b", _test_source())])
_test_sink(d["a"])
_test_sink(d["b"])
d = {"a": 0}
d.update([("b", 0), ("c", _test_source())])
_test_sink(d["a"])
_test_sink(d["b"])
_test_sink(d["c"])
def dict_update_keys(arg: Dict[str, str]) -> Dict[str, str]:
return {"key": value for key, value in arg.items()}
def dictionary_keys_and_any_index_bug(arg: Dict[str, str]) -> Dict[str, str]:
d = dict_update_keys(arg)
_test_sink(d.keys())
return d
def dict_get_foo(d: Dict[str, str]) -> Optional[str]:
return d.get("foo")
def dict_get_foo_with_default(d: Dict[str, str], default: str) -> str:
return d.get("foo", default)
def test_dict_get_foo_tito() -> None:
_test_sink(dict_get_foo({"foo": _test_source()})) # Issue.
_test_sink(dict_get_foo({"bar": _test_source()})) # Not an issue.
def test_dict_get_source() -> None:
d = {"foo": _test_source(), "bar": ""}
_test_sink(d.get("foo")) # Issue.
_test_sink(d.get("bar")) # Not an issue.
def test_dict_multiple_targets() -> None:
x: dict[str, Any] = {"foo": ""}
x["foo"], x["bar"] = _test_source(), _test_source()
_test_sink(x["foo"]), _test_sink(x["bar"]) # Issue, Issue
x["foo"], x["bar"] = "", ""
_test_sink(x["foo"]), _test_sink(x["bar"]) # No Issue, No Issue
def dict_getitem_mutate(x: str, j: int) -> str:
d: Dict[int, str] = {i: "" for i in range(10)}
d[j] += x
return d[j]
def test_dict_getitem_mutate():
_test_sink(dict_getitem_mutate(_test_source(), 1))
|
MyDict
|
python
|
getsentry__sentry
|
src/sentry/api/bases/project.py
|
{
"start": 1242,
"end": 2168
}
|
class ____(OrganizationPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin"],
"POST": ["project:write", "project:admin"],
"PUT": ["project:write", "project:admin"],
"DELETE": ["project:admin"],
}
def has_object_permission(self, request: Request, view: APIView, project: Project) -> bool: # type: ignore[override] # XXX: inheritance-for-convenience
has_org_scope = super().has_object_permission(request, view, project.organization)
# If allow_joinleave is False, some org-roles will not have project:read for all projects
if has_org_scope and request.access.has_project_access(project):
return has_org_scope
assert request.method is not None
allowed_scopes = set(self.scope_map.get(request.method, []))
return request.access.has_any_project_scope(project, allowed_scopes)
|
ProjectPermission
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_grpc/server.py
|
{
"start": 53923,
"end": 58658
}
|
class ____:
def __init__(
self,
server_termination_event: threading.Event,
dagster_api_servicer: DagsterApiServicer,
logger: logging.Logger,
threadpool_executor: FuturesAwareThreadPoolExecutor,
host="localhost",
port: Optional[int] = None,
socket: Optional[str] = None,
enable_metrics: bool = False,
):
check.invariant(
port is not None if seven.IS_WINDOWS else True,
"You must pass a valid `port` on Windows: `socket` not supported.",
)
check.invariant(
(port or socket) and not (port and socket),
"You must pass one and only one of `port` or `socket`.",
)
check.invariant(
host is not None if port else True,
"Must provide a host when serving on a port",
)
self._logger = logger
self._enable_metrics = check.bool_param(enable_metrics, "enable_metrics")
self._threadpool_executor = threadpool_executor
if self._enable_metrics:
_update_threadpool_metrics(self._threadpool_executor)
self.server = grpc.server(
self._threadpool_executor,
compression=grpc.Compression.Gzip,
options=[
("grpc.max_send_message_length", max_send_bytes()),
("grpc.max_receive_message_length", max_rx_bytes()),
],
)
self._server_termination_event = server_termination_event
self._api_servicer = dagster_api_servicer
# Create a health check servicer
self._health_servicer = health.HealthServicer()
health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)
add_DagsterApiServicer_to_server(self._api_servicer, self.server)
if port:
server_address = host + ":" + str(port)
else:
server_address = "unix:" + os.path.abspath(check.not_none(socket))
# grpc.Server.add_insecure_port returns:
# - 0 on failure
# - port number when a port is successfully bound
# - 1 when a UDS is successfully bound
res = self.server.add_insecure_port(server_address)
if socket and res != 1:
raise CouldNotBindGrpcServerToAddress(socket)
if port and res != port:
raise CouldNotBindGrpcServerToAddress(port)
def serve(self):
# Unfortunately it looks like ports bind late (here) and so this can fail with an error
# from C++ like:
#
# E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
# {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
# 2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
# "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
# "Unable to configure socket","fd":6,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
# "referenced_errors":[{"created":"@1593089216.180079000",
# "description":"Address already in use","errno":48,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
# "Address already in use","syscall":"bind"}]}]}
#
# This is printed to stdout and there is no return value from server.start or exception
# raised in Python that we can use to handle this. The standard recipes for hijacking C
# stdout (so we could inspect this output and respond accordingly), e.g.
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
# to work (at least on Mac OS X) against grpc, and in any case would involve a huge
# cross-version and cross-platform maintenance burden. We have an issue open against grpc,
# https://github.com/grpc/grpc/issues/23315, and our own tracking issue at
self.server.start()
# Note: currently this is hardcoded as serving, since both services are cohosted
self._health_servicer.set("DagsterApi", health_pb2.HealthCheckResponse.SERVING)
server_termination_thread = threading.Thread(
target=server_termination_target,
args=[self._server_termination_event, self.server, self._logger],
name="grpc-server-termination",
daemon=True,
)
server_termination_thread.start()
try:
self.server.wait_for_termination()
finally:
self._api_servicer.cleanup() # pyright: ignore[reportAttributeAccessIssue]
server_termination_thread.join()
|
DagsterGrpcServer
|
python
|
kamyu104__LeetCode-Solutions
|
Python/max-chunks-to-make-sorted.py
|
{
"start": 396,
"end": 855
}
|
class ____(object):
def maxChunksToSorted(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
result, increasing_stk = 0, []
for num in arr:
max_num = num if not increasing_stk else max(increasing_stk[-1], num)
while increasing_stk and increasing_stk[-1] > num:
increasing_stk.pop()
increasing_stk.append(max_num)
return len(increasing_stk)
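A quick check against two small permutations (expected chunk counts worked out by hand):
print(Solution2().maxChunksToSorted([1, 0, 2, 3, 4]))  # 4: chunks [1,0] [2] [3] [4]
print(Solution2().maxChunksToSorted([4, 3, 2, 1, 0]))  # 1: only the whole array sorts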
|
Solution2
|
python
|
tornadoweb__tornado
|
tornado/test/httputil_test.py
|
{
"start": 3097,
"end": 3394
}
|
class ____(unittest.TestCase):
def test_parsing(self):
qsstring = "a=1&b=2&a=3"
qs = urllib.parse.parse_qs(qsstring)
qsl = list(qs_to_qsl(qs))
self.assertIn(("a", "1"), qsl)
self.assertIn(("a", "3"), qsl)
self.assertIn(("b", "2"), qsl)
|
QsParseTest
|
python
|
astropy__astropy
|
astropy/cosmology/_src/tests/io/test_cosmology.py
|
{
"start": 2092,
"end": 2308
}
|
class ____(IODirectTestBase, ToFromCosmologyTestMixin):
"""Directly test ``to/from_cosmology``."""
def setup_class(self):
self.functions = {"to": to_cosmology, "from": from_cosmology}
|
TestToFromCosmology
|
python
|
run-llama__llama_index
|
llama-index-core/tests/indices/vector_store/mock_services.py
|
{
"start": 91,
"end": 1980
}
|
class ____(BaseEmbedding):
@classmethod
def class_name(cls) -> str:
return "MockEmbedding"
async def _aget_query_embedding(self, query: str) -> List[float]:
del query
return [0, 0, 1, 0, 0]
async def _aget_text_embedding(self, text: str) -> List[float]:
# assume dimensions are 5
if text == "Hello world.":
return [1, 0, 0, 0, 0]
elif text == "This is a test.":
return [0, 1, 0, 0, 0]
elif text == "This is another test.":
return [0, 0, 1, 0, 0]
elif text == "This is a test v2.":
return [0, 0, 0, 1, 0]
elif text == "This is a test v3.":
return [0, 0, 0, 0, 1]
elif text == "This is bar test.":
return [0, 0, 1, 0, 0]
elif text == "Hello world backup.":
# this is used when "Hello world." is deleted.
return [1, 0, 0, 0, 0]
else:
return [0, 0, 0, 0, 0]
def _get_query_embedding(self, query: str) -> List[float]:
del query # Unused
return [0, 0, 1, 0, 0]
def _get_text_embedding(self, text: str) -> List[float]:
"""Mock get text embedding."""
# assume dimensions are 5
if text == "Hello world.":
return [1, 0, 0, 0, 0]
elif text == "This is a test.":
return [0, 1, 0, 0, 0]
elif text == "This is another test.":
return [0, 0, 1, 0, 0]
elif text == "This is a test v2.":
return [0, 0, 0, 1, 0]
elif text == "This is a test v3.":
return [0, 0, 0, 0, 1]
elif text == "This is bar test.":
return [0, 0, 1, 0, 0]
elif text == "Hello world backup.":
# this is used when "Hello world." is deleted.
return [1, 0, 0, 0, 0]
else:
return [0, 0, 0, 0, 0]
|
MockEmbedding
|
python
|
pytorch__pytorch
|
test/inductor/test_inductor_annotations.py
|
{
"start": 280,
"end": 1305
}
|
class ____(TestCase):
def get_code(self):
def f(a, b):
return a + b, a * b
a = torch.randn(5, device="cuda")
b = torch.randn(5, device="cuda")
f_comp = torch.compile(f)
_, code = run_and_get_code(f_comp, a, b)
return code[0]
@requires_cuda_and_triton
def test_no_annotations(self):
code = self.get_code()
self.assertTrue("from torch.cuda import nvtx" not in code)
self.assertTrue("training_annotation" not in code)
@inductor_config.patch(annotate_training=True)
@requires_cuda_and_triton
def test_training_annotation(self):
code = self.get_code()
self.assertTrue("from torch.cuda import nvtx" in code)
self.assertTrue(
code.count("training_annotation = nvtx._device_range_start('inference')")
>= 1
)
self.assertTrue(code.count("nvtx._device_range_end(training_annotation)") >= 1)
if __name__ == "__main__":
run_tests()
|
InductorAnnotationTestCase
|
python
|
django__django
|
tests/model_forms/tests.py
|
{
"start": 28749,
"end": 29508
}
|
class ____(forms.ModelForm):
class Meta:
model = Category
fields = ["name", "url", "slug"]
widgets = {
"name": forms.Textarea,
"url": forms.TextInput(attrs={"class": "url"}),
}
labels = {
"name": "Title",
}
help_texts = {
"slug": "Watch out! Letters, numbers, underscores and hyphens only.",
}
error_messages = {
"slug": {
"invalid": (
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!"
)
}
}
field_classes = {
"url": forms.URLField,
}
|
FieldOverridesByFormMetaForm
|
python
|
falconry__falcon
|
examples/recipes/output_csv_text_wsgi.py
|
{
"start": 38,
"end": 438
}
|
class ____:
def on_get(self, req, resp):
output = io.StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(('fruit', 'quantity'))
writer.writerow(('apples', 13))
writer.writerow(('oranges', 37))
resp.content_type = falcon.MEDIA_CSV
resp.downloadable_as = 'report.csv'
resp.text = output.getvalue()
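A sketch of wiring this resource into a WSGI app, assuming falcon 3.x naming; the route path is arbitrary:
import falcon

app = falcon.App()
app.add_route('/report', Report())
# run with any WSGI server, e.g. `gunicorn mymodule:app` (module name is a placeholder)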
|
Report
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/processors/test_delayed_workflow.py
|
{
"start": 38615,
"end": 44798
}
|
class ____:
def test_event_key_from_redis_key(self) -> None:
key = "123:456:789:1,2,3:10,9,8"
event_key = EventKey.from_redis_key(key)
assert event_key.workflow_id == 123
assert event_key.group_id == 456
assert event_key.when_dcg_id == 789
assert event_key.if_dcg_ids == frozenset([1, 2, 3])
assert event_key.passing_dcg_ids == frozenset([10, 9, 8])
assert event_key.original_key == key
def test_event_key_from_redis_key_invalid(self) -> None:
# Test various invalid key formats
invalid_cases = [
"invalid-key", # missing colons
"1:2:3:4:5:6", # too many parts
"1:2", # too few parts
"1:2:3:invalid_type", # invalid type
"1:2:3:invalid_type:2", # invalid dcg_ids format
"not_a_number:2:3:4:5", # non-numeric workflow_id
"1:not_a_number:3:4:5,6", # non-numeric group_id
]
for key in invalid_cases:
with pytest.raises(ValueError):
EventKey.from_redis_key(key)
def test_event_key_str_and_hash(self) -> None:
key = "123:456:789:1,2,3:10,9,8"
event_key = EventKey.from_redis_key(key)
assert str(event_key) == key
assert hash(event_key) == hash(key)
assert event_key == EventKey.from_redis_key(key)
assert event_key != EventKey.from_redis_key("122:456:789:1,2,3:10,9,8")
def test_event_instance_validation(self) -> None:
# Test valid event instance
instance = EventInstance(event_id="test-event")
assert instance.event_id == "test-event"
assert instance.occurrence_id is None
# Test with occurrence ID
instance = EventInstance(event_id="test-event", occurrence_id="test-occurrence")
assert instance.event_id == "test-event"
assert instance.occurrence_id == "test-occurrence"
# Test empty occurrence ID is converted to None
instance = EventInstance(event_id="test-event", occurrence_id="")
assert instance.event_id == "test-event"
assert instance.occurrence_id is None
# Test whitespace occurrence ID is converted to None
instance = EventInstance(event_id="test-event", occurrence_id=" ")
assert instance.event_id == "test-event"
assert instance.occurrence_id is None
# Test invalid cases
invalid_cases = [
('{"occurrence_id": "test-occurrence"}', "event_id"), # missing event_id
('{"event_id": ""}', "event_id"), # empty event_id
('{"event_id": " "}', "event_id"), # whitespace event_id
('{"event_id": null}', "event_id"), # null event_id
]
for json_data, expected_error in invalid_cases:
with pytest.raises(ValueError, match=expected_error):
EventInstance.parse_raw(json_data)
# Test that extra fields are ignored
instance = EventInstance.parse_raw('{"event_id": "test", "extra": "field"}')
assert instance.event_id == "test"
assert instance.occurrence_id is None
instance = EventInstance.parse_raw(
'{"event_id": "test", "occurrence_id": "test", "extra": "field"}'
)
assert instance.event_id == "test"
assert instance.occurrence_id == "test"
@patch("sentry.workflow_engine.processors.delayed_workflow.logger")
def test_from_redis_data_continue_on_error(self, mock_logger: MagicMock) -> None:
# Create a mix of valid and invalid data
redis_data = {
"123:456:789:1,2,3:10,9,8": '{"event_id": "valid-1"}', # valid
"439:1:3487:134,6:34": '{"occurrence_id": "invalid-1"}', # missing event_id
"5:456:22:1:44,33": '{"event_id": "valid-2"}', # valid
}
# With continue_on_error=True, should return valid entries and log errors
result = EventRedisData.from_redis_data(redis_data, continue_on_error=True)
assert len(result.events) == 2
assert (
result.events[EventKey.from_redis_key("123:456:789:1,2,3:10,9,8")].event_id == "valid-1"
)
assert result.events[EventKey.from_redis_key("5:456:22:1:44,33")].event_id == "valid-2"
# Verify error was logged
mock_logger.exception.assert_called_once_with(
"Failed to parse workflow event data",
extra={
"key": "439:1:3487:134,6:34",
"value": '{"occurrence_id": "invalid-1"}',
"error": ANY,
},
)
# With continue_on_error=False, should raise on first error
with pytest.raises(ValueError, match="event_id"):
EventRedisData.from_redis_data(redis_data, continue_on_error=False)
@patch("sentry.workflow_engine.processors.delayed_workflow.logger")
def test_from_redis_data_invalid_keys(self, mock_logger: MagicMock) -> None:
# Create data with an invalid key structure
redis_data = {
"123:456:789:1,2,3:10,9,8": '{"event_id": "valid-1"}', # valid
"invalid-key": '{"event_id": "valid-2"}', # invalid key format
"5:456:22:1:44,33": '{"event_id": "valid-3"}', # valid
}
# With continue_on_error=True, should return valid entries and log errors
result = EventRedisData.from_redis_data(redis_data, continue_on_error=True)
assert len(result.events) == 2
assert (
result.events[EventKey.from_redis_key("123:456:789:1,2,3:10,9,8")].event_id == "valid-1"
)
assert result.events[EventKey.from_redis_key("5:456:22:1:44,33")].event_id == "valid-3"
# Verify error was logged
mock_logger.exception.assert_called_once_with(
"Failed to parse workflow event data",
extra={
"key": "invalid-key",
"value": '{"event_id": "valid-2"}',
"error": ANY,
},
)
# With continue_on_error=False, should raise on first error
with pytest.raises(ValueError):
EventRedisData.from_redis_data(redis_data, continue_on_error=False)
|
TestEventKeyAndInstance
|
python
|
getsentry__sentry
|
tests/sentry/integrations/msteams/test_message_builder.py
|
{
"start": 17555,
"end": 19569
}
|
class ____(TestCase):
def setUp(self) -> None:
owner = self.create_user()
self.org = self.create_organization(owner=owner)
self.notification = DummyNotificationWithMoreFields(self.org)
self.project1 = self.create_project(organization=self.org)
self.group1 = self.create_group(project=self.project1)
self.notification.group = self.group1
self.context = {"some_field": "some_value"}
self.recipient = owner
def test_simple(self) -> None:
notification_card = MSTeamsNotificationsMessageBuilder(
self.notification,
self.context,
Actor.from_object(self.recipient),
).build_notification_card()
body = notification_card["body"]
assert 4 == len(body)
title = body[0]
assert "Notification Title with some\\_value" == title["text"]
group_title = body[1]
assert "[My Title]" in group_title["text"]
assert TextSize.LARGE == group_title["size"]
assert TextWeight.BOLDER == group_title["weight"]
description = body[2]
assert "Message Description" in description["text"]
assert TextSize.MEDIUM == description["size"]
footer = body[3]
assert 2 == len(footer)
logo = footer["columns"][0]["items"][0]
assert "Image" == logo["type"]
assert "20px" == logo["height"]
footer_text = footer["columns"][1]["items"][0]
assert "Notification Footer" in footer_text["text"]
assert TextSize.SMALL == footer_text["size"]
def test_without_footer(self) -> None:
dummy_notification = DummyNotification(self.org)
dummy_notification.group = self.group1
notification_card = MSTeamsNotificationsMessageBuilder(
dummy_notification,
self.context,
Actor.from_object(self.recipient),
).build_notification_card()
assert 2 == len(notification_card["body"])
|
MSTeamsNotificationMessageBuilderTest
|
python
|
coleifer__peewee
|
tests/migrations.py
|
{
"start": 34081,
"end": 34276
}
|
class ____(TestModel):
key = TextField()
value = IntegerField()
class Meta:
constraints = [
SQL("CHECK (key != '')"),
SQL('CHECK (value > 0)')]
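# Editor's illustrative note (not part of the original source): with these
# table-level CHECK constraints in place, inserts such as
# HasChecks.create(key='', value=1) or HasChecks.create(key='k', value=0)
# are rejected by the database (peewee surfaces an IntegrityError), while
# HasChecks.create(key='k', value=1) succeeds.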
|
HasChecks
|
python
|
huggingface__transformers
|
src/transformers/models/fsmt/modeling_fsmt.py
|
{
"start": 43929,
"end": 47040
}
|
class ____(nn.Embedding):
"""
This module produces sinusoidal positional embeddings of any length.
We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge.
Padding symbols are ignored.
    These embeddings get automatically extended in forward if more positions are needed.
"""
def __init__(self, num_positions, embedding_dim, padding_idx):
super().__init__(num_positions, embedding_dim, padding_idx)
def make_weight(self, num_positions, embedding_dim, padding_idx):
weight = self.get_embedding(num_positions, embedding_dim, padding_idx)
# in forward put the weights on the correct dtype and device of the param
weight = weight.to(dtype=self.weight.dtype, device=self.weight.device)
self.weight = nn.Parameter(weight)
self.weight.detach_()
self.weight.requires_grad = False
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx):
"""
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
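    # Editor's illustrative note (not part of the original source): for
    # get_embedding(num_embeddings=4, embedding_dim=6, padding_idx=0), row i of
    # the returned (4, 6) tensor is [sin(i*w0), sin(i*w1), sin(i*w2),
    # cos(i*w0), cos(i*w1), cos(i*w2)] with w_k = 10000 ** (-k / 2), and row 0
    # is zeroed out because it corresponds to the padding index.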
@staticmethod
def make_positions(tensor, padding_idx: int):
"""
Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
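    # Editor's illustrative note (not part of the original source): with
    # padding_idx=1, an input row [5, 7, 9, 1, 1] is mapped by make_positions
    # to [2, 3, 4, 1, 1] -- real tokens count up from padding_idx + 1, while
    # padding positions stay at padding_idx.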
def forward(
self,
input,
incremental_state: Optional[Any] = None,
timestep: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.shape[:2]
max_pos = self.padding_idx + 1 + seq_len
if max_pos > self.weight.size(0):
# expand embeddings if needed
self.make_weight(max_pos, self.embedding_dim, self.padding_idx)
positions = self.make_positions(input, self.padding_idx)
return super().forward(positions)
__all__ = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]
|
SinusoidalPositionalEmbedding
|
python
|
huggingface__transformers
|
tests/models/smolvlm/test_modeling_smolvlm.py
|
{
"start": 5231,
"end": 13530
}
|
class ____(ModelTesterMixin, unittest.TestCase):
"""
Model tester for `SmolVLM`.
"""
all_model_classes = (SmolVLMModel,) if is_torch_available() else ()
test_resize_embeddings = True
def setUp(self):
self.model_tester = SmolVLMVisionText2TextModelTester(self)
self.config_tester = ConfigTester(
self, config_class=SmolVLMConfig, has_text_modality=False, common_properties=["image_token_id"]
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Model does not support padding right")
def test_flash_attn_2_inference_padding_right(self):
pass
@unittest.skip(reason="Compile not yet supported in SmolVLM models")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
@unittest.skip(reason="Compile not yet supported in SmolVLM models")
def test_sdpa_can_dispatch_on_flash(self):
pass
# We need to override as we need to prepare such that the image token is the last token
def test_resize_tokens_embeddings(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.text_config.vocab_size
            # Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Ignore copy
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
# make sure that decoder_input_ids are resized as well
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size)
model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0] // 64, 0)
self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size)
self.assertTrue(model.config.text_config.vocab_size, model.vocab_size)
model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0] // 64, 0)
# Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size
target_dimension = 128
model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
self.assertTrue(model_embed.weight.shape[0], target_dimension)
with self.assertRaisesRegex(
ValueError,
"Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
):
model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)
# We need to override as we need to prepare such that the image token is the last token
def test_resize_embeddings_untied(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
model.eval()
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
|
SmolVLMModelTest
|
python
|
apache__airflow
|
providers/databricks/src/airflow/providers/databricks/operators/databricks.py
|
{
"start": 54005,
"end": 68542
}
|
class ____(BaseOperator, ABC):
"""
Base class for operators that are run as Databricks job tasks or tasks within a Databricks workflow.
:param caller: The name of the caller operator to be used in the logs.
:param databricks_conn_id: The name of the Airflow connection to use.
:param databricks_task_key: An optional task_key used to refer to the task by Databricks API. By
default this will be set to the hash of ``dag_id + task_id``.
:param databricks_retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
:param databricks_retry_delay: Number of seconds to wait between retries.
:param databricks_retry_limit: Amount of times to retry if the Databricks backend is unreachable.
:param deferrable: Whether to run the operator in the deferrable mode.
:param existing_cluster_id: ID for existing cluster on which to run this task.
:param job_cluster_key: The key for the job cluster.
:param new_cluster: Specs for a new cluster on which this task will be run.
:param notebook_packages: A list of the Python libraries to be installed on the cluster running the
notebook.
:param notebook_params: A dict of key-value pairs to be passed as optional params to the notebook task.
    :param polling_period_seconds: Controls the rate at which we poll for the result of this notebook job run.
:param wait_for_termination: if we should wait for termination of the job run. ``True`` by default.
:param workflow_run_metadata: Metadata for the workflow run. This is used when the operator is used within
a workflow. It is expected to be a dictionary containing the run_id and conn_id for the workflow.
"""
def __init__(
self,
caller: str = "DatabricksTaskBaseOperator",
databricks_conn_id: str = "databricks_default",
databricks_task_key: str = "",
databricks_retry_args: dict[Any, Any] | None = None,
databricks_retry_delay: int = 1,
databricks_retry_limit: int = 3,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
existing_cluster_id: str = "",
job_cluster_key: str = "",
new_cluster: dict[str, Any] | None = None,
polling_period_seconds: int = 5,
wait_for_termination: bool = True,
workflow_run_metadata: dict[str, Any] | None = None,
**kwargs: Any,
):
self.caller = caller
self.databricks_conn_id = databricks_conn_id
self._databricks_task_key = databricks_task_key
self.databricks_retry_args = databricks_retry_args
self.databricks_retry_delay = databricks_retry_delay
self.databricks_retry_limit = databricks_retry_limit
self.deferrable = deferrable
self.existing_cluster_id = existing_cluster_id
self.job_cluster_key = job_cluster_key
self.new_cluster = new_cluster or {}
self.polling_period_seconds = polling_period_seconds
self.wait_for_termination = wait_for_termination
self.workflow_run_metadata = workflow_run_metadata
self.databricks_run_id: int | None = None
super().__init__(**kwargs)
if self._databricks_workflow_task_group is not None:
# Conditionally set operator_extra_links based on Airflow version. In Airflow 3, only show the job run link.
# In Airflow 2, show the job run link and the repair link.
# TODO: Once we expand the plugin functionality in Airflow 3.1, this can be re-evaluated on how to handle the repair link.
if AIRFLOW_V_3_0_PLUS:
self.operator_extra_links = (WorkflowJobRunLink(),)
else:
self.operator_extra_links = (
WorkflowJobRunLink(),
WorkflowJobRepairSingleTaskLink(),
)
else:
# Databricks does not support repair for non-workflow tasks, hence do not show the repair link.
self.operator_extra_links = (DatabricksJobRunLink(),)
@cached_property
def _hook(self) -> DatabricksHook:
return self._get_hook(caller=self.caller)
def _get_hook(self, caller: str) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
retry_args=self.databricks_retry_args,
caller=caller,
)
@cached_property
def databricks_task_key(self) -> str:
return self._generate_databricks_task_key()
def _generate_databricks_task_key(
self, task_id: str | None = None, task_dict: dict[str, BaseOperator] | None = None
) -> str:
"""Create a databricks task key using the hash of dag_id and task_id."""
if task_id:
if not task_dict:
raise ValueError(
"Must pass task_dict if task_id is provided in _generate_databricks_task_key."
)
_task = task_dict.get(task_id)
if _task and hasattr(_task, "databricks_task_key"):
_databricks_task_key = _task.databricks_task_key
else:
task_key = f"{self.dag_id}__{task_id}".encode()
_databricks_task_key = hashlib.md5(task_key).hexdigest()
return _databricks_task_key
if not self._databricks_task_key or len(self._databricks_task_key) > 100:
self.log.info(
"databricks_task_key has not be provided or the provided one exceeds 100 characters and will be truncated by the Databricks API. This will cause failure when trying to monitor the task. A task_key will be generated using the hash value of dag_id+task_id"
)
task_key = f"{self.dag_id}__{self.task_id}".encode()
self._databricks_task_key = hashlib.md5(task_key).hexdigest()
self.log.info("Generated databricks task_key: %s", self._databricks_task_key)
return self._databricks_task_key
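    # Editor's illustrative note (not part of the original source): for
    # dag_id="my_dag" and task_id="my_task", the generated key is
    # hashlib.md5(b"my_dag__my_task").hexdigest() -- a stable 32-character hex
    # string, comfortably under the 100-character limit imposed by the
    # Databricks API.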
@property
def _databricks_workflow_task_group(self) -> DatabricksWorkflowTaskGroup | None:
"""
Traverse up parent TaskGroups until the `is_databricks` flag associated with the root DatabricksWorkflowTaskGroup is found.
If found, returns the task group. Otherwise, return None.
"""
parent_tg: TaskGroup | DatabricksWorkflowTaskGroup | None = self.task_group
while parent_tg:
if getattr(parent_tg, "is_databricks", False):
return parent_tg # type: ignore[return-value]
if getattr(parent_tg, "task_group", None):
parent_tg = parent_tg.task_group
else:
return None
return None
@abstractmethod
def _get_task_base_json(self) -> dict[str, Any]:
"""Get the base json for the task."""
raise NotImplementedError()
def _get_run_json(self) -> dict[str, Any]:
"""Get run json to be used for task submissions."""
run_json = {
"run_name": self.databricks_task_key,
**self._get_task_base_json(),
}
if self.new_cluster and self.existing_cluster_id:
raise ValueError("Both new_cluster and existing_cluster_id are set. Only one should be set.")
if self.new_cluster:
run_json["new_cluster"] = self.new_cluster
elif self.existing_cluster_id:
run_json["existing_cluster_id"] = self.existing_cluster_id
else:
raise ValueError("Must specify either existing_cluster_id or new_cluster.")
return run_json
def _launch_job(self, context: Context | None = None) -> int | None:
"""Launch the job on Databricks."""
run_json = self._get_run_json()
self.databricks_run_id = self._hook.submit_run(run_json)
url = self._hook.get_run_page_url(self.databricks_run_id)
self.log.info("Check the job run in Databricks: %s", url)
if self.do_xcom_push and context is not None:
context["ti"].xcom_push(key=XCOM_RUN_ID_KEY, value=self.databricks_run_id)
context["ti"].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=url)
return self.databricks_run_id
def _handle_terminal_run_state(self, run_state: RunState, errors: list) -> None:
"""Handle the terminal state of the run."""
if run_state.life_cycle_state != RunLifeCycleState.TERMINATED.value:
raise AirflowException(
f"Databricks job failed with state {run_state.life_cycle_state}. Message: {run_state.state_message}. Errors: {errors}"
)
if not run_state.is_successful:
raise AirflowException(
f"Task failed. Final state {run_state.result_state}. Reason: {run_state.state_message}. Errors: {errors}"
)
self.log.info("Task succeeded. Final state %s.", run_state.result_state)
def _get_current_databricks_task(self) -> dict[str, Any]:
"""Retrieve the Databricks task corresponding to the current Airflow task."""
if self.databricks_run_id is None:
raise ValueError("Databricks job not yet launched. Please run launch_notebook_job first.")
tasks = self._hook.get_run(self.databricks_run_id)["tasks"]
# Because the task_key remains the same across multiple runs, and the Databricks API does not return
# tasks sorted by their attempts/start time, we sort the tasks by start time. This ensures that we
# map the latest attempt (whose status is to be monitored) of the task run to the task_key while
# building the {task_key: task} map below.
sorted_task_runs = sorted(tasks, key=lambda x: x["start_time"])
return {task["task_key"]: task for task in sorted_task_runs}[self.databricks_task_key]
def _convert_to_databricks_workflow_task(
self,
relevant_upstreams: list[BaseOperator],
task_dict: dict[str, BaseOperator],
context: Context | None = None,
) -> dict[str, object]:
"""Convert the operator to a Databricks workflow task that can be a task in a workflow."""
base_task_json = self._get_task_base_json()
result = {
"task_key": self.databricks_task_key,
"depends_on": [
{"task_key": self._generate_databricks_task_key(task_id, task_dict)}
for task_id in self.upstream_task_ids
if task_id in relevant_upstreams
],
**base_task_json,
}
if self.existing_cluster_id and self.job_cluster_key:
raise ValueError(
"Both existing_cluster_id and job_cluster_key are set. Only one can be set per task."
)
if self.existing_cluster_id:
result["existing_cluster_id"] = self.existing_cluster_id
elif self.job_cluster_key:
result["job_cluster_key"] = self.job_cluster_key
return result
def monitor_databricks_job(self) -> None:
"""
Monitor the Databricks job.
Wait for the job to terminate. If deferrable, defer the task.
"""
if self.databricks_run_id is None:
raise ValueError("Databricks job not yet launched. Please run launch_notebook_job first.")
current_task_run_id = self._get_current_databricks_task()["run_id"]
run = self._hook.get_run(current_task_run_id)
run_page_url = run["run_page_url"]
self.log.info("Check the task run in Databricks: %s", run_page_url)
run_state = RunState(**run["state"])
self.log.info(
"Current state of the the databricks task %s is %s",
self.databricks_task_key,
run_state.life_cycle_state,
)
if self.deferrable and not run_state.is_terminal:
self.defer(
trigger=DatabricksExecutionTrigger(
run_id=current_task_run_id,
databricks_conn_id=self.databricks_conn_id,
polling_period_seconds=self.polling_period_seconds,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
retry_args=self.databricks_retry_args,
caller=self.caller,
),
method_name=DEFER_METHOD_NAME,
)
while not run_state.is_terminal:
time.sleep(self.polling_period_seconds)
run = self._hook.get_run(current_task_run_id)
run_state = RunState(**run["state"])
self.log.info(
"Current state of the databricks task %s is %s",
self.databricks_task_key,
run_state.life_cycle_state,
)
# Extract errors from the run response using utility function
errors = extract_failed_task_errors(self._hook, run, run_state)
self._handle_terminal_run_state(run_state, errors)
def execute(self, context: Context) -> None:
"""Execute the operator. Launch the job and monitor it if wait_for_termination is set to True."""
if self._databricks_workflow_task_group:
# If we are in a DatabricksWorkflowTaskGroup, we should have an upstream task launched.
if not self.workflow_run_metadata:
launch_task_id = next(task for task in self.upstream_task_ids if task.endswith(".launch"))
self.workflow_run_metadata = context["ti"].xcom_pull(task_ids=launch_task_id)
workflow_run_metadata = WorkflowRunMetadata(**self.workflow_run_metadata)
self.databricks_run_id = workflow_run_metadata.run_id
self.databricks_conn_id = workflow_run_metadata.conn_id
# Store operator links in XCom for Airflow 3 compatibility
if AIRFLOW_V_3_0_PLUS:
# Store the job run link
store_databricks_job_run_link(
context=context,
metadata=workflow_run_metadata,
logger=self.log,
)
else:
self._launch_job(context=context)
if self.wait_for_termination:
self.monitor_databricks_job()
def execute_complete(self, context: dict | None, event: dict) -> None:
run_state = RunState.from_json(event["run_state"])
errors = event.get("errors", [])
self._handle_terminal_run_state(run_state, errors)
|
DatabricksTaskBaseOperator
|
python
|
h5py__h5py
|
h5py/tests/test_dataset.py
|
{
"start": 48187,
"end": 48857
}
|
class ____(BaseDataset):
"""
Feature: Dataset dtype is available as .dtype property
"""
def test_dtype(self):
""" Retrieve dtype from dataset """
dset = self.f.create_dataset(make_name(), (5,), '|S10')
self.assertEqual(dset.dtype, np.dtype('|S10'))
def test_dtype_complex32(self):
""" Retrieve dtype from complex float16 dataset (gh-2156) """
# No native support in numpy as of v1.23.3, so expect compound type.
complex32 = np.dtype([('r', np.float16), ('i', np.float16)])
dset = self.f.create_dataset(make_name(), (5,), complex32)
self.assertEqual(dset.dtype, complex32)
|
TestDtype
|
python
|
huggingface__transformers
|
tests/models/dbrx/test_modeling_dbrx.py
|
{
"start": 955,
"end": 2645
}
|
class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = DbrxModel
def __init__(
self,
parent,
clip_qkv=8,
rope_theta=500000,
attn_config_model_type="",
moe_jitter_eps=0,
moe_loss_weight=0.05,
moe_num_experts=8,
moe_top_k=4,
ffn_config_model_type="",
initializer_range=0.02,
resid_pdrop=0.0,
is_decoder=True,
pad_token_id=0,
):
# Call parent init
super().__init__(
parent=parent,
hidden_dropout_prob=resid_pdrop,
attention_probs_dropout_prob=resid_pdrop,
initializer_range=initializer_range,
pad_token_id=pad_token_id,
is_decoder=is_decoder,
)
# Set DBRX's unusual params
self.clip_qkv = clip_qkv
# DBRX takes sub-configurations for the FFN and attention layers, so we need to set that correctly here
self.ffn_config = {
"ffn_hidden_size": self.hidden_size,
"hidden_size": 2 * self.hidden_size,
"moe_jitter_eps": moe_jitter_eps,
"moe_loss_weight": moe_loss_weight,
"moe_num_experts": moe_num_experts,
"moe_top_k": moe_top_k,
"model_type": ffn_config_model_type,
"ffn_act_fn": {"name": self.hidden_act},
}
self.attn_config = {
"clip_qkv": clip_qkv,
"model_type": attn_config_model_type,
"rope_theta": rope_theta,
}
@property
def config_args(self):
return super().config_args + ["ffn_config", "attn_config"]
@require_torch
|
DbrxModelTester
|
python
|
python-markdown__markdown
|
markdown/blockprocessors.py
|
{
"start": 24365,
"end": 25505
}
|
class ____(BlockProcessor):
""" Process link references. """
RE = re.compile(
r'^[ ]{0,3}\[([^\[\]]*)\]:[ ]*\n?[ ]*([^\s]+)[ ]*(?:\n[ ]*)?((["\'])(.*)\4[ ]*|\((.*)\)[ ]*)?$', re.MULTILINE
)
def test(self, parent: etree.Element, block: str) -> bool:
return True
def run(self, parent: etree.Element, blocks: list[str]) -> bool:
block = blocks.pop(0)
m = self.RE.search(block)
if m:
id = m.group(1).strip().lower()
link = m.group(2).lstrip('<').rstrip('>')
title = m.group(5) or m.group(6)
self.parser.md.references[id] = (link, title)
if block[m.end():].strip():
# Add any content after match back to blocks as separate block
blocks.insert(0, block[m.end():].lstrip('\n'))
if block[:m.start()].strip():
# Add any content before match back to blocks as separate block
blocks.insert(0, block[:m.start()].rstrip('\n'))
return True
# No match. Restore block.
blocks.insert(0, block)
return False
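    # Editor's illustrative note (not part of the original source): a block such
    # as '[docs]: https://example.com "Project docs"' is consumed here and
    # stored as self.parser.md.references["docs"] =
    # ("https://example.com", "Project docs") for later reference-style links.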
|
ReferenceProcessor
|
python
|
ray-project__ray
|
rllib/policy/policy.py
|
{
"start": 5860,
"end": 69980
}
|
class ____(metaclass=ABCMeta):
"""RLlib's base class for all Policy implementations.
Policy is the abstract superclass for all DL-framework specific sub-classes
(e.g. TFPolicy or TorchPolicy). It exposes APIs to
1. Compute actions from observation (and possibly other) inputs.
2. Manage the Policy's NN model(s), like exporting and loading their weights.
3. Postprocess a given trajectory from the environment or other input via the
`postprocess_trajectory` method.
4. Compute losses from a train batch.
5. Perform updates from a train batch on the NN-models (this normally includes loss
calculations) either:
a. in one monolithic step (`learn_on_batch`)
b. via batch pre-loading, then n steps of actual loss computations and updates
(`load_batch_into_buffer` + `learn_on_loaded_batch`).
"""
def __init__(
self,
observation_space: gym.Space,
action_space: gym.Space,
config: AlgorithmConfigDict,
):
"""Initializes a Policy instance.
Args:
observation_space: Observation space of the policy.
action_space: Action space of the policy.
config: A complete Algorithm/Policy config dict. For the default
config keys and values, see rllib/algorithm/algorithm.py.
"""
self.observation_space: gym.Space = observation_space
self.action_space: gym.Space = action_space
# the policy id in the global context.
self.__policy_id = config.get("__policy_id")
# The base struct of the observation/action spaces.
# E.g. action-space = gym.spaces.Dict({"a": Discrete(2)}) ->
# action_space_struct = {"a": Discrete(2)}
self.observation_space_struct = get_base_struct_from_space(observation_space)
self.action_space_struct = get_base_struct_from_space(action_space)
self.config: AlgorithmConfigDict = config
self.framework = self.config.get("framework")
# Create the callbacks object to use for handling custom callbacks.
from ray.rllib.callbacks.callbacks import RLlibCallback
callbacks = self.config.get("callbacks")
if isinstance(callbacks, RLlibCallback):
self.callbacks = callbacks()
elif isinstance(callbacks, (str, type)):
try:
self.callbacks: "RLlibCallback" = deserialize_type(
self.config.get("callbacks")
)()
except Exception:
pass # TEST
else:
self.callbacks: "RLlibCallback" = RLlibCallback()
# The global timestep, broadcast down from time to time from the
# local worker to all remote workers.
self.global_timestep: int = 0
# The number of gradient updates this policy has undergone.
self.num_grad_updates: int = 0
# The action distribution class to use for action sampling, if any.
# Child classes may set this.
self.dist_class: Optional[Type] = None
# Initialize view requirements.
self.init_view_requirements()
# Whether the Model's initial state (method) has been added
# automatically based on the given view requirements of the model.
self._model_init_state_automatically_added = False
# Connectors.
self.agent_connectors = None
self.action_connectors = None
@staticmethod
def from_checkpoint(
checkpoint: Union[str, Checkpoint],
policy_ids: Optional[Collection[PolicyID]] = None,
) -> Union["Policy", Dict[PolicyID, "Policy"]]:
"""Creates new Policy instance(s) from a given Policy or Algorithm checkpoint.
Note: This method must remain backward compatible from 2.1.0 on, wrt.
checkpoints created with Ray 2.0.0 or later.
Args:
checkpoint: The path (str) to a Policy or Algorithm checkpoint directory
or an AIR Checkpoint (Policy or Algorithm) instance to restore
from.
If checkpoint is a Policy checkpoint, `policy_ids` must be None
and only the Policy in that checkpoint is restored and returned.
                If checkpoint is an Algorithm checkpoint and `policy_ids` is None,
                will return a dict mapping PolicyID to all Policy objects found in
                the checkpoint, otherwise only those policies listed in `policy_ids`.
policy_ids: List of policy IDs to extract from a given Algorithm checkpoint.
If None and an Algorithm checkpoint is provided, will restore all
policies found in that checkpoint. If a Policy checkpoint is given,
this arg must be None.
Returns:
An instantiated Policy, if `checkpoint` is a Policy checkpoint. A dict
mapping PolicyID to Policies, if `checkpoint` is an Algorithm checkpoint.
In the latter case, returns all policies within the Algorithm if
`policy_ids` is None, else a dict of only those Policies that are in
`policy_ids`.
"""
checkpoint_info = get_checkpoint_info(checkpoint)
# Algorithm checkpoint: Extract one or more policies from it and return them
# in a dict (mapping PolicyID to Policy instances).
if checkpoint_info["type"] == "Algorithm":
from ray.rllib.algorithms.algorithm import Algorithm
policies = {}
# Old Algorithm checkpoints: State must be completely retrieved from:
# algo state file -> worker -> "state".
if checkpoint_info["checkpoint_version"] < version.Version("1.0"):
with open(checkpoint_info["state_file"], "rb") as f:
state = pickle.load(f)
# In older checkpoint versions, the policy states are stored under
# "state" within the worker state (which is pickled in itself).
worker_state = pickle.loads(state["worker"])
policy_states = worker_state["state"]
for pid, policy_state in policy_states.items():
# Get spec and config, merge config with
serialized_policy_spec = worker_state["policy_specs"][pid]
policy_config = Algorithm.merge_algorithm_configs(
worker_state["policy_config"], serialized_policy_spec["config"]
)
serialized_policy_spec.update({"config": policy_config})
policy_state.update({"policy_spec": serialized_policy_spec})
policies[pid] = Policy.from_state(policy_state)
# Newer versions: Get policy states from "policies/" sub-dirs.
elif checkpoint_info["policy_ids"] is not None:
for policy_id in checkpoint_info["policy_ids"]:
if policy_ids is None or policy_id in policy_ids:
policy_checkpoint_info = get_checkpoint_info(
os.path.join(
checkpoint_info["checkpoint_dir"],
"policies",
policy_id,
)
)
assert policy_checkpoint_info["type"] == "Policy"
with open(policy_checkpoint_info["state_file"], "rb") as f:
policy_state = pickle.load(f)
policies[policy_id] = Policy.from_state(policy_state)
return policies
# Policy checkpoint: Return a single Policy instance.
else:
msgpack = None
if checkpoint_info.get("format") == "msgpack":
msgpack = try_import_msgpack(error=True)
with open(checkpoint_info["state_file"], "rb") as f:
if msgpack is not None:
state = msgpack.load(f)
else:
state = pickle.load(f)
return Policy.from_state(state)
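    # Editor's illustrative note (not part of the original source): calling
    # Policy.from_checkpoint("/tmp/algo_ckpt") on an Algorithm checkpoint yields
    # a dict like {"default_policy": <Policy>}, while pointing it at a single
    # policy directory such as "/tmp/algo_ckpt/policies/default_policy" returns
    # that one restored Policy instance directly.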
@staticmethod
def from_state(state: PolicyState) -> "Policy":
"""Recovers a Policy from a state object.
The `state` of an instantiated Policy can be retrieved by calling its
`get_state` method. This only works for the V2 Policy classes (EagerTFPolicyV2,
        DynamicTFPolicyV2, and TorchPolicyV2). It contains all information necessary
to create the Policy. No access to the original code (e.g. configs, knowledge of
the policy's class, etc..) is needed.
Args:
state: The state to recover a new Policy instance from.
Returns:
A new Policy instance.
"""
serialized_pol_spec: Optional[dict] = state.get("policy_spec")
if serialized_pol_spec is None:
raise ValueError(
"No `policy_spec` key was found in given `state`! "
"Cannot create new Policy."
)
pol_spec = PolicySpec.deserialize(serialized_pol_spec)
actual_class = get_tf_eager_cls_if_necessary(
pol_spec.policy_class,
pol_spec.config,
)
if pol_spec.config["framework"] == "tf":
from ray.rllib.policy.tf_policy import TFPolicy
return TFPolicy._tf1_from_state_helper(state)
# Create the new policy.
new_policy = actual_class(
            # Note(jungong): we are intentionally not using keyword arguments here
# because some policies name the observation space parameter obs_space,
# and some others name it observation_space.
pol_spec.observation_space,
pol_spec.action_space,
pol_spec.config,
)
# Set the new policy's state (weights, optimizer vars, exploration state,
# etc..).
new_policy.set_state(state)
# Return the new policy.
return new_policy
def init_view_requirements(self):
"""Maximal view requirements dict for `learn_on_batch()` and
`compute_actions` calls.
Specific policies can override this function to provide custom
list of view requirements.
"""
# Maximal view requirements dict for `learn_on_batch()` and
# `compute_actions` calls.
# View requirements will be automatically filtered out later based
# on the postprocessing and loss functions to ensure optimal data
# collection and transfer performance.
view_reqs = self._get_default_view_requirements()
if not hasattr(self, "view_requirements"):
self.view_requirements = view_reqs
else:
for k, v in view_reqs.items():
if k not in self.view_requirements:
self.view_requirements[k] = v
def get_connector_metrics(self) -> Dict:
"""Get metrics on timing from connectors."""
return {
"agent_connectors": {
name + "_ms": 1000 * timer.mean
for name, timer in self.agent_connectors.timers.items()
},
"action_connectors": {
name + "_ms": 1000 * timer.mean
                for name, timer in self.action_connectors.timers.items()
},
}
def reset_connectors(self, env_id) -> None:
"""Reset action- and agent-connectors for this policy."""
self.agent_connectors.reset(env_id=env_id)
self.action_connectors.reset(env_id=env_id)
def compute_single_action(
self,
obs: Optional[TensorStructType] = None,
state: Optional[List[TensorType]] = None,
*,
prev_action: Optional[TensorStructType] = None,
prev_reward: Optional[TensorStructType] = None,
info: dict = None,
input_dict: Optional[SampleBatch] = None,
episode=None,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
        # Kwargs placeholder for future compatibility.
**kwargs,
) -> Tuple[TensorStructType, List[TensorType], Dict[str, TensorType]]:
"""Computes and returns a single (B=1) action value.
Takes an input dict (usually a SampleBatch) as its main data input.
This allows for using this method in case a more complex input pattern
(view requirements) is needed, for example when the Model requires the
last n observations, the last m actions/rewards, or a combination
of any of these.
Alternatively, in case no complex inputs are required, takes a single
`obs` values (and possibly single state values, prev-action/reward
values, etc..).
Args:
obs: Single observation.
state: List of RNN state inputs, if any.
prev_action: Previous action value, if any.
prev_reward: Previous reward, if any.
info: Info object, if any.
input_dict: A SampleBatch or input dict containing the
single (unbatched) Tensors to compute actions. If given, it'll
be used instead of `obs`, `state`, `prev_action|reward`, and
`info`.
episode: This provides access to all of the internal episode state,
which may be useful for model-based or multi-agent algorithms.
explore: Whether to pick an exploitation or
exploration action
(default: None -> use self.config["explore"]).
timestep: The current (sampling) time step.
Keyword Args:
kwargs: Forward compatibility placeholder.
Returns:
Tuple consisting of the action, the list of RNN state outputs (if
any), and a dictionary of extra features (if any).
"""
# Build the input-dict used for the call to
# `self.compute_actions_from_input_dict()`.
if input_dict is None:
input_dict = {SampleBatch.OBS: obs}
if state is not None:
for i, s in enumerate(state):
input_dict[f"state_in_{i}"] = s
if prev_action is not None:
input_dict[SampleBatch.PREV_ACTIONS] = prev_action
if prev_reward is not None:
input_dict[SampleBatch.PREV_REWARDS] = prev_reward
if info is not None:
input_dict[SampleBatch.INFOS] = info
# Batch all data in input dict.
input_dict = tree.map_structure_with_path(
lambda p, s: (
s
if p == "seq_lens"
else s.unsqueeze(0)
if torch and isinstance(s, torch.Tensor)
else np.expand_dims(s, 0)
),
input_dict,
)
episodes = None
if episode is not None:
episodes = [episode]
out = self.compute_actions_from_input_dict(
input_dict=SampleBatch(input_dict),
episodes=episodes,
explore=explore,
timestep=timestep,
)
# Some policies don't return a tuple, but always just a single action.
# E.g. ES and ARS.
if not isinstance(out, tuple):
single_action = out
state_out = []
info = {}
# Normal case: Policy should return (action, state, info) tuple.
else:
batched_action, state_out, info = out
single_action = unbatch(batched_action)
assert len(single_action) == 1
single_action = single_action[0]
# Return action, internal state(s), infos.
return (
single_action,
tree.map_structure(lambda x: x[0], state_out),
tree.map_structure(lambda x: x[0], info),
)
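    # Editor's illustrative note (not part of the original source): a call like
    # policy.compute_single_action(obs=np.array([0.1, 0.2])) is internally
    # batched to shape (1, 2), routed through compute_actions_from_input_dict,
    # and the returned (action, state_outs, info) have the leading batch
    # dimension stripped again.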
def compute_actions_from_input_dict(
self,
input_dict: Union[SampleBatch, Dict[str, TensorStructType]],
explore: Optional[bool] = None,
timestep: Optional[int] = None,
episodes=None,
**kwargs,
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
"""Computes actions from collected samples (across multiple-agents).
Takes an input dict (usually a SampleBatch) as its main data input.
This allows for using this method in case a more complex input pattern
(view requirements) is needed, for example when the Model requires the
last n observations, the last m actions/rewards, or a combination
of any of these.
Args:
input_dict: A SampleBatch or input dict containing the Tensors
to compute actions. `input_dict` already abides to the
Policy's as well as the Model's view requirements and can
thus be passed to the Model as-is.
explore: Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
timestep: The current (sampling) time step.
episodes: This provides access to all of the internal episodes'
state, which may be useful for model-based or multi-agent
algorithms.
Keyword Args:
kwargs: Forward compatibility placeholder.
Returns:
actions: Batch of output actions, with shape like
[BATCH_SIZE, ACTION_SHAPE].
state_outs: List of RNN state output
batches, if any, each with shape [BATCH_SIZE, STATE_SIZE].
info: Dictionary of extra feature batches, if any, with shape like
{"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
"""
# Default implementation just passes obs, prev-a/r, and states on to
# `self.compute_actions()`.
state_batches = [s for k, s in input_dict.items() if k.startswith("state_in")]
return self.compute_actions(
input_dict[SampleBatch.OBS],
state_batches,
prev_action_batch=input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=input_dict.get(SampleBatch.PREV_REWARDS),
info_batch=input_dict.get(SampleBatch.INFOS),
explore=explore,
timestep=timestep,
episodes=episodes,
**kwargs,
)
@abstractmethod
def compute_actions(
self,
obs_batch: Union[List[TensorStructType], TensorStructType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Union[List[TensorStructType], TensorStructType] = None,
prev_reward_batch: Union[List[TensorStructType], TensorStructType] = None,
info_batch: Optional[Dict[str, list]] = None,
episodes: Optional[List] = None,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
**kwargs,
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
"""Computes actions for the current policy.
Args:
obs_batch: Batch of observations.
state_batches: List of RNN state input batches, if any.
prev_action_batch: Batch of previous action values.
prev_reward_batch: Batch of previous rewards.
info_batch: Batch of info objects.
episodes: List of Episode objects, one for each obs in
obs_batch. This provides access to all of the internal
episode state, which may be useful for model-based or
multi-agent algorithms.
explore: Whether to pick an exploitation or exploration action.
Set to None (default) for using the value of
`self.config["explore"]`.
timestep: The current (sampling) time step.
Keyword Args:
kwargs: Forward compatibility placeholder
Returns:
actions: Batch of output actions, with shape like
[BATCH_SIZE, ACTION_SHAPE].
state_outs (List[TensorType]): List of RNN state output
batches, if any, each with shape [BATCH_SIZE, STATE_SIZE].
info (List[dict]): Dictionary of extra feature batches, if any,
with shape like
{"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
"""
raise NotImplementedError
def compute_log_likelihoods(
self,
actions: Union[List[TensorType], TensorType],
obs_batch: Union[List[TensorType], TensorType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Optional[Union[List[TensorType], TensorType]] = None,
prev_reward_batch: Optional[Union[List[TensorType], TensorType]] = None,
actions_normalized: bool = True,
in_training: bool = True,
) -> TensorType:
"""Computes the log-prob/likelihood for a given action and observation.
The log-likelihood is calculated using this Policy's action
distribution class (self.dist_class).
Args:
actions: Batch of actions, for which to retrieve the
log-probs/likelihoods (given all other inputs: obs,
states, ..).
obs_batch: Batch of observations.
state_batches: List of RNN state input batches, if any.
prev_action_batch: Batch of previous action values.
prev_reward_batch: Batch of previous rewards.
actions_normalized: Is the given `actions` already normalized
(between -1.0 and 1.0) or not? If not and
`normalize_actions=True`, we need to normalize the given
actions first, before calculating log likelihoods.
in_training: Whether to use the forward_train() or forward_exploration() of
the underlying RLModule.
Returns:
Batch of log probs/likelihoods, with shape: [BATCH_SIZE].
"""
raise NotImplementedError
@OverrideToImplementCustomLogic_CallToSuperRecommended
def postprocess_trajectory(
self,
sample_batch: SampleBatch,
other_agent_batches: Optional[
Dict[AgentID, Tuple["Policy", SampleBatch]]
] = None,
episode=None,
) -> SampleBatch:
"""Implements algorithm-specific trajectory postprocessing.
This will be called on each trajectory fragment computed during policy
evaluation. Each fragment is guaranteed to be only from one episode.
The given fragment may or may not contain the end of this episode,
depending on the `batch_mode=truncate_episodes|complete_episodes`,
`rollout_fragment_length`, and other settings.
Args:
sample_batch: batch of experiences for the policy,
which will contain at most one episode trajectory.
other_agent_batches: In a multi-agent env, this contains a
mapping of agent ids to (policy, agent_batch) tuples
containing the policy and experiences of the other agents.
episode: An optional multi-agent episode object to provide
access to all of the internal episode state, which may
be useful for model-based or multi-agent algorithms.
Returns:
The postprocessed sample batch.
"""
# The default implementation just returns the same, unaltered batch.
return sample_batch
@OverrideToImplementCustomLogic
def loss(
self, model: ModelV2, dist_class: ActionDistribution, train_batch: SampleBatch
) -> Union[TensorType, List[TensorType]]:
"""Loss function for this Policy.
Override this method in order to implement custom loss computations.
Args:
model: The model to calculate the loss(es).
dist_class: The action distribution class to sample actions
from the model's outputs.
train_batch: The input batch on which to calculate the loss.
Returns:
Either a single loss tensor or a list of loss tensors.
"""
raise NotImplementedError
def learn_on_batch(self, samples: SampleBatch) -> Dict[str, TensorType]:
"""Perform one learning update, given `samples`.
Either this method or the combination of `compute_gradients` and
`apply_gradients` must be implemented by subclasses.
Args:
samples: The SampleBatch object to learn from.
Returns:
Dictionary of extra metadata from `compute_gradients()`.
.. testcode::
:skipif: True
policy, sample_batch = ...
policy.learn_on_batch(sample_batch)
"""
# The default implementation is simply a fused `compute_gradients` plus
# `apply_gradients` call.
grads, grad_info = self.compute_gradients(samples)
self.apply_gradients(grads)
return grad_info
def learn_on_batch_from_replay_buffer(
self, replay_actor: ActorHandle, policy_id: PolicyID
) -> Dict[str, TensorType]:
"""Samples a batch from given replay actor and performs an update.
Args:
replay_actor: The replay buffer actor to sample from.
policy_id: The ID of this policy.
Returns:
Dictionary of extra metadata from `compute_gradients()`.
"""
# Sample a batch from the given replay actor.
# Note that for better performance (less data sent through the
# network), this policy should be co-located on the same node
# as `replay_actor`. Such a co-location step is usually done during
# the Algorithm's `setup()` phase.
batch = ray.get(replay_actor.replay.remote(policy_id=policy_id))
if batch is None:
return {}
# Send to own learn_on_batch method for updating.
# TODO: hack w/ `hasattr`
if hasattr(self, "devices") and len(self.devices) > 1:
self.load_batch_into_buffer(batch, buffer_index=0)
return self.learn_on_loaded_batch(offset=0, buffer_index=0)
else:
return self.learn_on_batch(batch)
def load_batch_into_buffer(self, batch: SampleBatch, buffer_index: int = 0) -> int:
"""Bulk-loads the given SampleBatch into the devices' memories.
The data is split equally across all the Policy's devices.
If the data is not evenly divisible by the batch size, excess data
should be discarded.
Args:
batch: The SampleBatch to load.
buffer_index: The index of the buffer (a MultiGPUTowerStack) to use
on the devices. The number of buffers on each device depends
on the value of the `num_multi_gpu_tower_stacks` config key.
Returns:
The number of tuples loaded per device.
"""
raise NotImplementedError
def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int:
"""Returns the number of currently loaded samples in the given buffer.
Args:
buffer_index: The index of the buffer (a MultiGPUTowerStack)
to use on the devices. The number of buffers on each device
depends on the value of the `num_multi_gpu_tower_stacks` config
key.
Returns:
The number of tuples loaded per device.
"""
raise NotImplementedError
def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0):
"""Runs a single step of SGD on an already loaded data in a buffer.
Runs an SGD step over a slice of the pre-loaded batch, offset by
the `offset` argument (useful for performing n minibatch SGD
updates repeatedly on the same, already pre-loaded data).
Updates the model weights based on the averaged per-device gradients.
Args:
offset: Offset into the preloaded data. Used for pre-loading
a train-batch once to a device, then iterating over
(subsampling through) this batch n times doing minibatch SGD.
buffer_index: The index of the buffer (a MultiGPUTowerStack)
to take the already pre-loaded data from. The number of buffers
on each device depends on the value of the
`num_multi_gpu_tower_stacks` config key.
Returns:
The outputs of extra_ops evaluated over the batch.
"""
raise NotImplementedError
def compute_gradients(
self, postprocessed_batch: SampleBatch
) -> Tuple[ModelGradients, Dict[str, TensorType]]:
"""Computes gradients given a batch of experiences.
Either this in combination with `apply_gradients()` or
`learn_on_batch()` must be implemented by subclasses.
Args:
postprocessed_batch: The SampleBatch object to use
for calculating gradients.
Returns:
grads: List of gradient output values.
grad_info: Extra policy-specific info values.
"""
raise NotImplementedError
def apply_gradients(self, gradients: ModelGradients) -> None:
"""Applies the (previously) computed gradients.
Either this in combination with `compute_gradients()` or
`learn_on_batch()` must be implemented by subclasses.
Args:
gradients: The already calculated gradients to apply to this
Policy.
"""
raise NotImplementedError
def get_weights(self) -> ModelWeights:
"""Returns model weights.
Note: The return value of this method will reside under the "weights"
key in the return value of Policy.get_state(). Model weights are only
one part of a Policy's state. Other state information contains:
optimizer variables, exploration state, and global state vars such as
the sampling timestep.
Returns:
Serializable copy or view of model weights.
"""
raise NotImplementedError
def set_weights(self, weights: ModelWeights) -> None:
"""Sets this Policy's model's weights.
Note: Model weights are only one part of a Policy's state. Other
state information contains: optimizer variables, exploration state,
and global state vars such as the sampling timestep.
Args:
weights: Serializable copy or view of model weights.
"""
raise NotImplementedError
def get_exploration_state(self) -> Dict[str, TensorType]:
"""Returns the state of this Policy's exploration component.
Returns:
Serializable information on the `self.exploration` object.
"""
return self.exploration.get_state()
def is_recurrent(self) -> bool:
"""Whether this Policy holds a recurrent Model.
Returns:
True if this Policy has-a RNN-based Model.
"""
return False
def num_state_tensors(self) -> int:
"""The number of internal states needed by the RNN-Model of the Policy.
Returns:
int: The number of RNN internal states kept by this Policy's Model.
"""
return 0
def get_initial_state(self) -> List[TensorType]:
"""Returns initial RNN state for the current policy.
Returns:
List[TensorType]: Initial RNN state for the current policy.
"""
return []
@OverrideToImplementCustomLogic_CallToSuperRecommended
def get_state(self) -> PolicyState:
"""Returns the entire current state of this Policy.
Note: Not to be confused with an RNN model's internal state.
State includes the Model(s)' weights, optimizer weights,
the exploration component's state, as well as global variables, such
as sampling timesteps.
Note that the state may contain references to the original variables.
This means that you may need to deepcopy() the state before mutating it.
Returns:
Serialized local state.
"""
state = {
# All the policy's weights.
"weights": self.get_weights(),
# The current global timestep.
"global_timestep": self.global_timestep,
# The current num_grad_updates counter.
"num_grad_updates": self.num_grad_updates,
}
        # Add this Policy's spec so it can be retrieved w/o access to the original
# code.
policy_spec = PolicySpec(
policy_class=type(self),
observation_space=self.observation_space,
action_space=self.action_space,
config=self.config,
)
state["policy_spec"] = policy_spec.serialize()
# Checkpoint connectors state as well if enabled.
connector_configs = {}
if self.agent_connectors:
connector_configs["agent"] = self.agent_connectors.to_state()
if self.action_connectors:
connector_configs["action"] = self.action_connectors.to_state()
state["connector_configs"] = connector_configs
return state
def restore_connectors(self, state: PolicyState):
"""Restore agent and action connectors if configs available.
Args:
state: The new state to set this policy to. Can be
obtained by calling `self.get_state()`.
"""
        # To avoid a circular dependency problem caused by SampleBatch.
from ray.rllib.connectors.util import restore_connectors_for_policy
connector_configs = state.get("connector_configs", {})
if "agent" in connector_configs:
self.agent_connectors = restore_connectors_for_policy(
self, connector_configs["agent"]
)
logger.debug("restoring agent connectors:")
logger.debug(self.agent_connectors.__str__(indentation=4))
if "action" in connector_configs:
self.action_connectors = restore_connectors_for_policy(
self, connector_configs["action"]
)
logger.debug("restoring action connectors:")
logger.debug(self.action_connectors.__str__(indentation=4))
@OverrideToImplementCustomLogic_CallToSuperRecommended
def set_state(self, state: PolicyState) -> None:
"""Restores the entire current state of this Policy from `state`.
Args:
state: The new state to set this policy to. Can be
obtained by calling `self.get_state()`.
"""
if "policy_spec" in state:
policy_spec = PolicySpec.deserialize(state["policy_spec"])
# Assert spaces remained the same.
if (
policy_spec.observation_space is not None
and policy_spec.observation_space != self.observation_space
):
logger.warning(
"`observation_space` in given policy state ("
f"{policy_spec.observation_space}) does not match this Policy's "
f"observation space ({self.observation_space})."
)
if (
policy_spec.action_space is not None
and policy_spec.action_space != self.action_space
):
logger.warning(
"`action_space` in given policy state ("
f"{policy_spec.action_space}) does not match this Policy's "
f"action space ({self.action_space})."
)
# Override config, if part of the spec.
if policy_spec.config:
self.config = policy_spec.config
# Override NN weights.
self.set_weights(state["weights"])
self.restore_connectors(state)
def apply(
self,
func: Callable[["Policy", Optional[Any], Optional[Any]], T],
*args,
**kwargs,
) -> T:
"""Calls the given function with this Policy instance.
Useful for when the Policy class has been converted into a ActorHandle
and the user needs to execute some functionality (e.g. add a property)
on the underlying policy object.
Args:
func: The function to call, with this Policy as first
argument, followed by args, and kwargs.
args: Optional additional args to pass to the function call.
kwargs: Optional additional kwargs to pass to the function call.
Returns:
The return value of the function call.
"""
return func(self, *args, **kwargs)
def on_global_var_update(self, global_vars: Dict[str, TensorType]) -> None:
"""Called on an update to global vars.
Args:
global_vars: Global variables by str key, broadcast from the
driver.
"""
# Store the current global time step (sum over all policies' sample
# steps).
        # Make sure we keep global_timestep as a Tensor for tf-eager
# (leads to memory leaks if not doing so).
if self.framework == "tf2":
self.global_timestep.assign(global_vars["timestep"])
else:
self.global_timestep = global_vars["timestep"]
# Update our lifetime gradient update counter.
num_grad_updates = global_vars.get("num_grad_updates")
if num_grad_updates is not None:
self.num_grad_updates = num_grad_updates
def export_checkpoint(
self,
export_dir: str,
filename_prefix=DEPRECATED_VALUE,
*,
policy_state: Optional[PolicyState] = None,
checkpoint_format: str = "cloudpickle",
) -> None:
"""Exports Policy checkpoint to a local directory and returns an AIR Checkpoint.
Args:
export_dir: Local writable directory to store the AIR Checkpoint
information into.
policy_state: An optional PolicyState to write to disk. Used by
`Algorithm.save_checkpoint()` to save on the additional
`self.get_state()` calls of its different Policies.
checkpoint_format: Either one of 'cloudpickle' or 'msgpack'.
.. testcode::
:skipif: True
from ray.rllib.algorithms.ppo import PPOTorchPolicy
policy = PPOTorchPolicy(...)
policy.export_checkpoint("/tmp/export_dir")
"""
# `filename_prefix` should no longer be used as new Policy checkpoints
# contain more than one file with a fixed filename structure.
if filename_prefix != DEPRECATED_VALUE:
deprecation_warning(
old="Policy.export_checkpoint(filename_prefix=...)",
error=True,
)
if checkpoint_format not in ["cloudpickle", "msgpack"]:
raise ValueError(
f"Value of `checkpoint_format` ({checkpoint_format}) must either be "
"'cloudpickle' or 'msgpack'!"
)
if policy_state is None:
policy_state = self.get_state()
# Write main policy state file.
os.makedirs(export_dir, exist_ok=True)
if checkpoint_format == "cloudpickle":
policy_state["checkpoint_version"] = CHECKPOINT_VERSION
state_file = "policy_state.pkl"
with open(os.path.join(export_dir, state_file), "w+b") as f:
pickle.dump(policy_state, f)
else:
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
msgpack = try_import_msgpack(error=True)
policy_state["checkpoint_version"] = str(CHECKPOINT_VERSION)
# Serialize the config for msgpack dump'ing.
policy_state["policy_spec"]["config"] = AlgorithmConfig._serialize_dict(
policy_state["policy_spec"]["config"]
)
state_file = "policy_state.msgpck"
with open(os.path.join(export_dir, state_file), "w+b") as f:
msgpack.dump(policy_state, f)
# Write RLlib checkpoint json.
with open(os.path.join(export_dir, "rllib_checkpoint.json"), "w") as f:
json.dump(
{
"type": "Policy",
"checkpoint_version": str(policy_state["checkpoint_version"]),
"format": checkpoint_format,
"state_file": state_file,
"ray_version": ray.__version__,
"ray_commit": ray.__commit__,
},
f,
)
# Add external model files, if required.
if self.config["export_native_model_files"]:
self.export_model(os.path.join(export_dir, "model"))
def export_model(self, export_dir: str, onnx: Optional[int] = None) -> None:
"""Exports the Policy's Model to local directory for serving.
Note: The file format will depend on the deep learning framework used.
See the child classed of Policy and their `export_model`
implementations for more details.
Args:
export_dir: Local writable directory.
onnx: If given, will export model in ONNX format. The
value of this parameter sets the ONNX OpSet version to use.
Raises:
ValueError: If a native DL-framework based model (e.g. a keras Model)
cannot be saved to disk for various reasons.
"""
raise NotImplementedError
def import_model_from_h5(self, import_file: str) -> None:
"""Imports Policy from local file.
Args:
import_file: Local readable file.
"""
raise NotImplementedError
def get_session(self) -> Optional["tf1.Session"]:
"""Returns tf.Session object to use for computing actions or None.
Note: This method only applies to TFPolicy sub-classes. All other
sub-classes should expect a None to be returned from this method.
Returns:
The tf Session to use for computing actions and losses with
this policy or None.
"""
return None
def get_host(self) -> str:
"""Returns the computer's network name.
Returns:
The computer's network name or an empty string, if the network
name could not be determined.
"""
return platform.node()
def _get_num_gpus_for_policy(self) -> int:
"""Decide on the number of CPU/GPU nodes this policy should run on.
Return:
0 if policy should run on CPU. >0 if policy should run on 1 or
more GPUs.
"""
worker_idx = self.config.get("worker_index", 0)
fake_gpus = self.config.get("_fake_gpus", False)
if (
ray._private.worker._mode() == ray._private.worker.LOCAL_MODE
and not fake_gpus
):
# If in local debugging mode, and _fake_gpus is not on.
num_gpus = 0
elif worker_idx == 0:
# If head node, take num_gpus.
num_gpus = self.config["num_gpus"]
else:
# If worker node, take `num_gpus_per_env_runner`.
num_gpus = self.config["num_gpus_per_env_runner"]
if num_gpus == 0:
dev = "CPU"
else:
dev = "{} {}".format(num_gpus, "fake-GPUs" if fake_gpus else "GPUs")
logger.info(
"Policy (worker={}) running on {}.".format(
worker_idx if worker_idx > 0 else "local", dev
)
)
return num_gpus
def _create_exploration(self) -> Exploration:
"""Creates the Policy's Exploration object.
This method only exists b/c some Algorithms do not use TfPolicy nor
TorchPolicy, but inherit directly from Policy. Others inherit from
TfPolicy w/o using DynamicTFPolicy.
Returns:
Exploration: The Exploration object to be used by this Policy.
"""
if getattr(self, "exploration", None) is not None:
return self.exploration
exploration = from_config(
Exploration,
self.config.get("exploration_config", {"type": "StochasticSampling"}),
action_space=self.action_space,
policy_config=self.config,
model=getattr(self, "model", None),
num_workers=self.config.get("num_env_runners", 0),
worker_index=self.config.get("worker_index", 0),
framework=getattr(self, "framework", self.config.get("framework", "tf")),
)
return exploration
def _get_default_view_requirements(self):
"""Returns a default ViewRequirements dict.
Note: This is the base/maximum requirement dict, from which later
some requirements will be subtracted again automatically to streamline
data collection, batch creation, and data transfer.
Returns:
ViewReqDict: The default view requirements dict.
"""
# Default view requirements (equal to those that we would use before
# the trajectory view API was introduced).
return {
SampleBatch.OBS: ViewRequirement(space=self.observation_space),
SampleBatch.NEXT_OBS: ViewRequirement(
data_col=SampleBatch.OBS,
shift=1,
space=self.observation_space,
used_for_compute_actions=False,
),
SampleBatch.ACTIONS: ViewRequirement(
space=self.action_space, used_for_compute_actions=False
),
# For backward compatibility with custom Models that don't specify
# these explicitly (will be removed by Policy if not used).
SampleBatch.PREV_ACTIONS: ViewRequirement(
data_col=SampleBatch.ACTIONS, shift=-1, space=self.action_space
),
SampleBatch.REWARDS: ViewRequirement(),
# For backward compatibility with custom Models that don't specify
# these explicitly (will be removed by Policy if not used).
SampleBatch.PREV_REWARDS: ViewRequirement(
data_col=SampleBatch.REWARDS, shift=-1
),
SampleBatch.TERMINATEDS: ViewRequirement(),
SampleBatch.TRUNCATEDS: ViewRequirement(),
SampleBatch.INFOS: ViewRequirement(used_for_compute_actions=False),
SampleBatch.EPS_ID: ViewRequirement(),
SampleBatch.UNROLL_ID: ViewRequirement(),
SampleBatch.AGENT_INDEX: ViewRequirement(),
SampleBatch.T: ViewRequirement(),
}
def _initialize_loss_from_dummy_batch(
self,
auto_remove_unneeded_view_reqs: bool = True,
stats_fn=None,
) -> None:
"""Performs test calls through policy's model and loss.
NOTE: This base method should work for define-by-run Policies such as
torch and tf-eager policies.
If required, will thereby detect automatically, which data views are
required by a) the forward pass, b) the postprocessing, and c) the loss
functions, and remove those from self.view_requirements that are not
necessary for these computations (to save data storage and transfer).
Args:
auto_remove_unneeded_view_reqs: Whether to automatically
remove those ViewRequirements records from
self.view_requirements that are not needed.
stats_fn (Optional[Callable[[Policy, SampleBatch], Dict[str,
TensorType]]]): An optional stats function to be called after
the loss.
"""
if self.config.get("_disable_initialize_loss_from_dummy_batch", False):
return
# Signal Policy that currently we do not want to eager/jit trace
# any function calls. This is to be able to track which columns
# in the dummy batch are accessed by the different functions (e.g.
# loss) such that we can then adjust our view requirements.
self._no_tracing = True
# Save for later so that loss init does not change global timestep
global_ts_before_init = int(convert_to_numpy(self.global_timestep))
sample_batch_size = min(
max(self.batch_divisibility_req * 4, 32),
self.config["train_batch_size"], # Don't go over the asked batch size.
)
self._dummy_batch = self._get_dummy_batch_from_view_requirements(
sample_batch_size
)
self._lazy_tensor_dict(self._dummy_batch)
explore = False
actions, state_outs, extra_outs = self.compute_actions_from_input_dict(
self._dummy_batch, explore=explore
)
for key, view_req in self.view_requirements.items():
if key not in self._dummy_batch.accessed_keys:
view_req.used_for_compute_actions = False
# Add all extra action outputs to view requirements (these may be
# filtered out later again, if not needed for postprocessing or loss).
for key, value in extra_outs.items():
self._dummy_batch[key] = value
if key not in self.view_requirements:
if isinstance(value, (dict, np.ndarray)):
# the assumption is that value is a nested_dict of np.arrays leaves
space = get_gym_space_from_struct_of_tensors(value)
self.view_requirements[key] = ViewRequirement(
space=space, used_for_compute_actions=False
)
else:
raise ValueError(
"policy.compute_actions_from_input_dict() returns an "
"extra action output that is neither a numpy array nor a dict."
)
for key in self._dummy_batch.accessed_keys:
if key not in self.view_requirements:
self.view_requirements[key] = ViewRequirement()
self.view_requirements[key].used_for_compute_actions = False
# TODO (kourosh) Why did we use to make used_for_compute_actions True here?
new_batch = self._get_dummy_batch_from_view_requirements(sample_batch_size)
# Make sure the dummy_batch will return numpy arrays when accessed
self._dummy_batch.set_get_interceptor(None)
# try to re-use the output of the previous run to avoid overriding things that
# would break (e.g. the scale of a Normal distribution cannot be zero)
for k in new_batch:
if k not in self._dummy_batch:
self._dummy_batch[k] = new_batch[k]
# Make sure the book-keeping of dummy_batch keys is reset to correctly track
# what is accessed, what is added and what's deleted from now on.
self._dummy_batch.accessed_keys.clear()
self._dummy_batch.deleted_keys.clear()
self._dummy_batch.added_keys.clear()
if self.exploration:
# Policies with RLModules don't have an exploration object.
self.exploration.postprocess_trajectory(self, self._dummy_batch)
postprocessed_batch = self.postprocess_trajectory(self._dummy_batch)
seq_lens = None
if state_outs:
B = 4 # For RNNs, have B=4, T=[depends on sample_batch_size]
i = 0
while "state_in_{}".format(i) in postprocessed_batch:
postprocessed_batch["state_in_{}".format(i)] = postprocessed_batch[
"state_in_{}".format(i)
][:B]
if "state_out_{}".format(i) in postprocessed_batch:
postprocessed_batch["state_out_{}".format(i)] = postprocessed_batch[
"state_out_{}".format(i)
][:B]
i += 1
seq_len = sample_batch_size // B
seq_lens = np.array([seq_len for _ in range(B)], dtype=np.int32)
postprocessed_batch[SampleBatch.SEQ_LENS] = seq_lens
# Switch on lazy to-tensor conversion on `postprocessed_batch`.
train_batch = self._lazy_tensor_dict(postprocessed_batch)
# Calling loss, so set `is_training` to True.
train_batch.set_training(True)
if seq_lens is not None:
train_batch[SampleBatch.SEQ_LENS] = seq_lens
train_batch.count = self._dummy_batch.count
# Call the loss function, if it exists.
# TODO(jungong) : clean up after all agents get migrated.
# We should simply do self.loss(...) here.
if self._loss is not None:
self._loss(self, self.model, self.dist_class, train_batch)
elif is_overridden(self.loss) and not self.config["in_evaluation"]:
self.loss(self.model, self.dist_class, train_batch)
# Call the stats fn, if given.
# TODO(jungong) : clean up after all agents get migrated.
# We should simply do self.stats_fn(train_batch) here.
if stats_fn is not None:
stats_fn(self, train_batch)
if hasattr(self, "stats_fn") and not self.config["in_evaluation"]:
self.stats_fn(train_batch)
# Re-enable tracing.
self._no_tracing = False
# Add new columns automatically to view-reqs.
if auto_remove_unneeded_view_reqs:
# Add those needed for postprocessing and training.
all_accessed_keys = (
train_batch.accessed_keys
| self._dummy_batch.accessed_keys
| self._dummy_batch.added_keys
)
for key in all_accessed_keys:
if key not in self.view_requirements and key != SampleBatch.SEQ_LENS:
self.view_requirements[key] = ViewRequirement(
used_for_compute_actions=False
)
if self._loss or is_overridden(self.loss):
# Tag those only needed for post-processing (with some
# exceptions).
for key in self._dummy_batch.accessed_keys:
if (
key not in train_batch.accessed_keys
and key in self.view_requirements
and key not in self.model.view_requirements
and key
not in [
SampleBatch.EPS_ID,
SampleBatch.AGENT_INDEX,
SampleBatch.UNROLL_ID,
SampleBatch.TERMINATEDS,
SampleBatch.TRUNCATEDS,
SampleBatch.REWARDS,
SampleBatch.INFOS,
SampleBatch.T,
]
):
self.view_requirements[key].used_for_training = False
# Remove those not needed at all (leave those that are needed
# by Sampler to properly execute sample collection). Also always leave
# TERMINATEDS, TRUNCATEDS, REWARDS, INFOS, no matter what.
for key in list(self.view_requirements.keys()):
if (
key not in all_accessed_keys
and key
not in [
SampleBatch.EPS_ID,
SampleBatch.AGENT_INDEX,
SampleBatch.UNROLL_ID,
SampleBatch.TERMINATEDS,
SampleBatch.TRUNCATEDS,
SampleBatch.REWARDS,
SampleBatch.INFOS,
SampleBatch.T,
]
and key not in self.model.view_requirements
):
# If user deleted this key manually in postprocessing
# fn, warn about it and do not remove from
# view-requirements.
if key in self._dummy_batch.deleted_keys:
logger.warning(
"SampleBatch key '{}' was deleted manually in "
"postprocessing function! RLlib will "
"automatically remove non-used items from the "
"data stream. Remove the `del` from your "
"postprocessing function.".format(key)
)
# If we are not writing output to disk, it is safe to erase
# this key to save space in the sample batch.
elif self.config["output"] is None:
del self.view_requirements[key]
if type(self.global_timestep) is int:
self.global_timestep = global_ts_before_init
elif isinstance(self.global_timestep, tf.Variable):
self.global_timestep.assign(global_ts_before_init)
else:
raise ValueError(
"Variable self.global_timestep of policy {} needs to be "
"either of type `int` or `tf.Variable`, "
"but is of type {}.".format(self, type(self.global_timestep))
)
def maybe_remove_time_dimension(self, input_dict: Dict[str, TensorType]):
"""Removes a time dimension for recurrent RLModules.
Args:
input_dict: The input dict.
Returns:
The input dict with a possibly removed time dimension.
"""
raise NotImplementedError
def _get_dummy_batch_from_view_requirements(
self, batch_size: int = 1
) -> SampleBatch:
"""Creates a numpy dummy batch based on the Policy's view requirements.
Args:
batch_size: The size of the batch to create.
Returns:
Dict[str, TensorType]: The dummy batch containing all zero values.
"""
ret = {}
for view_col, view_req in self.view_requirements.items():
data_col = view_req.data_col or view_col
# Flattened dummy batch.
if (isinstance(view_req.space, (gym.spaces.Tuple, gym.spaces.Dict))) and (
(
data_col == SampleBatch.OBS
and not self.config["_disable_preprocessor_api"]
)
or (
data_col == SampleBatch.ACTIONS
and not self.config.get("_disable_action_flattening")
)
):
_, shape = ModelCatalog.get_action_shape(
view_req.space, framework=self.config["framework"]
)
ret[view_col] = np.zeros((batch_size,) + shape[1:], np.float32)
# Non-flattened dummy batch.
else:
# Range of indices on time-axis, e.g. "-50:-1".
if isinstance(view_req.space, gym.spaces.Space):
time_size = (
len(view_req.shift_arr) if len(view_req.shift_arr) > 1 else None
)
ret[view_col] = get_dummy_batch_for_space(
view_req.space, batch_size=batch_size, time_size=time_size
)
else:
ret[view_col] = [view_req.space for _ in range(batch_size)]
# Due to different view requirements for the different columns,
# columns in the resulting batch may not all have the same batch size.
return SampleBatch(ret)
def _update_model_view_requirements_from_init_state(self):
"""Uses Model's (or this Policy's) init state to add needed ViewReqs.
Can be called from within a Policy to make sure RNNs automatically
update their internal state-related view requirements.
Changes the `self.view_requirements` dict.
"""
self._model_init_state_automatically_added = True
model = getattr(self, "model", None)
obj = model or self
if model and not hasattr(model, "view_requirements"):
model.view_requirements = {
SampleBatch.OBS: ViewRequirement(space=self.observation_space)
}
view_reqs = obj.view_requirements
# Add state-ins to this model's view.
init_state = []
if hasattr(obj, "get_initial_state") and callable(obj.get_initial_state):
init_state = obj.get_initial_state()
else:
# Add this functionality automatically for new native model API.
if (
tf
and isinstance(model, tf.keras.Model)
and "state_in_0" not in view_reqs
):
obj.get_initial_state = lambda: [
np.zeros_like(view_req.space.sample())
for k, view_req in model.view_requirements.items()
if k.startswith("state_in_")
]
else:
obj.get_initial_state = lambda: []
if "state_in_0" in view_reqs:
self.is_recurrent = lambda: True
# Make sure auto-generated init-state view requirements get added
# to both Policy and Model, no matter what.
view_reqs = [view_reqs] + (
[self.view_requirements] if hasattr(self, "view_requirements") else []
)
for i, state in enumerate(init_state):
# Allow `state` to be either a Space (use zeros as initial values)
# or any value (e.g. a dict or a non-zero tensor).
fw = (
np
if isinstance(state, np.ndarray)
else torch
if torch and torch.is_tensor(state)
else None
)
if fw:
space = (
Box(-1.0, 1.0, shape=state.shape) if fw.all(state == 0.0) else state
)
else:
space = state
for vr in view_reqs:
# Only override if user has not already provided
# custom view-requirements for state_in_n.
if "state_in_{}".format(i) not in vr:
vr["state_in_{}".format(i)] = ViewRequirement(
"state_out_{}".format(i),
shift=-1,
used_for_compute_actions=True,
batch_repeat_value=self.config.get("model", {}).get(
"max_seq_len", 1
),
space=space,
)
# Only override if user has not already provided
# custom view-requirements for state_out_n.
if "state_out_{}".format(i) not in vr:
vr["state_out_{}".format(i)] = ViewRequirement(
space=space, used_for_training=True
)
def __repr__(self):
return type(self).__name__
@OldAPIStack
def get_gym_space_from_struct_of_tensors(
value: Union[Dict, Tuple, List, TensorType],
batched_input=True,
) -> gym.Space:
start_idx = 1 if batched_input else 0
struct = tree.map_structure(
lambda x: gym.spaces.Box(
-1.0, 1.0, shape=x.shape[start_idx:], dtype=get_np_dtype(x)
),
value,
)
space = get_gym_space_from_struct_of_spaces(struct)
return space
@OldAPIStack
def get_gym_space_from_struct_of_spaces(value: Union[Dict, Tuple]) -> gym.spaces.Dict:
if isinstance(value, dict):
return gym.spaces.Dict(
{k: get_gym_space_from_struct_of_spaces(v) for k, v in value.items()}
)
elif isinstance(value, (tuple, list)):
return gym.spaces.Tuple([get_gym_space_from_struct_of_spaces(v) for v in value])
else:
assert isinstance(value, gym.spaces.Space), (
f"The struct of spaces should only contain dicts, tiples and primitive "
f"gym spaces. Space is of type {type(value)}"
)
return value
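For orientation, a minimal sketch (not part of the RLlib module above) that reads back the rllib_checkpoint.json metadata which export_checkpoint() writes; the /tmp/export_dir path is a placeholder assumption.

import json
import os

export_dir = "/tmp/export_dir"  # placeholder: wherever export_checkpoint() wrote to
meta_path = os.path.join(export_dir, "rllib_checkpoint.json")
if os.path.exists(meta_path):
    with open(meta_path) as f:
        meta = json.load(f)
    # Keys written by export_checkpoint() above: type, checkpoint_version,
    # format, state_file, ray_version, ray_commit.
    state_file = os.path.join(export_dir, meta["state_file"])
    print(meta["format"], meta["checkpoint_version"], state_file)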
|
Policy
|
python
|
encode__django-rest-framework
|
tests/test_permissions.py
|
{
"start": 18723,
"end": 18941
}
|
class ____(permissions.BasePermission):
message = 'Custom: You cannot access this resource'
code = 'permission_denied_custom'
def has_permission(self, request, view):
return False
|
BasicPermWithDetail
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/migration_helpers/issue_alert_migration.py
|
{
"start": 1568,
"end": 12686
}
|
class ____:
def __init__(
self,
rule: Rule,
user_id: int | None = None,
is_dry_run: bool | None = False,
should_create_actions: bool | None = True,
):
self.rule = rule
self.user_id = user_id
self.is_dry_run = is_dry_run
self.should_create_actions = should_create_actions
self.data = rule.data
self.project = rule.project
self.organization = self.project.organization
def run(self) -> Workflow:
conditions, filters = split_conditions_and_filters(self.data["conditions"])
action_match = self.data.get("action_match") or Rule.DEFAULT_CONDITION_MATCH
workflow = self._create_workflow_and_lookup(
conditions=conditions,
filters=filters,
action_match=action_match,
)
filter_match = self.data.get("filter_match") or Rule.DEFAULT_FILTER_MATCH
if_dcg = self._create_if_dcg(
filter_match=filter_match,
workflow=workflow,
conditions=conditions,
filters=filters,
)
if self.should_create_actions:
self._create_workflow_actions(if_dcg=if_dcg, actions=self.data["actions"])
return workflow
def _create_detector_lookups(self) -> list[Detector | None]:
if self.rule.source == RuleSource.CRON_MONITOR:
# Find the cron detector that was created before the rule
monitor_slug = None
for condition in self.data.get("conditions", []):
if condition.get("key") == "monitor.slug":
monitor_slug = condition.get("value")
break
if not monitor_slug:
return [None]
try:
with in_test_hide_transaction_boundary():
monitor = Monitor.objects.get(
slug=monitor_slug,
organization_id=self.organization.id,
)
detector = Detector.objects.get(
datasource__type=DATA_SOURCE_CRON_MONITOR,
datasource__source_id=str(monitor.id),
datasource__organization_id=self.organization.id,
)
return [detector]
except (Monitor.DoesNotExist, Detector.DoesNotExist):
pass
return [None]
if self.is_dry_run:
error_detector = Detector.objects.filter(
type=ErrorGroupType.slug, project=self.project
).first()
if not error_detector:
error_detector = Detector(type=ErrorGroupType.slug, project=self.project)
issue_stream_detector = Detector.objects.filter(
type=IssueStreamGroupType.slug, project=self.project
).first()
if not issue_stream_detector:
issue_stream_detector = Detector(
type=IssueStreamGroupType.slug, project=self.project
)
else:
error_detector, _ = Detector.objects.get_or_create(
type=ErrorGroupType.slug,
project=self.project,
defaults={"config": {}, "name": ERROR_DETECTOR_NAME},
)
AlertRuleDetector.objects.get_or_create(detector=error_detector, rule_id=self.rule.id)
issue_stream_detector, _ = Detector.objects.get_or_create(
type=IssueStreamGroupType.slug,
project=self.project,
defaults={"config": {}, "name": ISSUE_STREAM_DETECTOR_NAME},
)
return [error_detector, issue_stream_detector]
def _connect_default_detectors(self, workflow: Workflow) -> None:
default_detectors = self._create_detector_lookups()
for detector in default_detectors:
if detector:
DetectorWorkflow.objects.get_or_create(detector=detector, workflow=workflow)
def _bulk_create_data_conditions(
self,
conditions: list[dict[str, Any]],
dcg: DataConditionGroup,
filters: list[dict[str, Any]] | None = None,
):
dcg_conditions: list[DataCondition] = []
for condition in conditions:
try:
if (
condition["id"] == EventUniqueUserFrequencyConditionWithConditions.id
): # special case: this condition uses filters, so the migration needs to combine the filters into the condition
dcg_conditions.append(
create_event_unique_user_frequency_condition_with_conditions(
dict(condition), dcg, filters
)
)
else:
dcg_conditions.append(translate_to_data_condition(dict(condition), dcg=dcg))
except Exception as e:
logger.exception(
"workflow_engine.issue_alert_migration.error",
extra={"rule_id": self.rule.id, "error": str(e)},
)
if self.is_dry_run:
raise
else:
continue
filtered_data_conditions = [
dc for dc in dcg_conditions if dc.type not in SKIPPED_CONDITIONS
]
if self.is_dry_run:
for dc in filtered_data_conditions:
dc.full_clean(
exclude=["condition_group"]
) # condition_group will be null, which is not allowed
enforce_data_condition_json_schema(dc)
return filtered_data_conditions
data_conditions: list[DataCondition] = []
# try one by one, ignoring errors
for dc in filtered_data_conditions:
try:
dc.save()
data_conditions.append(dc)
except Exception as e:
logger.exception(
"workflow_engine.issue_alert_migration.error",
extra={"rule_id": self.rule.id, "error": str(e)},
)
return data_conditions
def _create_when_dcg(
self,
action_match: str,
):
if action_match == "any":
logic_type = DataConditionGroup.Type.ANY_SHORT_CIRCUIT.value
else:
logic_type = DataConditionGroup.Type(action_match)
kwargs = {"organization": self.organization, "logic_type": logic_type}
if self.is_dry_run:
when_dcg = DataConditionGroup(**kwargs)
when_dcg.full_clean()
else:
when_dcg = DataConditionGroup.objects.create(**kwargs)
return when_dcg
def _create_workflow_and_lookup(
self,
conditions: list[dict[str, Any]],
filters: list[dict[str, Any]],
action_match: str,
) -> Workflow:
when_dcg = self._create_when_dcg(action_match=action_match)
data_conditions = self._bulk_create_data_conditions(
conditions=conditions, filters=filters, dcg=when_dcg
)
# the only time the data_conditions list will be empty is if somebody only has EveryEventCondition in their conditions list.
# if it's empty and this is not the case, we should not migrate
no_conditions = len(conditions) == 0
no_data_conditions = len(data_conditions) == 0
only_has_every_event_cond = (
len(
[condition for condition in conditions if condition["id"] == EveryEventCondition.id]
)
> 0
)
if not self.is_dry_run:
if no_data_conditions and no_conditions:
# originally no conditions and we expect no data conditions
pass
elif no_data_conditions and not only_has_every_event_cond:
raise Exception("No valid trigger conditions, skipping migration")
enabled = True
rule_snooze = RuleSnooze.objects.filter(rule=self.rule, user_id=None).first()
if rule_snooze and rule_snooze.until is None:
enabled = False
if self.rule.status == ObjectStatus.DISABLED:
enabled = False
config = {"frequency": self.rule.data.get("frequency") or Workflow.DEFAULT_FREQUENCY}
kwargs = {
"organization": self.organization,
"name": self.rule.label,
"environment_id": self.rule.environment_id,
"when_condition_group": when_dcg,
"created_by_id": self.user_id,
"owner_user_id": self.rule.owner_user_id,
"owner_team": self.rule.owner_team,
"config": config,
"enabled": enabled,
}
if self.is_dry_run:
workflow = Workflow(**kwargs)
workflow.full_clean(exclude=["when_condition_group"])
workflow.validate_config(workflow.config_schema)
if AlertRuleWorkflow.objects.filter(rule_id=self.rule.id).exists():
raise Exception("Issue alert already migrated")
else:
workflow = Workflow.objects.create(**kwargs)
workflow.update(date_added=self.rule.date_added)
self._connect_default_detectors(workflow=workflow)
AlertRuleWorkflow.objects.create(rule_id=self.rule.id, workflow=workflow)
return workflow
def _create_if_dcg(
self,
filter_match: str,
workflow: Workflow,
conditions: list[dict[str, Any]],
filters: list[dict[str, Any]],
) -> DataConditionGroup:
if (
filter_match == "any" or filter_match is None
): # must create IF DCG even if it's empty, to attach actions
logic_type = DataConditionGroup.Type.ANY_SHORT_CIRCUIT
else:
logic_type = DataConditionGroup.Type(filter_match)
kwargs = {
"organization": self.organization,
"logic_type": logic_type,
}
if self.is_dry_run:
if_dcg = DataConditionGroup(**kwargs)
if_dcg.full_clean()
else:
if_dcg = DataConditionGroup.objects.create(**kwargs)
WorkflowDataConditionGroup.objects.create(workflow=workflow, condition_group=if_dcg)
conditions_ids = [condition["id"] for condition in conditions]
# skip migrating filters for special case
if EventUniqueUserFrequencyConditionWithConditions.id not in conditions_ids:
self._bulk_create_data_conditions(conditions=filters, dcg=if_dcg)
return if_dcg
def _create_workflow_actions(
self, if_dcg: DataConditionGroup, actions: list[dict[str, Any]]
) -> None:
notification_actions = build_notification_actions_from_rule_data_actions(
actions, is_dry_run=self.is_dry_run or False
)
dcg_actions = [
DataConditionGroupAction(action=action, condition_group=if_dcg)
for action in notification_actions
]
if not self.is_dry_run:
DataConditionGroupAction.objects.bulk_create(dcg_actions)
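A minimal usage sketch for the migrator class above, assuming it runs inside Sentry's Django environment with a persisted issue-alert `rule`; the helper name `migrate_rule` is illustrative, not part of the module.

from sentry.workflow_engine.migration_helpers.issue_alert_migration import (
    IssueAlertMigrator,
)


def migrate_rule(rule, user_id=None):
    # Validate first: a dry run exercises full_clean() without persisting rows.
    IssueAlertMigrator(rule, user_id=user_id, is_dry_run=True).run()
    # Then migrate for real: creates the Workflow, condition groups, actions,
    # and the AlertRuleWorkflow link.
    return IssueAlertMigrator(rule, user_id=user_id).run()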
|
IssueAlertMigrator
|
python
|
h5py__h5py
|
h5py/tests/test_h5t.py
|
{
"start": 357,
"end": 1703
}
|
class ____(ut.TestCase):
"""
Feature: Compound types can be created from Python dtypes
"""
def test_ref(self):
""" Reference types are correctly stored in compound types (issue 144)
"""
dt = np.dtype([('a', h5py.ref_dtype), ('b', '<f4')])
tid = h5t.py_create(dt, logical=True)
t1, t2 = tid.get_member_type(0), tid.get_member_type(1)
self.assertEqual(t1, h5t.STD_REF_OBJ)
self.assertEqual(t2, h5t.IEEE_F32LE)
self.assertEqual(tid.get_member_offset(0), 0)
self.assertEqual(tid.get_member_offset(1), h5t.STD_REF_OBJ.get_size())
def test_out_of_order_offsets(self):
size = 20
type_dict = {
'names': ['f1', 'f2', 'f3'],
'formats': ['<f4', '<i4', '<f8'],
'offsets': [0, 16, 8]
}
expected_dtype = np.dtype(type_dict)
tid = h5t.create(h5t.COMPOUND, size)
for name, offset, dt in zip(
type_dict["names"], type_dict["offsets"], type_dict["formats"], strict=True
):
tid.insert(
name.encode("utf8") if isinstance(name, str) else name,
offset,
h5t.py_create(dt)
)
self.assertEqual(tid.dtype, expected_dtype)
self.assertEqual(tid.dtype.itemsize, size)
|
TestCompound
|
python
|
PyCQA__pylint
|
tests/functional/d/deprecated/deprecated_decorators.py
|
{
"start": 169,
"end": 277
}
|
class ____:
@abc.abstractclassmethod # [deprecated-decorator]
def my_method(cls):
pass
|
MyClass
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/v1_compat_tests/array_ops_test.py
|
{
"start": 2876,
"end": 3291
}
|
class ____(test_util.TensorFlowTestCase):
@test_util.run_v1_only("Variables need initialization only in V1")
def testUninitialized(self):
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"Attempting to use uninitialized value Variable"):
v = variable_v1.VariableV1([1, 2])
self.evaluate(v[:].assign([1, 2]))
if __name__ == "__main__":
test_lib.main()
|
SliceAssignTest
|
python
|
pypa__pip
|
src/pip/_internal/metadata/pkg_resources.py
|
{
"start": 8415,
"end": 10544
}
|
class ____(BaseEnvironment):
def __init__(self, ws: pkg_resources.WorkingSet) -> None:
self._ws = ws
@classmethod
def default(cls) -> BaseEnvironment:
return cls(pkg_resources.working_set)
@classmethod
def from_paths(cls, paths: list[str] | None) -> BaseEnvironment:
return cls(pkg_resources.WorkingSet(paths))
def _iter_distributions(self) -> Iterator[BaseDistribution]:
for dist in self._ws:
yield Distribution(dist)
def _search_distribution(self, name: str) -> BaseDistribution | None:
"""Find a distribution matching the ``name`` in the environment.
This searches from *all* distributions available in the environment, to
match the behavior of ``pkg_resources.get_distribution()``.
"""
canonical_name = canonicalize_name(name)
for dist in self.iter_all_distributions():
if dist.canonical_name == canonical_name:
return dist
return None
def get_distribution(self, name: str) -> BaseDistribution | None:
# Search the distribution by looking through the working set.
dist = self._search_distribution(name)
if dist:
return dist
# If distribution could not be found, call working_set.require to
# update the working set, and try to find the distribution again.
# This might happen for e.g. when you install a package twice, once
# using setup.py develop and again using setup.py install. Now when
# running pip uninstall twice, the package gets removed from the
# working set in the first uninstall, so we have to populate the
# working set again so that pip knows about it and the package gets
# picked up and is successfully uninstalled the second time too.
try:
# We didn't pass in any version specifiers, so this can never
# raise pkg_resources.VersionConflict.
self._ws.require(name)
except pkg_resources.DistributionNotFound:
return None
return self._search_distribution(name)
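A usage sketch for the environment class above. `pip._internal` is not a public API and this pkg_resources-backed backend may not be the one pip actually selects, so treat it purely as an illustration.

from pip._internal.metadata.pkg_resources import Environment

env = Environment.default()  # wraps pkg_resources.working_set
dist = env.get_distribution("pip")
if dist is not None:
    # canonical_name comes from the BaseDistribution interface used above.
    print(dist.canonical_name)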
|
Environment
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/batch.py
|
{
"start": 1376,
"end": 4853
}
|
class ____(AwsBaseSensor[BatchClientHook]):
"""
Poll the state of the Batch Job until it reaches a terminal state; fails if the job fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:BatchSensor`
:param job_id: Batch job_id to check the state for
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param deferrable: Run sensor in the deferrable mode.
:param poke_interval: polling period in seconds to check for the status of the job.
:param max_retries: Number of times to poll for job state before
returning the current state.
"""
aws_hook_class = BatchClientHook
template_fields: Sequence[str] = aws_template_fields(
"job_id",
)
template_ext: Sequence[str] = ()
ui_color = "#66c3ff"
def __init__(
self,
*,
job_id: str,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poke_interval: float = 30,
max_retries: int = 4200,
**kwargs,
):
super().__init__(**kwargs)
self.job_id = job_id
self.deferrable = deferrable
self.poke_interval = poke_interval
self.max_retries = max_retries
def poke(self, context: Context) -> bool:
job_description = self.hook.get_job_description(self.job_id)
state = job_description["status"]
if state == BatchClientHook.SUCCESS_STATE:
return True
if state in BatchClientHook.INTERMEDIATE_STATES:
return False
raise AirflowException(f"Batch sensor failed. AWS Batch job status: {state}")
def execute(self, context: Context) -> None:
if not self.deferrable:
super().execute(context=context)
else:
timeout = (
timedelta(seconds=self.max_retries * self.poke_interval + 60)
if self.max_retries
else self.execution_timeout
)
self.defer(
timeout=timeout,
trigger=BatchJobTrigger(
job_id=self.job_id,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
waiter_delay=int(self.poke_interval),
waiter_max_attempts=self.max_retries,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]) -> None:
"""
Execute when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] != "success":
raise AirflowException(f"Error while running job: {event}")
job_id = event["job_id"]
self.log.info("Batch Job %s complete", job_id)
|
BatchSensor
|
python
|
huggingface__transformers
|
src/transformers/models/roformer/modeling_roformer.py
|
{
"start": 3327,
"end": 4578
}
|
class ____(nn.Module):
"""Construct the embeddings from word and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=inputs_embeds.device)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
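A quick shape check for the embeddings module above, assuming the RoFormerEmbeddings class defined here and its `torch` / `torch.nn as nn` imports are in scope; the SimpleNamespace config is a stand-in carrying only the attributes read in `__init__`.

from types import SimpleNamespace

import torch

config = SimpleNamespace(
    vocab_size=1000,
    embedding_size=64,
    pad_token_id=0,
    type_vocab_size=2,
    layer_norm_eps=1e-12,
    hidden_dropout_prob=0.1,
)
embeddings = RoFormerEmbeddings(config)
input_ids = torch.randint(0, config.vocab_size, (2, 8))
out = embeddings(input_ids=input_ids)
print(out.shape)  # torch.Size([2, 8, 64])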
|
RoFormerEmbeddings
|
python
|
huggingface__transformers
|
tests/models/hiera/test_modeling_hiera.py
|
{
"start": 8158,
"end": 22106
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Hiera does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
HieraModel,
HieraBackbone,
HieraForImageClassification,
HieraForPreTraining,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"image-feature-extraction": HieraModel, "image-classification": HieraForImageClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = HieraModelTester(self)
self.config_tester = ConfigTester(self, config_class=HieraConfig, has_text_modality=False)
def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def test_batching_equivalence(self, atol=3e-4, rtol=3e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
# Overriding as Hiera `get_input_embeddings` returns HieraPatchEmbeddings
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
# Overriding as attention shape depends on patch_stride and mask_unit_size
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
expected_num_attentions = len(self.model_tester.depths)
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
seq_len = math.prod([i // s for i, s in zip(config.image_size, config.patch_stride)])
mask_unit_area = math.prod(config.masked_unit_size)
num_windows = seq_len // mask_unit_area
if model_class.__name__ == "HieraForPreTraining":
num_windows = int(num_windows * (1 - config.mask_ratio))
seq_len = int(num_windows * mask_unit_area)
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_heads[0], num_windows, mask_unit_area, seq_len // num_windows],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# also another +1 for reshaped_hidden_states
added_hidden_states = 1 if model_class.__name__ == "HieraBackbone" else 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), expected_num_attentions)
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_heads[0], num_windows, mask_unit_area, seq_len // num_windows],
)
# Overriding as attention shape depends on patch_stride and mask_unit_size
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class, image_size):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# Hiera has a different seq_length
patch_size = config.patch_stride
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
if model_class.__name__ == "HieraForPreTraining":
mask_unit_area = math.prod(config.masked_unit_size)
num_windows = num_patches // mask_unit_area
num_windows = int(num_windows * (1 - config.mask_ratio))
num_patches = int(num_windows * mask_unit_area)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[num_patches, self.model_tester.embed_dim],
)
if model_class.__name__ != "HieraBackbone":
reshaped_hidden_states = outputs.reshaped_hidden_states
self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
batch_size = reshaped_hidden_states[0].shape[0]
num_channels = reshaped_hidden_states[0].shape[-1]
reshaped_hidden_states = reshaped_hidden_states[0].view(batch_size, -1, num_channels)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]),
[num_patches, self.model_tester.embed_dim],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = self.model_tester.image_size
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class, image_size)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class, image_size)
# Overriding since HieraForPreTraining outputs bool_masked_pos which has to be converted to float in the msg
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (list, tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object.float() - dict_object.float()))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
additional_kwargs = {}
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
additional_kwargs["output_hidden_states"] = True
check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs)
if self.has_attentions:
# Removing "output_hidden_states"
del additional_kwargs["output_hidden_states"]
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
additional_kwargs["output_attentions"] = True
check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
additional_kwargs["output_hidden_states"] = True
check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs)
@unittest.skip(reason="Hiera Transformer does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="Hiera does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in ["facebook/hiera-tiny-224-hf"]:
model = HieraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
@slow
|
HieraModelTest
|
python
|
pandas-dev__pandas
|
pandas/core/groupby/ops.py
|
{
"start": 40491,
"end": 40846
}
|
class ____(DataSplitter):
def _chop(self, sdata: Series, slice_obj: slice) -> Series:
# fastpath equivalent to `sdata.iloc[slice_obj]`
mgr = sdata._mgr.get_slice(slice_obj)
ser = sdata._constructor_from_mgr(mgr, axes=mgr.axes)
ser._name = sdata.name
return ser.__finalize__(sdata, method="groupby")
|
SeriesSplitter
|
python
|
getsentry__sentry
|
tests/sentry/utils/test_samples.py
|
{
"start": 1508,
"end": 4296
}
|
class ____:
@pytest.mark.parametrize("platform", list(CONSOLES))
def test_console_platforms_trigger_screenshot_attachment(
self, default_project: Project, platform: str
):
with mock.patch(
"sentry.utils.samples.create_console_screenshot_attachment"
) as mock_attachment:
event = create_sample_event(default_project, platform=platform)
assert event is not None
mock_attachment.assert_called_once_with(event, default_project, platform)
def test_non_console_platforms_skip_screenshot_attachment(self, default_project):
with mock.patch(
"sentry.utils.samples.create_console_screenshot_attachment"
) as mock_attachment:
event = create_sample_event(default_project, platform="python")
assert event is not None
mock_attachment.assert_not_called()
# TODO(telemetry): Enable test when we have the screenshots
# @pytest.mark.parametrize("platform", list(CONSOLES))
# def test_console_platforms_create_screenshot_attachment(self, default_project, platform):
# from sentry.models.eventattachment import EventAttachment
# EventAttachment.objects.filter(project_id=default_project.id).delete()
# event = create_sample_event(default_project, platform=platform)
# assert event is not None
# attachments = EventAttachment.objects.filter(
# event_id=event.event_id, project_id=default_project.id, name="screenshot.png"
# )
# assert attachments.exists()
# attachment = attachments.first()
# assert attachment.content_type == "image/png"
# assert attachment.name == "screenshot.png"
# assert attachment.size > 0
def test_screenshot_attachment_handles_error_gracefully(self, default_project):
with mock.patch("sentry.utils.samples.load_console_screenshot") as mock_load:
mock_load.return_value = None
event = create_sample_event(default_project, platform="xbox")
assert event is not None
from sentry.models.eventattachment import EventAttachment
attachments = EventAttachment.objects.filter(
event_id=event.event_id, project_id=default_project.id, name="screenshot.png"
)
assert not attachments.exists()
def test_screenshot_attachment_handles_database_error_gracefully(self, default_project):
with mock.patch(
"sentry.models.eventattachment.EventAttachment.objects.create"
) as mock_create:
mock_create.side_effect = Exception("Database connection failed")
event = create_sample_event(default_project, platform="xbox")
assert event is not None
|
TestConsoleSamples
|
python
|
networkx__networkx
|
networkx/algorithms/isomorphism/tests/test_isomorphvf2.py
|
{
"start": 192,
"end": 2143
}
|
class ____:
# Source: https://en.wikipedia.org/wiki/Graph_isomorphism
# Nodes 'a', 'b', 'c' and 'd' form a column.
# Nodes 'g', 'h', 'i' and 'j' form a column.
g1edges = [
["a", "g"],
["a", "h"],
["a", "i"],
["b", "g"],
["b", "h"],
["b", "j"],
["c", "g"],
["c", "i"],
["c", "j"],
["d", "h"],
["d", "i"],
["d", "j"],
]
# Nodes 1,2,3,4 form the clockwise corners of a large square.
# Nodes 5,6,7,8 form the clockwise corners of a small square
g2edges = [
[1, 2],
[2, 3],
[3, 4],
[4, 1],
[5, 6],
[6, 7],
[7, 8],
[8, 5],
[1, 5],
[2, 6],
[3, 7],
[4, 8],
]
def test_graph(self):
g1 = nx.Graph()
g2 = nx.Graph()
g1.add_edges_from(self.g1edges)
g2.add_edges_from(self.g2edges)
gm = iso.GraphMatcher(g1, g2)
assert gm.is_isomorphic()
# Just testing some cases
assert gm.subgraph_is_monomorphic()
mapping = sorted(gm.mapping.items())
# this mapping is only one of the possibilities
# so this test needs to be reconsidered
# isomap = [('a', 1), ('b', 6), ('c', 3), ('d', 8),
# ('g', 2), ('h', 5), ('i', 4), ('j', 7)]
# assert_equal(mapping, isomap)
def test_subgraph(self):
g1 = nx.Graph()
g2 = nx.Graph()
g1.add_edges_from(self.g1edges)
g2.add_edges_from(self.g2edges)
g3 = g2.subgraph([1, 2, 3, 4])
gm = iso.GraphMatcher(g1, g3)
assert gm.subgraph_is_isomorphic()
def test_subgraph_mono(self):
g1 = nx.Graph()
g2 = nx.Graph()
g1.add_edges_from(self.g1edges)
g2.add_edges_from([[1, 2], [2, 3], [3, 4]])
gm = iso.GraphMatcher(g1, g2)
assert gm.subgraph_is_monomorphic()
|
TestWikipediaExample
|
python
|
getsentry__sentry
|
src/sentry/integrations/discord/handlers/discord_handler.py
|
{
"start": 790,
"end": 2331
}
|
class ____(IntegrationActionHandler):
group = ActionHandler.Group.NOTIFICATION
provider_slug = IntegrationProviderSlug.DISCORD
# Main difference between the discord and slack action config schemas is that the target_display is possibly null
config_schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"description": "The configuration schema for a Discord Action",
"type": "object",
"properties": {
"target_identifier": {"type": "string"},
"target_display": {
"type": ["string", "null"],
},
"target_type": {
"type": ["integer"],
"enum": [ActionTarget.SPECIFIC.value],
},
},
"required": ["target_identifier", "target_type"],
"additionalProperties": False,
}
data_schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"description": "Schema for Discord action data blob",
"properties": {
"tags": TAGS_SCHEMA,
},
"additionalProperties": False,
}
@staticmethod
def get_config_transformer() -> ConfigTransformer | None:
return TargetTypeConfigTransformer.from_config_schema(DiscordActionHandler.config_schema)
@staticmethod
def execute(
job: WorkflowEventData,
action: Action,
detector: Detector,
) -> None:
execute_via_group_type_registry(job, action, detector)
|
DiscordActionHandler
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_fx.py
|
{
"start": 1321,
"end": 4720
}
|
class ____(TestCase):
def test_symbolic_tracing_outputs(self):
"""
Tests running ``tracer.trace()`` inside ``patch_tracer()`` by checking
the saved data structures.
"""
model = Model()
tracer = torch.fx.Tracer()
orig_call_module = tracer.call_module
orig_create_proxy = tracer.create_proxy
exec_order_tracer = _ExecOrderTracer()
with exec_order_tracer.patch_tracer(tracer=tracer, root_module=model):
concrete_args = {"run_all_layers": True}
tracer.trace(model, concrete_args)
# Check that the tracer methods are unchanged after exiting the context
self.assertEqual(orig_call_module, tracer.call_module)
self.assertEqual(orig_create_proxy, tracer.create_proxy)
# Check `module_forward_order`
correct_module_forward_order = [
model,
model.layer0,
model.relu,
model.layer2,
model.layer2[0],
model.layer2[1],
model.layer2[2],
model.relu,
model.layer1,
model.relu,
model.layer0,
model.relu,
]
exec_info = exec_order_tracer.exec_info
self.assertEqual(exec_info.module_forward_order, correct_module_forward_order)
# Check `module_to_param_usage_infos`
self.assertEqual(
exec_info.module_to_param_usage_infos[model],
[
(model.layer0, list(model.layer0.named_parameters())),
(model.layer2, list(model.layer2.named_parameters())),
(model, [("weight1", model.weight1)]),
(model.layer1, list(model.layer1.named_parameters())),
(model, [("weight2", model.weight2)]),
(model.layer0, list(model.layer0.named_parameters())),
],
)
self.assertEqual(
exec_info.module_to_param_usage_infos[model.layer0],
[(model.layer0, list(model.layer0.named_parameters()))],
)
self.assertEqual(
exec_info.module_to_param_usage_infos[model.layer1],
[(model.layer1, list(model.layer1.named_parameters()))],
)
self.assertEqual(
exec_info.module_to_param_usage_infos[model.layer2],
[
(model.layer2[0], list(model.layer2[0].named_parameters())),
(model.layer2[2], list(model.layer2[2].named_parameters())),
],
)
self.assertEqual(exec_info.module_to_param_usage_infos[model.relu], [])
# Check `param_forward_order`
correct_param_order = [
model.layer0.weight,
model.layer0.bias,
model.layer2[0].weight,
model.layer2[2].weight,
model.weight1,
model.layer1.weight,
model.weight2,
]
self.assertEqual(exec_info.param_forward_order, correct_param_order)
# Check `visited_params`
self.assertEqual(
len(exec_info.visited_params), len(exec_info.param_forward_order)
)
self.assertEqual(exec_info.visited_params, set(exec_info.param_forward_order))
devices = ("cuda", "hpu", "xpu")
instantiate_device_type_tests(
TestSymbolicTracing, globals(), only_for=devices, allow_xpu=True
)
if __name__ == "__main__":
run_tests()
|
TestSymbolicTracing
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_unitofworkv2.py
|
{
"start": 1970,
"end": 2617
}
|
class ____:
def _get_test_uow(self, session):
uow = unitofwork.UOWTransaction(session)
deleted = set(session._deleted)
new = set(session._new)
dirty = set(session._dirty_states).difference(deleted)
for s in new.union(dirty):
uow.register_object(s)
for d in deleted:
uow.register_object(d, isdelete=True)
return uow
def _assert_uow_size(self, session, expected):
uow = self._get_test_uow(session)
postsort_actions = uow._generate_actions()
print(postsort_actions)
eq_(len(postsort_actions), expected, postsort_actions)
|
AssertsUOW
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/guides/dagster/dagster_pipes/dagster_pipes_details_and_customization/custom_bootstrap_loader.py
|
{
"start": 216,
"end": 582
}
|
class ____(PipesParamsLoader):
def is_dagster_pipes_process(self) -> bool:
return DAGSTER_PIPES_CONTEXT_ENV_VAR in METADATA
def load_context_params(self) -> PipesParams:
return METADATA[DAGSTER_PIPES_CONTEXT_ENV_VAR]
def load_messages_params(self) -> PipesParams:
return METADATA[DAGSTER_PIPES_MESSAGES_ENV_VAR]
|
MyCustomParamsLoader
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/caching/storage/cache_storage_protocol.py
|
{
"start": 5695,
"end": 8610
}
|
class ____(Protocol):
"""Cache storage manager protocol, that should be implemented by the concrete
cache storage managers.
It is responsible for:
- Creating cache storage instances for the specific
decorated functions,
- Validating the context for the cache storages.
- Optionally clearing all cache storages in optimal way.
It should be created during Runtime initialization.
"""
@abstractmethod
def create(self, context: CacheStorageContext) -> CacheStorage:
"""Creates a new cache storage instance
Please note that the ttl, max_entries and other context fields are specific
for whole storage, not for individual key.
Notes
-----
Threading: Should be safe to call from any thread.
"""
raise NotImplementedError
def clear_all(self) -> None:
"""Remove everything what possible from the cache storages in optimal way.
meaningful default behaviour is to raise NotImplementedError, so this is not
abstractmethod.
The method is optional to implement: cache data API will fall back to remove
all available storages one by one via storage.clear() method
if clear_all raises NotImplementedError.
Raises
------
NotImplementedError
Raised if the storage manager does not provide an ability to clear
all storages at once in optimal way.
Notes
-----
Threading: This method could be called from multiple threads.
This is a responsibility of the concrete implementation to ensure
thread safety guarantees.
"""
raise NotImplementedError
def check_context(self, context: CacheStorageContext) -> None:
"""Checks if the context is valid for the storage manager.
This method should not return anything, but log message or raise an exception
if the context is invalid.
In case of raising an exception, we not handle it and let the exception to be
propagated.
check_context is called only once at the moment of creating `@st.cache_data`
decorator for specific function, so it is not called for every cache hit.
Parameters
----------
context: CacheStorageContext
The context to check for the storage manager, dummy function_key in context
will be used, since it is not computed at the point of calling this method.
Raises
------
InvalidCacheStorageContext
Raised if the cache storage manager is not able to work with provided
CacheStorageContext. When possible we should log message instead, since
this exception will be propagated to the user.
Notes
-----
Threading: Should be safe to call from any thread.
"""
pass
|
CacheStorageManager
|
python
|
django__django
|
tests/i18n/tests.py
|
{
"start": 19753,
"end": 20815
}
|
class ____(SimpleTestCase):
def setUp(self):
self._old_language = get_language()
self._translations = trans_real._translations
# here we rely on .split() being called inside the _fetch()
# in trans_real.translation()
class sideeffect_str(str):
def split(self, *args, **kwargs):
res = str.split(self, *args, **kwargs)
trans_real._translations["en-YY"] = None
return res
trans_real._translations = {sideeffect_str("en-XX"): None}
def tearDown(self):
trans_real._translations = self._translations
activate(self._old_language)
def test_bug14894_translation_activate_thread_safety(self):
translation_count = len(trans_real._translations)
# May raise RuntimeError if translation.activate() isn't thread-safe.
translation.activate("pl")
# make sure sideeffect_str actually added a new translation
self.assertLess(translation_count, len(trans_real._translations))
|
TranslationThreadSafetyTests
|
python
|
getsentry__sentry
|
tests/sentry/middleware/test_access_log_middleware.py
|
{
"start": 3533,
"end": 5711
}
|
class ____(ControlSiloOrganizationEndpoint):
def get(self, request, organization_context, organization):
return Response({"ok": True})
urlpatterns = [
re_path(r"^/dummy$", DummyEndpoint.as_view(), name="dummy-endpoint"),
re_path(r"^api/0/internal/test$", DummyEndpoint.as_view(), name="internal-dummy-endpoint"),
re_path(r"^/dummyfail$", DummyFailEndpoint.as_view(), name="dummy-fail-endpoint"),
re_path(
r"^snubaratelimit$", SnubaRateLimitedEndpoint.as_view(), name="snuba-ratelimit-endpoint"
),
re_path(r"^/dummyratelimit$", RateLimitedEndpoint.as_view(), name="ratelimit-endpoint"),
re_path(
r"^/dummyratelimitconcurrent$",
ConcurrentRateLimitedEndpoint.as_view(),
name="concurrent-ratelimit-endpoint",
),
re_path(
r"^(?P<organization_id_or_slug>[^/]+)/stats_v2/$",
MyOrganizationEndpoint.as_view(),
name="sentry-api-0-organization-stats-v2",
),
re_path(
r"^(?P<organization_id_or_slug>[^/]+)/members/$",
MyControlOrganizationEndpoint.as_view(),
name="sentry-api-0-organization-members",
),
# Need to retain RPC endpoint for cross-silo calls
re_path(
r"^api/0/internal/rpc/(?P<service_name>\w+)/(?P<method_name>\w+)/$",
InternalRpcServiceEndpoint.as_view(),
name="sentry-api-0-rpc-service",
),
]
required_access_log_fields = (
"method",
"view",
"response",
"path",
"rate_limit_type",
"rate_limited",
"caller_ip",
"request_duration_seconds",
)
# All of these fields may be None, and thus may not appear in every access log
optional_access_log_fields = (
"organization_id",
"is_app",
"user_id",
"token_type",
"entity_id",
"user_agent",
"rate_limit_category",
"group",
"concurrent_limit",
"concurrent_requests",
"reset_time",
"limit",
"remaining",
"snuba_policy",
"snuba_quota_unit",
"snuba_storage_key",
"snuba_quota_used",
"snuba_rejection_threshold",
"token_last_characters",
)
@override_settings(ROOT_URLCONF=__name__)
@override_settings(LOG_API_ACCESS=True)
|
MyControlOrganizationEndpoint
|
python
|
etianen__django-reversion
|
tests/test_app/tests/base.py
|
{
"start": 2566,
"end": 2618
}
|
class ____(TestBaseMixin, TestCase):
pass
|
TestBase
|
python
|
pydantic__pydantic
|
tests/mypy/modules/pydantic_settings.py
|
{
"start": 288,
"end": 461
}
|
class ____(BaseSettings):
bar: str
model_config = SettingsConfigDict(env_file='.env', env_file_encoding='utf-8')
scd = SettingsWithConfigDict()
|
SettingsWithConfigDict
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol53.py
|
{
"start": 4412,
"end": 4603
}
|
class ____(Proto_ContraGeneric):
# This should not generate a reportIncompatibleMethodOverride error
# but does currently.
def m(self, x: Self) -> None: ...
|
Impl_ContraSelfExplicit3
|
python
|
getsentry__sentry
|
src/sentry/discover/endpoints/discover_saved_query_detail.py
|
{
"start": 2050,
"end": 6094
}
|
class ____(DiscoverSavedQueryBase):
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"GET": ApiPublishStatus.PUBLIC,
"PUT": ApiPublishStatus.PUBLIC,
}
def has_feature(self, organization, request):
return features.has(
"organizations:discover", organization, actor=request.user
) or features.has("organizations:discover-query", organization, actor=request.user)
@extend_schema(
operation_id="Retrieve an Organization's Discover Saved Query",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
DiscoverSavedQueryParams.DISCOVER_SAVED_QUERY_ID,
],
request=None,
responses={
200: DiscoverSavedQueryModelSerializer,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=DiscoverExamples.DISCOVER_SAVED_QUERY_GET_RESPONSE,
)
def get(self, request: Request, organization, query) -> Response:
"""
Retrieve a saved query.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
self.check_object_permissions(request, query)
return Response(serialize(query), status=200)
@extend_schema(
operation_id="Edit an Organization's Discover Saved Query",
parameters=[GlobalParams.ORG_ID_OR_SLUG, DiscoverSavedQueryParams.DISCOVER_SAVED_QUERY_ID],
request=DiscoverSavedQuerySerializer,
responses={
200: DiscoverSavedQueryModelSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=DiscoverExamples.DISCOVER_SAVED_QUERY_GET_RESPONSE,
)
def put(self, request: Request, organization: Organization, query) -> Response:
"""
Modify a saved query.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
self.check_object_permissions(request, query)
try:
params = self.get_filter_params(
request, organization, project_ids=request.data.get("projects")
)
except NoProjects:
raise ParseError(detail="No Projects found, join a Team")
serializer = DiscoverSavedQuerySerializer(
data=request.data,
context={"params": params, "organization": organization, "user": request.user},
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
data = serializer.validated_data
user_selected_dataset = data["query_dataset"] != DiscoverSavedQueryTypes.DISCOVER
query.update(
organization=organization,
name=data["name"],
query=data["query"],
version=data["version"],
dataset=data["query_dataset"],
dataset_source=(
DatasetSourcesTypes.USER.value
if user_selected_dataset
else DatasetSourcesTypes.UNKNOWN.value
),
)
query.set_projects(data["project_ids"])
return Response(serialize(query), status=200)
@extend_schema(
operation_id="Delete an Organization's Discover Saved Query",
parameters=[GlobalParams.ORG_ID_OR_SLUG, DiscoverSavedQueryParams.DISCOVER_SAVED_QUERY_ID],
responses={
204: RESPONSE_NO_CONTENT,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(self, request: Request, organization, query) -> Response:
"""
Delete a saved query.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
self.check_object_permissions(request, query)
query.delete()
return Response(status=204)
from rest_framework.request import Request
from rest_framework.response import Response
@region_silo_endpoint
|
DiscoverSavedQueryDetailEndpoint
|
python
|
boto__boto3
|
tests/unit/resources/test_params.py
|
{
"start": 6655,
"end": 8520
}
|
class ____(BaseTestCase):
def test_simple_value(self):
params = {}
build_param_structure(params, 'foo', 'bar')
assert params['foo'] == 'bar'
def test_nested_dict(self):
params = {}
build_param_structure(params, 'foo.bar.baz', 123)
assert params['foo']['bar']['baz'] == 123
def test_nested_list(self):
params = {}
build_param_structure(params, 'foo.bar[0]', 'test')
assert params['foo']['bar'][0] == 'test'
def test_strange_offset(self):
params = {}
build_param_structure(params, 'foo[2]', 'test')
assert params['foo'] == [{}, {}, 'test']
def test_nested_list_dict(self):
params = {}
build_param_structure(params, 'foo.bar[0].baz', 123)
assert params['foo']['bar'][0]['baz'] == 123
def test_modify_existing(self):
params = {'foo': [{'key': 'abc'}]}
build_param_structure(params, 'foo[0].secret', 123)
assert params['foo'][0]['key'] == 'abc'
assert params['foo'][0]['secret'] == 123
def test_append_no_index(self):
params = {}
build_param_structure(params, 'foo[]', 123)
assert params['foo'] == [123]
build_param_structure(params, 'foo[]', 456)
assert params['foo'] == [123, 456]
def test_provided_index_with_wildcard(self):
params = {}
index = 0
build_param_structure(params, 'foo[*].bar', 123, index)
build_param_structure(params, 'foo[*].baz', 456, index)
assert params['foo'][index] == {'bar': 123, 'baz': 456}
index = 1
build_param_structure(params, 'foo[*].bar', 789, index)
build_param_structure(params, 'foo[*].baz', 123, index)
assert params['foo'] == [
{'bar': 123, 'baz': 456},
{'bar': 789, 'baz': 123},
]
|
TestStructBuilder
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/execution_api/datamodels/taskinstance.py
|
{
"start": 9334,
"end": 10192
}
|
class ____(StrictBaseModel):
"""Schema for DagRun model with minimal required fields needed for Runtime."""
# TODO: `dag_id` and `run_id` are duplicated from TaskInstance
# See if we can avoid sending these fields from API server and instead
# use the TaskInstance data to get the DAG run information in the client (Task Execution Interface).
dag_id: str
run_id: str
logical_date: UtcDateTime | None
data_interval_start: UtcDateTime | None
data_interval_end: UtcDateTime | None
run_after: UtcDateTime
start_date: UtcDateTime
end_date: UtcDateTime | None
clear_number: int = 0
run_type: DagRunType
state: DagRunState
conf: dict[str, Any] | None = None
triggering_user_name: str | None = None
consumed_asset_events: list[AssetEventDagRunReference]
partition_key: str | None
|
DagRun
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/clustering_ops.py
|
{
"start": 25455,
"end": 36028
}
|
class ____:
"""Internal class to create the op to initialize the clusters.
The op performs this algorithm (see constructor args):
num_remaining = num_clusters - length(cluster_centers)
if num_remaining == 0:
assert that cluster_centers_initialized is true
else:
assert that num_remaining > 0
new_centers = choose up to num_remaining initial centers
l2-normalize new_centers if using cosine distance
all_centers = concat(cluster_centers, new_centers)
cluster_centers := all_centers
if there is a cluster_centers_updated variable:
cluster_centers_updated := cluster_centers
num_now_remaining = num_clusters - length(cluster_centers)
if num_now_remaining == 0:
cluster_centers_initialized := true
"""
# TODO(ccolby): Refactor this class so that kmc2 isn't so much a special case.
def __init__(self, inputs, num_clusters, initial_clusters, distance_metric,
random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length,
cluster_centers, cluster_centers_updated,
cluster_centers_initialized):
"""Creates an op factory.
Args:
inputs: See KMeans constructor.
num_clusters: An integer Tensor providing the number of clusters.
initial_clusters: See KMeans constructor.
distance_metric: See KMeans constructor.
random_seed: See KMeans constructor.
kmeans_plus_plus_num_retries: See KMeans constructor.
kmc2_chain_length: See KMeans constructor.
cluster_centers: The TF variable holding the initial centers. It may
already contain some centers when the op is executed.
cluster_centers_updated: A second TF variable to hold a copy of the
initial centers, used for full-batch mode. In mini-batch mode,
cluster_centers_updated is the same variable as cluster_centers.
cluster_centers_initialized: A boolean TF variable that will be set to
true when all the initial centers have been chosen.
"""
# All of these instance variables are constants.
self._inputs = inputs
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
self._cluster_centers = cluster_centers
self._cluster_centers_updated = cluster_centers_updated
self._cluster_centers_initialized = cluster_centers_initialized
self._num_selected = array_ops.shape(self._cluster_centers)[0]
self._num_remaining = self._num_clusters - self._num_selected
self._num_data = math_ops.add_n(
[array_ops.shape(i)[0] for i in self._inputs])
def _random(self):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_remaining, [-1]),
minval=0,
maxval=math_ops.cast(self._num_data, dtypes.int64),
seed=self._seed,
dtype=dtypes.int64)
return embedding_lookup(self._inputs, indices, partition_strategy='div')
def _kmeans_plus_plus(self):
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
return gen_clustering_ops.kmeans_plus_plus_initialization(
inp, math_ops.cast(self._num_remaining, dtypes.int64), self._seed,
self._kmeans_plus_plus_num_retries)
def _kmc2_multiple_centers(self):
"""Adds new initial cluster centers using the k-MC2 algorithm.
In each call to the op, the provided batch is split into subsets based on
the specified `kmc2_chain_length`. On each subset, a single Markov chain of
the k-MC2 algorithm is used to add *one* new center cluster center. If there
are less than `kmc2_chain_length` points in the subset, a single center is
added using one Markov chain on the full input. It is assumed that the
provided batch has previously been randomly permuted. Otherwise, k-MC2 may
return suboptimal centers.
Returns:
An op that adds new cluster centers.
"""
# The op only operates on the first shard of data.
first_shard = self._inputs[0]
# Number of points in the input that can be used.
batch_size = array_ops.shape(first_shard)[0]
# Maximum number of subsets such that the size of each subset is at least
# `kmc2_chain_length`. Final subsets may be larger.
max_to_sample = math_ops.cast(
batch_size / self._kmc2_chain_length, dtype=dtypes.int32)
# We sample at least one new center and at most all remaining centers.
num_to_sample = math_ops.maximum(
math_ops.minimum(self._num_remaining, max_to_sample), 1)
def _cond(i, _):
"""Stopping condition for the while loop."""
return math_ops.less(i, num_to_sample)
def _body(i, _):
"""Body that adds a single new center based on a subset."""
def _sample_random():
"""Returns a random point as a cluster center."""
# By assumption the batch is reshuffled and _sample_random is always
# called for i=0. Hence, we simply return the first point.
new_center = array_ops.reshape(first_shard[0], [1, -1])
if self._distance_metric == COSINE_DISTANCE:
new_center = nn_impl.l2_normalize(new_center, dim=1)
return new_center
def _sample_kmc2_chain():
"""Returns previous centers as well as a new center sampled using k-MC2."""
# Extract the subset from the underlying batch.
start = i * self._kmc2_chain_length
end = start + self._kmc2_chain_length
subset = first_shard[start:end]
# Compute the distances from points in the subset to previous centers.
_, distances = gen_clustering_ops.nearest_neighbors(
subset, self._cluster_centers, 1)
# Sample index of new center using k-MC2 Markov chain.
new_center_index = gen_clustering_ops.kmc2_chain_initialization(
array_ops.squeeze(distances), self._seed)
# Extract actual new center.
newly_sampled_center = array_ops.reshape(subset[new_center_index],
[1, -1])
# Return concatenation with previously sampled centers.
if self._distance_metric == COSINE_DISTANCE:
newly_sampled_center = nn_impl.l2_normalize(
newly_sampled_center, dim=1)
return array_ops.concat([self._cluster_centers, newly_sampled_center],
0)
# Obtain a random point if there are no previously sampled centers.
# Otherwise, construct a k-MC2 Markov chain.
new_centers = cond.cond(
math_ops.equal(self._num_selected, 0), _sample_random,
_sample_kmc2_chain)
# Assign new cluster centers to underlying variable.
assigned_centers = state_ops.assign(
self._cluster_centers, new_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
assigned_centers = state_ops.assign(
self._cluster_centers_updated,
assigned_centers,
validate_shape=False)
return i + 1, self._num_clusters - array_ops.shape(assigned_centers)[0]
# Add num_to_sample new data points.
_, num_remaining = while_loop.while_loop(_cond, _body, [0, 0])
return num_remaining
def _greedy_batch_sampler(self, sampler):
# If the input dataset size is smaller than the number of centers
# remaining, choose the entire input dataset as centers. This can happen
# with mini-batch. Otherwise, sample the batch according to the provided
# sampler.
return cond.cond(self._num_data <= self._num_remaining,
lambda: array_ops.concat(self._inputs, 0),
sampler)
def _single_batch_sampler(self, sampler):
# Enforce that there are at least as many data points as centers
# remaining. This gives the provided sampler the chance to select all
# remaining centers from a single batch.
with ops.control_dependencies(
[check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
return sampler()
def _choose_initial_centers(self):
if isinstance(self._initial_clusters, str):
if self._initial_clusters == RANDOM_INIT:
return self._greedy_batch_sampler(self._random)
else: # self._initial_clusters == KMEANS_PLUS_PLUS_INIT
return self._single_batch_sampler(self._kmeans_plus_plus)
elif callable(self._initial_clusters):
return self._initial_clusters(self._inputs, self._num_remaining)
else:
with ops.control_dependencies([
check_ops.assert_equal(self._num_remaining,
array_ops.shape(self._initial_clusters)[0])
]):
return self._initial_clusters
def _add_new_centers(self):
"""Adds some centers and returns the number of centers remaining."""
new_centers = self._choose_initial_centers()
if self._distance_metric == COSINE_DISTANCE:
new_centers = nn_impl.l2_normalize(new_centers, dim=1)
# If cluster_centers is empty, it doesn't have the right shape for concat.
all_centers = cond.cond(
math_ops.equal(self._num_selected, 0), lambda: new_centers,
lambda: array_ops.concat([self._cluster_centers, new_centers], 0))
# TODO(ccolby): De-dupe all_centers?
a = state_ops.assign(
self._cluster_centers, all_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
a = state_ops.assign(
self._cluster_centers_updated, a, validate_shape=False)
return self._num_clusters - array_ops.shape(a)[0]
def _initialize(self):
with ops.control_dependencies([
check_ops.assert_positive(self._num_remaining),
]):
if self._initial_clusters == KMC2_INIT:
num_now_remaining = self._kmc2_multiple_centers()
else:
num_now_remaining = self._add_new_centers()
return cond.cond(
math_ops.equal(num_now_remaining, 0),
lambda: state_ops.assign(self._cluster_centers_initialized, True),
control_flow_ops.no_op)
def op(self):
"""Returns the cluster initializer op."""
return cond.cond(
math_ops.equal(self._num_remaining, 0),
lambda: check_ops.assert_equal(self._cluster_centers_initialized, True),
self._initialize)
|
_InitializeClustersOpFactory
|
python
|
nedbat__coveragepy
|
tests/test_plugins.py
|
{
"start": 11682,
"end": 11775
}
|
class ____(CoverageTest):
"""Tests of plugins that implement file_tracer."""
|
FileTracerTest
|
python
|
ray-project__ray
|
python/ray/actor.py
|
{
"start": 3716,
"end": 4199
}
|
class ____(Generic[_Ret, _T0, _T1, _T2, _T3, _T4]):
def remote(
self,
__arg0: "Union[_T0, ObjectRef[_T0]]",
__arg1: "Union[_T1, ObjectRef[_T1]]",
__arg2: "Union[_T2, ObjectRef[_T2]]",
__arg3: "Union[_T3, ObjectRef[_T3]]",
__arg4: "Union[_T4, ObjectRef[_T4]]",
) -> "ObjectRef[_Ret]":
...
def bind(
self, __arg0: _T0, __arg1: _T1, __arg2: _T2, __arg3: _T3, __arg4: _T4
) -> Any:
...
|
_RemoteMethod4
|
python
|
TheAlgorithms__Python
|
data_structures/heap/randomized_heap.py
|
{
"start": 175,
"end": 1812
}
|
class ____[T: bool]:
"""
One node of the randomized heap. Contains the value and references to
two children.
"""
def __init__(self, value: T) -> None:
self._value: T = value
self.left: RandomizedHeapNode[T] | None = None
self.right: RandomizedHeapNode[T] | None = None
@property
def value(self) -> T:
"""
Return the value of the node.
>>> rhn = RandomizedHeapNode(10)
>>> rhn.value
10
>>> rhn = RandomizedHeapNode(-10)
>>> rhn.value
-10
"""
return self._value
@staticmethod
def merge(
root1: RandomizedHeapNode[T] | None, root2: RandomizedHeapNode[T] | None
) -> RandomizedHeapNode[T] | None:
"""
Merge 2 nodes together.
>>> rhn1 = RandomizedHeapNode(10)
>>> rhn2 = RandomizedHeapNode(20)
>>> RandomizedHeapNode.merge(rhn1, rhn2).value
10
>>> rhn1 = RandomizedHeapNode(20)
>>> rhn2 = RandomizedHeapNode(10)
>>> RandomizedHeapNode.merge(rhn1, rhn2).value
10
>>> rhn1 = RandomizedHeapNode(5)
>>> rhn2 = RandomizedHeapNode(0)
>>> RandomizedHeapNode.merge(rhn1, rhn2).value
0
"""
if not root1:
return root2
if not root2:
return root1
if root1.value > root2.value:
root1, root2 = root2, root1
if random.choice([True, False]):
root1.left, root1.right = root1.right, root1.left
root1.left = RandomizedHeapNode.merge(root1.left, root2)
return root1
|
RandomizedHeapNode
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 168680,
"end": 169591
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of CopyProjectV2"""
__schema__ = github_schema
__field_names__ = ("project_id", "owner_id", "title", "include_draft_issues", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the source Project to copy."""
owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId")
"""The owner ID of the new project."""
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""The title of the project."""
include_draft_issues = sgqlc.types.Field(Boolean, graphql_name="includeDraftIssues")
"""Include draft issues in the new project"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
CopyProjectV2Input
|
python
|
pypa__hatch
|
backend/src/hatchling/version/source/env.py
|
{
"start": 126,
"end": 911
}
|
class ____(VersionSourceInterface):
PLUGIN_NAME = "env"
def get_version_data(self) -> dict:
variable = self.config.get("variable", "")
if not variable:
message = "option `variable` must be specified"
raise ValueError(message)
if not isinstance(variable, str):
message = "option `variable` must be a string"
raise TypeError(message)
if variable not in os.environ:
message = f"environment variable `{variable}` is not set"
raise RuntimeError(message)
return {"version": os.environ[variable]}
def set_version(self, version: str, version_data: dict) -> None:
message = "Cannot set environment variables"
raise NotImplementedError(message)
|
EnvSource
|