| language (string, 1 class) | repo (string, 346 classes) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
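Each record pairs a masked class definition (`source`, with the class name replaced by `____`) against the original class name (`target`); `class_span` appears to hold the start/end character offsets of the class within its file. Below is a minimal, illustrative sketch of how one such record might be consumed, assuming each row is available as a plain Python dict; the `row` literal is copied from the first record shown and the reconstruction step is an assumption about intended usage, not part of the dataset itself.

```python
# Illustrative only: one record of this dataset as a plain dict (values taken
# from the first row below; the "source" string is abbreviated here).
row = {
    "language": "python",
    "repo": "getsentry__sentry",
    "path": "src/sentry/rules/history/endpoints/project_rule_stats.py",
    "class_span": {"start": 979, "end": 1317},
    "source": "class ____(Serializer):\n    ...",
    "target": "TimeSeriesValueSerializer",
}

# Restore the original class definition by filling the mask with the target name.
restored = row["source"].replace("____", row["target"], 1)
assert restored.startswith(f"class {row['target']}(")

# Assumption: class_span gives character offsets into the original file, so the
# span length should roughly match the length of the unmasked snippet.
span = row["class_span"]
print(span["end"] - span["start"], len(restored))
```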
python
|
getsentry__sentry
|
src/sentry/rules/history/endpoints/project_rule_stats.py
|
{
"start": 979,
"end": 1317
}
|
class ____(Serializer):
def serialize(
self, obj: TimeSeriesValue, attrs: Mapping[Any, Any], user: Any, **kwargs: Any
) -> TimeSeriesValueResponse:
return {
"date": obj.bucket,
"count": obj.count,
}
@extend_schema(tags=["issue_alerts"])
@region_silo_endpoint
|
TimeSeriesValueSerializer
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/evaluation/base.py
|
{
"start": 341,
"end": 1523
}
|
class ____(BaseModel):
"""
Evaluation result.
Output of an BaseEvaluator.
"""
query: Optional[str] = Field(default=None, description="Query string")
contexts: Optional[Sequence[str]] = Field(
default=None, description="Context strings"
)
response: Optional[str] = Field(default=None, description="Response string")
passing: Optional[bool] = Field(
default=None, description="Binary evaluation result (passing or not)"
)
feedback: Optional[str] = Field(
default=None, description="Feedback or reasoning for the response"
)
score: Optional[float] = Field(default=None, description="Score for the response")
pairwise_source: Optional[str] = Field(
default=None,
description=(
"Used only for pairwise and specifies whether it is from original order of"
" presented answers or flipped order"
),
)
invalid_result: bool = Field(
default=False, description="Whether the evaluation result is an invalid one."
)
invalid_reason: Optional[str] = Field(
default=None, description="Reason for invalid evaluation."
)
|
EvaluationResult
|
python
|
pytorch__pytorch
|
test/test_datapipe.py
|
{
"start": 147278,
"end": 147630
}
|
class ____(IterDataPipe):
def __init__(self) -> None:
self.n = 10
self.iter = iter(range(self.n))
def __iter__(self):
return self
def __next__(self):
return next(self.iter)
def reset(self):
self.iter = iter(range(self.n))
def __len__(self):
return self.n
|
_CustomSelfNextTestDataPipe
|
python
|
django-compressor__django-compressor
|
compressor/tests/test_offline.py
|
{
"start": 15211,
"end": 15846
}
|
class ____(
SuperMixin, OfflineTestCaseMixin, TestCase
):
templates_dir = "test_block_super_extra"
def _test_offline(self, engine, verbosity=0):
count, result = CompressCommand().handle_inner(
engines=[engine], verbosity=verbosity
)
self.assertEqual(2, count)
self.assertEqual(
[self._render_script("bfcec76e0f28"), self._render_script("817b5defb197")],
result,
)
rendered_template = self._render_template(engine)
self.assertEqual(rendered_template, self._render_result(result, ""))
|
OfflineCompressBlockSuperTestCaseWithExtraContent
|
python
|
keras-team__keras
|
keras/src/layers/rnn/bidirectional_test.py
|
{
"start": 130,
"end": 10040
}
|
class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.Bidirectional,
init_kwargs={"layer": layers.SimpleRNN(4)},
input_shape=(3, 2, 4),
expected_output_shape=(3, 8),
expected_num_trainable_weights=6,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.Bidirectional,
init_kwargs={
"layer": layers.SimpleRNN(4),
"backward_layer": layers.SimpleRNN(4, go_backwards=True),
"merge_mode": "sum",
},
input_shape=(3, 2, 4),
expected_output_shape=(3, 4),
expected_num_trainable_weights=6,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
forward_layer = layers.SimpleRNN(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(
layer=forward_layer,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.39687276, 0.39687276, 0.10004295, 0.10004295],
[0.7237238, 0.7237238, 0.53391594, 0.53391594],
]
),
output,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode="ave")
output = layer(sequence)
self.assertAllClose(
np.array([[0.24845785, 0.24845785], [0.6288199, 0.6288199]]),
output,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode=None)
output1, output2 = layer(sequence)
self.assertAllClose(
np.array([[0.39687276, 0.39687276], [0.7237238, 0.7237238]]),
output1,
)
self.assertAllClose(
np.array([[0.10004295, 0.10004295], [0.53391594, 0.53391594]]),
output2,
)
backward_layer = layers.SimpleRNN(
2,
kernel_initializer=initializers.Constant(0.03),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.01),
go_backwards=True,
)
layer = layers.Bidirectional(
layer=forward_layer, backward_layer=backward_layer, merge_mode="mul"
)
output = layer(sequence)
self.assertAllClose(
np.array([[0.08374989, 0.08374989], [0.6740834, 0.6740834]]),
output,
)
forward_layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode="sum")
output = layer(sequence)
self.assertAllClose(
np.array(
[
[
[0.20937867, 0.20937867],
[0.34462988, 0.34462988],
[0.40290534, 0.40290534],
],
[
[0.59829646, 0.59829646],
[0.6734641, 0.6734641],
[0.6479671, 0.6479671],
],
]
),
output,
)
def test_statefulness(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
forward_layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
stateful=True,
)
layer = layers.Bidirectional(layer=forward_layer)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.26234663, 0.26234663, 0.16959146, 0.16959146],
[0.6137073, 0.6137073, 0.5381646, 0.5381646],
]
),
output,
)
layer.reset_state()
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.26234663, 0.26234663, 0.16959146, 0.16959146],
[0.6137073, 0.6137073, 0.5381646, 0.5381646],
]
),
output,
)
def test_pass_initial_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
initial_state = [
np.arange(4).reshape((2, 2)).astype("float32") * 1,
np.arange(4).reshape((2, 2)).astype("float32") * 2,
np.arange(4).reshape((2, 2)).astype("float32") * 3,
np.arange(4).reshape((2, 2)).astype("float32") * 4,
]
forward_layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(
layer=forward_layer,
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array(
[
[0.20794602, 0.4577124, 0.14046375, 0.48191673],
[0.6682636, 0.6711909, 0.60943645, 0.60950446],
]
),
output,
)
def test_masking(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
forward_layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(layer=forward_layer)
mask = np.array([[True, True, False, True], [True, False, False, True]])
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.19393763, 0.19393763, 0.11669192, 0.11669192],
[0.30818558, 0.30818558, 0.28380975, 0.28380975],
]
),
output,
)
def test_return_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
forward_layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_state=True,
)
layer = layers.Bidirectional(layer=forward_layer)
output, h1, c1, h2, c2 = layer(sequence)
self.assertAllClose(
np.array(
[
[0.1990008, 0.1990008, 0.12659755, 0.12659755],
[0.52335435, 0.52335435, 0.44717982, 0.44717982],
]
),
output,
)
self.assertAllClose(
np.array([[0.1990008, 0.1990008], [0.52335435, 0.52335435]]),
h1,
)
self.assertAllClose(
np.array([[0.35567185, 0.35567185], [1.0492687, 1.0492687]]),
c1,
)
self.assertAllClose(
np.array([[0.12659755, 0.12659755], [0.44717982, 0.44717982]]),
h2,
)
self.assertAllClose(
np.array([[0.2501858, 0.2501858], [0.941473, 0.941473]]),
c2,
)
@pytest.mark.requires_trainable_backend
def test_output_shape(self):
x = np.array([[[101, 202], [303, 404]]])
for merge_mode in ["ave", "concat", "mul", "sum", None]:
sub_layer = layers.LSTM(2, return_state=True)
layer = layers.Bidirectional(sub_layer, merge_mode=merge_mode)
output = layer(x)
output_shape = layer.compute_output_shape(x.shape)
for out, shape in zip(output, output_shape):
self.assertEqual(out.shape, shape)
for merge_mode in ["concat", "ave", "mul", "sum"]:
sub_layer = layers.LSTM(2, return_state=False)
layer = layers.Bidirectional(sub_layer, merge_mode=merge_mode)
output = layer(x)
output_shape = layer.compute_output_shape(x.shape)
self.assertEqual(output.shape, output_shape)
# return_state=False & merge_mode=None
sub_layer = layers.LSTM(2, return_state=False)
layer = layers.Bidirectional(sub_layer, merge_mode=None)
output = layer(x)
output_shape = layer.compute_output_shape(x.shape)
for out, shape in zip(output, output_shape):
self.assertEqual(out.shape, shape)
def test_keeps_use_cudnn(self):
# keep use_cudnn if the layer has it
for rnn_class in [layers.GRU, layers.LSTM]:
for use_cudnn in [True, False, "auto"]:
rnn = rnn_class(1, use_cudnn=use_cudnn)
bidi = layers.Bidirectional(rnn)
self.assertEqual(bidi.forward_layer.use_cudnn, use_cudnn)
self.assertEqual(bidi.backward_layer.use_cudnn, use_cudnn)
# otherwise ignore it
rnn = layers.SimpleRNN(1)
bidi = layers.Bidirectional(rnn)
self.assertFalse(hasattr(bidi.forward_layer, "use_cudnn"))
self.assertFalse(hasattr(bidi.backward_layer, "use_cudnn"))
|
SimpleRNNTest
|
python
|
Pylons__pyramid
|
tests/test_config/test_assets.py
|
{
"start": 18844,
"end": 30536
}
|
class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.config.assets import PackageOverrides
return PackageOverrides
def _makeOne(self, package=None, pkg_resources=None):
if package is None:
package = DummyPackage('package')
klass = self._getTargetClass()
if pkg_resources is None:
pkg_resources = DummyPkgResources()
return klass(package, pkg_resources=pkg_resources)
def test_class_conforms_to_IPackageOverrides(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IPackageOverrides
verifyClass(IPackageOverrides, self._getTargetClass())
def test_instance_conforms_to_IPackageOverrides(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IPackageOverrides
verifyObject(IPackageOverrides, self._makeOne())
def test_class_conforms_to_IPEP302Loader(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IPEP302Loader
verifyClass(IPEP302Loader, self._getTargetClass())
def test_instance_conforms_to_IPEP302Loader(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IPEP302Loader
verifyObject(IPEP302Loader, self._makeOne())
def test_ctor_package_already_has_loader_of_different_type(self):
package = DummyPackage('package')
loader = package.__loader__ = DummyLoader()
po = self._makeOne(package)
self.assertTrue(package.__loader__ is po)
self.assertTrue(po.real_loader is loader)
def test_ctor_package_already_has_loader_of_same_type(self):
package = DummyPackage('package')
package.__loader__ = self._makeOne(package)
po = self._makeOne(package)
self.assertEqual(package.__loader__, po)
def test_ctor_sets_loader(self):
package = DummyPackage('package')
po = self._makeOne(package)
self.assertEqual(package.__loader__, po)
def test_ctor_registers_loader_type(self):
from pyramid.config.assets import OverrideProvider
dummy_pkg_resources = DummyPkgResources()
package = DummyPackage('package')
po = self._makeOne(package, dummy_pkg_resources)
self.assertEqual(
dummy_pkg_resources.registered, [(po.__class__, OverrideProvider)]
)
def test_ctor_sets_local_state(self):
package = DummyPackage('package')
po = self._makeOne(package)
self.assertEqual(po.overrides, [])
self.assertEqual(po.overridden_package_name, 'package')
def test_insert_directory(self):
from pyramid.config.assets import DirectoryOverride
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = [None]
po.insert('foo/', DummyAssetSource())
self.assertEqual(len(po.overrides), 2)
override = po.overrides[0]
self.assertEqual(override.__class__, DirectoryOverride)
def test_insert_file(self):
from pyramid.config.assets import FileOverride
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = [None]
po.insert('foo.pt', DummyAssetSource())
self.assertEqual(len(po.overrides), 2)
override = po.overrides[0]
self.assertEqual(override.__class__, FileOverride)
def test_insert_emptystring(self):
# XXX is this a valid case for a directory?
from pyramid.config.assets import DirectoryOverride
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = [None]
source = DummyAssetSource()
po.insert('', source)
self.assertEqual(len(po.overrides), 2)
override = po.overrides[0]
self.assertEqual(override.__class__, DirectoryOverride)
def test_filtered_sources(self):
overrides = [DummyOverride(None), DummyOverride('foo')]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(list(po.filtered_sources('whatever')), ['foo'])
def test_get_filename(self):
source = DummyAssetSource(filename='foo.pt')
overrides = [DummyOverride(None), DummyOverride((source, ''))]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
result = po.get_filename('whatever')
self.assertEqual(result, 'foo.pt')
self.assertEqual(source.resource_name, '')
def test_get_filename_file_doesnt_exist(self):
source = DummyAssetSource(filename=None)
overrides = [
DummyOverride(None),
DummyOverride((source, 'wont_exist')),
]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.get_filename('whatever'), None)
self.assertEqual(source.resource_name, 'wont_exist')
def test_get_stream(self):
source = DummyAssetSource(stream='a stream?')
overrides = [DummyOverride(None), DummyOverride((source, 'foo.pt'))]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.get_stream('whatever'), 'a stream?')
self.assertEqual(source.resource_name, 'foo.pt')
def test_get_stream_file_doesnt_exist(self):
source = DummyAssetSource(stream=None)
overrides = [
DummyOverride(None),
DummyOverride((source, 'wont_exist')),
]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.get_stream('whatever'), None)
self.assertEqual(source.resource_name, 'wont_exist')
def test_get_string(self):
source = DummyAssetSource(string='a string')
overrides = [DummyOverride(None), DummyOverride((source, 'foo.pt'))]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.get_string('whatever'), 'a string')
self.assertEqual(source.resource_name, 'foo.pt')
def test_get_string_file_doesnt_exist(self):
source = DummyAssetSource(string=None)
overrides = [
DummyOverride(None),
DummyOverride((source, 'wont_exist')),
]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.get_string('whatever'), None)
self.assertEqual(source.resource_name, 'wont_exist')
def test_has_resource(self):
source = DummyAssetSource(exists=True)
overrides = [DummyOverride(None), DummyOverride((source, 'foo.pt'))]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.has_resource('whatever'), True)
self.assertEqual(source.resource_name, 'foo.pt')
def test_has_resource_file_doesnt_exist(self):
source = DummyAssetSource(exists=None)
overrides = [
DummyOverride(None),
DummyOverride((source, 'wont_exist')),
]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.has_resource('whatever'), None)
self.assertEqual(source.resource_name, 'wont_exist')
def test_isdir_false(self):
source = DummyAssetSource(isdir=False)
overrides = [DummyOverride(None), DummyOverride((source, 'foo.pt'))]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.isdir('whatever'), False)
self.assertEqual(source.resource_name, 'foo.pt')
def test_isdir_true(self):
source = DummyAssetSource(isdir=True)
overrides = [DummyOverride(None), DummyOverride((source, 'foo.pt'))]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.isdir('whatever'), True)
self.assertEqual(source.resource_name, 'foo.pt')
def test_isdir_doesnt_exist(self):
source = DummyAssetSource(isdir=None)
overrides = [
DummyOverride(None),
DummyOverride((source, 'wont_exist')),
]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.isdir('whatever'), None)
self.assertEqual(source.resource_name, 'wont_exist')
def test_listdir(self):
source = DummyAssetSource(listdir=True)
overrides = [DummyOverride(None), DummyOverride((source, 'foo.pt'))]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.listdir('whatever'), True)
self.assertEqual(source.resource_name, 'foo.pt')
def test_listdir_doesnt_exist(self):
source = DummyAssetSource(listdir=None)
overrides = [
DummyOverride(None),
DummyOverride((source, 'wont_exist')),
]
package = DummyPackage('package')
po = self._makeOne(package)
po.overrides = overrides
self.assertEqual(po.listdir('whatever'), None)
self.assertEqual(source.resource_name, 'wont_exist')
# PEP 302 __loader__ extensions: use the "real" __loader__, if present.
def test_get_data_pkg_has_no___loader__(self):
package = DummyPackage('package')
po = self._makeOne(package)
self.assertRaises(NotImplementedError, po.get_data, 'whatever')
def test_get_data_pkg_has___loader__(self):
package = DummyPackage('package')
loader = package.__loader__ = DummyLoader()
po = self._makeOne(package)
self.assertEqual(po.get_data('whatever'), b'DEADBEEF')
self.assertEqual(loader._got_data, 'whatever')
def test_is_package_pkg_has_no___loader__(self):
package = DummyPackage('package')
po = self._makeOne(package)
self.assertRaises(NotImplementedError, po.is_package, 'whatever')
def test_is_package_pkg_has___loader__(self):
package = DummyPackage('package')
loader = package.__loader__ = DummyLoader()
po = self._makeOne(package)
self.assertTrue(po.is_package('whatever'))
self.assertEqual(loader._is_package, 'whatever')
def test_get_code_pkg_has_no___loader__(self):
package = DummyPackage('package')
po = self._makeOne(package)
self.assertRaises(NotImplementedError, po.get_code, 'whatever')
def test_get_code_pkg_has___loader__(self):
package = DummyPackage('package')
loader = package.__loader__ = DummyLoader()
po = self._makeOne(package)
self.assertEqual(po.get_code('whatever'), b'DEADBEEF')
self.assertEqual(loader._got_code, 'whatever')
def test_get_source_pkg_has_no___loader__(self):
package = DummyPackage('package')
po = self._makeOne(package)
self.assertRaises(NotImplementedError, po.get_source, 'whatever')
def test_get_source_pkg_has___loader__(self):
package = DummyPackage('package')
loader = package.__loader__ = DummyLoader()
po = self._makeOne(package)
self.assertEqual(po.get_source('whatever'), 'def foo():\n pass')
self.assertEqual(loader._got_source, 'whatever')
|
TestPackageOverrides
|
python
|
pytest-dev__pytest
|
src/_pytest/capture.py
|
{
"start": 6323,
"end": 6618
}
|
class ____(io.TextIOWrapper):
def __init__(self) -> None:
super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True)
def getvalue(self) -> str:
assert isinstance(self.buffer, io.BytesIO)
return self.buffer.getvalue().decode("UTF-8")
|
CaptureIO
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/fancylists.py
|
{
"start": 17084,
"end": 17401
}
|
class ____(Treeprocessor):
"""Clean up fancy list metadata."""
def run(self, root):
"""Remove intermediate fancy list type metadata."""
for ol in root.iter('ol'):
if '__fancylist' in ol.attrib:
del ol.attrib['__fancylist']
return root
|
FancyListTreeprocessor
|
python
|
pallets__flask
|
src/flask/cli.py
|
{
"start": 793,
"end": 8628
}
|
class ____(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(module: ModuleType) -> Flask:
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
"Detected multiple Flask applications in module"
f" '{module.__name__}'. Use '{module.__name__}:name'"
" to specify the correct one."
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = app_factory()
if isinstance(app, Flask):
return app
except TypeError as e:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
f"Detected factory '{attr_name}' in module '{module.__name__}',"
" but could not call it without arguments. Use"
f" '{module.__name__}:{attr_name}(args)'"
" to specify arguments."
) from e
raise NoAppException(
"Failed to find Flask application or factory in module"
f" '{module.__name__}'. Use '{module.__name__}:name'"
" to specify one."
)
def _called_with_wrong_args(f: t.Callable[..., Flask]) -> bool:
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param f: The function that was called.
:return: ``True`` if the call failed.
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is f.__code__:
# In the function, it was called successfully.
return False
tb = tb.tb_next
# Didn't reach the function.
return True
finally:
# Delete tb to break a circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(module: ModuleType, app_name: str) -> Flask:
"""Check if the given string is a variable name or a function. Call
a function to get the app instance, or return the variable directly.
"""
from . import Flask
# Parse app_name as a single expression to determine if it's a valid
# attribute name or function call.
try:
expr = ast.parse(app_name.strip(), mode="eval").body
except SyntaxError:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
) from None
if isinstance(expr, ast.Name):
name = expr.id
args = []
kwargs = {}
elif isinstance(expr, ast.Call):
# Ensure the function name is an attribute name only.
if not isinstance(expr.func, ast.Name):
raise NoAppException(
f"Function reference must be a simple name: {app_name!r}."
)
name = expr.func.id
# Parse the positional and keyword arguments as literals.
try:
args = [ast.literal_eval(arg) for arg in expr.args]
kwargs = {
kw.arg: ast.literal_eval(kw.value)
for kw in expr.keywords
if kw.arg is not None
}
except ValueError:
# literal_eval gives cryptic error messages, show a generic
# message with the full expression instead.
raise NoAppException(
f"Failed to parse arguments as literal values: {app_name!r}."
) from None
else:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
)
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(
f"Failed to find attribute {name!r} in {module.__name__!r}."
) from e
# If the attribute is a function, call it with any args and kwargs
# to get the real application.
if inspect.isfunction(attr):
try:
app = attr(*args, **kwargs)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
f"The factory {app_name!r} in module"
f" {module.__name__!r} could not be called with the"
" specified arguments."
) from e
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from"
f" '{module.__name__}:{app_name}'."
)
def prepare_import(path: str) -> str:
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
@t.overload
def locate_app(
module_name: str, app_name: str | None, raise_if_not_found: t.Literal[True] = True
) -> Flask: ...
@t.overload
def locate_app(
module_name: str, app_name: str | None, raise_if_not_found: t.Literal[False] = ...
) -> Flask | None: ...
def locate_app(
module_name: str, app_name: str | None, raise_if_not_found: bool = True
) -> Flask | None:
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[2].tb_next: # type: ignore[union-attr]
raise NoAppException(
f"While importing {module_name!r}, an ImportError was"
f" raised:\n\n{traceback.format_exc()}"
) from None
elif raise_if_not_found:
raise NoAppException(f"Could not import {module_name!r}.") from None
else:
return None
module = sys.modules[module_name]
if app_name is None:
return find_best_app(module)
else:
return find_app_by_string(module, app_name)
def get_version(ctx: click.Context, param: click.Parameter, value: t.Any) -> None:
if not value or ctx.resilient_parsing:
return
flask_version = importlib.metadata.version("flask")
werkzeug_version = importlib.metadata.version("werkzeug")
click.echo(
f"Python {platform.python_version()}\n"
f"Flask {flask_version}\n"
f"Werkzeug {werkzeug_version}",
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the Flask version.",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
|
NoAppException
|
python
|
pypa__pip
|
src/pip/_vendor/urllib3/contrib/ntlmpool.py
|
{
"start": 731,
"end": 4528
}
|
class ____(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = "https"
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split("\\", 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug(
"Starting NTLM HTTPS connection no. %d: https://%s%s",
self.num_connections,
self.host,
self.authurl,
)
headers = {"Connection": "Keep-Alive"}
req_header = "Authorization"
resp_header = "www-authenticate"
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
self.rawuser
)
log.debug("Request headers: %s", headers)
conn.request("GET", self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.headers)
log.debug("Response status: %s %s", res.status, res.reason)
log.debug("Response headers: %s", reshdr)
log.debug("Response data: %s [...]", res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(", ")
auth_header_value = None
for s in auth_header_values:
if s[:5] == "NTLM ":
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception(
"Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
)
# Send authentication message
ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
auth_header_value
)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
)
headers[req_header] = "NTLM %s" % auth_msg
log.debug("Request headers: %s", headers)
conn.request("GET", self.authurl, None, headers)
res = conn.getresponse()
log.debug("Response status: %s %s", res.status, res.reason)
log.debug("Response headers: %s", dict(res.headers))
log.debug("Response data: %s [...]", res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception("Server rejected request: wrong username or password")
raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
res.fp = None
log.debug("Connection established")
return conn
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=3,
redirect=True,
assert_same_host=True,
):
if headers is None:
headers = {}
headers["Connection"] = "Keep-Alive"
return super(NTLMConnectionPool, self).urlopen(
method, url, body, headers, retries, redirect, assert_same_host
)
|
NTLMConnectionPool
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_R.py
|
{
"start": 7584,
"end": 8729
}
|
class ____(Benchmark):
r"""
Rosenbrock objective function.
This class defines the Rosenbrock [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Rosenbrock}}(x) = \sum_{i=1}^{n-1} [100(x_i^2
- x_{i+1})^2 + (x_i - 1)^2]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-30.] * self.N, [30.0] * self.N))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [[1 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return rosen(x)
|
Rosenbrock
|
python
|
huggingface__transformers
|
tests/utils/import_structures/import_structure_register_with_duplicates.py
|
{
"start": 713,
"end": 882
}
|
class ____:
def __init__(self):
pass
@requires(backends=("torch", "torch"))
def c0():
pass
@requires(backends=("torch", "torch"))
# That's a statement
|
C0
|
python
|
bokeh__bokeh
|
src/bokeh/core/property/visual.py
|
{
"start": 4264,
"end": 4757
}
|
class ____(Either):
""" Accept built-in fill hatching specifications.
Accepts either "long" names, e.g. "horizontal-wave" or the single letter
abbreviations, e.g. "v"
"""
def __init__(self, default=[], *, help: str | None = None) -> None:
types = Enum(enums.HatchPattern), Enum(enums.HatchPatternAbbreviation), String
super().__init__(*types, default=default, help=help)
def __str__(self) -> str:
return self.__class__.__name__
|
HatchPatternType
|
python
|
cherrypy__cherrypy
|
cherrypy/test/test_conn.py
|
{
"start": 2885,
"end": 9270
}
|
class ____(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_HTTP11(self):
if cherrypy.server.protocol_version != 'HTTP/1.1':
return self.skip()
self.PROTOCOL = 'HTTP/1.1'
self.persistent = True
# Make the first request and assert there's no "Connection: close".
self.getPage('/')
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader('Connection')
# Make another request on the same connection.
self.getPage('/page1')
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader('Connection')
# Test client-side close.
self.getPage('/page2', headers=[('Connection', 'close')])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader('Connection', 'close')
# Make another request on the same connection, which should error.
self.assertRaises(NotConnected, self.getPage, '/')
def test_Streaming_no_len(self):
try:
self._streaming(set_cl=False)
finally:
try:
self.HTTP_CONN.close()
except (TypeError, AttributeError):
pass
def test_Streaming_with_len(self):
try:
self._streaming(set_cl=True)
finally:
try:
self.HTTP_CONN.close()
except (TypeError, AttributeError):
pass
def _streaming(self, set_cl):
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.PROTOCOL = 'HTTP/1.1'
self.persistent = True
# Make the first request and assert there's no "Connection: close".
self.getPage('/')
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader('Connection')
# Make another, streamed request on the same connection.
if set_cl:
# When a Content-Length is provided, the content should stream
# without closing the connection.
self.getPage('/stream?set_cl=Yes')
self.assertHeader('Content-Length')
self.assertNoHeader('Connection', 'close')
self.assertNoHeader('Transfer-Encoding')
self.assertStatus('200 OK')
self.assertBody('0123456789')
else:
# When no Content-Length response header is provided,
# streamed output will either close the connection, or use
# chunked encoding, to determine transfer-length.
self.getPage('/stream')
self.assertNoHeader('Content-Length')
self.assertStatus('200 OK')
self.assertBody('0123456789')
chunked_response = False
for k, v in self.headers:
if k.lower() == 'transfer-encoding':
if str(v) == 'chunked':
chunked_response = True
if chunked_response:
self.assertNoHeader('Connection', 'close')
else:
self.assertHeader('Connection', 'close')
# Make another request on the same connection, which should
# error.
self.assertRaises(NotConnected, self.getPage, '/')
# Try HEAD. See
# https://github.com/cherrypy/cherrypy/issues/864.
self.getPage('/stream', method='HEAD')
self.assertStatus('200 OK')
self.assertBody('')
self.assertNoHeader('Transfer-Encoding')
else:
self.PROTOCOL = 'HTTP/1.0'
self.persistent = True
# Make the first request and assert Keep-Alive.
self.getPage('/', headers=[('Connection', 'Keep-Alive')])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader('Connection', 'Keep-Alive')
# Make another, streamed request on the same connection.
if set_cl:
# When a Content-Length is provided, the content should
# stream without closing the connection.
self.getPage(
'/stream?set_cl=Yes',
headers=[('Connection', 'Keep-Alive')],
)
self.assertHeader('Content-Length')
self.assertHeader('Connection', 'Keep-Alive')
self.assertNoHeader('Transfer-Encoding')
self.assertStatus('200 OK')
self.assertBody('0123456789')
else:
# When a Content-Length is not provided,
# the server should close the connection.
self.getPage('/stream', headers=[('Connection', 'Keep-Alive')])
self.assertStatus('200 OK')
self.assertBody('0123456789')
self.assertNoHeader('Content-Length')
self.assertNoHeader('Connection', 'Keep-Alive')
self.assertNoHeader('Transfer-Encoding')
# Make another request on the same connection, which should
# error.
self.assertRaises(NotConnected, self.getPage, '/')
def test_HTTP10_KeepAlive(self):
self.PROTOCOL = 'HTTP/1.0'
if self.scheme == 'https':
self.HTTP_CONN = HTTPSConnection
else:
self.HTTP_CONN = HTTPConnection
# Test a normal HTTP/1.0 request.
self.getPage('/page2')
self.assertStatus('200 OK')
self.assertBody(pov)
# Apache, for example, may emit a Connection header even for HTTP/1.0
# self.assertNoHeader("Connection")
# Test a keep-alive HTTP/1.0 request.
self.persistent = True
self.getPage('/page3', headers=[('Connection', 'Keep-Alive')])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader('Connection', 'Keep-Alive')
# Remove the keep-alive header again.
self.getPage('/page3')
self.assertStatus('200 OK')
self.assertBody(pov)
# Apache, for example, may emit a Connection header even for HTTP/1.0
# self.assertNoHeader("Connection")
|
ConnectionCloseTests
|
python
|
pydata__xarray
|
xarray/tests/test_datatree.py
|
{
"start": 61422,
"end": 63642
}
|
class ____:
def test_drop_nodes(self) -> None:
sue = DataTree.from_dict({"Mary": None, "Kate": None, "Ashley": None})
# test drop just one node
dropped_one = sue.drop_nodes(names="Mary")
assert "Mary" not in dropped_one.children
# test drop multiple nodes
dropped = sue.drop_nodes(names=["Mary", "Kate"])
assert not {"Mary", "Kate"}.intersection(set(dropped.children))
assert "Ashley" in dropped.children
# test raise
with pytest.raises(KeyError, match=r"nodes {'Mary'} not present"):
dropped.drop_nodes(names=["Mary", "Ashley"])
# test ignore
childless = dropped.drop_nodes(names=["Mary", "Ashley"], errors="ignore")
assert childless.children == {}
def test_assign(self) -> None:
dt = DataTree()
expected = DataTree.from_dict({"/": xr.Dataset({"foo": 0}), "/a": None})
# kwargs form
result = dt.assign(foo=xr.DataArray(0), a=DataTree())
assert_equal(result, expected)
# dict form
result = dt.assign({"foo": xr.DataArray(0), "a": DataTree()})
assert_equal(result, expected)
def test_filter_like(self) -> None:
flower_tree = DataTree.from_dict(
{"root": None, "trunk": None, "leaves": None, "flowers": None}
)
fruit_tree = DataTree.from_dict(
{"root": None, "trunk": None, "leaves": None, "fruit": None}
)
barren_tree = DataTree.from_dict({"root": None, "trunk": None})
# test filter_like tree
filtered_tree = flower_tree.filter_like(barren_tree)
assert filtered_tree.equals(barren_tree)
assert "flowers" not in filtered_tree.children
# test symmetrical pruning results in isomorphic trees
assert flower_tree.filter_like(fruit_tree).isomorphic(
fruit_tree.filter_like(flower_tree)
)
# test "deep" pruning
dt = DataTree.from_dict(
{"/a/A": None, "/a/B": None, "/b/A": None, "/b/B": None}
)
other = DataTree.from_dict({"/a/A": None, "/b/A": None})
filtered = dt.filter_like(other)
assert filtered.equals(other)
|
TestRestructuring
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v3/permissions.py
|
{
"start": 1014,
"end": 1269
}
|
class ____(BasePermission):
"""Grant permission if user is the same as the one being accessed."""
def has_permission(self, request, view):
user = view._get_parent_user()
if user == request.user:
return True
|
IsCurrentUser
|
python
|
apache__airflow
|
airflow-core/src/airflow/utils/context.py
|
{
"start": 3021,
"end": 3644
}
|
class ____(ConnectionAccessorSDK):
"""Wrapper to access Connection entries in template."""
def __getattr__(self, conn_id: str) -> Any:
from airflow.models.connection import Connection
return Connection.get_connection_from_secrets(conn_id)
def get(self, conn_id: str, default_conn: Any = None) -> Any:
from airflow.exceptions import AirflowNotFoundException
from airflow.models.connection import Connection
try:
return Connection.get_connection_from_secrets(conn_id)
except AirflowNotFoundException:
return default_conn
|
ConnectionAccessor
|
python
|
pytorch__pytorch
|
torch/ao/nn/quantized/modules/rnn.py
|
{
"start": 67,
"end": 1898
}
|
class ____(torch.ao.nn.quantizable.LSTM):
r"""A quantized long short-term memory (LSTM).
For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
.. note::
To access the weights and biases, you need to access them per layer.
See examples in :class:`~torch.ao.nn.quantizable.LSTM`
Examples::
>>> # xdoctest: +SKIP
>>> custom_module_config = {
... 'float_to_observed_custom_module_class': {
... nn.LSTM: nn.quantizable.LSTM,
... },
... 'observed_to_quantized_custom_module_class': {
... nn.quantizable.LSTM: nn.quantized.LSTM,
... }
... }
>>> tq.prepare(model, prepare_custom_module_class=custom_module_config)
>>> tq.convert(model, convert_custom_module_class=custom_module_config)
"""
_FLOAT_MODULE = torch.ao.nn.quantizable.LSTM # type: ignore[assignment]
def _get_name(self) -> str:
return "QuantizedLSTM"
@classmethod
def from_float(cls, *args: Any, **kwargs: Any) -> None:
# The whole flow is float -> observed -> quantized
# This class does observed -> quantized only
raise NotImplementedError(
"It looks like you are trying to convert a "
"non-observed LSTM module. Please, see "
"the examples on quantizable LSTMs."
)
@classmethod
def from_observed(cls: type["LSTM"], other: torch.ao.nn.quantizable.LSTM) -> "LSTM":
assert isinstance(other, cls._FLOAT_MODULE) # type: ignore[has-type]
converted = torch.ao.quantization.convert(
other, inplace=False, remove_qconfig=True
)
converted.__class__ = cls
return converted
|
LSTM
|
python
|
openai__openai-python
|
src/openai/types/responses/tool_choice_types_param.py
|
{
"start": 220,
"end": 838
}
|
class ____(TypedDict, total=False):
type: Required[
Literal[
"file_search",
"web_search_preview",
"computer_use_preview",
"web_search_preview_2025_03_11",
"image_generation",
"code_interpreter",
]
]
"""The type of hosted tool the model should to use.
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
Allowed values are:
- `file_search`
- `web_search_preview`
- `computer_use_preview`
- `code_interpreter`
- `image_generation`
"""
|
ToolChoiceTypesParam
|
python
|
python-pillow__Pillow
|
src/PIL/ImageFilter.py
|
{
"start": 8782,
"end": 8969
}
|
class ____(BuiltinFilter):
name = "Find Edges"
# fmt: off
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1,
)
# fmt: on
|
FIND_EDGES
|
python
|
scikit-learn__scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
{
"start": 1631,
"end": 2173
}
|
class ____(linear_model.SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
# XXX untested as of v0.22
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.decision_function(self, X, *args, **kw)
|
_SparseSGDRegressor
|
python
|
django__django
|
django/db/models/fields/json.py
|
{
"start": 10733,
"end": 10933
}
|
class ____(HasKeyLookup):
lookup_name = "has_keys"
postgres_operator = "?&"
logical_operator = " AND "
def get_prep_lookup(self):
return [str(item) for item in self.rhs]
|
HasKeys
|
python
|
pytorch__pytorch
|
test/dynamo/test_tree_map.py
|
{
"start": 2917,
"end": 11601
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
torch._dynamo.reset()
def _run_tree_map(self, tree_map_impl, kwargs):
lhs = _build_tree(0)
rhs = _build_tree(7)
def fn(a, b):
return tree_map_impl(_combine_leaves, a, b, **kwargs)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(lhs, rhs)
result = compiled(lhs, rhs)
_assert_trees_allclose(self, expected, result)
@parametrize("tree_map_name,tree_map_impl", TREE_MAP_IMPLEMENTATIONS)
@parametrize("kwargs_name,kwargs,allowed_impls", KWARG_CASES)
def test_tree_map_variants(
self,
tree_map_name: str,
tree_map_impl,
kwargs_name: str,
kwargs: dict,
allowed_impls,
) -> None:
if tree_map_name == "pytree_cxx" and cxx_pytree is None:
self.skipTest("torch.utils._cxx_pytree is unavailable")
if allowed_impls is not None and tree_map_name not in allowed_impls:
self.skipTest("kwargs unsupported for implementation")
self._run_tree_map(tree_map_impl, kwargs)
def test_tree_map_rejects_mismatched_container_types(self) -> None:
def fn(a, b):
return pytree.tree_map(lambda u, v: u + v, a, b)
lhs = [torch.ones(2), torch.ones(2)]
rhs = (torch.ones(2), torch.ones(2))
with self.assertRaises(ValueError):
fn(lhs, rhs)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
with self.assertRaisesRegex(
(ValueError, torch._dynamo.exc.Unsupported),
"Node type mismatch",
):
compiled(lhs, rhs)
def test_tree_map_is_leaf_handles_tensor_nodes(self) -> None:
def fn(tree):
return pytree.tree_map(
lambda pair: torch.stack(pair).sum(dim=0),
tree,
is_leaf=lambda node: isinstance(node, tuple),
)
tree = [(torch.ones(2), torch.ones(2) * 4)]
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(tree)
result = compiled(tree)
_assert_trees_allclose(self, expected, result)
def test_tree_map_only_applies_to_tensor_nodes(self) -> None:
tree = {"tensor": torch.ones(2), "int": 3}
def mapper(node):
if not isinstance(node, torch.Tensor):
raise AssertionError("mapper should only see tensors")
return node + 2
def fn(arg):
return pytree.tree_map_only(torch.Tensor, mapper, arg)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(tree)
result = compiled(tree)
_assert_trees_allclose(self, expected, result)
def test_tree_map_only_multiple_trees_falls_back(self) -> None:
lhs = {"a": torch.ones(2), "b": torch.ones(2) * 2}
rhs = {"a": torch.ones(2) * 3, "b": torch.ones(2) * 4}
def fn(a, b):
return pytree.tree_map_only(torch.Tensor, lambda x, y: x + y, a, b)
with self.assertRaisesRegex(TypeError, "callable"):
fn(lhs, rhs)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
with self.assertRaisesRegex(
(TypeError, torch._dynamo.exc.Unsupported),
r"(callable|Unsupported function call)",
):
compiled(lhs, rhs)
def test_tree_map_only_handles_multiple_types(self) -> None:
tree = {"int": 7, "tuple": (1, 2), "tensor": torch.ones(2)}
def mapper(node):
if isinstance(node, int):
return node + 1
if isinstance(node, tuple):
return tuple(val + 10 for val in node)
raise AssertionError("unexpected node passed to mapper")
def fn(arg):
return pytree.tree_map_only((int, tuple), mapper, arg)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(tree)
result = compiled(tree)
_assert_trees_allclose(self, expected, result)
def test_tree_map_is_leaf_non_constant_fallback(self) -> None:
tree = {"a": torch.arange(2.0), "b": torch.arange(2.0) + 1}
def is_leaf(node):
if isinstance(node, torch.Tensor):
# Depends on runtime tensor value; cannot be folded to a constant.
return (node.sum() > 1).item()
return False
def mapper(node):
return node * 2 if isinstance(node, torch.Tensor) else node
def fn(arg):
return pytree.tree_map(mapper, arg, is_leaf=is_leaf)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(tree)
result = compiled(tree)
_assert_trees_allclose(self, expected, result)
def test_tree_map_only_predicate_selector_skips_fastpath(self) -> None:
tree = {"keep": torch.ones(2), "other": (1, 2)}
def selector(node):
return isinstance(node, torch.Tensor) and node.shape == (2,)
def mapper(node):
return node + 5 if isinstance(node, torch.Tensor) else node
def fn(arg):
return pytree.tree_map_only(selector, mapper, arg)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(tree)
result = compiled(tree)
_assert_trees_allclose(self, expected, result)
def test_tree_map_none_nodes_reject_mismatched_siblings(self) -> None:
_require_optree(self)
def fn(a, b):
return optree.tree_map(lambda u, v: (u, v), a, b)
lhs = {"k": None}
rhs = {"k": torch.ones(2)}
with self.assertRaisesRegex(ValueError, "Expected None"):
fn(lhs, rhs)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
with self.assertRaisesRegex(
(ValueError, torch._dynamo.exc.Unsupported),
r"(Expected None|expected <class 'NoneType'>)",
):
compiled(lhs, rhs)
@parametrize("tree_map_name,tree_map_impl", TREE_MAP_IMPLEMENTATIONS)
def test_tree_map_none_nodes_default_behavior(
self, tree_map_name: str, tree_map_impl
) -> None:
if tree_map_name == "optree":
self.skipTest("optree treats None as an internal node by default")
def fn(a, b):
return tree_map_impl(lambda u, v: (u, v), a, b)
tree = {"k": None}
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(tree, tree)
result = compiled(tree, tree)
self.assertEqual(result["k"], (None, None))
self.assertEqual(result, expected)
def test_constantvariable_handles_none_is_leaf_kwarg(self) -> None:
_require_optree(self)
tree = {"none": None}
def run_case(none_is_leaf_flag):
def fn(arg):
def mapper(node):
if node is None:
return "visited"
return node
kwargs = {}
if none_is_leaf_flag is not _NONE_IS_LEAF_UNSET:
kwargs["none_is_leaf"] = none_is_leaf_flag
return optree.tree_map(mapper, arg, **kwargs)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(tree)
result = compiled(tree)
self.assertEqual(result, expected)
return result["none"]
self.assertEqual(run_case(_NONE_IS_LEAF_UNSET), None)
self.assertEqual(run_case(False), None)
self.assertEqual(run_case(True), "visited")
def test_constantvariable_handles_python_and_dtype_leaves(self) -> None:
_require_optree(self)
tree = {
"int": 7,
"nested": {"string": "foo", "dtype": torch.float32},
}
def fn(arg):
def mapper(node):
if isinstance(node, int):
return node + 1
if isinstance(node, str):
return node.upper()
if isinstance(node, torch.dtype):
return torch.float64
return node
return optree.tree_map(mapper, arg)
compiled = torch.compile(fn, backend="eager", fullgraph=True)
expected = fn(tree)
result = compiled(tree)
self.assertEqual(result["int"], 8)
self.assertEqual(result["nested"]["string"], "FOO")
self.assertIs(result["nested"]["dtype"], torch.float64)
self.assertEqual(result, expected)
if __name__ == "__main__": # pragma: no cover
run_tests()
|
TreeMapCompileTests
|
python
|
numba__numba
|
numba/cuda/stubs.py
|
{
"start": 6030,
"end": 6342
}
|
class ____(Stub):
'''
lanemask_lt()
Returns a 32-bit integer mask of all lanes (including inactive ones) with
ID less than the current lane.
'''
_description_ = '<lanemask_lt()>'
# -------------------------------------------------------------------------------
# memory fences
|
lanemask_lt
|
python
|
ijl__orjson
|
test/test_transform.py
|
{
"start": 248,
"end": 3750
}
|
class ____:
def _pass_transform(self, filename, reference=None):
data = _read_file(filename)
assert orjson.dumps(orjson.loads(data)) == (reference or data)
def _fail_transform(self, filename):
data = _read_file(filename)
with pytest.raises(orjson.JSONDecodeError):
orjson.loads(data)
def test_number_1(self):
"""
number_1.0.json
"""
self._pass_transform("number_1.0.json")
def test_number_1e6(self):
"""
number_1e6.json
"""
self._pass_transform("number_1e6.json", b"[1000000.0]")
def test_number_1e_999(self):
"""
number_1e-999.json
"""
self._pass_transform("number_1e-999.json", b"[0.0]")
def test_number_10000000000000000999(self):
"""
number_10000000000000000999.json
"""
# cannot serialize due to range
assert orjson.loads(_read_file("number_10000000000000000999.json")) == [
10000000000000000999,
]
def test_number_1000000000000000(self):
"""
number_1000000000000000.json
"""
self._pass_transform("number_1000000000000000.json")
def test_object_key_nfc_nfd(self):
"""
object_key_nfc_nfd.json
"""
self._pass_transform("object_key_nfc_nfd.json")
def test_object_key_nfd_nfc(self):
"""
object_key_nfd_nfc.json
"""
self._pass_transform("object_key_nfd_nfc.json")
def test_object_same_key_different_values(self):
"""
object_same_key_different_values.json
"""
self._pass_transform("object_same_key_different_values.json", b'{"a":2}')
def test_object_same_key_same_value(self):
"""
object_same_key_same_value.json
"""
self._pass_transform("object_same_key_same_value.json", b'{"a":1}')
def test_object_same_key_unclear_values(self):
"""
object_same_key_unclear_values.json
"""
data = _read_file("object_same_key_unclear_values.json")
# varies by backend
assert data in (b'{"a":-0.0}', b'{"a":0, "a":-0}')
def test_string_1_escaped_invalid_codepoint(self):
"""
string_1_escaped_invalid_codepoint.json
"""
self._fail_transform("string_1_escaped_invalid_codepoint.json")
def test_string_1_invalid_codepoint(self):
"""
string_1_invalid_codepoint.json
"""
self._fail_transform("string_1_invalid_codepoint.json")
def test_string_2_escaped_invalid_codepoints(self):
"""
string_2_escaped_invalid_codepoints.json
"""
self._fail_transform("string_2_escaped_invalid_codepoints.json")
def test_string_2_invalid_codepoints(self):
"""
string_2_invalid_codepoints.json
"""
self._fail_transform("string_2_invalid_codepoints.json")
def test_string_3_escaped_invalid_codepoints(self):
"""
string_3_escaped_invalid_codepoints.json
"""
self._fail_transform("string_3_escaped_invalid_codepoints.json")
def test_string_3_invalid_codepoints(self):
"""
string_3_invalid_codepoints.json
"""
self._fail_transform("string_3_invalid_codepoints.json")
def test_string_with_escaped_NULL(self):
"""
string_with_escaped_NULL.json
"""
self._pass_transform("string_with_escaped_NULL.json")
|
TestJSONTestSuiteTransform
|
python
|
django__django
|
tests/model_forms/models.py
|
{
"start": 9495,
"end": 9619
}
|
class ____(models.Model):
biggie = models.BigIntegerField()
def __str__(self):
return str(self.biggie)
|
BigInt
|
python
|
pydantic__pydantic
|
pydantic-core/tests/serializers/test_any.py
|
{
"start": 29143,
"end": 29256
}
|
class ____(ipaddress.IPv4Network):
def __str__(self):
return super().__str__() + '_subclassed'
|
SubNetV4
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/vertex_ai/test_feature_store.py
|
{
"start": 12123,
"end": 13770
}
|
class ____:
@mock.patch(VERTEX_AI_PATH.format("feature_store.FeatureStoreHook"))
def test_execute(self, mock_hook_class):
# Create the mock hook and set up its return value
mock_hook = mock.MagicMock()
mock_hook_class.return_value = mock_hook
# Set up the return value for get_feature_online_store to match the hook implementation
SAMPLE_RESPONSE = {
"etag": "",
"labels": {},
"name": FEATURE_ONLINE_STORE_ID,
"satisfies_pzi": False,
"satisfies_pzs": False,
"state": 0,
}
mock_hook.get_feature_online_store.return_value = FeatureOnlineStore(SAMPLE_RESPONSE)
common_kwargs = {
"project_id": GCP_PROJECT,
"location": GCP_LOCATION,
"feature_online_store_id": FEATURE_ONLINE_STORE_ID,
"metadata": (),
"timeout": 100,
"retry": None,
}
op = GetFeatureOnlineStoreOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
**common_kwargs,
)
response = op.execute(context={"ti": mock.MagicMock()})
# Verify hook initialization
mock_hook_class.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
# Verify hook method call
mock_hook.get_feature_online_store.assert_called_once_with(**common_kwargs)
# Verify response matches expected value
assert response == SAMPLE_RESPONSE
|
TestGetFeatureOnlineStoreOperator
|
python
|
scikit-learn__scikit-learn
|
sklearn/gaussian_process/kernels.py
|
{
"start": 17778,
"end": 23303
}
|
class ____(Kernel):
"""Kernel which is composed of a set of other kernels.
.. versionadded:: 0.18
Parameters
----------
kernels : list of Kernels
The other kernels
Examples
--------
>>> from sklearn.gaussian_process.kernels import WhiteKernel
>>> from sklearn.gaussian_process.kernels import RBF
>>> from sklearn.gaussian_process.kernels import CompoundKernel
>>> kernel = CompoundKernel(
... [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
>>> print(kernel.bounds)
[[-11.51292546 11.51292546]
[-11.51292546 11.51292546]]
>>> print(kernel.n_dims)
2
>>> print(kernel.theta)
[1.09861229 0.69314718]
"""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.k1.n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims : (i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernel
stacked along an additional axis.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of the
kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : ndarray of shape \
(n_samples_X, n_samples_X, n_dims, n_kernels), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all(
[self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))]
)
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return np.all([kernel.is_stationary() for kernel in self.kernels])
@property
def requires_vector_input(self):
"""Returns whether the kernel is defined on discrete structures."""
return np.any([kernel.requires_vector_input for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to `np.diag(self(X))`; however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
|
CompoundKernel
|
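As a quick illustration of the stacking behaviour documented in `__call__` and `diag` above, the sketch below evaluates a `CompoundKernel` on a small random input. The shapes follow directly from the docstrings (an extra trailing axis of length `n_kernels`); the specific hyperparameter values are arbitrary.

```python
import numpy as np
from sklearn.gaussian_process.kernels import RBF, CompoundKernel, WhiteKernel

X = np.random.RandomState(0).rand(5, 2)
kernel = CompoundKernel([WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])

K = kernel(X)                # shape (5, 5, 2): one slice per sub-kernel
K_diag = kernel.diag(X)      # shape (5, 2)
_, K_grad = kernel(X, eval_gradient=True)
print(K.shape, K_diag.shape, K_grad.shape)   # (5, 5, 2) (5, 2) (5, 5, 1, 2)
```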
python
|
pytorch__pytorch
|
test/higher_order_ops/test_invoke_subgraph.py
|
{
"start": 57246,
"end": 58315
}
|
class ____(torch.nn.Module):
def forward(self, primals_1: "f32[8, 8]"):
partitioned_fw_subgraph_0_0 = self.partitioned_fw_subgraph_0_0
invoke_subgraph_2 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_0, 'partitioned_fw_subgraph_0_0', primals_1); partitioned_fw_subgraph_0_0 = primals_1 = None
getitem: "f32[8, 8]" = invoke_subgraph_2[0]
getitem_1: "f32[8, 8]" = invoke_subgraph_2[1]; invoke_subgraph_2 = None
add: "f32[8, 8]" = torch.ops.aten.add.Tensor(getitem, getitem_1); getitem = getitem_1 = None
return (add,)
class partitioned_fw_subgraph_0_0(torch.nn.Module):
def forward(self, primals_0: "f32[8, 8]"):
mul: "f32[8, 8]" = torch.ops.aten.mul.Tensor(primals_0, 2)
mul_1: "f32[8, 8]" = torch.ops.aten.mul.Tensor(primals_0, 3); primals_0 = None
return (mul, mul_1)
""",
)
self.assertExpectedInline(
normalize_gm(backend.bw_graphs[0].print_readable(print_output=False)),
"""\
|
GraphModule
|
python
|
patrick-kidger__equinox
|
equinox/_errors.py
|
{
"start": 2577,
"end": 14002
}
|
class ____(RuntimeError):
pass
@filter_custom_jvp
def _error(x, pred, index, *, msgs, on_error, stack):
if on_error == "raise":
def raises(_index):
# Sneakily smuggle out the information about the error. Inspired by
# `sys.last_value`.
msg = msgs[_index.item()]
_jit.last_error_info = (msg, stack)
raise _EquinoxRuntimeError(
f"{msg}\n\n\n"
"--------------------\n"
"An error occurred during the runtime of your JAX program! "
"Unfortunately you do not appear to be using `equinox.filter_jit` "
"(perhaps you are using `jax.jit` instead?) and so further information "
"about the error cannot be displayed. (Probably you are seeing a very "
"large but uninformative error message right now.) Please wrap your "
"program with `equinox.filter_jit`.\n"
"--------------------\n"
)
def tpu_msg(_out, _index):
msg = msgs[_index.item()]
# `print` doesn't work; nor does `jax.debug.print`.
# But both `input` and `jax.debug.breakpoint` do. The former allows us to
# actually display something to the user.
input(msg + _tpu_msg)
# We do the tree_map inside the pure_callback, not outside, so that `out`
# has a data dependency and doesn't get optimised out.
return jtu.tree_map(_nan_like, _out)
def handle_error(): # pyright: ignore
out = jax.pure_callback(raises, struct, index)
# If we make it this far then we're on the TPU, which squelches runtime
# errors and returns dummy values instead.
# Fortunately, we're able to outsmart it!
return jax.pure_callback(tpu_msg, struct, out, index)
struct = jax.eval_shape(lambda: x)
return lax.cond(pred, handle_error, lambda: x)
elif on_error == "breakpoint":
def display_msg(_index):
print(_frames_msg)
print("equinox.EquinoxRuntimeError: " + msgs[_index.item()])
return _index
def to_nan(_index):
del _index
return jtu.tree_map(_nan_like, struct)
def handle_error():
index_struct = jax.eval_shape(lambda: index)
_index = jax.pure_callback(display_msg, index_struct, index)
_index = jax.debug.breakpoint(
token=_index, num_frames=EQX_ON_ERROR_BREAKPOINT_FRAMES
)
_index = unvmap_max(cast(Any, _index))
return jax.pure_callback(to_nan, struct, _index)
struct = jax.eval_shape(lambda: x)
return lax.cond(pred, handle_error, lambda: x)
elif on_error == "nan":
return lax.cond(pred, ft.partial(jtu.tree_map, _nan_like), lambda y: y, x)
else:
assert False
# Use a custom_jvp to put the lax.cond outside of AD.
# This is needed as (a) lax.cond will unnecessarily promote symbolic
# zeros to non-symbolic-zeros, and we'd really like to avoid that, and (b) we need to
# wrap our pure_callbacks in custom JVP rules.
@_error.def_jvp
def _error_jvp(primals, tangents, *, msgs, on_error, stack):
x, pred, index = primals
tx, _, _ = tangents
return _error(x, pred, index, msgs=msgs, on_error=on_error, stack=stack), tx
if EQX_ON_ERROR == "breakpoint":
# TODO: remove this branch once JAX issue #16732 is fixed.
_old_jit = jax.jit
@ft.wraps(jax.jit)
def fixed_jit(fun, *args, **kwargs):
jit_fun = _old_jit(fun, *args, **kwargs)
def fixed_jit_impl(*args2, **kwargs2):
if currently_jitting():
warnings.warn(
"Ignoring intermediate `jax.jit` decorator, to work around JAX "
"issue #16732, as `EQX_ON_ERROR=breakpoint` is set."
)
return fun(*args2, **kwargs2)
else:
return jit_fun(*args2, **kwargs2)
return fixed_jit_impl
jax.jit = fixed_jit
# Remove the `on_error` argument from the public API for now. If you pass
# `on_error="breakpoint"` -- rather than setting `EQX_ON_ERROR=breakpoint` -- then our
# fix for JAX issue #16732 -- above -- can't kick in. So in practice this
# argument probably won't work.
@doc_remove_args("on_error")
def error_if(
x: PyTree,
pred: Bool[ArrayLike, "..."],
msg: str,
*,
on_error: Literal["default", "raise", "breakpoint", "nan"] = "default",
) -> PyTree:
"""Throws an error based on runtime values. Works even under JIT.
**Arguments:**
- `x`: will be returned unchanged. This is used to determine where the error check
happens in the overall computation: it will happen after `x` is computed and
before the return value is used. `x` can be any PyTree, and it must contain at
least one array.
- `pred`: a boolean for whether to raise an error. Can be an array of bools; an
error will be raised if any of them are `True`. If vmap'd then an error will be
raised if any batch element has `True`.
- `msg`: the string to display as an error message.
In addition, the `EQX_ON_ERROR` environment variable is checked for how any runtime
errors should be handled. Possible values are:
- `EQX_ON_ERROR=raise` will raise a runtime error.
- `EQX_ON_ERROR=nan` will return `NaN` instead of `x`, and then continue the
computation.
- `EQX_ON_ERROR=breakpoint` will open a debugger.
- Note that this option may prevent certain compiler optimisations, so
permanently fixing this value is not recommended.
- You will need to also pass the `-s` flag to `pytest`, if you are
also using that.
- By default this only allows you to see a single frame in the debugger. This is
to work around JAX bug [#16732](https://github.com/google/jax/issues/16732).
(Bugs whilst debugging bugs, eek!) In practice you may like to set the
`EQX_ON_ERROR_BREAKPOINT_FRAMES` environment variable to a small integer,
which specifies how many frames upwards the debugger should capture. The
JAX bug is triggered when taking too many frames.
After changing an environment variable, the Python process must be restarted.
**Returns:**
The original argument `x` unchanged. **If this return value is unused then the error
check will not be performed.** (It will be removed as part of dead code
elimination.)
!!! Example
```python
@jax.jit
def f(x):
x = error_if(x, x < 0, "x must be >= 0")
# ...use x in your computation...
return x
f(jax.numpy.array(-1))
```
"""
return branched_error_if(x, pred, 0, [msg], on_error=on_error)
@doc_remove_args("on_error")
def branched_error_if(
x: PyTree,
pred: Bool[ArrayLike, "..."],
index: Int[ArrayLike, "..."],
msgs: Sequence[str],
*,
on_error: Literal["default", "raise", "breakpoint", "nan"] = "default",
) -> PyTree:
"""As [`equinox.error_if`][], but will raise one of
several `msgs` depending on the value of `index`. If `index` is vmap'd, then the
error message from the largest value (across the whole batch) will be used.
"""
leaves = jtu.tree_leaves((x, pred, index))
# This carefully does not perform any JAX operations if `pred` and `index` are
# a bool and an int.
# This ensures we can use `error_if` before init_google.
if any(is_array(leaf) for leaf in leaves):
return branched_error_if_impl_jit(x, pred, index, msgs, on_error=on_error)
else:
return branched_error_if_impl(x, pred, index, msgs, on_error=on_error)
def branched_error_if_impl(
x: PyTree,
pred: Bool[ArrayLike, "..."],
index: Int[ArrayLike, "..."],
msgs: Sequence[str],
*,
on_error: Literal["default", "raise", "breakpoint", "nan"],
) -> PyTree:
if on_error == "default":
on_error = EQX_ON_ERROR
elif on_error not in ("raise", "breakpoint", "nan"):
raise RuntimeError("Unrecognised value for `on_error`.")
with jax.ensure_compile_time_eval():
# This carefully does not perform any JAX operations if `pred` and `index` are
# a bool and an int.
# This ensures we can use `error_if` before init_google.
if not isinstance(pred, bool):
pred = unvmap_any(pred)
if not isinstance(index, int):
index = unvmap_max(index)
if not isinstance(pred, jax.core.Tracer):
if isinstance(pred, Array):
pred = pred.item()
assert type(pred) is bool
if pred:
if not isinstance(index, jax.core.Tracer):
if isinstance(index, Array):
index = index.item()
assert type(index) is int
if on_error == "raise":
raise EquinoxTracetimeError(msgs[index])
elif on_error == "breakpoint":
print(msgs[index])
breakpoint()
elif on_error == "nan":
warnings.warn(
"Resolving error at trace time (because the predicate is "
"statically resolvable), by substituting NaNs (because "
"`on_error='nan'`)."
)
return jtu.tree_map(_nan_like, x)
else:
assert False
# else defer error to runtime, when the index is known.
else:
return x
stack: list[bytes | str] = []
for frame, lineno in traceback.walk_stack(None):
frame_id = frame.f_locals.get("__equinox_jit_id__", None)
if type(frame_id) is bytes:
stack.append(frame_id)
if traceback_util.include_frame(frame):
# This seems to be the simplest way to format a single frame?
frame_str: str = "".join(
traceback.format_tb(
types.TracebackType(None, frame, frame.f_lasti, lineno)
)
)
stack.append(frame_str)
dynamic_x, static_x = partition(x, is_array)
flat = jtu.tree_leaves(dynamic_x)
if len(flat) == 0:
raise ValueError("No arrays to thread error on to.")
dynamic_x = _error(
dynamic_x, pred, index, msgs=msgs, on_error=on_error, stack=stack
)
return combine(dynamic_x, static_x)
# filter_jit does some work to produce nicer runtime error messages.
# We also place it here to ensure a consistent experience when using JAX in eager mode.
branched_error_if_impl_jit = _jit.filter_jit(branched_error_if_impl)
def assert_dce(
x: PyTree,
msg: str,
*,
on_error: Literal["default", "raise", "breakpoint", "nan"] = "default",
) -> PyTree:
"""Asserts that a particular array (or PyTree of arrays) is DCE'd."""
if currently_jitting():
pred = jnp.invert(False) # Prevent the trace-time error-raising from running.
return error_if(x, pred, msg, on_error=on_error)
else:
# Don't run if not JIT'ing, as without the compiler nothing will be DCE'd.
return x
|
EquinoxTracetimeError
|
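`error_if` already ships a usage example in its docstring; `branched_error_if` does not, so here is a minimal sketch following its documented signature (a predicate plus an index selecting one of several messages). It assumes both `filter_jit` and `branched_error_if` are re-exported at the top-level `equinox` namespace.

```python
import jax.numpy as jnp
import equinox as eqx   # assumes branched_error_if is exported at top level


@eqx.filter_jit
def safe_log(x):
    is_nan = jnp.isnan(x)
    is_nonpos = x <= 0
    # index 0 selects the NaN message, index 1 the non-positive message
    index = jnp.where(is_nan, 0, 1)
    x = eqx.branched_error_if(
        x,
        is_nan | is_nonpos,
        index,
        ["cannot take log of NaN", "cannot take log of a non-positive value"],
    )
    return jnp.log(x)


print(safe_log(jnp.array(2.0)))    # fine
# safe_log(jnp.array(-1.0))        # triggers the runtime error machinery above
```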
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/mapper.py
|
{
"start": 4981,
"end": 156646
}
|
class ____(
ORMFromClauseRole,
ORMEntityColumnsClauseRole[_O],
MemoizedHasCacheKey,
InspectionAttr,
log.Identified,
inspection.Inspectable["Mapper[_O]"],
EventTarget,
Generic[_O],
):
"""Defines an association between a Python class and a database table or
other relational structure, so that ORM operations against the class may
proceed.
The :class:`_orm.Mapper` object is instantiated using mapping methods
present on the :class:`_orm.registry` object. For information
about instantiating new :class:`_orm.Mapper` objects, see
:ref:`orm_mapping_classes_toplevel`.
"""
dispatch: dispatcher[Mapper[_O]]
_dispose_called = False
_configure_failed: Any = False
_ready_for_configure = False
def __init__(
self,
class_: Type[_O],
local_table: Optional[FromClause] = None,
properties: Optional[Mapping[str, MapperProperty[Any]]] = None,
primary_key: Optional[Iterable[_ORMColumnExprArgument[Any]]] = None,
inherits: Optional[Union[Mapper[Any], Type[Any]]] = None,
inherit_condition: Optional[_ColumnExpressionArgument[bool]] = None,
inherit_foreign_keys: Optional[
Sequence[_ORMColumnExprArgument[Any]]
] = None,
always_refresh: bool = False,
version_id_col: Optional[_ORMColumnExprArgument[Any]] = None,
version_id_generator: Optional[
Union[Literal[False], Callable[[Any], Any]]
] = None,
polymorphic_on: Optional[
Union[_ORMColumnExprArgument[Any], str, MapperProperty[Any]]
] = None,
_polymorphic_map: Optional[Dict[Any, Mapper[Any]]] = None,
polymorphic_identity: Optional[Any] = None,
concrete: bool = False,
with_polymorphic: Optional[_WithPolymorphicArg] = None,
polymorphic_abstract: bool = False,
polymorphic_load: Optional[Literal["selectin", "inline"]] = None,
allow_partial_pks: bool = True,
batch: bool = True,
column_prefix: Optional[str] = None,
include_properties: Optional[Sequence[str]] = None,
exclude_properties: Optional[Sequence[str]] = None,
passive_updates: bool = True,
passive_deletes: bool = False,
confirm_deleted_rows: bool = True,
eager_defaults: Literal[True, False, "auto"] = "auto",
legacy_is_orphan: bool = False,
_compiled_cache_size: int = 100,
):
r"""Direct constructor for a new :class:`_orm.Mapper` object.
The :class:`_orm.Mapper` constructor is not called directly, and
is normally invoked through the
use of the :class:`_orm.registry` object through either the
:ref:`Declarative <orm_declarative_mapping>` or
:ref:`Imperative <orm_imperative_mapping>` mapping styles.
.. versionchanged:: 2.0 The public facing ``mapper()`` function is
removed; for a classical mapping configuration, use the
:meth:`_orm.registry.map_imperatively` method.
Parameters documented below may be passed to either the
:meth:`_orm.registry.map_imperatively` method, or may be passed in the
``__mapper_args__`` declarative class attribute described at
:ref:`orm_declarative_mapper_options`.
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`_schema.Table` or other
:class:`_sql.FromClause` (i.e. selectable) to which the class is
mapped. May be ``None`` if this mapper inherits from another mapper
using single-table inheritance. When using Declarative, this
argument is automatically passed by the extension, based on what is
configured via the :attr:`_orm.DeclarativeBase.__table__` attribute
or via the :class:`_schema.Table` produced as a result of
the :attr:`_orm.DeclarativeBase.__tablename__` attribute being
present.
:param polymorphic_abstract: Indicates this class will be mapped in a
polymorphic hierarchy, but not directly instantiated. The class is
mapped normally, except that it has no requirement for a
:paramref:`_orm.Mapper.polymorphic_identity` within an inheritance
hierarchy. The class however must be part of a polymorphic
inheritance scheme which uses
:paramref:`_orm.Mapper.polymorphic_on` at the base.
.. versionadded:: 2.0
.. seealso::
:ref:`orm_inheritance_abstract_poly`
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`_query.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
The :paramref:`.orm.Mapper.allow_partial_pks` parameter also
indicates to the ORM relationship lazy loader, when loading a
many-to-one related object, if a composite primary key that has
partial NULL values should result in an attempt to load from the
database, or if a load attempt is not necessary.
.. versionadded:: 2.0.36 :paramref:`.orm.Mapper.allow_partial_pks`
is consulted by the relationship lazy loader strategy, such that
when set to False, a SELECT for a composite primary key that
has partial NULL values will not be emitted.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`_schema.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect :class:`.Column` objects that
are mapped explicitly in the :paramref:`.Mapper.properties`
dictionary.
This parameter is typically useful with imperative mappings
that keep the :class:`.Table` object separate. Below, assuming
the ``user_table`` :class:`.Table` object has columns named
``user_id``, ``user_name``, and ``password``::
class User(Base):
__table__ = user_table
__mapper_args__ = {"column_prefix": "_"}
The above mapping will assign the ``user_id``, ``user_name``, and
``password`` columns to attributes named ``_user_id``,
``_user_name``, and ``_password`` on the mapped ``User`` class.
The :paramref:`.Mapper.column_prefix` parameter is uncommon in
modern use. For dealing with reflected tables, a more flexible
approach to automating a naming scheme is to intercept the
:class:`.Column` objects as they are reflected; see the section
:ref:`mapper_automated_reflection_schemes` for notes on this usage
pattern.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
          of one or more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the
case where database ON DELETE CASCADE rules may be deleting some of
those rows automatically. The warning may be changed to an
exception in a future release.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes.
The fetch of values occurs either by using ``RETURNING`` inline
with the ``INSERT`` or ``UPDATE`` statement, or by adding an
additional ``SELECT`` statement subsequent to the ``INSERT`` or
``UPDATE``, if the backend does not support ``RETURNING``.
The use of ``RETURNING`` is extremely performant in particular for
``INSERT`` statements where SQLAlchemy can take advantage of
:ref:`insertmanyvalues <engine_insertmanyvalues>`, whereas the use of
an additional ``SELECT`` is relatively poor performing, adding
additional SQL round trips which would be unnecessary if these new
attributes are not to be accessed in any case.
For this reason, :paramref:`.Mapper.eager_defaults` defaults to the
string value ``"auto"``, which indicates that server defaults for
INSERT should be fetched using ``RETURNING`` if the backing database
supports it and if the dialect in use supports "insertmanyreturning"
for an INSERT statement. If the backing database does not support
``RETURNING`` or "insertmanyreturning" is not available, server
defaults will not be fetched.
.. versionchanged:: 2.0.0rc1 added the "auto" option for
:paramref:`.Mapper.eager_defaults`
.. seealso::
:ref:`orm_server_defaults`
.. versionchanged:: 2.0.0 RETURNING now works with multiple rows
INSERTed at once using the
:ref:`insertmanyvalues <engine_insertmanyvalues>` feature, which
among other things allows the :paramref:`.Mapper.eager_defaults`
feature to be very performant on supporting backends.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
.. seealso::
:ref:`include_exclude_cols`
:param include_properties: An inclusive list or set of string column
names to map.
.. seealso::
:ref:`include_exclude_cols`
:param inherits: A mapped class or the corresponding
:class:`_orm.Mapper`
of one indicating a superclass to which this :class:`_orm.Mapper`
should *inherit* from. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and
the columns present are missing a :class:`_schema.ForeignKey`
configuration, this parameter can be used to specify which columns
are "foreign". In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is
auto-expunged when it is de-associated with *any* of its parents
that specify ``delete-orphan`` cascade. This behavior is more
consistent with that of a persistent object, and allows behavior to
be consistent in more scenarios independently of whether or not an
orphan object has been flushed yet or not.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
:param passive_deletes: Indicates DELETE behavior of foreign key
columns when a joined-table inheritance entity is being deleted.
Defaults to ``False`` for a base mapper; for an inheriting mapper,
defaults to ``False`` unless the value is set to ``True``
on the superclass mapper.
When ``True``, it is assumed that ON DELETE CASCADE is configured
on the foreign key relationships that link this mapper's table
to its superclass table, so that when the unit of work attempts
to delete the entity, it need only emit a DELETE statement for the
superclass table, and not this table.
When ``False``, a DELETE statement is emitted for this mapper's
table individually. If the primary key attributes local to this
table are unloaded, then a SELECT must be emitted in order to
validate these attributes; note that the primary key columns
of a joined-table subclass are not part of the "primary key" of
the object as a whole.
Note that a value of ``True`` is **always** forced onto the
subclass mappers; that is, it's not possible for a superclass
to specify passive_deletes without this taking effect for
all subclass mappers.
.. seealso::
:ref:`passive_deletes` - description of similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_updates` - supporting ON UPDATE
CASCADE for joined-table inheritance mappers
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_deletes` - supporting ON DELETE
CASCADE for joined-table inheritance mappers
:param polymorphic_load: Specifies "polymorphic loading" behavior
for a subclass in an inheritance hierarchy (joined and single
table inheritance only). Valid values are:
* "'inline'" - specifies this class should be part of
the "with_polymorphic" mappers, e.g. its columns will be included
in a SELECT query against the base.
* "'selectin'" - specifies that when instances of this class
are loaded, an additional SELECT will be emitted to retrieve
the columns specific to this subclass. The SELECT uses
IN to fetch multiple subclasses at once.
.. seealso::
:ref:`with_polymorphic_mapper_config`
:ref:`polymorphic_selectin`
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
May be specified as a string attribute name, or as a SQL
expression such as a :class:`_schema.Column` or in a Declarative
mapping a :func:`_orm.mapped_column` object. It is typically
expected that the SQL expression corresponds to a column in the
base-most mapped :class:`.Table`::
class Employee(Base):
__tablename__ = "employee"
id: Mapped[int] = mapped_column(primary_key=True)
discriminator: Mapped[str] = mapped_column(String(50))
__mapper_args__ = {
"polymorphic_on": discriminator,
"polymorphic_identity": "employee",
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = "employee"
id: Mapped[int] = mapped_column(primary_key=True)
discriminator: Mapped[str] = mapped_column(String(50))
__mapper_args__ = {
"polymorphic_on": case(
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
else_="employee",
),
"polymorphic_identity": "employee",
}
It may also refer to any attribute using its string name,
which is of particular use when using annotated column
configurations::
class Employee(Base):
__tablename__ = "employee"
id: Mapped[int] = mapped_column(primary_key=True)
discriminator: Mapped[str]
__mapper_args__ = {
"polymorphic_on": "discriminator",
"polymorphic_identity": "employee",
}
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`_schema.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. warning::
Currently, **only one discriminator column may be set**, typically
on the base-most class in the hierarchy. "Cascading" polymorphic
columns are not yet supported.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the column expression
referred to by the :paramref:`_orm.Mapper.polymorphic_on` setting. As
rows are received, the value corresponding to the
:paramref:`_orm.Mapper.polymorphic_on` column expression is compared
to this value, indicating which subclass should be used for the newly
reconstructed object.
.. seealso::
:ref:`inheritance_toplevel`
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that
:class:`_schema.Column`
objects present in
the mapped :class:`_schema.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
.. seealso::
:ref:`orm_mapping_properties` - in the
:ref:`orm_mapping_classes_toplevel`
:param primary_key: A list of :class:`_schema.Column`
objects, or alternatively string names of attribute names which
refer to :class:`_schema.Column`, which define
the primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
.. versionchanged:: 2.0.2 :paramref:`_orm.Mapper.primary_key`
arguments may be indicated as string attribute names as well.
.. seealso::
:ref:`mapper_primary_key` - background and example use
:param version_id_col: A :class:`_schema.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. The methodology is to
detect if an UPDATE statement does not match the last known
version id, a
:class:`~sqlalchemy.orm.exc.StaleDataError` exception is
thrown.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
return next_version
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id
generator may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
The :paramref:`_orm.Mapper.polymorphic_load` parameter may be
preferable over the use of :paramref:`_orm.Mapper.with_polymorphic`
in modern mappings to indicate a per-subclass technique of
indicating polymorphic loading styles.
.. seealso::
:ref:`with_polymorphic_mapper_config`
"""
self.class_ = util.assert_arg_type(class_, type, "class_")
self._sort_key = "%s.%s" % (
self.class_.__module__,
self.class_.__name__,
)
self._primary_key_argument = util.to_list(primary_key)
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = (
coercions.expect(
roles.ColumnArgumentOrKeyRole,
version_id_col,
argname="version_id_col",
)
if version_id_col is not None
else None
)
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
if inherits is not None:
self.inherits = _parse_mapper_argument(inherits)
else:
self.inherits = None
if local_table is not None:
self.local_table = coercions.expect(
roles.FromClauseRole,
local_table,
disable_inspection=True,
argname="local_table",
)
elif self.inherits:
# note this is a new flow as of 2.0 so that
# .local_table need not be Optional
self.local_table = self.inherits.local_table
self.single = True
else:
raise sa_exc.ArgumentError(
f"Mapper[{self.class_.__name__}(None)] has None for a "
"primary table argument and does not specify 'inherits'"
)
if inherit_condition is not None:
self.inherit_condition = coercions.expect(
roles.OnClauseRole, inherit_condition
)
else:
self.inherit_condition = None
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = dict(properties) if properties else {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
# interim - polymorphic_on is further refined in
# _configure_polymorphic_setter
self.polymorphic_on = (
coercions.expect( # type: ignore
roles.ColumnArgumentOrKeyRole,
polymorphic_on,
argname="polymorphic_on",
)
if polymorphic_on is not None
else None
)
self.polymorphic_abstract = polymorphic_abstract
self._dependency_processors = []
self.validators = util.EMPTY_DICT
self.passive_updates = passive_updates
self.passive_deletes = passive_deletes
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
self._set_with_polymorphic(with_polymorphic)
self.polymorphic_load = polymorphic_load
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
with _CONFIGURE_MUTEX:
cast("MapperEvents", self.dispatch._events)._new_mapper_instance(
class_, self
)
self._configure_inheritance()
self._configure_class_instrumentation()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
self.registry._flag_new_mapper(self)
self._log("constructed")
self._expire_memoizations()
self.dispatch.after_mapper_constructed(self, self.class_)
def _prefer_eager_defaults(self, dialect, table):
if self.eager_defaults == "auto":
if not table.implicit_returning:
return False
return (
table in self._server_default_col_keys
and dialect.insert_executemany_returning
)
else:
return self.eager_defaults
def _gen_cache_key(self, anon_map, bindparams):
return (self,)
# ### BEGIN
# ATTRIBUTE DECLARATIONS START HERE
is_mapper = True
"""Part of the inspection API."""
represents_outer_join = False
registry: _RegistryType
@property
def mapper(self) -> Mapper[_O]:
"""Part of the inspection API.
Returns self.
"""
return self
@property
def entity(self):
r"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
class_: Type[_O]
"""The class to which this :class:`_orm.Mapper` is mapped."""
_identity_class: Type[_O]
_delete_orphans: List[Tuple[str, Type[Any]]]
_dependency_processors: List[_DependencyProcessor]
_memoized_values: Dict[Any, Callable[[], Any]]
_inheriting_mappers: util.WeakSequence[Mapper[Any]]
_all_tables: Set[TableClause]
_polymorphic_attr_key: Optional[str]
_pks_by_table: Dict[FromClause, OrderedSet[ColumnClause[Any]]]
_cols_by_table: Dict[FromClause, OrderedSet[ColumnElement[Any]]]
_props: util.OrderedDict[str, MapperProperty[Any]]
_init_properties: Dict[str, MapperProperty[Any]]
_columntoproperty: _ColumnMapping
_set_polymorphic_identity: Optional[Callable[[InstanceState[_O]], None]]
_validate_polymorphic_identity: Optional[
Callable[[Mapper[_O], InstanceState[_O], _InstanceDict], None]
]
tables: Sequence[TableClause]
"""A sequence containing the collection of :class:`_schema.Table`
or :class:`_schema.TableClause` objects which this :class:`_orm.Mapper`
is aware of.
If the mapper is mapped to a :class:`_expression.Join`, or an
:class:`_expression.Alias`
representing a :class:`_expression.Select`, the individual
:class:`_schema.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators: util.immutabledict[str, Tuple[str, Dict[str, Any]]]
"""An immutable dictionary of attributes which have been decorated
using the :func:`_orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
"""
always_refresh: bool
allow_partial_pks: bool
version_id_col: Optional[ColumnElement[Any]]
with_polymorphic: Optional[
Tuple[
Union[Literal["*"], Sequence[Union[Mapper[Any], Type[Any]]]],
Optional[FromClause],
]
]
version_id_generator: Optional[Union[Literal[False], Callable[[Any], Any]]]
local_table: FromClause
"""The immediate :class:`_expression.FromClause` to which this
:class:`_orm.Mapper` refers.
Typically is an instance of :class:`_schema.Table`, may be any
:class:`.FromClause`.
The "local" table is the
selectable that the :class:`_orm.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, :attr:`.Mapper.local_table` will be the same
as :attr:`.Mapper.persist_selectable`. For inheriting mappers,
:attr:`.Mapper.local_table` refers to the specific portion of
:attr:`.Mapper.persist_selectable` that includes the columns to which
this :class:`.Mapper` is loading/persisting, such as a particular
:class:`.Table` within a join.
.. seealso::
:attr:`_orm.Mapper.persist_selectable`.
:attr:`_orm.Mapper.selectable`.
"""
persist_selectable: FromClause
"""The :class:`_expression.FromClause` to which this :class:`_orm.Mapper`
is mapped.
Typically is an instance of :class:`_schema.Table`, may be any
:class:`.FromClause`.
The :attr:`_orm.Mapper.persist_selectable` is similar to
:attr:`.Mapper.local_table`, but represents the :class:`.FromClause` that
represents the inheriting class hierarchy overall in an inheritance
scenario.
    :attr:`.Mapper.persist_selectable` is also separate from the
    :attr:`.Mapper.selectable` attribute, the latter of which may be an
    alternate subquery used for selecting columns.
    :attr:`.Mapper.persist_selectable` is oriented towards columns that
will be written on a persist operation.
.. seealso::
:attr:`_orm.Mapper.selectable`.
:attr:`_orm.Mapper.local_table`.
"""
inherits: Optional[Mapper[Any]]
"""References the :class:`_orm.Mapper` which this :class:`_orm.Mapper`
inherits from, if any.
"""
inherit_condition: Optional[ColumnElement[bool]]
configured: bool = False
"""Represent ``True`` if this :class:`_orm.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete: bool
"""Represent ``True`` if this :class:`_orm.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key: Tuple[ColumnElement[Any], ...]
"""An iterable containing the collection of :class:`_schema.Column`
objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`_orm.Mapper`.
This list is against the selectable in
:attr:`_orm.Mapper.persist_selectable`.
In the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a
:class:`_expression.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`_expression.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`_orm.Mapper`
features a ``primary_key`` argument that can override what the
:class:`_orm.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager: ClassManager[_O]
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`_orm.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single: bool
"""Represent ``True`` if this :class:`_orm.Mapper` is a single table
inheritance mapper.
    When this flag is set, :attr:`_orm.Mapper.local_table` refers to the same
    selectable as that of the inherited mapper, since no separate table is mapped.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on: Optional[KeyedColumnElement[Any]]
"""The :class:`_schema.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`_orm.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`_schema.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map: Dict[Any, Mapper[Any]]
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`_orm.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`_orm.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity: Optional[Any]
"""Represent an identifier which is matched against the
:attr:`_orm.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`_orm.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper: Mapper[Any]
"""The base-most :class:`_orm.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`_orm.Mapper`. In an inheritance scenario, it references
the :class:`_orm.Mapper` which is parent to all other :class:`_orm.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns: ReadOnlyColumnCollection[str, Column[Any]]
"""A collection of :class:`_schema.Column` or other scalar expression
objects maintained by this :class:`_orm.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`_schema.Table` object,
except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`_schema.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
c: ReadOnlyColumnCollection[str, Column[Any]]
"""A synonym for :attr:`_orm.Mapper.columns`."""
@util.memoized_property
def _path_registry(self) -> _CachingEntityRegistry:
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inheriting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'"
% (self.class_.__name__, self.inherits.class_.__name__)
)
self.dispatch._update(self.inherits.dispatch)
if self.single:
self.persist_selectable = self.inherits.persist_selectable
elif self.local_table is not self.inherits.local_table:
if self.concrete:
self.persist_selectable = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
# full table which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
try:
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table, self.local_table
)
except sa_exc.NoForeignKeysError as nfe:
assert self.inherits.local_table is not None
assert self.local_table is not None
raise sa_exc.NoForeignKeysError(
"Can't determine the inherit condition "
"between inherited table '%s' and "
"inheriting "
"table '%s'; tables have no "
"foreign key relationships established. "
"Please ensure the inheriting table has "
"a foreign key relationship to the "
"inherited "
"table, or provide an "
"'on clause' using "
"the 'inherit_condition' mapper argument."
% (
self.inherits.local_table.description,
self.local_table.description,
)
) from nfe
except sa_exc.AmbiguousForeignKeysError as afe:
assert self.inherits.local_table is not None
assert self.local_table is not None
raise sa_exc.AmbiguousForeignKeysError(
"Can't determine the inherit condition "
"between inherited table '%s' and "
"inheriting "
"table '%s'; tables have more than one "
"foreign key relationship established. "
"Please specify the 'on clause' using "
"the 'inherit_condition' mapper argument."
% (
self.inherits.local_table.description,
self.local_table.description,
)
) from afe
assert self.inherits.persist_selectable is not None
self.persist_selectable = sql.join(
self.inherits.persist_selectable,
self.local_table,
self.inherit_condition,
)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = sql_util.criterion_as_pairs(
self.persist_selectable.onclause,
consider_as_foreign_keys=fks,
)
else:
self.persist_selectable = self.local_table
if self.polymorphic_identity is None:
self._identity_class = self.class_
if (
not self.polymorphic_abstract
and self.inherits.base_mapper.polymorphic_on is not None
):
util.warn(
f"{self} does not indicate a 'polymorphic_identity', "
"yet is part of an inheritance hierarchy that has a "
f"'polymorphic_on' column of "
f"'{self.inherits.base_mapper.polymorphic_on}'. "
"If this is an intermediary class that should not be "
"instantiated, the class may either be left unmapped, "
"or may include the 'polymorphic_abstract=True' "
"parameter in its Mapper arguments. To leave the "
"class unmapped when using Declarative, set the "
"'__abstract__ = True' attribute on the class."
)
elif self.concrete:
self._identity_class = self.class_
else:
self._identity_class = self.inherits._identity_class
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif (
self.inherits.version_id_col is not None
and self.version_id_col is not self.inherits.version_id_col
):
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning."
% (
self.version_id_col.description,
self.inherits.version_id_col.description,
)
)
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self.passive_deletes = (
self.inherits.passive_deletes or self.passive_deletes
)
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
if self.polymorphic_identity in self.polymorphic_map:
util.warn(
"Reassigning polymorphic association for identity %r "
"from %r to %r: Check for duplicate use of %r as "
"value for polymorphic_identity."
% (
self.polymorphic_identity,
self.polymorphic_map[self.polymorphic_identity],
self,
self.polymorphic_identity,
)
)
self.polymorphic_map[self.polymorphic_identity] = self
if self.polymorphic_load and self.concrete:
raise sa_exc.ArgumentError(
"polymorphic_load is not currently supported "
"with concrete table inheritance"
)
if self.polymorphic_load == "inline":
self.inherits._add_with_polymorphic_subclass(self)
elif self.polymorphic_load == "selectin":
pass
elif self.polymorphic_load is not None:
raise sa_exc.ArgumentError(
"unknown argument for polymorphic_load: %r"
% self.polymorphic_load
)
else:
self._all_tables = set()
self.base_mapper = self
assert self.local_table is not None
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.persist_selectable is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a persist_selectable specified."
% self
)
def _set_with_polymorphic(
self, with_polymorphic: Optional[_WithPolymorphicArg]
) -> None:
if with_polymorphic == "*":
self.with_polymorphic = ("*", None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(with_polymorphic[0], (str, tuple, list)):
self.with_polymorphic = cast(
"""Tuple[
Union[
Literal["*"],
Sequence[Union["Mapper[Any]", Type[Any]]],
],
Optional["FromClause"],
]""",
with_polymorphic,
)
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError(
f"Invalid setting for with_polymorphic: {with_polymorphic!r}"
)
else:
self.with_polymorphic = None
if self.with_polymorphic and self.with_polymorphic[1] is not None:
self.with_polymorphic = (
self.with_polymorphic[0],
coercions.expect(
roles.FromClauseRole,
self.with_polymorphic[1],
),
)
if self.configured:
self._expire_memoizations()
def _add_with_polymorphic_subclass(self, mapper):
subcl = mapper.class_
if self.with_polymorphic is None:
self._set_with_polymorphic((subcl,))
elif self.with_polymorphic[0] != "*":
assert isinstance(self.with_polymorphic[0], tuple)
self._set_with_polymorphic(
(self.with_polymorphic[0] + (subcl,), self.with_polymorphic[1])
)
def _set_concrete_base(self, mapper):
"""Set the given :class:`_orm.Mapper` as the 'inherits' for this
:class:`_orm.Mapper`, assuming this :class:`_orm.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_class_instrumentation(self):
"""Associate this Mapper with the
given class and entity name.
Subsequent calls to ``class_mapper()`` for the ``class_`` / ``entity``
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
# we expect that declarative has applied the class manager
# already and set up a registry. if this is None,
# this raises as of 2.0.
manager = attributes.opt_manager_of_class(self.class_)
if manager is None or not manager.registry:
raise sa_exc.InvalidRequestError(
"The _mapper() function and Mapper() constructor may not be "
"invoked directly outside of a declarative registry."
" Please use the sqlalchemy.orm.registry.map_imperatively() "
"function for a classical mapping."
)
self.dispatch.instrument_class(self, self.class_)
# this invokes the class_instrument event and sets up
# the __init__ method. documented behavior is that this must
# occur after the instrument_class event above.
# yes two events with the same two words reversed and different APIs.
# :(
manager = instrumentation.register_class(
self.class_,
mapper=self,
expired_attribute_loader=util.partial(
loading._load_scalar_attributes, self
),
# finalize flag means instrument the __init__ method
# and call the class_instrument event
finalize=True,
)
self.class_manager = manager
assert manager.registry is not None
self.registry = manager.registry
# The remaining members can be added by any mapper,
# e_name None or not.
if manager.mapper is None:
return
event.listen(manager, "init", _event_on_init, raw=True)
for key, method in util.iterate_attributes(self.class_):
if key == "__init__" and hasattr(method, "_sa_original_init"):
method = method._sa_original_init
if hasattr(method, "__func__"):
method = method.__func__
if callable(method):
if hasattr(method, "__sa_reconstructor__"):
self._reconstructor = method
event.listen(manager, "load", _event_on_load, raw=True)
elif hasattr(method, "__sa_validators__"):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
if name in self.validators:
raise sa_exc.InvalidRequestError(
"A validation function for mapped "
"attribute %r on mapper %s already exists."
% (name, self)
)
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
def _set_dispose_flags(self) -> None:
self.configured = True
self._ready_for_configure = True
self._dispose_called = True
self.__dict__.pop("_configure_failed", None)
def _str_arg_to_mapped_col(self, argname: str, key: str) -> Column[Any]:
try:
prop = self._props[key]
except KeyError as err:
raise sa_exc.ArgumentError(
f"Can't determine {argname} column '{key}' - "
"no attribute is mapped to this name."
) from err
try:
expr = prop.expression
except AttributeError as ae:
raise sa_exc.ArgumentError(
f"Can't determine {argname} column '{key}'; "
"property does not refer to a single mapped Column"
) from ae
if not isinstance(expr, Column):
raise sa_exc.ArgumentError(
f"Can't determine {argname} column '{key}'; "
"property does not refer to a single "
"mapped Column"
)
return expr
def _configure_pks(self) -> None:
self.tables = sql_util.find_tables(self.persist_selectable)
self._all_tables.update(t for t in self.tables)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(
chain(*[col.proxy_set for col in self._columntoproperty])
)
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
for fc in set(self.tables).union([self.persist_selectable]):
if fc.primary_key and pk_cols.issuperset(fc.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[fc] = util.ordered_column_set( # type: ignore # noqa: E501
fc.primary_key
).intersection(
pk_cols
)
self._cols_by_table[fc] = util.ordered_column_set(fc.c).intersection( # type: ignore # noqa: E501
all_cols
)
if self._primary_key_argument:
coerced_pk_arg = [
(
self._str_arg_to_mapped_col("primary_key", c)
if isinstance(c, str)
else c
)
for c in (
coercions.expect(
roles.DDLConstraintColumnRole,
coerce_pk,
argname="primary_key",
)
for coerce_pk in self._primary_key_argument
)
]
else:
coerced_pk_arg = None
# if explicit PK argument sent, add those columns to the
# primary key mappings
if coerced_pk_arg:
for k in coerced_pk_arg:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif (
self.persist_selectable not in self._pks_by_table
or len(self._pks_by_table[self.persist_selectable]) == 0
):
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
elif self.local_table not in self._pks_by_table and isinstance(
self.local_table, schema.Table
):
util.warn(
"Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description
)
if (
self.inherits
and not self.concrete
and not self._primary_key_argument
):
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or persist_selectable pks
primary_key: Collection[ColumnElement[Any]]
if coerced_pk_arg:
primary_key = [
cc if cc is not None else c
for cc, c in (
(self.persist_selectable.corresponding_column(c), c)
for c in coerced_pk_arg
)
]
else:
# if heuristically determined PKs, reduce to the minimal set
# of columns by eliminating FK->PK pairs for a multi-table
# expression. May over-reduce for some kinds of UNIONs
# / CTEs; use explicit PK argument for these special cases
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.persist_selectable],
ignore_nonexistent_tables=True,
)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = {
self._columntoproperty[col]
for col in self._columntoproperty
if self._columntoproperty[col] not in self._identity_key_props
and (
not hasattr(col, "table")
or col.table not in self._cols_by_table
)
}
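    # Illustrative sketch (hypothetical mapping assumed): the explicit
    # ``primary_key`` argument handled in _configure_pks() above corresponds
    # to configurations such as::
    #
    #     class UserView(Base):
    #         __table__ = user_view_selectable  # no PK constraint of its own
    #         __mapper_args__ = {"primary_key": [user_view_selectable.c.id]}
    #
    # where the mapped selectable carries no PRIMARY KEY constraint and the
    # identity columns must be named explicitly.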
def _configure_properties(self) -> None:
self.columns = self.c = sql_base.ColumnCollection() # type: ignore
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to MapperProperty
self._columntoproperty = _ColumnMapping(self)
explicit_col_props_by_column: Dict[
KeyedColumnElement[Any], Tuple[str, ColumnProperty[Any]]
] = {}
explicit_col_props_by_key: Dict[str, ColumnProperty[Any]] = {}
# step 1: go through properties that were explicitly passed
# in the properties dictionary. For Columns that are local, put them
# aside in a separate collection we will reconcile with the Table
# that's given. For other properties, set them up in _props now.
if self._init_properties:
for key, prop_arg in self._init_properties.items():
if not isinstance(prop_arg, MapperProperty):
possible_col_prop = self._make_prop_from_column(
key, prop_arg
)
else:
possible_col_prop = prop_arg
# issue #8705. if the explicit property is actually a
# Column that is local to the local Table, don't set it up
# in ._props yet, integrate it into the order given within
# the Table.
_map_as_property_now = True
if isinstance(possible_col_prop, properties.ColumnProperty):
for given_col in possible_col_prop.columns:
if self.local_table.c.contains_column(given_col):
_map_as_property_now = False
explicit_col_props_by_key[key] = possible_col_prop
explicit_col_props_by_column[given_col] = (
key,
possible_col_prop,
)
if _map_as_property_now:
self._configure_property(
key,
possible_col_prop,
init=False,
)
# step 2: pull properties from the inherited mapper. reconcile
# columns with those which are explicit above. for properties that
# are only in the inheriting mapper, set them up as local props
if self.inherits:
for key, inherited_prop in self.inherits._props.items():
if self._should_exclude(key, key, local=False, column=None):
continue
incoming_prop = explicit_col_props_by_key.get(key)
if incoming_prop:
new_prop = self._reconcile_prop_with_incoming_columns(
key,
inherited_prop,
warn_only=False,
incoming_prop=incoming_prop,
)
explicit_col_props_by_key[key] = new_prop
for inc_col in incoming_prop.columns:
explicit_col_props_by_column[inc_col] = (
key,
new_prop,
)
elif key not in self._props:
self._adapt_inherited_property(key, inherited_prop, False)
# step 3. Iterate through all columns in the persist selectable.
# this includes not only columns in the local table / fromclause,
# but also those columns in the superclass table if we are joined
# inh or single inh mapper. map these columns as well. additional
# reconciliation against inherited columns occurs here also.
for column in self.persist_selectable.columns:
if column in explicit_col_props_by_column:
# column was explicitly passed to properties; configure
# it now in the order in which it corresponds to the
# Table / selectable
key, prop = explicit_col_props_by_column[column]
self._configure_property(key, prop, init=False)
continue
elif column in self._columntoproperty:
continue
column_key = (self.column_prefix or "") + column.key
if self._should_exclude(
column.key,
column_key,
local=self.local_table.c.contains_column(column),
column=column,
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(
column_key,
column,
init=False,
setparent=True,
)
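    # Illustrative sketch (hypothetical imperative mapping assumed): the
    # "explicitly passed" properties consumed in step 1 of
    # _configure_properties() above come from configurations such as::
    #
    #     mapper_registry.map_imperatively(
    #         User,
    #         user_table,
    #         properties={
    #             "name": user_table.c.user_name,
    #             "addresses": relationship(Address),
    #         },
    #     )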
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
polymorphic_key: Optional[str] = None
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, str):
# polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError as err:
raise sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on
) from err
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(
self.polymorphic_on, properties.ColumnProperty
):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
prop = self.polymorphic_on
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's persist_selectable
col = self.persist_selectable.corresponding_column(
self.polymorphic_on
)
if col is None:
# polymorphic_on doesn't derive from any
# column/expression isn't present in the mapped
# table. we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either persist_selectable or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None
or self.with_polymorphic[1] is None
or self.with_polymorphic[1].corresponding_column(col)
is None
):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly"
% col.description
)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
                    # with_polymorphic selectable, as when using
# polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, "key", None)
if key:
if self._should_exclude(key, key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" % key
)
else:
self.polymorphic_on = col = col.label("_sa_polymorphic_on")
key = col.key
prop = properties.ColumnProperty(col, _instrument=instrument)
self._configure_property(key, prop, init=init, setparent=True)
# the actual polymorphic_on should be the first public-facing
# column in the property
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.persist_selectable is mapper.persist_selectable:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = (
self.persist_selectable
).corresponding_column(mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
# instance's mapper is used so is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = (
mapper._set_polymorphic_identity
)
self._polymorphic_attr_key = (
mapper._polymorphic_attr_key
)
self._validate_polymorphic_identity = (
mapper._validate_polymorphic_identity
)
else:
self._set_polymorphic_identity = None
self._polymorphic_attr_key = None
return
if self.polymorphic_abstract and self.polymorphic_on is None:
raise sa_exc.InvalidRequestError(
"The Mapper.polymorphic_abstract parameter may only be used "
"on a mapper hierarchy which includes the "
"Mapper.polymorphic_on parameter at the base of the hierarchy."
)
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
# TODO: what happens if polymorphic_on column attribute name
# does not match .key?
polymorphic_identity = (
state.manager.mapper.polymorphic_identity
)
if (
polymorphic_identity is None
and state.manager.mapper.polymorphic_abstract
):
raise sa_exc.InvalidRequestError(
f"Can't instantiate class for {state.manager.mapper}; "
"mapper is marked polymorphic_abstract=True"
)
state.get_impl(polymorphic_key).set(
state,
dict_,
polymorphic_identity,
None,
)
self._polymorphic_attr_key = polymorphic_key
def _validate_polymorphic_identity(mapper, state, dict_):
if (
polymorphic_key in dict_
and dict_[polymorphic_key]
not in mapper._acceptable_polymorphic_identities
):
util.warn_limited(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly",
(state_str(state), dict_[polymorphic_key]),
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = (
_validate_polymorphic_identity
)
else:
self._polymorphic_attr_key = None
self._set_polymorphic_identity = None
_validate_polymorphic_identity = None
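    # Illustrative sketch (hypothetical single-table hierarchy assumed): the
    # setter assembled above is what populates the discriminator on new
    # instances for a mapping along the lines of::
    #
    #     class Employee(Base):
    #         __tablename__ = "employee"
    #         id = Column(Integer, primary_key=True)
    #         type = Column(String)
    #         __mapper_args__ = {
    #             "polymorphic_on": type,
    #             "polymorphic_identity": "employee",
    #         }
    #
    #     class Manager(Employee):
    #         __mapper_args__ = {"polymorphic_identity": "manager"}
    #
    # so that ``Manager()`` has ``type`` set to ``"manager"`` at construction
    # time via the "init" event.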
@HasMemoized.memoized_attribute
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@HasMemoized.memoized_attribute
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.persist_selectable is self.persist_selectable:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
@HasMemoized.memoized_attribute
def _prop_set(self):
return frozenset(self._props.values())
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _adapt_inherited_property(self, key, prop, init):
descriptor_props = util.preloaded.orm_descriptor_props
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
# determine if the class implements this attribute; if not,
# or if it is implemented by the attribute that is handling the
# given superclass-mapped property, then we need to report that we
# can't use this at the instance level since we are a concrete
# mapper and we don't map this. don't trip user-defined
# descriptors that might have side effects when invoked.
implementing_attribute = self.class_manager._get_class_attr_mro(
key, prop
)
if implementing_attribute is prop or (
isinstance(
implementing_attribute, attributes.InstrumentedAttribute
)
and implementing_attribute._parententity is prop.parent
):
self._configure_property(
key,
descriptor_props.ConcreteInheritedProperty(),
init=init,
setparent=True,
)
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _configure_property(
self,
key: str,
prop_arg: Union[KeyedColumnElement[Any], MapperProperty[Any]],
*,
init: bool = True,
setparent: bool = True,
warn_for_existing: bool = False,
) -> MapperProperty[Any]:
descriptor_props = util.preloaded.orm_descriptor_props
self._log(
"_configure_property(%s, %s)", key, prop_arg.__class__.__name__
)
if not isinstance(prop_arg, MapperProperty):
prop: MapperProperty[Any] = self._property_from_column(
key, prop_arg
)
else:
prop = prop_arg
if isinstance(prop, properties.ColumnProperty):
col = self.persist_selectable.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.persist_selectable._refresh_for_new_column(col)
col = self.persist_selectable.corresponding_column(
prop.columns[0]
)
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, "_readonly_props") and (
not hasattr(col, "table")
or col.table not in self._cols_by_table
):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if (
hasattr(self, "_cols_by_table")
and col.table in self._cols_by_table
and col not in self._cols_by_table[col.table]
):
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, "_is_polymorphic_discriminator"):
prop._is_polymorphic_discriminator = (
col is self.polymorphic_on
or prop.columns[0] is self.polymorphic_on
)
if isinstance(col, expression.Label):
# new in 1.4, get column property against expressions
# to be addressable in subqueries
col.key = col._tq_key_label = key
self.columns.add(col, key)
for col in prop.columns:
for proxy_col in col.proxy_set:
self._columntoproperty[proxy_col] = prop
if getattr(prop, "key", key) != key:
util.warn(
f"ORM mapped property {self.class_.__name__}.{prop.key} being "
"assigned to attribute "
f"{key!r} is already associated with "
f"attribute {prop.key!r}. The attribute will be de-associated "
f"from {prop.key!r}."
)
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and getattr(
self._props[key], "_mapped_by_synonym", False
):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
# replacement cases
# case one: prop is replacing a prop that we have mapped. this is
# independent of whatever might be in the actual class dictionary
if (
key in self._props
and not isinstance(
self._props[key], descriptor_props.ConcreteInheritedProperty
)
and not isinstance(prop, descriptor_props.SynonymProperty)
):
if warn_for_existing:
util.warn_deprecated(
f"User-placed attribute {self.class_.__name__}.{key} on "
f"{self} is replacing an existing ORM-mapped attribute. "
"Behavior is not fully defined in this case. This "
"use is deprecated and will raise an error in a future "
"release",
"2.0",
)
oldprop = self._props[key]
self._path_registry.pop(oldprop, None)
# case two: prop is replacing an attribute on the class of some kind.
# we have to be more careful here since it's normal when using
# Declarative that all the "declared attributes" on the class
# get replaced.
elif (
warn_for_existing
and self.class_.__dict__.get(key, None) is not None
and not isinstance(prop, descriptor_props.SynonymProperty)
and not isinstance(
self._props.get(key, None),
descriptor_props.ConcreteInheritedProperty,
)
):
util.warn_deprecated(
f"User-placed attribute {self.class_.__name__}.{key} on "
f"{self} is replacing an existing class-bound "
"attribute of the same name. "
"Behavior is not fully defined in this case. This "
"use is deprecated and will raise an error in a future "
"release",
"2.0",
)
self._props[key] = prop
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
return prop
def _make_prop_from_column(
self,
key: str,
column: Union[
Sequence[KeyedColumnElement[Any]], KeyedColumnElement[Any]
],
) -> ColumnProperty[Any]:
columns = util.to_list(column)
mapped_column = []
for c in columns:
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.persist_selectable._refresh_for_new_column(mc)
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c)
)
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
def _reconcile_prop_with_incoming_columns(
self,
key: str,
existing_prop: MapperProperty[Any],
warn_only: bool,
incoming_prop: Optional[ColumnProperty[Any]] = None,
single_column: Optional[KeyedColumnElement[Any]] = None,
) -> ColumnProperty[Any]:
if incoming_prop and (
self.concrete
or not isinstance(existing_prop, properties.ColumnProperty)
):
return incoming_prop
existing_column = existing_prop.columns[0]
if incoming_prop and existing_column in incoming_prop.columns:
return incoming_prop
if incoming_prop is None:
assert single_column is not None
incoming_column = single_column
equated_pair_key = (existing_prop.columns[0], incoming_column)
else:
assert single_column is None
incoming_column = incoming_prop.columns[0]
equated_pair_key = (incoming_column, existing_prop.columns[0])
if (
(
not self._inherits_equated_pairs
or (equated_pair_key not in self._inherits_equated_pairs)
)
and not existing_column.shares_lineage(incoming_column)
and existing_column is not self.version_id_col
and incoming_column is not self.version_id_col
):
msg = (
"Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly."
% (
existing_prop.columns[-1],
incoming_column,
key,
)
)
if warn_only:
util.warn(msg)
else:
raise sa_exc.InvalidRequestError(msg)
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
new_prop = existing_prop.copy()
new_prop.columns.insert(0, incoming_column)
self._log(
"inserting column to existing list "
"in properties.ColumnProperty %s",
key,
)
return new_prop # type: ignore
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _property_from_column(
self,
key: str,
column: KeyedColumnElement[Any],
) -> ColumnProperty[Any]:
"""generate/update a :class:`.ColumnProperty` given a
:class:`_schema.Column` or other SQL expression object."""
descriptor_props = util.preloaded.orm_descriptor_props
prop = self._props.get(key)
if isinstance(prop, properties.ColumnProperty):
return self._reconcile_prop_with_incoming_columns(
key,
prop,
single_column=column,
warn_only=prop.parent is not self,
)
elif prop is None or isinstance(
prop, descriptor_props.ConcreteInheritedProperty
):
return self._make_prop_from_column(key, column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." % (key, self, column.key, prop)
)
@util.langhelpers.tag_method_for_warnings(
"This warning originated from the `configure_mappers()` process, "
"which was invoked automatically in response to a user-initiated "
"operation.",
sa_exc.SAWarning,
)
def _check_configure(self) -> None:
if self.registry._new_mappers:
_configure_registries({self.registry}, cascade=True)
def _post_configure_properties(self) -> None:
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
l = [(key, prop) for key, prop in self._props.items()]
for key, prop in l:
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(
self, key: str, prop: Union[Column[Any], MapperProperty[Any]]
) -> None:
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
"""
prop = self._configure_property(
key, prop, init=self.configured, warn_for_existing=True
)
assert isinstance(prop, MapperProperty)
self._init_properties[key] = prop
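    # Usage sketch (hypothetical classes assumed): ``add_property()`` above
    # is the runtime equivalent of an entry in the ``properties`` dictionary,
    # e.g.::
    #
    #     user_mapper = inspect(User)
    #     user_mapper.add_property("addresses", relationship(Address))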
def _expire_memoizations(self) -> None:
for mapper in self.iterate_to_root():
mapper._reset_memoizations()
@property
def _log_desc(self) -> str:
return (
"("
+ self.class_.__name__
+ "|"
+ (
self.local_table is not None
and self.local_table.description
or str(self.local_table)
)
+ ")"
)
def _log(self, msg: str, *args: Any) -> None:
self.logger.info("%s " + msg, *((self._log_desc,) + args))
def _log_debug(self, msg: str, *args: Any) -> None:
self.logger.debug("%s " + msg, *((self._log_desc,) + args))
def __repr__(self) -> str:
return "<Mapper at 0x%x; %s>" % (id(self), self.class_.__name__)
def __str__(self) -> str:
return "Mapper[%s(%s)]" % (
self.class_.__name__,
(
self.local_table.description
if self.local_table is not None
else self.persist_selectable.description
),
)
def _is_orphan(self, state: InstanceState[_O]) -> bool:
orphan_possible = False
for mapper in self.iterate_to_root():
for key, cls in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity
)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key: str) -> bool:
return key in self._props
def get_property(
self, key: str, _configure_mappers: bool = False
) -> MapperProperty[Any]:
"""return a MapperProperty associated with the given key."""
if _configure_mappers:
self._check_configure()
try:
return self._props[key]
except KeyError as err:
raise sa_exc.InvalidRequestError(
f"Mapper '{self}' has no property '{key}'. If this property "
"was indicated from other mappers or configure events, ensure "
"registry.configure() has been called."
) from err
def get_property_by_column(
self, column: ColumnElement[_T]
) -> MapperProperty[_T]:
"""Given a :class:`_schema.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
return iter(self._props.values())
def _mappers_from_spec(
self, spec: Any, selectable: Optional[FromClause]
) -> Sequence[Mapper[Any]]:
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == "*":
mappers = list(self.self_and_descendants)
elif spec:
mapper_set: Set[Mapper[Any]] = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" % (m, self)
)
if selectable is None:
mapper_set.update(m.iterate_to_root())
else:
mapper_set.add(m)
mappers = [m for m in self.self_and_descendants if m in mapper_set]
else:
mappers = []
if selectable is not None:
tables = set(
sql_util.find_tables(selectable, include_aliases=True)
)
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(
self, mappers: Iterable[Mapper[Any]], innerjoin: bool
) -> FromClause:
"""given a list of mappers (assumed to be within this mapper's
inheritance hierarchy), construct an outerjoin amongst those mapper's
mapped tables.
"""
from_obj = self.persist_selectable
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used."
)
elif not m.single:
if innerjoin:
from_obj = from_obj.join(
m.local_table, m.inherit_condition
)
else:
from_obj = from_obj.outerjoin(
m.local_table, m.inherit_condition
)
return from_obj
@HasMemoized.memoized_attribute
def _version_id_has_server_side_value(self) -> bool:
vid_col = self.version_id_col
if vid_col is None:
return False
elif not isinstance(vid_col, Column):
return True
else:
return vid_col.server_default is not None or (
vid_col.default is not None
and (
not vid_col.default.is_scalar
and not vid_col.default.is_callable
)
)
@HasMemoized.memoized_attribute
def _single_table_criteria_component(self):
if self.single and self.inherits and self.polymorphic_on is not None:
hierarchy = tuple(
m.polymorphic_identity
for m in self.self_and_descendants
if not m.polymorphic_abstract
)
return (
self.polymorphic_on._annotate(
{"parententity": self, "parentmapper": self}
),
hierarchy,
)
else:
return None
@HasMemoized.memoized_attribute
def _single_table_criterion(self):
component = self._single_table_criteria_component
if component is not None:
return component[0].in_(component[1])
else:
return None
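    # Illustrative sketch (hypothetical single-table hierarchy assumed): for
    # a ``Manager`` subclass sharing the ``employee`` table with a string
    # discriminator column ``type``, the criterion built above renders
    # roughly as ``employee.type IN ('manager', ...)``, which queries
    # against ``Manager`` add to their WHERE clause.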
@HasMemoized.memoized_attribute
def _has_aliased_polymorphic_fromclause(self):
"""return True if with_polymorphic[1] is an aliased fromclause,
like a subquery.
As of #8168, polymorphic adaption with ORMAdapter is used only
if this is present.
"""
return self.with_polymorphic and isinstance(
self.with_polymorphic[1],
expression.AliasedReturnsRows,
)
@HasMemoized.memoized_attribute
def _should_select_with_poly_adapter(self):
"""determine if _MapperEntity or _ORMColumnEntity will need to use
polymorphic adaption when setting up a SELECT as well as fetching
rows for mapped classes and subclasses against this Mapper.
moved here from context.py for #8456 to generalize the ruleset
for this condition.
"""
# this has been simplified as of #8456.
# rule is: if we have a with_polymorphic or a concrete-style
# polymorphic selectable, *or* if the base mapper has either of those,
# we turn on the adaption thing. if not, we do *no* adaption.
#
# (UPDATE for #8168: the above comment was not accurate, as we were
# still saying "do polymorphic" if we were using an auto-generated
# flattened JOIN for with_polymorphic.)
#
# this splits the behavior among the "regular" joined inheritance
# and single inheritance mappers, vs. the "weird / difficult"
# concrete and joined inh mappings that use a with_polymorphic of
# some kind or polymorphic_union.
#
# note we have some tests in test_polymorphic_rel that query against
# a subclass, then refer to the superclass that has a with_polymorphic
# on it (such as test_join_from_polymorphic_explicit_aliased_three).
# these tests actually adapt the polymorphic selectable (like, the
# UNION or the SELECT subquery with JOIN in it) to be just the simple
# subclass table. Hence even if we are a "plain" inheriting mapper
# but our base has a wpoly on it, we turn on adaption. This is a
# legacy case we should probably disable.
#
#
# UPDATE: simplified way more as of #8168. polymorphic adaption
# is turned off even if with_polymorphic is set, as long as there
# is no user-defined aliased selectable / subquery configured.
# this scales back the use of polymorphic adaption in practice
# to basically no cases except for concrete inheritance with a
# polymorphic base class.
#
return (
self._has_aliased_polymorphic_fromclause
or self._requires_row_aliasing
or (self.base_mapper._has_aliased_polymorphic_fromclause)
or self.base_mapper._requires_row_aliasing
)
@HasMemoized.memoized_attribute
def _with_polymorphic_mappers(self) -> Sequence[Mapper[Any]]:
self._check_configure()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@HasMemoized.memoized_attribute
def _post_inspect(self):
"""This hook is invoked by attribute inspection.
E.g. when Query calls:
coercions.expect(roles.ColumnsClauseRole, ent, keep_inspect=True)
        This allows the inspection process to run a configure mappers hook.
"""
self._check_configure()
@HasMemoized_ro_memoized_attribute
def _with_polymorphic_selectable(self) -> FromClause:
if not self.with_polymorphic:
return self.persist_selectable
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable), False
)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`_orm.Mapper` objects included in the
default "polymorphic" query.
"""
@HasMemoized_ro_memoized_attribute
def _insert_cols_evaluating_none(self):
return {
table: frozenset(
col for col in columns if col.type.should_evaluate_none
)
for table, columns in self._cols_by_table.items()
}
@HasMemoized.memoized_attribute
def _insert_cols_as_none(self):
return {
table: frozenset(
col.key
for col in columns
if not col.primary_key
and not col.server_default
and not col.default
and not col.type.should_evaluate_none
)
for table, columns in self._cols_by_table.items()
}
@HasMemoized.memoized_attribute
def _propkey_to_col(self):
return {
table: {self._columntoproperty[col].key: col for col in columns}
for table, columns in self._cols_by_table.items()
}
@HasMemoized.memoized_attribute
def _pk_keys_by_table(self):
return {
table: frozenset([col.key for col in pks])
for table, pks in self._pks_by_table.items()
}
@HasMemoized.memoized_attribute
def _pk_attr_keys_by_table(self):
return {
table: frozenset([self._columntoproperty[col].key for col in pks])
for table, pks in self._pks_by_table.items()
}
@HasMemoized.memoized_attribute
def _server_default_cols(
self,
) -> Mapping[FromClause, FrozenSet[Column[Any]]]:
return {
table: frozenset(
[
col
for col in cast("Iterable[Column[Any]]", columns)
if col.server_default is not None
or (
col.default is not None
and col.default.is_clause_element
)
]
)
for table, columns in self._cols_by_table.items()
}
@HasMemoized.memoized_attribute
def _server_onupdate_default_cols(
self,
) -> Mapping[FromClause, FrozenSet[Column[Any]]]:
return {
table: frozenset(
[
col
for col in cast("Iterable[Column[Any]]", columns)
if col.server_onupdate is not None
or (
col.onupdate is not None
and col.onupdate.is_clause_element
)
]
)
for table, columns in self._cols_by_table.items()
}
@HasMemoized.memoized_attribute
def _server_default_col_keys(self) -> Mapping[FromClause, FrozenSet[str]]:
return {
table: frozenset(col.key for col in cols if col.key is not None)
for table, cols in self._server_default_cols.items()
}
@HasMemoized.memoized_attribute
def _server_onupdate_default_col_keys(
self,
) -> Mapping[FromClause, FrozenSet[str]]:
return {
table: frozenset(col.key for col in cols if col.key is not None)
for table, cols in self._server_onupdate_default_cols.items()
}
@HasMemoized.memoized_attribute
def _server_default_plus_onupdate_propkeys(self) -> Set[str]:
result: Set[str] = set()
col_to_property = self._columntoproperty
for table, columns in self._server_default_cols.items():
result.update(
col_to_property[col].key
for col in columns.intersection(col_to_property)
)
for table, columns in self._server_onupdate_default_cols.items():
result.update(
col_to_property[col].key
for col in columns.intersection(col_to_property)
)
return result
@HasMemoized.memoized_instancemethod
def __clause_element__(self):
annotations: Dict[str, Any] = {
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
}
if self.persist_selectable is not self.local_table:
# joined table inheritance, with polymorphic selectable,
# etc.
annotations["dml_table"] = self.local_table._annotate(
{
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
}
)._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
return self.selectable._annotate(annotations)._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
@util.memoized_property
def select_identity_token(self):
return (
expression.null()
._annotate(
{
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
"identity_token": True,
}
)
._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
)
@property
def selectable(self) -> FromClause:
"""The :class:`_schema.FromClause` construct this
:class:`_orm.Mapper` selects from by default.
Normally, this is equivalent to :attr:`.persist_selectable`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
"""
return self._with_polymorphic_selectable
def _with_polymorphic_args(
self,
spec: Any = None,
selectable: Union[Literal[False, None], FromClause] = False,
innerjoin: bool = False,
) -> Tuple[Sequence[Mapper[Any]], FromClause]:
if selectable not in (None, False):
selectable = coercions.expect(
roles.FromClauseRole,
selectable,
)
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers, innerjoin)
@HasMemoized.memoized_attribute
def _polymorphic_properties(self):
return list(
self._iterate_polymorphic_properties(
self._with_polymorphic_mappers
)
)
@property
def _all_column_expressions(self):
poly_properties = self._polymorphic_properties
adapter = self._polymorphic_adapter
return [
adapter.columns[c] if adapter else c
for prop in poly_properties
if isinstance(prop, properties.ColumnProperty)
and prop._renders_in_subqueries
for c in prop.columns
]
def _columns_plus_keys(self, polymorphic_mappers=()):
if polymorphic_mappers:
poly_properties = self._iterate_polymorphic_properties(
polymorphic_mappers
)
else:
poly_properties = self._polymorphic_properties
return [
(prop.key, prop.columns[0])
for prop in poly_properties
if isinstance(prop, properties.ColumnProperty)
]
@HasMemoized.memoized_attribute
def _polymorphic_adapter(self) -> Optional[orm_util.ORMAdapter]:
if self._has_aliased_polymorphic_fromclause:
return orm_util.ORMAdapter(
orm_util._TraceAdaptRole.MAPPER_POLYMORPHIC_ADAPTER,
self,
selectable=self.selectable,
equivalents=self._equivalent_columns,
limit_on_entity=False,
)
else:
return None
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(
*[
list(mapper.iterate_properties)
for mapper in [self] + mappers
]
)
):
if getattr(c, "_is_polymorphic_discriminator", False) and (
self.polymorphic_on is None
or c.columns[0] is not self.polymorphic_on
):
continue
yield c
@HasMemoized.memoized_attribute
def attrs(self) -> util.ReadOnlyProperties[MapperProperty[Any]]:
"""A namespace of all :class:`.MapperProperty` objects
associated this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
``User`` class which has ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
:class:`_orm.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
.. warning::
The :attr:`_orm.Mapper.attrs` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
.. seealso::
:attr:`_orm.Mapper.all_orm_descriptors`
"""
self._check_configure()
return util.ReadOnlyProperties(self._props)
@HasMemoized.memoized_attribute
def all_orm_descriptors(self) -> util.ReadOnlyProperties[InspectionAttr]:
"""A namespace of all :class:`.InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors`
associated with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`.InspectionAttr`. This includes
:class:`.QueryableAttribute`, as well as extension types such as
:class:`.hybrid_property`, :class:`.hybrid_method` and
:class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`.InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
The sorting of the attributes is based on the following rules:
1. Iterate through the class and its superclasses in order from
subclass to superclass (i.e. iterate through ``cls.__mro__``)
2. For each class, yield the attributes in the order in which they
appear in ``__dict__``, with the exception of those in step
3 below. The order will be the
same as that of the class' construction, with the exception
of attributes that were added after the fact by the application
or the mapper.
3. If a certain attribute key is also in the superclass ``__dict__``,
then it's included in the iteration for that class, and not the
class in which it first appeared.
The above process produces an ordering that is deterministic in terms
of the order in which attributes were assigned to the class.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when
referring to the collection of mapped properties via
:attr:`_orm.Mapper.attrs`.
.. warning::
The :attr:`_orm.Mapper.all_orm_descriptors`
accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over
``getattr(mapper.all_orm_descriptors, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs`
"""
return util.ReadOnlyProperties(
dict(self.class_manager._all_sqla_attributes())
)
@HasMemoized.memoized_attribute
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _pk_synonyms(self) -> Dict[str, str]:
"""return a dictionary of {syn_attribute_name: pk_attr_name} for
all synonyms that refer to primary key columns
"""
descriptor_props = util.preloaded.orm_descriptor_props
pk_keys = {prop.key for prop in self._identity_key_props}
return {
syn.key: syn.name
for k, syn in self._props.items()
if isinstance(syn, descriptor_props.SynonymProperty)
and syn.name in pk_keys
}
@HasMemoized.memoized_attribute
@util.preload_module("sqlalchemy.orm.descriptor_props")
def synonyms(self) -> util.ReadOnlyProperties[SynonymProperty[Any]]:
"""Return a namespace of all :class:`.Synonym`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
descriptor_props = util.preloaded.orm_descriptor_props
return self._filter_properties(descriptor_props.SynonymProperty)
@util.ro_non_memoized_property
def entity_namespace(self) -> _EntityNamespace:
return self.class_ # type: ignore[return-value]
@HasMemoized.memoized_attribute
def column_attrs(self) -> util.ReadOnlyProperties[ColumnProperty[Any]]:
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@HasMemoized.memoized_attribute
@util.preload_module("sqlalchemy.orm.relationships")
def relationships(
self,
) -> util.ReadOnlyProperties[RelationshipProperty[Any]]:
"""A namespace of all :class:`.Relationship` properties
maintained by this :class:`_orm.Mapper`.
.. warning::
the :attr:`_orm.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.relationships[somename]`` over
``getattr(mapper.relationships, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(
util.preloaded.orm_relationships.RelationshipProperty
)
@HasMemoized.memoized_attribute
@util.preload_module("sqlalchemy.orm.descriptor_props")
def composites(self) -> util.ReadOnlyProperties[CompositeProperty[Any]]:
"""Return a namespace of all :class:`.Composite`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(
util.preloaded.orm_descriptor_props.CompositeProperty
)
def _filter_properties(
self, type_: Type[_MP]
) -> util.ReadOnlyProperties[_MP]:
self._check_configure()
return util.ReadOnlyProperties(
util.OrderedDict(
(k, v) for k, v in self._props.items() if isinstance(v, type_)
)
)
@HasMemoized.memoized_attribute
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [
(
primary_key,
sql.bindparam("pk_%d" % idx, type_=primary_key.type),
)
for idx, primary_key in enumerate(self.primary_key, 1)
]
return (
sql.and_(*[k == v for (k, v) in params]),
util.column_dict(params),
)
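    # Illustrative sketch (hypothetical mapping assumed): for a mapper whose
    # primary key is the single ``user.id`` column, the "get clause" above is
    # roughly ``user.id = :pk_1`` together with a dictionary mapping the
    # ``user.id`` column to its ``pk_1`` bind parameter; a composite key
    # yields ``pk_1``, ``pk_2``, ... joined with AND.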
@HasMemoized.memoized_attribute
def _equivalent_columns(self) -> _EquivalentColumnMap:
"""Create a map of all equivalent columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, e.g.::
{tablea.col1: {tableb.col1, tablec.col1}, tablea.col2: {tabled.col2}}
""" # noqa: E501
result: _EquivalentColumnMap = {}
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = {binary.right}
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = {binary.left}
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {}, {"binary": visit_binary}
)
return result
def _is_userland_descriptor(self, assigned_name: str, obj: Any) -> bool:
if isinstance(
obj,
(
_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement,
),
):
return False
else:
return assigned_name not in self._dataclass_fields
@HasMemoized.memoized_attribute
def _dataclass_fields(self):
return [f.name for f in util.dataclass_fields(self.class_)]
def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
This occurs when properties are propagated from an inherited class, or
are applied from the columns present in the mapped table.
"""
if column is not None and sql_base._never_select_column(column):
return True
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
# ignore dataclass field default values
if local:
if self.class_.__dict__.get(
assigned_name, None
) is not None and self._is_userland_descriptor(
assigned_name, self.class_.__dict__[assigned_name]
):
return True
else:
attr = self.class_manager._get_class_attr_mro(assigned_name, None)
if attr is not None and self._is_userland_descriptor(
assigned_name, attr
):
return True
if (
self.include_properties is not None
and name not in self.include_properties
and (column is None or column not in self.include_properties)
):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and (
name in self.exclude_properties
or (column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
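    # Illustrative sketch (hypothetical mapping assumed): the
    # ``include_properties`` / ``exclude_properties`` checks above correspond
    # to mapper arguments such as::
    #
    #     class User(Base):
    #         __table__ = user_table
    #         __mapper_args__ = {"exclude_properties": ["legacy_flag"]}
    #
    # which keeps ``user_table.c.legacy_flag`` from being mapped as an
    # attribute on ``User``.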
def common_parent(self, other: Mapper[Any]) -> bool:
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
def is_sibling(self, other: Mapper[Any]) -> bool:
"""return true if the other mapper is an inheriting sibling to this
one. common parent but different branch
"""
return (
self.base_mapper is other.base_mapper
and not self.isa(other)
and not other.isa(self)
)
def _canload(
self, state: InstanceState[Any], allow_subtypes: bool
) -> bool:
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
def isa(self, other: Mapper[Any]) -> bool:
"""Return True if the this mapper inherits from the given mapper."""
m: Optional[Mapper[Any]] = self
while m and m is not other:
m = m.inherits
return bool(m)
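    # Illustrative sketch (hypothetical hierarchy assumed): given
    # ``class Manager(Employee)`` and ``class Engineer(Employee)``,
    # ``inspect(Manager).isa(inspect(Employee))`` is True,
    # ``inspect(Manager).is_sibling(inspect(Engineer))`` is True, and
    # ``common_parent()`` is True for any two of them, since all three share
    # the same ``base_mapper``.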
def iterate_to_root(self) -> Iterator[Mapper[Any]]:
m: Optional[Mapper[Any]] = self
while m:
yield m
m = m.inherits
@HasMemoized.memoized_attribute
def self_and_descendants(self) -> Sequence[Mapper[Any]]:
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self) -> Iterator[Mapper[Any]]:
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self) -> Mapper[Any]:
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self) -> Mapper[Any]:
return self.class_manager.mapper.base_mapper
def _result_has_identity_key(self, result, adapter=None):
pk_cols: Sequence[ColumnElement[Any]]
if adapter is not None:
pk_cols = [adapter.columns[c] for c in self.primary_key]
else:
pk_cols = self.primary_key
rk = result.keys()
for col in pk_cols:
if col not in rk:
return False
else:
return True
def identity_key_from_row(
self,
row: Union[Row[Unpack[TupleAny]], RowMapping],
identity_token: Optional[Any] = None,
adapter: Optional[ORMAdapter] = None,
) -> _IdentityKeyType[_O]:
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.Row` or :class:`.RowMapping` produced from a
result set that selected from the ORM mapped primary key columns.
.. versionchanged:: 2.0
:class:`.Row` or :class:`.RowMapping` are accepted
for the "row" argument
"""
pk_cols: Sequence[ColumnElement[Any]]
if adapter is not None:
pk_cols = [adapter.columns[c] for c in self.primary_key]
else:
pk_cols = self.primary_key
mapping: RowMapping
if hasattr(row, "_mapping"):
mapping = row._mapping
else:
mapping = row # type: ignore[assignment]
return (
self._identity_class,
tuple(mapping[column] for column in pk_cols),
identity_token,
)
def identity_key_from_primary_key(
self,
primary_key: Tuple[Any, ...],
identity_token: Optional[Any] = None,
) -> _IdentityKeyType[_O]:
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
"""
return (
self._identity_class,
tuple(primary_key),
identity_token,
)
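    # Illustrative sketch (hypothetical ``User`` class with a single integer
    # primary key assumed): both identity-key constructors above return a
    # tuple of the form ``(User, (5,), None)``, e.g.::
    #
    #     row = session.execute(
    #         select(User.id).where(User.id == 5)
    #     ).first()
    #     inspect(User).identity_key_from_row(row)           # (User, (5,), None)
    #     inspect(User).identity_key_from_primary_key((5,))  # (User, (5,), None)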
def identity_key_from_instance(self, instance: _O) -> _IdentityKeyType[_O]:
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
attribute name `key`.
"""
state = attributes.instance_state(instance)
return self._identity_key_from_state(state, PassiveFlag.PASSIVE_OFF)
def _identity_key_from_state(
self,
state: InstanceState[_O],
passive: PassiveFlag = PassiveFlag.PASSIVE_RETURN_NO_VALUE,
) -> _IdentityKeyType[_O]:
dict_ = state.dict
manager = state.manager
return (
self._identity_class,
tuple(
[
manager[prop.key].impl.get(state, dict_, passive)
for prop in self._identity_key_props
]
),
state.identity_token,
)
def primary_key_from_instance(self, instance: _O) -> Tuple[Any, ...]:
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
identity_key = self._identity_key_from_state(
state, PassiveFlag.PASSIVE_OFF
)
return identity_key[1]
@HasMemoized.memoized_attribute
def _persistent_sortkey_fn(self):
key_fns = [col.type.sort_key_function for col in self.primary_key]
if set(key_fns).difference([None]):
def key(state):
return tuple(
key_fn(val) if key_fn is not None else val
for key_fn, val in zip(key_fns, state.key[1])
)
else:
def key(state):
return state.key[1]
return key
@HasMemoized.memoized_attribute
def _identity_key_props(self):
return [self._columntoproperty[col] for col in self.primary_key]
@HasMemoized.memoized_attribute
def _all_pk_cols(self):
collection: Set[ColumnClause[Any]] = set()
for table in self.tables:
collection.update(self._pks_by_table[table])
return collection
@HasMemoized.memoized_attribute
def _should_undefer_in_wildcard(self):
cols: Set[ColumnElement[Any]] = set(self.primary_key)
if self.polymorphic_on is not None:
cols.add(self.polymorphic_on)
return cols
@HasMemoized.memoized_attribute
def _primary_key_propkeys(self):
return {self._columntoproperty[col].key for col in self._all_pk_cols}
def _get_state_attr_by_column(
self,
state: InstanceState[_O],
dict_: _InstanceDict,
column: ColumnElement[Any],
passive: PassiveFlag = PassiveFlag.PASSIVE_RETURN_NO_VALUE,
) -> Any:
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_committed_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set_committed_value(state, dict_, value)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(
state, dict_, column, passive=PassiveFlag.PASSIVE_OFF
)
def _get_committed_state_attr_by_column(
self, state, dict_, column, passive=PassiveFlag.PASSIVE_RETURN_NO_VALUE
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get_committed_value(
state, dict_, passive=passive
)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
col_attribute_names = set(attribute_names).intersection(
state.mapper.column_attrs.keys()
)
tables: Set[FromClause] = set(
chain(
*[
sql_util.find_tables(c, check_columns=True)
for key in col_attribute_names
for c in props[key].columns
]
)
)
if self.base_mapper.local_table in tables:
return None
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state,
state.dict,
leftcol,
passive=PassiveFlag.PASSIVE_NO_INITIALIZE,
)
if leftval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.left = sql.bindparam(
None, leftval, type_=binary.right.type
)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state,
state.dict,
rightcol,
passive=PassiveFlag.PASSIVE_NO_INITIALIZE,
)
if rightval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.right = sql.bindparam(
None, rightval, type_=binary.right.type
)
allconds: List[ColumnElement[bool]] = []
start = False
# as of #7507, from the lowest base table on upwards,
# we include all intermediary tables.
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(mapper.local_table, expression.TableClause):
return None
if start and not mapper.single:
assert mapper.inherits
assert not mapper.concrete
assert mapper.inherit_condition is not None
allconds.append(mapper.inherit_condition)
tables.add(mapper.local_table)
# only the bottom table needs its criteria to be altered to fit
# the primary key ident - the rest of the tables upwards to the
# descendant-most class should all be present and joined to each
# other.
try:
_traversed = visitors.cloned_traverse(
allconds[0], {}, {"binary": visit_binary}
)
except _OptGetColumnsNotAvailable:
return None
else:
allconds[0] = _traversed
cond = sql.and_(*allconds)
cols = []
for key in col_attribute_names:
cols.extend(props[key].columns)
return (
sql.select(*cols)
.where(cond)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
def _iterate_to_target_viawpoly(self, mapper):
if self.isa(mapper):
prev = self
for m in self.iterate_to_root():
yield m
if m is not prev and prev not in m._with_polymorphic_mappers:
break
prev = m
if m is mapper:
break
@HasMemoized.memoized_attribute
def _would_selectinload_combinations_cache(self):
return {}
def _would_selectin_load_only_from_given_mapper(self, super_mapper):
"""return True if this mapper would "selectin" polymorphic load based
on the given super mapper, and not from a setting from a subclass.
given::
class A: ...
class B(A):
__mapper_args__ = {"polymorphic_load": "selectin"}
class C(B): ...
class D(B):
__mapper_args__ = {"polymorphic_load": "selectin"}
``inspect(C)._would_selectin_load_only_from_given_mapper(inspect(B))``
returns True, because C does selectin loading because of B's setting.
OTOH, ``inspect(D)
._would_selectin_load_only_from_given_mapper(inspect(B))``
returns False, because D does selectin loading because of its own
setting; when we are doing a selectin poly load from B, we want to
filter out D because it would already have its own selectin poly load
set up separately.
Added as part of #9373.
"""
cache = self._would_selectinload_combinations_cache
try:
return cache[super_mapper]
except KeyError:
pass
# assert that given object is a supermapper, meaning we already
# strong reference it directly or indirectly. this allows us
# to not worry that we are creating new strongrefs to unrelated
# mappers or other objects.
assert self.isa(super_mapper)
mapper = super_mapper
for m in self._iterate_to_target_viawpoly(mapper):
if m.polymorphic_load == "selectin":
retval = m is super_mapper
break
else:
retval = False
cache[super_mapper] = retval
return retval
def _should_selectin_load(self, enabled_via_opt, polymorphic_from):
if not enabled_via_opt:
# common case, takes place for all polymorphic loads
mapper = polymorphic_from
for m in self._iterate_to_target_viawpoly(mapper):
if m.polymorphic_load == "selectin":
return m
else:
# uncommon case, selectin load options were used
enabled_via_opt = set(enabled_via_opt)
enabled_via_opt_mappers = {e.mapper: e for e in enabled_via_opt}
for entity in enabled_via_opt.union([polymorphic_from]):
mapper = entity.mapper
for m in self._iterate_to_target_viawpoly(mapper):
if (
m.polymorphic_load == "selectin"
or m in enabled_via_opt_mappers
):
return enabled_via_opt_mappers.get(m, m)
return None
@util.preload_module("sqlalchemy.orm.strategy_options")
def _subclass_load_via_in(self, entity, polymorphic_from):
"""Assemble a that can load the columns local to
this subclass as a SELECT with IN.
"""
strategy_options = util.preloaded.orm_strategy_options
assert self.inherits
if self.polymorphic_on is not None:
polymorphic_prop = self._columntoproperty[self.polymorphic_on]
keep_props = set([polymorphic_prop] + self._identity_key_props)
else:
keep_props = set(self._identity_key_props)
disable_opt = strategy_options.Load(entity)
enable_opt = strategy_options.Load(entity)
classes_to_include = {self}
m: Optional[Mapper[Any]] = self.inherits
while (
m is not None
and m is not polymorphic_from
and m.polymorphic_load == "selectin"
):
classes_to_include.add(m)
m = m.inherits
for prop in self.column_attrs + self.relationships:
# skip prop keys that are not instrumented on the mapped class.
# this is primarily the "_sa_polymorphic_on" property that gets
# created for an ad-hoc polymorphic_on SQL expression, issue #8704
if prop.key not in self.class_manager:
continue
if prop.parent in classes_to_include or prop in keep_props:
# "enable" options, to turn on the properties that we want to
# load by default (subject to options from the query)
if not isinstance(prop, StrategizedProperty):
continue
enable_opt = enable_opt._set_generic_strategy(
# convert string name to an attribute before passing
# to loader strategy. note this must be in terms
# of given entity, such as AliasedClass, etc.
(getattr(entity.entity_namespace, prop.key),),
dict(prop.strategy_key),
_reconcile_to_other=True,
)
else:
# "disable" options, to turn off the properties from the
# superclass that we *don't* want to load, applied after
# the options from the query to override them
disable_opt = disable_opt._set_generic_strategy(
# convert string name to an attribute before passing
# to loader strategy. note this must be in terms
# of given entity, such as AliasedClass, etc.
(getattr(entity.entity_namespace, prop.key),),
{"do_nothing": True},
_reconcile_to_other=False,
)
primary_key = list(self.primary_key)
in_expr: ColumnElement[Any]
if len(primary_key) > 1:
in_expr = sql.tuple_(*primary_key)
else:
in_expr = primary_key[0]
if entity.is_aliased_class:
assert entity.mapper is self
q = sql.select(entity).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
in_expr = entity._adapter.traverse(in_expr)
primary_key = [entity._adapter.traverse(k) for k in primary_key]
q = q.where(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
).order_by(*primary_key)
else:
q = sql.select(self).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
q = q.where(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
).order_by(*primary_key)
return q, enable_opt, disable_opt
@HasMemoized.memoized_attribute
def _subclass_load_via_in_mapper(self):
# the default is loading this mapper against the basemost mapper
return self._subclass_load_via_in(self, self.base_mapper)
def cascade_iterator(
self,
type_: str,
state: InstanceState[_O],
halt_on: Optional[Callable[[InstanceState[Any]], bool]] = None,
) -> Iterator[
Tuple[object, Mapper[Any], InstanceState[Any], _InstanceDict]
]:
r"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type\_:
The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``,
etc.).
.. note:: the ``"all"`` cascade is not accepted here. For a generic
object traversal function, see :ref:`faq_walk_objects`.
:param state:
          The lead InstanceState. Child items will be processed per
          the relationships defined for this object's mapper.
:return: the method yields individual object instances.
.. seealso::
:ref:`unitofwork_cascades`
:ref:`faq_walk_objects` - illustrates a generic function to
traverse all objects without relying on cascades.
"""
visited_states: Set[InstanceState[Any]] = set()
prp, mpp = object(), object()
assert state.mapper.isa(self)
# this is actually a recursive structure, fully typing it seems
# a little too difficult for what it's worth here
visitables: Deque[
Tuple[
Deque[Any],
object,
Optional[InstanceState[Any]],
Optional[_InstanceDict],
]
]
visitables = deque(
[(deque(state.mapper._props.values()), prp, state, state.dict)]
)
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if not prop.cascade or type_ not in prop.cascade:
continue
assert parent_state is not None
assert parent_dict is not None
queue = deque(
prop.cascade_iterator(
type_,
parent_state,
parent_dict,
visited_states,
halt_on,
)
)
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
(
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
) = iterator.popleft()
yield (
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
)
visitables.append(
(
deque(instance_mapper._props.values()),
prp,
corresponding_state,
corresponding_dict,
)
)
@HasMemoized.memoized_attribute
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@HasMemoized.memoized_attribute
def _multiple_persistence_tables(self):
return len(self.tables) > 1
@HasMemoized.memoized_attribute
def _sorted_tables(self):
table_to_mapper: Dict[TableClause, Mapper[Any]] = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend(
[(super_table, table) for super_table in super_.tables]
)
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
# while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if (
parent is not None
and dep is not None
and dep is not parent
and dep.inherit_condition is not None
):
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(
sql_util._find_columns(parent.inherit_condition)
)
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(
table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies,
)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key: Any, callable_: Callable[[], _T]) -> _T:
if key in self._memoized_values:
return cast(_T, self._memoized_values[key])
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result: util.defaultdict[
Table,
List[
Tuple[
Mapper[Any],
List[Tuple[ColumnElement[Any], ColumnElement[Any]]],
]
],
] = util.defaultdict(list)
def set_union(x, y):
return x.union(y)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and cols.intersection(
reduce(
set_union,
[l.proxy_set for l, r in m._inherits_equated_pairs],
)
):
result[table].append((m, m._inherits_equated_pairs))
return result
|
Mapper
|
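A rough usage sketch for the sample above (the masked class is SQLAlchemy's `Mapper`): the identity-key and primary-key helpers it defines are reached through `inspect()` on a mapped class. The `User` model and in-memory SQLite engine below are hypothetical illustrations, not part of the sample.

# Sketch only: exercising Mapper.primary_key_from_instance / identity_key_from_instance.
from sqlalchemy import Integer, String, create_engine, inspect
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class User(Base):
    __tablename__ = "user_account"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    name: Mapped[str] = mapped_column(String(50))

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    user = User(name="alice")
    session.add(user)
    session.commit()
    mapper = inspect(User)                           # the Mapper for User
    print(mapper.primary_key_from_instance(user))    # e.g. (1,)
    print(mapper.identity_key_from_instance(user))   # e.g. (User, (1,), None)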
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py
|
{
"start": 6545,
"end": 7294
}
|
class ____: ...
# end
# E302
@overload
def fn(a: int) -> int: ...
@overload
def fn(a: str) -> str: ...
def fn(a: int | str) -> int | str:
...
# end
# E303
def fn():
_ = None
# arbitrary comment
def inner(): # E306 not expected (pycodestyle detects E306)
pass
# end
# E303
def fn():
_ = None
# arbitrary comment
def inner(): # E306 not expected (pycodestyle detects E306)
pass
# end
# E303
print()
print()
# end
# E303:5:1
print()
# comment
print()
# end
# E303:5:5 E303:8:5
def a():
print()
# comment
# another comment
print()
# end
# E303
#!python
"""This class docstring comes on line 5.
It gives error E303: too many blank lines (3)
"""
# end
# E303
|
B
|
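The fixture above feeds pycodestyle's blank-line rules (E301-E306) through Ruff's linter. For context, a minimal standalone snippet that trips E303 (at most two consecutive blank lines are allowed by default) looks like the following; it is an illustration, not part of the fixture.

print("first")



print("second")  # E303: too many blank lines (3)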
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/string_conversion.py
|
{
"start": 3003,
"end": 3777
}
|
class ____(float):
def __str__(self):
x = _test_source()
return f"{x}" # __str__ method may introduce sources
def base_exception(e: Exception):
return f"{type(e)}"
def function_call_target_1(error_type: Union[str, Type[Exception]]):
f"{error_type}" # Resolved as an implicit call to a function
def function_call_target_2(x: Union[B, C]):
f"{x.__class__}" # Resolved as an implicit call to a function
def multiple_callees_same_location():
s = StrIsTainted()
# The call to str() and the attribute access to str.__add__ have the same location in the AST.
return str(s) + "hello"
def optional_str(condition: bool):
s: Optional[StrIsTainted] = None
if condition:
s = StrIsTainted()
eval(str(s))
|
OverrideStr
|
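The sample above is a Pysa (Pyre taint analysis) integration test: f-string interpolation implicitly calls `__str__`, so taint returned from an overridden `__str__` flows into the formatted string. A self-contained sketch of that pattern, with a stand-in for Pysa's `_test_source` stub so the snippet actually runs:

def _test_source() -> str:
    # Stand-in for Pysa's taint-source stub; returns an evaluable string
    # so the example can execute outside the test harness.
    return "1 + 1"

class Tainted:
    def __str__(self) -> str:
        return _test_source()  # taint introduced inside __str__

t = Tainted()
print(eval(f"{t}"))  # the f-string calls Tainted.__str__, so the source reaches the eval() sink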
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_inline_schemas/pipeline.py
|
{
"start": 4378,
"end": 7332
}
|
class ____(Step):
context: ConnectorContext
title = "Migrate connector to inline schemas."
def __init__(self, context: PipelineContext) -> None:
super().__init__(context)
async def _run(self) -> StepResult:
connector = self.context.connector
connector_path = connector.code_directory
manifest_path = connector.manifest_path
python_path = connector.python_source_dir_path
logger = self.logger
json_streams = _parse_json_streams(python_path)
if len(json_streams) == 0:
return StepResult(step=self, status=StepStatus.SKIPPED, stderr="No JSON streams found.")
data = read_yaml(manifest_path)
if "streams" not in data:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr="No manifest streams found.",
)
        # find the explicit ones and remove or update
json_loaders = _find_json_loaders(data, [])
for loader in json_loaders:
logger.info(f" JSON loader ref: {loader.ref} -> {loader.file_path}")
_update_json_loaders(connector_path, data, json_streams, json_loaders)
# go through the declared streams and update the inline schemas
for stream in data["streams"]:
if isinstance(stream, str):
# see if reference
if stream.startswith("#"):
yaml_stream = _load_reference(data, stream)
if not yaml_stream:
logger.info(f" Stream reference not found: {stream}")
continue
if not _get_stream_name(yaml_stream):
logger.info(f" Stream reference name not found: {stream}")
continue
else:
logger.info(f" Stream reference unknown: {stream}")
continue
else:
yaml_stream = stream
if not yaml_stream:
logger.info(f" !! Yaml stream not found: {stream}")
continue
stream_name = _get_stream_name(yaml_stream)
if not stream_name:
logger.info(f" !! Stream name not found: {stream}")
continue
if yaml_stream.get("schema_loader") and yaml_stream["schema_loader"].get("type") == "InlineSchemaLoader":
continue
yaml_stream["schema_loader"] = {}
schema_loader = yaml_stream["schema_loader"]
_update_inline_schema(schema_loader, json_streams, stream_name)
write_yaml(data, manifest_path)
# await format_prettier([manifest_path], logger=logger)
for json_stream in json_streams.values():
logger.info(f" !! JSON schema not found: {json_stream.name}")
return StepResult(step=self, status=StepStatus.SUCCESS)
|
InlineSchemas
|
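The step above rewrites a connector's `manifest.yaml` so each declared stream uses an `InlineSchemaLoader` instead of pointing at a JSON schema file. A rough before/after sketch of the `schema_loader` shape it produces (stream and property names are illustrative, not taken from a real connector):

# Before: the low-code manifest references a schema file on disk.
stream_before = {
    "name": "customers",
    "schema_loader": {
        "type": "JsonFileSchemaLoader",
        "file_path": "./source_example/schemas/customers.json",
    },
}

# After: the JSON schema body is inlined into the manifest itself.
stream_after = {
    "name": "customers",
    "schema_loader": {
        "type": "InlineSchemaLoader",
        "schema": {
            "$schema": "http://json-schema.org/draft-07/schema#",
            "type": "object",
            "properties": {"id": {"type": ["null", "string"]}},
        },
    },
}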
python
|
PrefectHQ__prefect
|
tests/test_tasks.py
|
{
"start": 174341,
"end": 177285
}
|
class ____:
@pytest.mark.parametrize(
"args, kwargs",
[
((42, 42), {}),
([42, 42], {}),
((), {"x": 42, "y": 42}),
([42], {"y": 42}),
],
)
async def test_delay_with_args_kwargs(self, args, kwargs):
@task
def multiply(x, y):
return x * y
future = multiply.delay(*args, **kwargs)
assert await get_background_task_run_parameters(
multiply, future.state.state_details.task_parameters_id
) == {"parameters": {"x": 42, "y": 42}, "context": ANY}
def test_delay_with_duplicate_values(self):
@task
def add(x, y):
return x + y
with pytest.raises(
ParameterBindError, match="multiple values for argument 'x'"
):
add.delay(42, x=42)
def test_delay_missing_values(self):
@task
def add(x, y):
return x + y
with pytest.raises(
ParameterBindError, match="missing a required argument: 'y'"
):
add.delay(42)
async def test_delay_handles_default_values(self):
@task
def add(x, y=42):
return x + y
future = add.delay(42)
assert await get_background_task_run_parameters(
add, future.state.state_details.task_parameters_id
) == {"parameters": {"x": 42, "y": 42}, "context": ANY}
async def test_delay_overrides_defaults(self):
@task
def add(x, y=42):
return x + y
future = add.delay(42, y=100)
assert await get_background_task_run_parameters(
add, future.state.state_details.task_parameters_id
) == {"parameters": {"x": 42, "y": 100}, "context": ANY}
async def test_delay_with_variadic_args(self):
@task
def add_em_up(*args):
return sum(args)
future = add_em_up.delay(42, 42)
assert await get_background_task_run_parameters(
add_em_up, future.state.state_details.task_parameters_id
) == {"parameters": {"args": (42, 42)}, "context": ANY}
async def test_delay_with_variadic_kwargs(self):
@task
def add_em_up(**kwargs):
return sum(kwargs.values())
future = add_em_up.delay(x=42, y=42)
assert await get_background_task_run_parameters(
add_em_up, future.state.state_details.task_parameters_id
) == {"parameters": {"kwargs": {"x": 42, "y": 42}}, "context": ANY}
async def test_delay_with_variadic_args_and_kwargs(self):
@task
def add_em_up(*args, **kwargs):
return sum(args) + sum(kwargs.values())
future = add_em_up.delay(42, y=42)
assert await get_background_task_run_parameters(
add_em_up, future.state.state_details.task_parameters_id
) == {"parameters": {"args": (42,), "kwargs": {"y": 42}}, "context": ANY}
|
TestDelay
|
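The tests above cover Prefect's `Task.delay()`, which binds positional and keyword arguments against the task's signature up front (raising `ParameterBindError` on a bad bind) and schedules the run in the background. A minimal usage sketch, assuming a task worker and Prefect server are available to pick the run up:

from prefect import task

@task
def add(x, y=42):
    return x + y

future = add.delay(1, y=2)   # arguments are bound and validated immediately
result = future.result()     # blocks until a worker has executed the background run
print(result)                # 3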
python
|
google__jax
|
tests/pallas/tpu_pallas_test.py
|
{
"start": 114610,
"end": 137157
}
|
class ____(PallasBaseTest):
"""Tests for reported bugs. Only pass in interpret mode unless fixed."""
def test_casting_bool_to_i8(self):
if not jtu.is_device_tpu_at_least(5):
self.skipTest("Operation not supported on this TPU version.")
if not jtu.if_cloud_tpu_at_least(2025, 9, 12):
self.skipTest("Needs a newer libtpu")
def greater_than(x: jax.Array, y: jax.Array):
def kernel(x_ref, y_ref, out_ref):
cmp = (x_ref[...] > y_ref[...]).astype(jnp.int8)
out_ref[:] = cmp
in_specs = [
pl.BlockSpec(memory_space=pltpu.VMEM),
pl.BlockSpec(memory_space=pltpu.VMEM),
]
out_specs = pl.BlockSpec(memory_space=pltpu.VMEM)
return self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct(x.shape, jnp.int8),
in_specs=in_specs,
out_specs=out_specs,
)(x, y)
key = jax.random.key(0)
x_key, y_key = jax.random.split(key)
x = jax.random.normal(x_key, (128, 16), dtype=jnp.float32)
y = jax.random.normal(y_key, (128, 16), dtype=jnp.float32)
out = jax.jit(greater_than)(x, y)
expected = (x > y).astype(jnp.int8)
np.testing.assert_array_equal(out, expected)
def test_float32_stack(self):
x = np.arange(128, dtype=jnp.float32).reshape(1, 128)
y = x + 128
def kernel(x_ref, y_ref, out_ref):
out_ref[...] = jnp.stack([x_ref[...], y_ref[...]], axis=1)
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((1, 2, 128), jnp.float32)
)(x, y)
np.testing.assert_array_equal(out, np.stack([x, y], axis=1))
def test_lane_to_chunk_reshape_bf16(self):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if not jtu.is_device_tpu_at_least(4):
self.skipTest('Operation not supported on this TPU version.')
x = np.arange(256 * 1024, dtype=jnp.bfloat16).reshape(1, 256, 1024)
def kernel(x_ref, out_ref):
out_ref[...] = jnp.reshape(x_ref[...], (1, 256, 8, 128))
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((1, 256, 8, 128), jnp.bfloat16)
)(x)
np.testing.assert_array_equal(out, np.reshape(x, (1, 256, 8, 128)))
def test_lane_to_chunk_broadcast_fp32(self):
x = np.arange(256 * 128, dtype=jnp.float32).reshape(1, 256, 128)
def kernel(x_ref, out_ref):
out_ref[...] = jnp.broadcast_to(
jnp.expand_dims(x_ref[...], 2), (1, 256, 8, 128)
)
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((1, 256, 8, 128), jnp.float32)
)(x)
np.testing.assert_array_equal(
out, np.broadcast_to(np.expand_dims(x, 2), (1, 256, 8, 128))
)
@only_passes_in_interpret()
def test_lane_dynamic_slice(self):
"""b/346849973"""
x = np.arange(128, dtype=jnp.float32)
def kernel(x_ref, out_ref):
out_ref[...] = lax.dynamic_slice_in_dim(x_ref[...], 64, 1, 0)
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((1,), jnp.float32)
)(x)
np.testing.assert_array_equal(out, x[64:65])
def test_lane_broadcast_bf16(self):
x = np.arange(256, dtype=jnp.bfloat16).reshape(256, 1)
def kernel(x_ref, out_ref):
out_ref[...] = jnp.broadcast_to(x_ref[...], (256, 512))
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((256, 512), jnp.bfloat16)
)(x)
np.testing.assert_array_equal(out, np.broadcast_to(x, (256, 512)))
def test_bfloat16_to_uint32_bitcast(self):
x = np.arange(16 * 2 * 256, dtype=jnp.bfloat16).reshape(16, 2, 256)
def kernel(x_ref, out_ref):
out_ref[...] = pltpu.bitcast(x_ref[...], jnp.uint32)
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((16, 1, 256), jnp.uint32)
)(x)
np.testing.assert_array_equal(out, state_utils.bitcast(x, jnp.uint32))
@parameterized.product(
shape=((128, 64), (15, 256), (16, 256)),
shift=(2, 3),
axis=(0, 1),
)
def test_roll_partial_with_static_shift(
self, shape: tuple[int, int], shift: int, axis: int
):
if (
not jtu.if_cloud_tpu_at_least(2025, 7, 19)
and shape[0] % 8
and axis == 0
):
self.skipTest('Needs a newer libtpu for non-sublane-aligned shape')
x = np.arange(math.prod(shape), dtype=jnp.float32).reshape(shape)
def kernel(x_ref, out_ref):
out_ref[...] = pltpu.roll(x_ref[...], shift=shift, axis=axis)
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct(shape, jnp.float32)
)(x)
np.testing.assert_array_equal(out, np.roll(x, shift, axis))
@parameterized.product(
shape_and_axis=(((128, 64), 1), ((63, 256), 0)),
)
def test_roll_partial_with_dynamic_shift(
self, shape_and_axis: tuple[tuple[int, int], int]
):
if self.INTERPRET:
self.skipTest('Test only applies to non-interpret mode.')
shape, axis = shape_and_axis
x = np.arange(math.prod(shape), dtype=jnp.float32).reshape(shape)
def kernel(x_ref, out_ref):
amount = x_ref[0, 0].astype(jnp.int32)
out_ref[...] = pltpu.roll(x_ref[...], amount, axis=axis)
with self.assertRaisesRegex(Exception, 'unsupported unaligned shape'):
_ = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct(shape, jnp.float32)
)(x)
def test_retiling1(self):
if not jtu.if_cloud_tpu_at_least(2025, 7, 2):
self.skipTest('Needs a newer libtpu')
x = np.arange(1024, dtype=jnp.bfloat16).reshape(1024)
def kernel(x_ref, out_ref):
out_ref[:, :] = jnp.reshape(x_ref[:].astype(jnp.float32), (8, 128))
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
)(x)
np.testing.assert_array_equal(out, np.reshape(x, (8, 128)))
def test_retiling2(self):
x = np.arange(1 * 8 * 1024, dtype=jnp.bfloat16).reshape(1, 8, 1024)
def kernel(x_ref, out_ref):
out_ref[:, :, :] = jnp.reshape(
x_ref[:, 7, :].astype(jnp.float32), (1, 8, 128)
)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((1, 8, 128), jnp.float32),
)(x)
np.testing.assert_array_equal(out, np.reshape(x[:, 7, :], (1, 8, 128)))
def test_sublane_adding_shape_cast_f32(self):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
x = np.arange(8 * 128, dtype=jnp.float32).reshape(8, 128)
def kernel(x_ref, out_ref):
out_ref[:, 0, :] = x_ref[:, :]
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((8, 1, 128), jnp.float32)
)(x)
np.testing.assert_array_equal(out, np.reshape(x, (8, 1, 128)))
def test_sublane_adding_shape_cast_bf16(self):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if not jtu.is_device_tpu_at_least(4):
self.skipTest('Operation not supported on this TPU version.')
x = np.arange(8 * 128, dtype=jnp.bfloat16).reshape(8, 128)
def kernel(x_ref, out_ref):
out_ref[:, 0, :] = x_ref[:, :]
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((8, 1, 128), jnp.bfloat16)
)(x)
np.testing.assert_array_equal(out, np.reshape(x, (8, 1, 128)))
def test_mixed_strides(self):
x = np.full((8, 128), 1.0, dtype=jnp.float32)
y = np.full((8, 2, 128), 2.0, dtype=jnp.bfloat16)
def kernel(x_ref, y_ref, out_ref):
out_ref[:, :] = x_ref[:, :] + y_ref[:, 1, :].astype(jnp.float32)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
)(x, y)
np.testing.assert_array_equal(
out, np.full((8, 128), 3.0, dtype=jnp.float32)
)
def test_sum(self):
x = np.zeros((8, 2, 8, 128), dtype=jnp.float32)
def kernel(x_ref, out_ref):
out_ref[:, :, :] = jnp.sum(x_ref[:, :, :, :], 2)
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((8, 2, 128), jnp.float32)
)(x)
np.testing.assert_array_equal(out, np.zeros((8, 2, 128), dtype=jnp.float32))
def test_transpose(self):
if not jtu.if_cloud_tpu_at_least(2025, 9, 19):
self.skipTest('Needs a newer libTPU')
x = np.zeros((8, 2, 8, 128), dtype=jnp.float32)
def kernel(x_ref, out_ref):
out_ref[:, :, :, :] = jnp.transpose(x_ref[:, :, :, :], (0, 2, 1, 3))
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct((8, 8, 2, 128), jnp.float32)
)(x)
np.testing.assert_array_equal(
out, np.zeros((8, 8, 2, 128), dtype=jnp.float32)
)
# (q, m, n) -> (q, m * n) where n % 128 == 0
@parameterized.parameters(
(q, m, n, dtype)
for (q, m, n), dtype in itertools.product(
[
(32, 16, 512),
(20, 19, 512),
(5, 3, 256),
(9, 15, 256),
(3, 2, 256),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_two_minor_dims_to_R2(self, q, m, n, dtype):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(
x_ref.shape[0], x_ref.shape[1] * x_ref.shape[2]
)
x = np.arange(q * m * n, dtype=dtype).reshape(q, m, n)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q, m * n), dtype),
)(x)
    expected = x.reshape([q, m * n])
    np.testing.assert_array_equal(out, expected)
# (q, m, n, k) -> (q, m, n * k) where k % 128 == 0
@parameterized.parameters(
(q, m, n, k, dtype)
for (q, m, n, k), dtype in itertools.product(
[
(3, 8, 17, 512),
(1, 8, 9, 256),
(1, 8, 3, 256),
(10, 1, 4, 256),
(1, 2, 2, 256),
(1, 9, 3, 256),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_two_minor_dims_to_R3(self, q, m, n, k, dtype):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(
x_ref.shape[0], x_ref.shape[1], x_ref.shape[2] * x_ref.shape[3]
)
x = np.arange(q * m * n * k, dtype=dtype).reshape(q, m, n, k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q, m, n * k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q, m, n * k]))
# (p, q, m, n, k) -> (p, q * m * n * k) where k % 128 == 0
@parameterized.parameters(
(p, q, m, n, k, dtype)
for (p, q, m, n, k), dtype in itertools.product(
[
(5, 3, 8, 17, 512),
(6, 1, 8, 9, 256),
(16, 1, 8, 3, 256),
(3, 2, 1, 4, 256),
(1, 7, 2, 2, 256),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_four_minor_dims_to_R2(self, p, q, m, n, k, dtype):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(
x_ref.shape[0],
x_ref.shape[1] * x_ref.shape[2] * x_ref.shape[3] * x_ref.shape[4],
)
x = np.arange(p * q * m * n * k, dtype=dtype).reshape(p, q, m, n, k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((p, q * m * n * k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([p, q * m * n * k]))
# (q, m, n, k) -> (q, m, 1, n * k) where k % 128 == 0
@parameterized.parameters(
(q, m, n, k, dtype)
for (q, m, n, k), dtype in itertools.product(
[
(10, 1, 4, 256),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_two_minor_dims_preserve_rank(self, q, m, n, k, dtype):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = (
x_ref[...]
.reshape(
x_ref.shape[0], x_ref.shape[1], x_ref.shape[2] * x_ref.shape[3]
)
.reshape(
x_ref.shape[0], 1, x_ref.shape[1], x_ref.shape[2] * x_ref.shape[3]
)
)
x = np.arange(q * m * n * k, dtype=dtype).reshape(q, m, n, k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q, m, 1, n * k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q, m, 1, n * k]))
# (q, m, n, k) -> (q * m, n * k) where k % 128 == 0
@parameterized.parameters(
(q, m, n, k, dtype)
for (q, m, n, k), dtype in itertools.product(
[
(3, 9, 17, 512),
(1, 8, 9, 256),
(1, 8, 3, 384),
(10, 1, 4, 256),
(1, 2, 2, 256),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_fold_two_leading_dims_and_two_minor_dims_R4_to_R2(
self, q, m, n, k, dtype
):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(
x_ref.shape[0] * x_ref.shape[1], x_ref.shape[2] * x_ref.shape[3]
)
x = np.arange(q * m * n * k, dtype=dtype).reshape(q, m, n, k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q * m, n * k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q * m, n * k]))
# (q * m, n, k) -> (q, m, n * k) where k % 128 == 0
@parameterized.parameters(
(q, m, n, k, dtype)
for (q, m, n, k), dtype in itertools.product(
[
(2, 2, 17, 512),
(3, 2, 3, 256),
(1, 5, 4, 384),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_unfold_leading_dim_and_fold_two_minor_dims_R3_to_R3(
self, q, m, n, k, dtype
):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(
q,
m,
x_ref.shape[1] * x_ref.shape[2],
)
x = np.arange(q * m * n * k, dtype=dtype).reshape(q * m, n, k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q, m, n * k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q, m, n * k]))
# (q * m, n * k) -> (q, m, n, k) where k % 128 == 0
@parameterized.parameters(
(q, m, n, k, dtype)
for (q, m, n, k), dtype in itertools.product(
[
(2, 2, 17, 512),
(3, 2, 3, 256),
(1, 5, 4, 384),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_unfold_leading_and_minor_dims_R2_to_R4(
self, q, m, n, k, dtype
):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(q, m, n, k)
x = np.arange(q * m * n * k, dtype=dtype).reshape(q * m, n * k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q, m, n, k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q, m, n, k]))
# (q, m, n * k) -> (q * m, n, k) where k % 128 == 0
@parameterized.parameters(
(q, m, n, k, dtype)
for (q, m, n, k), dtype in itertools.product(
[
(2, 2, 17, 512),
(3, 2, 8, 256),
(1, 5, 4, 384),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_fold_leading_dims_and_unfold_minor_dim(
self, q, m, n, k, dtype
):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(q * m, n, k)
x = np.arange(q * m * n * k, dtype=dtype).reshape(q, m, n * k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q * m, n, k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q * m, n, k]))
# (q, m, n, k) -> (q, m * n, k) where k % 128 == 0
@parameterized.parameters(
(q, m, n, k, dtype)
for (q, m, n, k), dtype in itertools.product(
[
(2, 2, 17, 512),
(3, 2, 8, 256),
(1, 5, 4, 384),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_fold_middle_dims(self, q, m, n, k, dtype):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(q, m * n, k)
x = np.arange(q * m * n * k, dtype=dtype).reshape(q, m, n, k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q, m * n, k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q, m * n, k]))
# (q, m * n, k) -> (q, m, n, k) where k % 128 == 0
@parameterized.parameters(
(q, m, n, k, dtype)
for (q, m, n, k), dtype in itertools.product(
[
(2, 2, 17, 512),
(3, 2, 8, 256),
(9, 5, 4, 384),
],
[jnp.float32, jnp.uint32, jnp.bfloat16, jnp.int8],
)
)
def test_reshape_unfold_middle_dims(self, q, m, n, k, dtype):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(q, m, n, k)
x = np.arange(q * m * n * k, dtype=dtype).reshape(q, m * n, k)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q, m, n, k), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q, m, n, k]))
@parameterized.parameters([jnp.int8, jnp.bfloat16, jnp.float32])
def test_reshape_shift_factor_from_minor_to_major(self, dtype):
if not jtu.if_cloud_tpu_at_least(2025, 7, 12):
self.skipTest('Needs a newer libTPU')
if (dtype == jnp.bfloat16 and not jtu.is_device_tpu_at_least(4)) or (
dtype == jnp.int8 and not jtu.is_device_tpu_at_least(5)
):
self.skipTest('Operation not supported on this TPU version.')
q0, m0, n0 = 1, 3, 7680
q1, m1, n1 = 3, 10, 768
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...].reshape(q1, m1, n1)
x = np.arange(q0 * m0 * n0, dtype=dtype).reshape(q0, m0, n0)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((q1, m1, n1), dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape([q1, m1, n1]))
@parameterized.product(
dtype=[jnp.float32, jnp.bfloat16, jnp.float8_e4m3fn],
)
def test_reshape_fold_minormost_dim(self, dtype):
if not jtu.if_cloud_tpu_at_least(2025, 10, 22):
self.skipTest('Needs a newer libTPU')
packing = 32 // (8 * np.dtype(dtype).itemsize)
in_shape = (8 * packing, 128)
out_shape = (1, math.prod(in_shape))
def kernel(x_ref, y_ref):
x = x_ref[...]
y_ref[...] = x.reshape(out_shape)
x = np.random.randn(*in_shape).astype(dtype)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct(out_shape, dtype),
)(x)
np.testing.assert_array_equal(out, x.reshape(out_shape))
def test_dynamic_grid_with_smem_output(self):
if self.INTERPRET:
self.skipTest('Fail on interpreter.')
if not jtu.if_cloud_tpu_at_least(2025, 11, 3):
self.skipTest('Needs a newer libTPU')
def body(_, o_ref):
o_ref[0] = lax.cond(
pl.program_id(0) == 0, lambda: 1, lambda: o_ref[0] + 1
)
def wrapper_dynamic(n):
return self.pallas_call(
body,
out_shape=pltpu.SMEM((1,), dtype=jnp.int32),
grid_spec=pl.GridSpec(
grid=(n,),
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
),
)(n)
n = jax.random.randint(jax.random.key(0), (1,), 1, 10, dtype=jnp.int32)
compiled_kernel = jax.jit(wrapper_dynamic).lower(n).compile()
np.testing.assert_array_equal(compiled_kernel(n), n)
|
MiscellaneousTest
|
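The regression tests above share one pattern: write a small kernel over `Ref`s, wrap it with `pl.pallas_call`, and compare the result against the NumPy equivalent. A minimal self-contained version of that pattern, run in interpret mode so it does not require a TPU:

import jax
import jax.numpy as jnp
import numpy as np
from jax.experimental import pallas as pl

def add_one_kernel(x_ref, o_ref):
    # Read the whole block, add one, and write the result back out.
    o_ref[...] = x_ref[...] + 1.0

x = jnp.arange(8 * 128, dtype=jnp.float32).reshape(8, 128)
out = pl.pallas_call(
    add_one_kernel,
    out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype),
    interpret=True,  # interpreter backend, so no TPU is needed
)(x)
np.testing.assert_array_equal(out, np.asarray(x) + 1.0)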
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config_vectors.py
|
{
"start": 12720,
"end": 76417
}
|
class ____:
@staticmethod
def self_provided(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
):
"""Create a vector using no vectorizer. You will need to provide the vectors yourself.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
"""
return _VectorConfigCreate(
name=name,
vectorizer=_VectorizerConfigCreate(vectorizer=Vectorizers.NONE),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def custom(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
module_name: str,
module_config: Optional[Dict[str, Any]] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using a custom module that is not currently supported by the SDK.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
module_name: The name of the custom module to use.
module_config: The configuration of the custom module to use.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_VectorizerCustomConfig(
vectorizer=_EnumLikeStr(module_name), module_config=module_config
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_cohere(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
model: Optional[Union[CohereModel, str]] = None,
dimensions: Optional[int] = None,
truncate: Optional[CohereTruncation] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-cohere` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/cohere/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
dimensions: Number of output dimensions. Defaults to `None`, which uses the server-defined default.
truncate: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `CohereModel` type or if `truncate` is not a valid value from the `CohereTruncation` type.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecCohereConfig(
baseURL=base_url,
model=model,
dimensions=dimensions,
truncate=truncate,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def multi2vec_cohere(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
model: Optional[Union[CohereMultimodalModel, str]] = None,
dimensions: Optional[int] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
truncate: Optional[CohereTruncation] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2vec_cohere` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/cohere/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
model: The model to use. Defaults to `None`, which uses the server-defined default.
dimensions: Number of output dimensions. Defaults to `None`, which uses the server-defined default.
text_fields: The text fields to use in vectorization.
truncate: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `CohereMultimodalModel` type or if `truncate` is not a valid value from the `CohereTruncation` type.
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2VecCohereConfig(
baseURL=base_url,
model=model,
dimensions=dimensions,
truncate=truncate,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
@typing_deprecated(
"The contextionary model is old and not recommended for use. If you are looking for a local, lightweight model try the new text2vec-model2vec module instead."
)
def text2vec_contextionary(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec_contextionary` module.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/text2vec-contextionary)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecContextionaryConfig(
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_model2vec(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
inference_url: Optional[str] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec_model2vec` module.
See the [documentation](https://docs.weaviate.io/weaviate/model-providers/model2vec)
for detailed usage.
Args:
name: The name of the vector.
inference_url: The inferenceUrl to use where API requests should go. Defaults to `None`, which uses the server-defined default.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecModel2VecConfig(
vectorizeClassName=vectorize_collection_name,
inferenceUrl=inference_url,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_databricks(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
endpoint: str,
instruction: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-databricks` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/databricks/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
endpoint: The endpoint to use.
instruction: The instruction strategy to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecDatabricksConfig(
endpoint=endpoint,
instruction=instruction,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_mistral(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
model: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-mistral` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/mistral/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecMistralConfig(
baseURL=base_url,
model=model,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_morph(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
model: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-morph` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/morph/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecMorphConfig(
baseURL=base_url,
model=model,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_ollama(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
api_endpoint: Optional[str] = None,
model: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-ollama` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/ollama/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
api_endpoint: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
Docker users may need to specify an alias, such as `http://host.docker.internal:11434` so that the container can access the host machine.
model: The model to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecOllamaConfig(
apiEndpoint=api_endpoint,
model=model,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_openai(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
dimensions: Optional[int] = None,
model: Optional[Union[OpenAIModel, str]] = None,
model_version: Optional[str] = None,
type_: Optional[OpenAIType] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-openai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/openai/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: Number of dimensions. Applicable to v3 OpenAI models only. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
model_version: The model version to use. Defaults to `None`, which uses the server-defined default.
type_: The type of model to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `type_` is not a valid value from the `OpenAIType` type.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecOpenAIConfig(
baseURL=base_url,
model=model,
modelVersion=model_version,
type_=type_,
vectorizeClassName=vectorize_collection_name,
dimensions=dimensions,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_aws(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
endpoint: Optional[str] = None,
model: Optional[Union[AWSModel, str]],
region: str,
service: Union[AWSService, str] = "bedrock",
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-aws` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/aws/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
endpoint: The endpoint to use. Defaults to `None`, which uses the server-defined default.
model: The model to use, REQUIRED.
region: The AWS region to run the model from, REQUIRED.
service: The AWS service to use. Defaults to `bedrock`.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecAWSConfig(
model=model,
endpoint=endpoint,
region=region,
service=service,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def multi2vec_aws(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
dimensions: Optional[int] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
model: Optional[str] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
region: Optional[str] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2vec-aws` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/aws/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
dimensions: The number of dimensions to use. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
model: The model to use. Defaults to `None`, which uses the server-defined default.
text_fields: The text fields to use in vectorization.
region: The AWS region to run the model from. Defaults to `None`, which uses the server-defined default.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
Raises:
pydantic.ValidationError: If the arguments passed to the function are invalid.
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2VecAWSConfig(
region=region,
model=model,
dimensions=dimensions,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def img2vec_neural(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
image_fields: List[str],
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `img2vec-neural` module.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/img2vec-neural)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
image_fields: The image fields to use. This is a required field and must match the property fields of the collection that are defined as `DataType.BLOB`.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
Raises:
pydantic.ValidationError: If `image_fields` is not a `list`.
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Img2VecNeuralConfig(imageFields=image_fields),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
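# Usage sketch (hedged): `image_fields` must name existing BLOB properties of
# the collection; "thumbnail" below is a hypothetical property name.
#
#   img_vec = Configure.Vectors.img2vec_neural(
#       name="image_vec",
#       image_fields=["thumbnail"],
#   )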
@staticmethod
def multi2vec_clip(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
inference_url: Optional[str] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2vec-clip` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/transformers/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
inference_url: The inference url to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2VecClipConfig(
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
inferenceUrl=inference_url,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
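# Usage sketch (hedged): plain property names and weighted `Multi2VecField`
# entries can be mixed, as implied by the `_map_multi2vec_fields` helper used
# above; the property names and weights below are hypothetical.
#
#   clip_vec = Configure.Vectors.multi2vec_clip(
#       name="clip_vec",
#       image_fields=[Multi2VecField(name="poster", weight=0.7)],
#       text_fields=[Multi2VecField(name="title", weight=0.3)],
#   )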
@staticmethod
def multi2vec_google(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
dimensions: Optional[int] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
location: str,
model: Optional[str] = None,
project_id: str,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_interval_seconds: Optional[int] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2vec-google` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
dimensions: The number of dimensions to use. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
location: Where the model runs. REQUIRED.
model: The model to use. Defaults to `None`, which uses the server-defined default.
project_id: The project ID to use, REQUIRED.
text_fields: The text fields to use in vectorization.
video_fields: The video fields to use in vectorization.
video_interval_seconds: Length of a video interval. Defaults to `None`, which uses the server-defined default.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2VecGoogleConfig(
projectId=project_id,
location=location,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
videoFields=_map_multi2vec_fields(video_fields),
dimensions=dimensions,
modelId=model,
videoIntervalSeconds=video_interval_seconds,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
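# Usage sketch (hedged): `project_id` and `location` are required; the other
# arguments fall back to server-defined defaults. All values are placeholders.
#
#   google_mm_vec = Configure.Vectors.multi2vec_google(
#       name="media_vec",
#       project_id="my-gcp-project",      # hypothetical project id
#       location="us-central1",
#       image_fields=["poster"],
#       video_fields=["trailer"],
#       video_interval_seconds=20,
#   )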
@staticmethod
def multi2vec_bind(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
audio_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
depth_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
imu_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
thermal_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2vec-bind` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/imagebind/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
audio_fields: The audio fields to use in vectorization.
depth_fields: The depth fields to use in vectorization.
image_fields: The image fields to use in vectorization.
imu_fields: The IMU fields to use in vectorization.
text_fields: The text fields to use in vectorization.
thermal_fields: The thermal fields to use in vectorization.
video_fields: The video fields to use in vectorization.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2VecBindConfig(
audioFields=_map_multi2vec_fields(audio_fields),
depthFields=_map_multi2vec_fields(depth_fields),
imageFields=_map_multi2vec_fields(image_fields),
IMUFields=_map_multi2vec_fields(imu_fields),
textFields=_map_multi2vec_fields(text_fields),
thermalFields=_map_multi2vec_fields(thermal_fields),
videoFields=_map_multi2vec_fields(video_fields),
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def multi2vec_voyageai(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
model: Optional[Union[VoyageMultimodalModel, str]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
truncation: Optional[bool] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2vec-voyageai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
model: The model to use. Defaults to `None`, which uses the server-defined default.
text_fields: The text fields to use in vectorization.
truncation: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `VoyageMultimodalModel` type.
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2VecVoyageaiConfig(
baseURL=base_url,
model=model,
truncation=truncation,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def multi2vec_nvidia(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
model: Optional[str] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
truncation: Optional[bool] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2vec-nvidia` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
model: The model to use. Defaults to `None`, which uses the server-defined default.
text_fields: The text fields to use in vectorization.
truncation: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `NvidiaMultimodalModel` type.
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2VecNvidiaConfig(
baseURL=base_url,
model=model,
truncation=truncation,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def ref2vec_centroid(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
method: Literal["mean"] = "mean",
reference_properties: List[str],
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `ref2vec-centroid` module.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/ref2vec-centroid)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
method: The method to use. Defaults to `mean`.
reference_properties: The reference properties to use in vectorization, REQUIRED.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Ref2VecCentroidConfig(
referenceProperties=reference_properties,
method=method,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
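# Usage sketch (hedged): ref2vec-centroid derives an object's vector from the
# vectors of its cross-referenced objects, so `reference_properties` must name
# existing reference properties ("ofArticles" below is hypothetical).
#
#   centroid_vec = Configure.Vectors.ref2vec_centroid(
#       name="centroid_vec",
#       reference_properties=["ofArticles"],
#       method="mean",
#   )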
@staticmethod
def text2vec_azure_openai(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
deployment_id: str,
dimensions: Optional[int] = None,
model: Optional[str] = None,
resource_name: str,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-openai` module running with Azure.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/openai-azure/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
deployment_id: The deployment ID to use, REQUIRED.
dimensions: The dimensionality of the vectors. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
resource_name: The resource name to use, REQUIRED.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecAzureOpenAIConfig(
baseURL=base_url,
dimensions=dimensions,
model=model,
resourceName=resource_name,
deploymentId=deployment_id,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
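# Usage sketch (hedged): `resource_name` and `deployment_id` are both required
# and come from your Azure OpenAI resource; the strings below are placeholders.
#
#   azure_vec = Configure.Vectors.text2vec_azure_openai(
#       name="body_vec",
#       resource_name="my-azure-openai-resource",
#       deployment_id="my-embedding-deployment",
#       vectorize_collection_name=False,
#   )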
@staticmethod
@typing_deprecated(
"The `text2vec-gpt4all` vectorizer is deprecated and will be removed in a future release. See the docs (https://docs.weaviate.io/weaviate/model-providers) for alternatives."
)
def text2vec_gpt4all(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-gpt4all` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/gpt4all/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecGPT4AllConfig(
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_huggingface(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
endpoint_url: Optional[AnyHttpUrl] = None,
model: Optional[str] = None,
passage_model: Optional[str] = None,
query_model: Optional[str] = None,
wait_for_model: Optional[bool] = None,
use_gpu: Optional[bool] = None,
use_cache: Optional[bool] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-huggingface` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/huggingface/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
endpoint_url: The endpoint URL to use. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
passage_model: The passage model to use. Defaults to `None`, which uses the server-defined default.
query_model: The query model to use. Defaults to `None`, which uses the server-defined default.
wait_for_model: Whether to wait for the model to be loaded. Defaults to `None`, which uses the server-defined default.
use_gpu: Whether to use the GPU. Defaults to `None`, which uses the server-defined default.
use_cache: Whether to use the cache. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If the arguments passed to the function are invalid.
It is important to note that some of these variables are mutually exclusive.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/huggingface/embeddings#vectorizer-parameters) for more details.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecHuggingFaceConfig(
model=model,
passageModel=passage_model,
queryModel=query_model,
endpointURL=endpoint_url,
waitForModel=wait_for_model,
useGPU=use_gpu,
useCache=use_cache,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
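# Usage sketch (hedged): per the note above, `model` and the
# `passage_model`/`query_model` pair are mutually exclusive ways of choosing
# what runs behind the Hugging Face endpoint; pick one style, not both.
#
#   hf_vec = Configure.Vectors.text2vec_huggingface(
#       name="body_vec",
#       model="sentence-transformers/all-MiniLM-L6-v2",  # hypothetical model id
#       wait_for_model=True,
#       use_cache=True,
#   )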
@staticmethod
def text2vec_google(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
api_endpoint: Optional[str] = None,
dimensions: Optional[int] = None,
model: Optional[str] = None,
project_id: str,
title_property: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-google` model.
See the [documentation]https://weaviate.io/developers/weaviate/model-providers/google/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
api_endpoint: The API endpoint to use without a leading scheme such as `http://`. Defaults to `None`, which uses the server-defined default.
dimensions: The dimensionality of the vectors. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
project_id: The project ID to use, REQUIRED.
title_property: The Weaviate property name for the `gecko-002` or `gecko-003` model to use as the title.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecGoogleConfig(
projectId=project_id,
apiEndpoint=api_endpoint,
dimensions=dimensions,
modelId=model,
vectorizeClassName=vectorize_collection_name,
titleProperty=title_property,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_google_aistudio(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
dimensions: Optional[int] = None,
model: Optional[str] = None,
title_property: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-google` model.
See the [documentation]https://weaviate.io/developers/weaviate/model-providers/google/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
dimensions: The dimensionality of the vectors. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
title_property: The Weaviate property name for the `gecko-002` or `gecko-003` model to use as the title.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
Raises:
pydantic.ValidationError: If the arguments passed to the function are invalid.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecGoogleConfig(
projectId=None,
apiEndpoint="generativelanguage.googleapis.com",
dimensions=dimensions,
modelId=model,
vectorizeClassName=vectorize_collection_name,
titleProperty=title_property,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_transformers(
*,
name: Optional[str] = None,
dimensions: Optional[int] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
inference_url: Optional[str] = None,
passage_inference_url: Optional[str] = None,
pooling_strategy: Literal["masked_mean", "cls"] = "masked_mean",
query_inference_url: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-transformers` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/transformers/embeddings)
for detailed usage.
Args:
name: The name of the vector.
dimensions: The number of dimensions for the generated embeddings. Defaults to `None`, which uses the server-defined default.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
inference_url: The inference URL where API requests should go. Use either this OR `passage_inference_url`/`query_inference_url`. Defaults to `None`, which uses the server-defined default.
passage_inference_url: The inference URL where passage API requests should go. Use this together with `query_inference_url`, OR use `inference_url` alone. Defaults to `None`, which uses the server-defined default.
pooling_strategy: The pooling strategy to use. Defaults to `masked_mean`.
query_inference_url: The inference URL where query API requests should go. Use this together with `passage_inference_url`, OR use `inference_url` alone. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecTransformersConfig(
poolingStrategy=pooling_strategy,
dimensions=dimensions,
vectorizeClassName=vectorize_collection_name,
inferenceUrl=inference_url,
passageInferenceUrl=passage_inference_url,
queryInferenceUrl=query_inference_url,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
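# Usage sketch (hedged): use either a single `inference_url`, or the
# `passage_inference_url`/`query_inference_url` pair for split passage/query
# models, but not both. The URLs below are placeholders for self-hosted
# inference containers.
#
#   t2v_vec = Configure.Vectors.text2vec_transformers(
#       name="body_vec",
#       passage_inference_url="http://t2v-passage:8080",
#       query_inference_url="http://t2v-query:8080",
#       pooling_strategy="masked_mean",
#   )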
@staticmethod
def text2vec_jinaai(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[str] = None,
dimensions: Optional[int] = None,
model: Optional[Union[JinaModel, str]] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-jinaai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/embeddings) for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to send the vectorization requests to. Defaults to `None`, which uses the server-defined default.
dimensions: The number of dimensions for the generated embeddings. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecJinaConfig(
baseURL=base_url,
dimensions=dimensions,
model=model,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def multi2vec_jinaai(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
dimensions: Optional[int] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
model: Optional[Union[JinaMultimodalModel, str]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2vec-jinaai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: The number of dimensions for the generated embeddings (only available for some models). Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
model: The model to use. Defaults to `None`, which uses the server-defined default.
text_fields: The text fields to use in vectorization.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `JinaMultimodalModel` type.
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2VecJinaConfig(
baseURL=base_url,
model=model,
dimensions=dimensions,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_voyageai(
*,
name: Optional[str] = None,
base_url: Optional[str] = None,
dimensions: Optional[int] = None,
model: Optional[Union[VoyageModel, str]] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
source_properties: Optional[List[str]] = None,
truncate: Optional[bool] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-voyageai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/embeddings)
for detailed usage.
Args:
name: The name of the vector.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: The number of dimensions for the generated embeddings. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
See the
[documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/embeddings#available-models) for more details.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
truncate: Whether to truncate the input texts to fit within the context length. Defaults to `None`, which uses the server-defined default.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecVoyageConfig(
model=model,
vectorizeClassName=vectorize_collection_name,
baseURL=base_url,
truncate=truncate,
dimensions=dimensions,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_weaviate(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[str] = None,
dimensions: Optional[int] = None,
model: Optional[Union[WeaviateModel, str]] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
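"""Create a vector using the `text2vec-weaviate` module.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: The number of dimensions for the generated embeddings. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""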
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecWeaviateConfig(
model=model,
vectorizeClassName=vectorize_collection_name,
baseURL=base_url,
dimensions=dimensions,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
@staticmethod
def text2vec_nvidia(
*,
name: Optional[str] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[str] = None,
model: Optional[str] = None,
truncate: Optional[bool] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a vector using the `text2vec-nvidia` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings)
for detailed usage.
Args:
name: The name of the vector.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
See the
[documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings#available-models) for more details.
truncate: Whether to truncate the input texts to fit within the context length. Defaults to `None`, which uses the server-defined default.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecNvidiaConfig(
model=model,
vectorizeClassName=vectorize_collection_name,
baseURL=base_url,
truncate=truncate,
),
vector_index_config=_IndexWrappers.single(vector_index_config, quantizer),
)
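# Putting it together (hedged sketch): several of the helpers above can be
# combined into a list of named-vector configs when creating a collection. The
# exact creation keyword (`vector_config` vs. `vectorizer_config`) and the
# public alias for this class (`Configure.Vectors` is assumed throughout these
# sketches) depend on the weaviate-client version, so verify against your
# installed client before copying.
#
#   client.collections.create(
#       name="Article",
#       vector_config=[
#           Configure.Vectors.text2vec_openai(name="body_vec", source_properties=["body"]),
#           Configure.Vectors.multi2vec_clip(name="media_vec", image_fields=["poster"]),
#       ],
#   )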
|
_Vectors
|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-vertex-endpoint/llama_index/embeddings/vertex_endpoint/utils.py
|
{
"start": 681,
"end": 1056
}
|
class ____(BaseIOHandler):
"""Handles serialization of input and deserialization of output."""
def serialize_input(self, request: List[str]) -> List[Dict[str, Any]]:
return [{"inputs": text} for text in request]
def deserialize_output(self, response: Any) -> List[List[float]]:
return [prediction[0] for prediction in response.predictions]
|
IOHandler
|
python
|
scipy__scipy
|
benchmarks/benchmarks/stats_sampling.py
|
{
"start": 1886,
"end": 2526
}
|
class ____:
def __init__(self):
self.mode = 0
def pdf(self, x):
return 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))
def dpdf(self, x):
return 0.2 * 0.45 * (2*np.pi) * np.cos(2*np.pi*x)
def cdf(self, x):
return (0.05*(x + 1) +
0.9*(1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) /
(4.*np.pi))
def support(self):
return -1, 1
def __repr__(self):
return 'sin2'
# Sin 10 distribution
# / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 5
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
|
contdist4
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/queues.py
|
{
"start": 77250,
"end": 79613
}
|
class ____(Request):
"""
Moves a task entry one step forward towards the top of the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
:param count: Number of positions in the queue to move the task forward
relative to the current position. Optional, the default value is 1.
:type count: int
"""
_service = "queues"
_action = "move_task_forward"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"count": {
"description": "Number of positions in the queue to move the task forward relative to the current position. Optional, the default value is 1.",
"type": "integer",
},
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, count: Optional[int] = None, **kwargs: Any) -> None:
super(MoveTaskForwardRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
self.count = count
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("count")
def count(self) -> Optional[int]:
return self._property_count
@count.setter
def count(self, value: Optional[int]) -> None:
if value is None:
self._property_count = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "count", six.integer_types)
self._property_count = value
|
MoveTaskForwardRequest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximize-sum-of-squares-of-digits.py
|
{
"start": 38,
"end": 428
}
|
class ____(object):
def maxSumOfSquares(self, num, sum):
"""
:type num: int
:type sum: int
:rtype: str
"""
if num*9 < sum:
return ""
q, r = divmod(sum, 9)
result = ['0']*num
for i in xrange(q):
result[i] = '9'
if r:
result[q] = str(r)
return "".join(result)
|
Solution
|
python
|
facebook__pyre-check
|
tools/generate_taint_models/generator_specifications.py
|
{
"start": 1451,
"end": 1968
}
|
class ____(ParameterAnnotation):
def __init__(
self,
parameter_taint: str,
parameter_kind: str,
) -> None:
self.parameter_taint = parameter_taint
self.parameter_kind = parameter_kind
def get(self, parameter: "Parameter") -> Optional[str]:
sanitized_parameter_name = re.compile("[^a-zA-Z_0-9]").sub("", parameter.name)
return f"{self.parameter_kind}[{self.parameter_taint}[{sanitized_parameter_name}]]"
|
AllParametersAnnotationWithParameterNameAsSubKind
|
python
|
scrapy__scrapy
|
tests/test_selector.py
|
{
"start": 307,
"end": 3923
}
|
class ____:
def test_simple_selection(self):
"""Simple selector tests"""
body = b"<p><input name='a'value='1'/><input name='b'value='2'/></p>"
response = TextResponse(url="http://example.com", body=body, encoding="utf-8")
sel = Selector(response)
xl = sel.xpath("//input")
assert len(xl) == 2
for x in xl:
assert isinstance(x, Selector)
assert sel.xpath("//input").getall() == [x.get() for x in sel.xpath("//input")]
assert [x.get() for x in sel.xpath("//input[@name='a']/@name")] == ["a"]
assert [
x.get()
for x in sel.xpath(
"number(concat(//input[@name='a']/@value, //input[@name='b']/@value))"
)
] == ["12.0"]
assert sel.xpath("concat('xpath', 'rules')").getall() == ["xpathrules"]
assert [
x.get()
for x in sel.xpath(
"concat(//input[@name='a']/@value, //input[@name='b']/@value)"
)
] == ["12"]
def test_root_base_url(self):
body = b'<html><form action="/path"><input name="a" /></form></html>'
url = "http://example.com"
response = TextResponse(url=url, body=body, encoding="utf-8")
sel = Selector(response)
assert url == sel.root.base
def test_flavor_detection(self):
text = b'<div><img src="a.jpg"><p>Hello</div>'
sel = Selector(XmlResponse("http://example.com", body=text, encoding="utf-8"))
assert sel.type == "xml"
assert sel.xpath("//div").getall() == [
'<div><img src="a.jpg"><p>Hello</p></img></div>'
]
sel = Selector(HtmlResponse("http://example.com", body=text, encoding="utf-8"))
assert sel.type == "html"
assert sel.xpath("//div").getall() == [
'<div><img src="a.jpg"><p>Hello</p></div>'
]
def test_http_header_encoding_precedence(self):
# '\xa3' = pound symbol in unicode
# '\xc2\xa3' = pound symbol in utf-8
# '\xa3' = pound symbol in latin-1 (iso-8859-1)
meta = (
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">'
)
head = f"<head>{meta}</head>"
body_content = '<span id="blank">\xa3</span>'
body = f"<body>{body_content}</body>"
html = f"<html>{head}{body}</html>"
encoding = "utf-8"
html_utf8 = html.encode(encoding)
headers = {"Content-Type": ["text/html; charset=utf-8"]}
response = HtmlResponse(
url="http://example.com", headers=headers, body=html_utf8
)
x = Selector(response)
assert x.xpath("//span[@id='blank']/text()").getall() == ["\xa3"]
def test_badly_encoded_body(self):
# \xe9 alone isn't valid utf8 sequence
r1 = TextResponse(
"http://www.example.com",
body=b"<html><p>an Jos\xe9 de</p><html>",
encoding="utf-8",
)
Selector(r1).xpath("//text()").getall()
def test_weakref_slots(self):
"""Check that classes are using slots and are weak-referenceable"""
x = Selector(text="")
weakref.ref(x)
assert not hasattr(x, "__dict__"), (
f"{x.__class__.__name__} does not use __slots__"
)
def test_selector_bad_args(self):
with pytest.raises(ValueError, match="received both response and text"):
Selector(TextResponse(url="http://example.com", body=b""), text="")
@pytest.mark.skipif(not PARSEL_18_PLUS, reason="parsel < 1.8 doesn't support jmespath")
|
TestSelector
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-databricks/prefect_databricks/models/jobs.py
|
{
"start": 92029,
"end": 92814
}
|
class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
output_link: Optional[str] = Field(
None, description="The link to find the output results."
)
query_text: Optional[str] = Field(
None,
description=(
"The text of the SQL query. Can Run permission of the SQL query associated"
" with the SQL alert is required to view this field."
),
)
sql_statements: Optional[SqlStatementOutput] = Field(
None, description="Information about SQL statements executed in the run."
)
warehouse_id: Optional[str] = Field(
None, description="The canonical identifier of the SQL warehouse."
)
|
SqlAlertOutput
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-edge-weight-equilibrium-queries-in-a-tree.py
|
{
"start": 2773,
"end": 3901
}
|
class ____(object):
def minOperationsQueries(self, n, edges, queries):
"""
:type n: int
:type edges: List[List[int]]
:type queries: List[List[int]]
:rtype: List[int]
"""
adj = [[] for _ in xrange(n)]
for u, v, w in edges:
w -= 1
adj[u].append((v, w))
adj[v].append((u, w))
pairs = collections.defaultdict(set)
for a, b in queries:
pairs[a].add(b), pairs[b].add(a)
tree_infos = TreeInfos(adj, pairs)
result = [0]*len(queries)
for i, (a, b) in enumerate(queries):
lca = tree_infos.lca[min(a, b), max(a, b)]
result[i] = (tree_infos.D[a]+tree_infos.D[b]-2*tree_infos.D[lca])-max(tree_infos.CNT[a][w]+tree_infos.CNT[b][w]-2*tree_infos.CNT[lca][w] for w in xrange(MAX_W))
return result
# Time: O(r * (n + q) + nlogn + qlogn), r = max(w for _, _, w in edges)
# Space: O(r * n + nlogn)
import collections
from functools import partial
# Template:
# https://github.com/kamyu104/GoogleKickStart-2021/blob/main/Round%20H/dependent_events2.py
|
Solution
|
python
|
astropy__astropy
|
astropy/time/tests/test_custom_formats.py
|
{
"start": 260,
"end": 7496
}
|
class ____(ValueError):
pass
@pytest.fixture
def custom_format_name():
for i in count():
if not i:
custom = "custom_format_name"
else:
custom = f"custom_format_name_{i}"
if custom not in Time.FORMATS:
break
yield custom
Time.FORMATS.pop(custom, None)
def test_custom_time_format_set_jds_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
raise SpecificException
try:
Time(7.0, format=custom_format_name)
except ValueError as e:
assert hasattr(e, "__cause__") and isinstance(e.__cause__, SpecificException)
def test_custom_time_format_val_type_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def _check_val_type(self, val, val2):
raise SpecificException
try:
Time(7.0, format=custom_format_name)
except ValueError as e:
assert hasattr(e, "__cause__") and isinstance(e.__cause__, SpecificException)
def test_custom_time_format_value_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
raise SpecificException
t = Time.now()
with pytest.raises(SpecificException):
getattr(t, custom_format_name)
def test_custom_time_format_fine(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
return self.jd1 + self.jd2
t = Time.now()
getattr(t, custom_format_name)
t2 = Time(7, 9, format=custom_format_name)
getattr(t2, custom_format_name)
def test_custom_time_format_forgot_property(custom_format_name):
with pytest.raises(ValueError):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
def value(self):
return self.jd1, self.jd2
def test_custom_time_format_problematic_name():
assert "sort" not in Time.FORMATS, "problematic name in default FORMATS!"
assert hasattr(Time, "sort")
try:
class Custom(TimeFormat):
name = "sort"
_dtype = np.dtype([("jd1", "f8"), ("jd2", "f8")])
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
result = np.empty(self.jd1.shape, self._dtype)
result["jd1"] = self.jd1
result["jd2"] = self.jd2
return result
t = Time.now()
assert t.sort() == t, "bogus time format clobbers everyone's Time objects"
t.format = "sort"
assert t.value.dtype == Custom._dtype
t2 = Time(7, 9, format="sort")
assert t2.value == np.array((7, 9), Custom._dtype)
finally:
Time.FORMATS.pop("sort", None)
def test_mjd_longdouble_preserves_precision(custom_format_name):
class CustomMJD(TimeFormat):
name = custom_format_name
def _check_val_type(self, val, val2):
val = np.longdouble(val)
if val2 is not None:
raise ValueError("Only one value permitted")
return val, 0
def set_jds(self, val, val2):
mjd1 = np.float64(np.floor(val))
mjd2 = np.float64(val - mjd1)
self.jd1, self.jd2 = day_frac(mjd1 + DJM0, mjd2)
@property
def value(self):
mjd1, mjd2 = day_frac(self.jd1 - DJM0, self.jd2)
return np.longdouble(mjd1) + np.longdouble(mjd2)
m = 58000.0
t = Time(m, format=custom_format_name)
# Pick a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
m2 = np.longdouble(m) + max(
2.0 * m * np.finfo(np.longdouble).eps, np.finfo(float).eps
)
assert m2 != m, "long double is weird!"
t2 = Time(m2, format=custom_format_name)
assert t != t2
assert isinstance(getattr(t, custom_format_name), np.longdouble)
assert getattr(t, custom_format_name) != getattr(t2, custom_format_name)
@pytest.mark.parametrize(
"jd1, jd2",
[
("foo", None),
(np.arange(3), np.arange(4)),
("foo", "bar"),
(1j, 2j),
pytest.param(
np.longdouble(3),
np.longdouble(5),
marks=pytest.mark.skipif(
np.longdouble().itemsize == np.dtype(float).itemsize,
reason="long double == double on this platform",
),
),
({1: 2}, {3: 4}),
({1, 2}, {3, 4}),
([1, 2], [3, 4]),
(lambda: 4, lambda: 7),
],
)
def test_custom_format_cannot_make_bogus_jd1(custom_format_name, jd1, jd2):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = jd1, jd2
@property
def value(self):
return self.jd1 + self.jd2
with pytest.raises((ValueError, TypeError)):
Time(5, format=custom_format_name)
def test_custom_format_scalar_jd1_jd2_okay(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = 7.0, 3.0
@property
def value(self):
return self.jd1 + self.jd2
getattr(Time(5, format=custom_format_name), custom_format_name)
@pytest.mark.parametrize(
"thing",
[
1,
1.0,
np.longdouble(1),
1.0j,
"foo",
b"foo",
Time(5, format="mjd"),
lambda: 7,
np.datetime64("2005-02-25"),
date(2006, 2, 25),
],
)
def test_custom_format_can_return_any_scalar(custom_format_name, thing):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = 2.0, 0.0
@property
def value(self):
return np.array(thing)
assert type(
getattr(Time(5, format=custom_format_name), custom_format_name)
) == type(thing)
assert np.all(
getattr(Time(5, format=custom_format_name), custom_format_name) == thing
)
@pytest.mark.parametrize(
"thing",
[
(1, 2),
[1, 2],
np.array([2, 3]),
np.array([2, 3, 5, 7]),
{6: 7},
{1, 2},
],
)
def test_custom_format_can_return_any_iterable(custom_format_name, thing):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = 2.0, 0.0
@property
def value(self):
return thing
assert type(
getattr(Time(5, format=custom_format_name), custom_format_name)
) == type(thing)
assert np.all(
getattr(Time(5, format=custom_format_name), custom_format_name) == thing
)
|
SpecificException
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_color.py
|
{
"start": 14755,
"end": 14820
}
|
class ____(scale_color_brewer):
pass
@alias
|
scale_colour_brewer
|
python
|
facebookresearch__faiss
|
contrib/datasets.py
|
{
"start": 4470,
"end": 5418
}
|
class ____(Dataset):
"""
The original dataset is available at: http://corpus-texmex.irisa.fr/
(ANN_SIFT1M)
"""
def __init__(self):
Dataset.__init__(self)
self.d, self.nt, self.nb, self.nq = 128, 100000, 1000000, 10000
self.basedir = dataset_basedir + 'sift1M/'
def get_queries(self):
return fvecs_read(self.basedir + "sift_query.fvecs")
def get_train(self, maxtrain=None):
maxtrain = maxtrain if maxtrain is not None else self.nt
return fvecs_read(self.basedir + "sift_learn.fvecs")[:maxtrain]
def get_database(self):
return fvecs_read(self.basedir + "sift_base.fvecs")
def get_groundtruth(self, k=None):
gt = ivecs_read(self.basedir + "sift_groundtruth.ivecs")
if k is not None:
assert k <= 100
gt = gt[:, :k]
return gt
def sanitize(x):
return np.ascontiguousarray(x, dtype='float32')
|
DatasetSIFT1M
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-google-sheets/components.py
|
{
"start": 18766,
"end": 23337
}
|
class ____(DefaultErrorHandler):
"""
Custom error handler for handling 500 errors with grid data requests.
This handler extends the DefaultErrorHandler by adding special handling for 500 errors
when includeGridData=true. When a 500 error occurs, it immediately tests if the sheet can be
fetched without grid data. If successful, the sheet is skipped (IGNORE). If it still fails
(either with another non-200 status or a network/HTTP error), the error is retried using the
default backoff strategy (RETRY).
"""
def interpret_response(self, response_or_exception: Optional[Union[requests.Response, Exception]]) -> ErrorResolution:
"""
Interpret the response and determine the appropriate action.
Handles 500 errors when includeGridData=true by testing if the sheet can be fetched
without grid data to determine if the error is due to corrupt grid data (skip the sheet)
or a genuine server error (retry with backoff).
For all other responses/exceptions, delegates to the parent DefaultErrorHandler.
"""
# Only handle Response objects (not exceptions) with our custom logic
# For exceptions, delegate to parent immediately
if not isinstance(response_or_exception, requests.Response):
return super().interpret_response(response_or_exception)
response = response_or_exception
url = response.request.url
# Special handling for 500 errors with includeGridData=true
if response.status_code == 500 and "includeGridData=true" in url:
# Immediately test without grid data to determine if this is a corrupt grid data issue
# or a genuine server error
sheet_match = re.search(r"ranges=([^!&]+)", url)
sheet_name = sheet_match.group(1) if sheet_match else "unknown"
logger.info(f"500 error encountered for sheet '{sheet_name}' - testing without grid data...")
# Test the same request but without grid data
alt_url = url.replace("includeGridData=true", "includeGridData=false")
try:
# Copy headers from original request
headers = dict(response.request.headers)
# Make test request without grid data
alt_response = requests.get(alt_url, headers=headers, timeout=30)
# If the test succeeds (200 OK), the sheet exists but has bad grid data - skip it
if alt_response.status_code == 200:
logger.warning(
f"Sheet '{sheet_name}' has corrupt or incompatible grid data and will be skipped. "
f"This usually happens with sheets containing complex formatting or data types "
f"that the Google Sheets API cannot process with includeGridData=true."
)
return ErrorResolution(
response_action=ResponseAction.IGNORE,
failure_type=None,
error_message=f"Skipping sheet '{sheet_name}' due to corrupt grid data",
)
else:
# Test also failed - this is a genuine server error, retry with backoff
logger.info(
f"Sheet '{sheet_name}' test without grid data also failed with status {alt_response.status_code}. "
f"This appears to be a genuine server error. Retrying with backoff..."
)
return ErrorResolution(
response_action=ResponseAction.RETRY,
failure_type=FailureType.transient_error,
error_message="Internal server error encountered. Retrying with backoff.",
)
except requests.RequestException as e:
# If test request fails with a network/HTTP error, treat it as a transient server error - retry with backoff
logger.info(f"Test request for sheet '{sheet_name}' failed with network error: {e}. Retrying with backoff...")
return ErrorResolution(
response_action=ResponseAction.RETRY,
failure_type=FailureType.transient_error,
error_message=f"Internal server error encountered: {str(e)}. Retrying with backoff.",
)
# Return None to pass response to next handler in the composite chain (DefaultErrorHandler)
return None
|
GridDataErrorHandler
|
python
|
kamyu104__LeetCode-Solutions
|
Python/string-transformation.py
|
{
"start": 3071,
"end": 4519
}
|
class ____(object):
def numberOfWays(self, s, t, k):
"""
:type s: str
:type t: str
:type k: int
:rtype: int
"""
MOD = 10**9+7
def matrix_mult(A, B):
ZB = zip(*B)
return [[sum(a*b % MOD for a, b in itertools.izip(row, col)) % MOD for col in ZB] for row in A]
def matrix_expo(A, K):
result = [[int(i == j) for j in xrange(len(A))] for i in xrange(len(A))]
while K:
if K % 2:
result = matrix_mult(result, A)
A = matrix_mult(A, A)
K /= 2
return result
# Template: https://cp-algorithms.com/string/z-function.html
def z_function(s): # Time: O(n), Space: O(n)
z = [0]*len(s)
l, r = 0, 0
for i in xrange(1, len(z)):
if i <= r:
z[i] = min(r-i+1, z[i-l])
while i+z[i] < len(z) and s[z[i]] == s[i+z[i]]:
z[i] += 1
if i+z[i]-1 > r:
l, r = i, i+z[i]-1
return z
n = len(s)
T = [[0, 1],
[n-1, (n-1)-1]]
dp = [1, 0]
dp = matrix_mult([dp], matrix_expo(T, k))[0] # [dp[0], dp[1]] * T^k
z = z_function(t+s+s[:-1])
return reduce(lambda a, b: (a+b)%MOD, (dp[int(i != 0)] for i in xrange(n) if z[i+len(t)] >= len(t)), 0)
|
Solution3
|
python
|
spack__spack
|
lib/spack/spack/test/error_messages.py
|
{
"start": 2059,
"end": 2315
}
|
class ____(Package):
version("2.1")
version("2.0")
variant("v2", default=True, when="@2.1:")
""",
)
# Cluster of packages that includes requirements - goal is to "chain"
# the requirements like other constraints.
_pkgw4 = (
"w4",
"""\
|
Z3
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_pkcs7.py
|
{
"start": 54868,
"end": 55128
}
|
class ____:
def test_pkcs7_decrypt_unsupported(self, backend):
cert, key = _load_rsa_cert_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
pkcs7.pkcs7_decrypt_der(b"", cert, key, [])
|
TestPKCS7DecryptUnsupported
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/hashes.py
|
{
"start": 4725,
"end": 5100
}
|
class ____(HashAlgorithm):
name = "blake2s"
block_size = 64
_max_digest_size = 32
_min_digest_size = 1
def __init__(self, digest_size: int):
if digest_size != 32:
raise ValueError("Digest size must be 32")
self._digest_size = digest_size
@property
def digest_size(self) -> int:
return self._digest_size
|
BLAKE2s
|
python
|
automl__auto-sklearn
|
autosklearn/evaluation/test_evaluator.py
|
{
"start": 558,
"end": 4640
}
|
class ____(AbstractEvaluator):
def __init__(
self,
backend: Backend,
queue: multiprocessing.Queue,
metrics: Sequence[Scorer],
additional_components: Dict[str, ThirdPartyComponents],
port: Optional[int],
configuration: Optional[Union[int, Configuration]] = None,
scoring_functions: Optional[List[Scorer]] = None,
seed: int = 1,
include: Optional[List[str]] = None,
exclude: Optional[List[str]] = None,
disable_file_output: bool = False,
init_params: Optional[Dict[str, Any]] = None,
):
super(TestEvaluator, self).__init__(
backend=backend,
queue=queue,
port=port,
configuration=configuration,
metrics=metrics,
additional_components=additional_components,
scoring_functions=scoring_functions,
seed=seed,
output_y_hat_optimization=False,
num_run=-1,
include=include,
exclude=exclude,
disable_file_output=disable_file_output,
init_params=init_params,
)
self.configuration = configuration
self.X_train = self.datamanager.data["X_train"]
self.Y_train = self.datamanager.data["Y_train"]
self.X_test = self.datamanager.data.get("X_test")
self.Y_test = self.datamanager.data.get("Y_test")
self.model = self._get_model(self.feat_type)
def fit_predict_and_loss(self) -> None:
_fit_and_suppress_warnings(self.logger, self.model, self.X_train, self.Y_train)
loss, Y_pred, _, _ = self.predict_and_loss()
self.finish_up(
loss=loss,
train_loss=None,
opt_pred=Y_pred,
test_pred=None,
file_output=False,
final_call=True,
additional_run_info=None,
status=StatusType.SUCCESS,
)
def predict_and_loss(
self, train: bool = False
) -> Tuple[Union[Dict[str, float], float], np.array, Any, Any]:
if train:
Y_pred = self.predict_function(
self.X_train, self.model, self.task_type, self.Y_train
)
err = calculate_losses(
solution=self.Y_train,
prediction=Y_pred,
task_type=self.task_type,
metrics=self.metrics,
scoring_functions=self.scoring_functions,
)
else:
Y_pred = self.predict_function(
self.X_test, self.model, self.task_type, self.Y_train
)
err = calculate_losses(
solution=self.Y_test,
prediction=Y_pred,
task_type=self.task_type,
metrics=self.metrics,
scoring_functions=self.scoring_functions,
)
return err, Y_pred, None, None
# create closure for evaluating an algorithm
# Has a stupid name so pytest doesn't regard it as a test
def eval_t(
queue: multiprocessing.Queue,
config: Union[int, Configuration],
backend: Backend,
metrics: Sequence[Scorer],
seed: int,
num_run: int,
instance: Dict[str, Any],
scoring_functions: Optional[List[Scorer]],
output_y_hat_optimization: bool,
include: Optional[List[str]],
exclude: Optional[List[str]],
disable_file_output: bool,
port: Optional[int],
additional_components: Dict[str, ThirdPartyComponents],
init_params: Optional[Dict[str, Any]] = None,
budget: Optional[float] = None,
budget_type: Optional[str] = None,
) -> None:
evaluator = TestEvaluator(
configuration=config,
backend=backend,
metrics=metrics,
seed=seed,
port=port,
queue=queue,
scoring_functions=scoring_functions,
include=include,
exclude=exclude,
disable_file_output=disable_file_output,
additional_components=additional_components,
init_params=init_params,
)
evaluator.fit_predict_and_loss()
|
TestEvaluator
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/dates.py
|
{
"start": 42120,
"end": 52317
}
|
class ____(DateLocator):
"""
On autoscale, this class picks the best `DateLocator` to set the view
limits and the tick locations.
Attributes
----------
intervald : dict
Mapping of tick frequencies to multiples allowed for that ticking.
The default is ::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14, 21],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500,
1000, 2000, 5000, 10000, 20000, 50000,
100000, 200000, 500000, 1000000],
}
where the keys are defined in `dateutil.rrule`.
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
When customizing, you should only modify the values for the existing
keys. You should not add or delete entries.
Example for forcing ticks every 3 hours::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=True):
"""
Parameters
----------
tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
minticks : int
The minimum number of ticks desired; controls whether ticks occur
yearly, monthly, etc.
maxticks : int
The maximum number of ticks desired; controls the interval between
ticks (ticking every other, every 3, etc.). For fine-grained
control, this can be a dictionary mapping individual rrule
frequency constants (YEARLY, MONTHLY, etc.) to their own maximum
number of ticks. This can be used to keep the number of ticks
appropriate to the format chosen in `AutoDateFormatter`. Any
frequency not specified in this dictionary is given a default
value.
interval_multiples : bool, default: True
Whether ticks should be chosen to be multiple of the interval,
locking them to 'nicer' locations. For example, this will force
the ticks to be at hours 0, 6, 12, 18 when hourly ticking is done
at 6 hour intervals.
"""
super().__init__(tz=tz)
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict.fromkeys(self._freqs, maxticks)
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
if interval_multiples:
# Swap "3" for "4" in the DAILY list; If we use 3 we get bad
# tick loc for months w/ 31 days: 1, 4, ..., 28, 31, 1
# If we use 4 then we get: 1, 5, ... 25, 29, 1
self.intervald[DAILY] = [1, 2, 4, 7, 14]
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
# docstring inherited
dmin, dmax = self.viewlim_to_dt()
locator = self.get_locator(dmin, dmax)
return locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if not np.isfinite(vmin) or not np.isfinite(vmax):
# Except if there is no data, then use 1970 as default.
return (date2num(datetime.date(1970, 1, 1)),
date2num(datetime.date(1970, 1, 2)))
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def get_locator(self, dmin, dmax):
"""Pick the best locator based on a distance."""
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = numYears * MONTHS_PER_YEAR + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year.
numHours = numDays * HOURS_PER_DAY + delta.hours
numMinutes = numHours * MIN_PER_HOUR + delta.minutes
numSeconds = np.floor(tdelta.total_seconds())
numMicroseconds = np.floor(tdelta.total_seconds() * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
if not (self.interval_multiples and freq == DAILY):
_api.warn_external(
f"AutoDateLocator was unable to pick an appropriate "
f"interval for this date range. It may be necessary "
f"to add an interval value to the AutoDateLocator's "
f"intervald dictionary. Defaulting to {interval}.")
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
if i in (DAILY, WEEKLY):
if interval == 14:
# just make first and 15th. Avoids 30th.
byranges[i] = [1, 15]
elif interval == 7:
byranges[i] = [1, 8, 15, 22]
interval = 1
else:
byranges[i] = self._byranges[i]
break
else:
interval = 1
if (freq == YEARLY) and self.interval_multiples:
locator = YearLocator(interval, tz=self.tz)
elif use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, tz=self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
if date2num(dmin) > 70 * 365 and interval < 1000:
_api.warn_external(
'Plotting microsecond time intervals for dates far from '
f'the epoch (time origin: {get_epoch()}) is not well-'
'supported. See matplotlib.dates.set_epoch to change the '
'epoch.')
locator.set_axis(self.axis)
return locator
|
AutoDateLocator
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_index.py
|
{
"start": 920,
"end": 973
}
|
class ____:
def __index__(self):
...
|
Index3
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/registry_team_members.py
|
{
"start": 265,
"end": 355
}
|
class ____(GQLResult):
project: Optional[RegistryTeamMembersProject]
|
RegistryTeamMembers
|
python
|
django__django
|
tests/postgres_tests/array_default_migrations/0001_initial.py
|
{
"start": 81,
"end": 799
}
|
class ____(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="IntegerArrayDefaultModel",
fields=[
(
"id",
models.BigAutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"field",
django.contrib.postgres.fields.ArrayField(models.IntegerField()),
),
],
options={},
bases=(models.Model,),
),
]
|
Migration
|
python
|
plotly__plotly.py
|
_plotly_utils/basevalidators.py
|
{
"start": 53076,
"end": 55276
}
|
class ____(BaseValidator):
"""
"subplotid": {
"description": "An id string of a subplot type (given by dflt),
optionally followed by an integer >1. e.g. if
dflt='geo', we can have 'geo', 'geo2', 'geo3',
...",
"requiredOpts": [
"dflt"
],
"otherOpts": [
"regex"
]
}
"""
def __init__(self, plotly_name, parent_name, dflt=None, regex=None, **kwargs):
if dflt is None and regex is None:
raise ValueError("One or both of regex and deflt must be specified")
super(SubplotidValidator, self).__init__(
plotly_name=plotly_name, parent_name=parent_name, **kwargs
)
if dflt is not None:
self.base = dflt
else:
# e.g. regex == '/^y([2-9]|[1-9][0-9]+)?$/'
self.base = re.match(r"/\^(\w+)", regex).group(1)
self.regex = self.base + r"(\d*)"
def description(self):
desc = """\
The '{plotly_name}' property is an identifier of a particular
subplot, of type '{base}', that may be specified as the string '{base}'
optionally followed by an integer >= 1
(e.g. '{base}', '{base}1', '{base}2', '{base}3', etc.)
""".format(plotly_name=self.plotly_name, base=self.base)
return desc
def validate_coerce(self, v):
if v is None:
pass
elif not isinstance(v, str):
self.raise_invalid_val(v)
else:
# match = re.fullmatch(self.regex, v)
match = fullmatch(self.regex, v)
if not match:
is_valid = False
else:
digit_str = match.group(1)
if len(digit_str) > 0 and int(digit_str) == 0:
is_valid = False
elif len(digit_str) > 0 and int(digit_str) == 1:
# Remove 1 suffix (e.g. x1 -> x)
v = self.base
is_valid = True
else:
is_valid = True
if not is_valid:
self.raise_invalid_val(v)
return v
|
SubplotidValidator
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1003519,
"end": 1004148
}
|
class ____(sgqlc.types.Type):
"""Represents a single highlight in a search result match."""
__schema__ = github_schema
__field_names__ = ("begin_indice", "end_indice", "text")
begin_indice = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="beginIndice")
"""The indice in the fragment where the matched text begins."""
end_indice = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="endIndice")
"""The indice in the fragment where the matched text ends."""
text = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="text")
"""The text matched."""
|
TextMatchHighlight
|
python
|
huggingface__transformers
|
src/transformers/models/bart/modeling_bart.py
|
{
"start": 10612,
"end": 13619
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: BartConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BartAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
layer_idx=layer_idx,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
BartEncoderLayer
|
python
|
pandas-dev__pandas
|
pandas/io/formats/format.py
|
{
"start": 51028,
"end": 55458
}
|
class ____(_GenericArrayFormatter):
values: ExtensionArray
def _format_strings(self) -> list[str]:
values = self.values
formatter = self.formatter
fallback_formatter = None
if formatter is None:
fallback_formatter = values._formatter(boxed=True)
if isinstance(values, Categorical):
# Categorical is special for now, so that we can preserve tzinfo
array = values._internal_get_values()
else:
array = np.asarray(values, dtype=object)
fmt_values = format_array(
array,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
digits=self.digits,
space=self.space,
justify=self.justify,
decimal=self.decimal,
leading_space=self.leading_space,
quoting=self.quoting,
fallback_formatter=fallback_formatter,
)
return fmt_values
def format_percentiles(
percentiles: np.ndarray | Sequence[float],
) -> list[str]:
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
if len(percentiles) == 0:
return []
percentiles = np.asarray(percentiles)
# It checks for np.nan as well
if (
not is_numeric_dtype(percentiles)
or not np.all(percentiles >= 0)
or not np.all(percentiles <= 1)
):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
prec = get_precision(percentiles)
percentiles_round_type = percentiles.round(prec).astype(int)
int_idx = np.isclose(percentiles_round_type, percentiles)
if np.all(int_idx):
out = percentiles_round_type.astype(str)
return [i + "%" for i in out]
unique_pcts = np.unique(percentiles)
prec = get_precision(unique_pcts)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].round().astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + "%" for i in out]
def get_precision(array: np.ndarray | Sequence[float]) -> int:
to_begin = array[0] if array[0] > 0 else None
to_end = 100 - array[-1] if array[-1] < 100 else None
diff = np.ediff1d(array, to_begin=to_begin, to_end=to_end)
diff = abs(diff)
prec = -np.floor(np.log10(np.min(diff))).astype(int)
prec = max(1, prec)
return prec
def _format_datetime64(x: NaTType | Timestamp, nat_rep: str = "NaT") -> str:
if x is NaT:
return nat_rep
# Timestamp.__str__ falls back to datetime.datetime.__str__ = isoformat(sep=' ')
# so it already uses string formatting rather than strftime (faster).
return str(x)
def _format_datetime64_dateonly(
x: NaTType | Timestamp,
nat_rep: str = "NaT",
date_format: str | None = None,
) -> str:
if isinstance(x, NaTType):
return nat_rep
if date_format:
return x.strftime(date_format)
else:
# Timestamp._date_repr relies on string formatting (faster than strftime)
return x._date_repr
def get_format_datetime64(
is_dates_only: bool, nat_rep: str = "NaT", date_format: str | None = None
) -> Callable:
"""Return a formatter callable taking a datetime64 as input and providing
a string as output"""
if is_dates_only:
return lambda x: _format_datetime64_dateonly(
x, nat_rep=nat_rep, date_format=date_format
)
else:
return lambda x: _format_datetime64(x, nat_rep=nat_rep)
|
_ExtensionArrayFormatter
|
python
|
pytorch__pytorch
|
torch/_vendor/packaging/version.py
|
{
"start": 1727,
"end": 4491
}
|
class ____:
_key: Tuple[Any, ...]
def __hash__(self) -> int:
return hash(self._key)
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key <= other._key
def __eq__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key == other._key
def __ge__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key > other._key
def __ne__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key != other._key
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
_VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.
The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.
:meta hide-value:
"""
|
_BaseVersion
|
python
|
gevent__gevent
|
src/greentest/3.10/test_socket.py
|
{
"start": 163434,
"end": 163991
}
|
class ____(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
|
SendrecvmsgUDP6TestBase
|
python
|
django__django
|
tests/composite_pk/models/tenant.py
|
{
"start": 705,
"end": 742
}
|
class ____(AbstractUser):
pass
|
User
|
python
|
python-markdown__markdown
|
markdown/extensions/toc.py
|
{
"start": 15510,
"end": 18330
}
|
class ____(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, **kwargs):
self.config = {
'marker': [
'[TOC]',
'Text to find and replace with Table of Contents. Set to an empty string to disable. '
'Default: `[TOC]`.'
],
'title': [
'', 'Title to insert into TOC `<div>`. Default: an empty string.'
],
'title_class': [
'toctitle', 'CSS class used for the title. Default: `toctitle`.'
],
'toc_class': [
'toc', 'CSS class(es) used for the link. Default: `toclink`.'
],
'anchorlink': [
False, 'True if header should be a self link. Default: `False`.'
],
'anchorlink_class': [
'toclink', 'CSS class(es) used for the link. Defaults: `toclink`.'
],
'permalink': [
0, 'True or link text if a Sphinx-style permalink should be added. Default: `False`.'
],
'permalink_class': [
'headerlink', 'CSS class(es) used for the link. Default: `headerlink`.'
],
'permalink_title': [
'Permanent link', 'Title attribute of the permalink. Default: `Permanent link`.'
],
'permalink_leading': [
False,
'True if permalinks should be placed at start of the header, rather than end. Default: False.'
],
'baselevel': ['1', 'Base level for headers. Default: `1`.'],
'slugify': [
slugify, 'Function to generate anchors based on header text. Default: `slugify`.'
],
'separator': ['-', 'Word separator. Default: `-`.'],
'toc_depth': [
6,
'Define the range of section levels to include in the Table of Contents. A single integer '
'(b) defines the bottom section level (<h1>..<hb>) only. A string consisting of two digits '
'separated by a hyphen in between (`2-5`) defines the top (t) and the bottom (b) (<ht>..<hb>). '
'Default: `6` (bottom).'
],
}
""" Default configuration options. """
super().__init__(**kwargs)
def extendMarkdown(self, md):
""" Add TOC tree processor to Markdown. """
md.registerExtension(self)
self.md = md
self.reset()
tocext = self.TreeProcessorClass(md, self.getConfigs())
md.treeprocessors.register(tocext, 'toc', 5)
def reset(self) -> None:
self.md.toc = ''
self.md.toc_tokens = []
def makeExtension(**kwargs): # pragma: no cover
return TocExtension(**kwargs)
|
TocExtension
|
python
|
Textualize__textual
|
docs/examples/guide/layout/grid_layout3_row_col_adjust.py
|
{
"start": 80,
"end": 536
}
|
class ____(App):
CSS_PATH = "grid_layout3_row_col_adjust.tcss"
def compose(self) -> ComposeResult:
yield Static("One", classes="box")
yield Static("Two", classes="box")
yield Static("Three", classes="box")
yield Static("Four", classes="box")
yield Static("Five", classes="box")
yield Static("Six", classes="box")
if __name__ == "__main__":
app = GridLayoutExample()
app.run()
|
GridLayoutExample
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/user_social_auth.py
|
{
"start": 273,
"end": 520
}
|
class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
return {
"id": str(obj.id),
"provider": obj.provider,
"providerLabel": get_provider_label(obj),
}
|
UserSocialAuthSerializer
|
python
|
mwaskom__seaborn
|
seaborn/matrix.py
|
{
"start": 2739,
"end": 17203
}
|
class ____:
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
# Validate the mask and convert to DataFrame
mask = _matrix_mask(data, mask)
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
# Get good names for the rows and columns
xtickevery = 1
if isinstance(xticklabels, int):
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is True:
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is False:
xticklabels = []
ytickevery = 1
if isinstance(yticklabels, int):
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is True:
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is False:
yticklabels = []
if not len(xticklabels):
self.xticks = []
self.xticklabels = []
elif isinstance(xticklabels, str) and xticklabels == "auto":
self.xticks = "auto"
self.xticklabels = _index_to_ticklabels(data.columns)
else:
self.xticks, self.xticklabels = self._skip_ticks(xticklabels,
xtickevery)
if not len(yticklabels):
self.yticks = []
self.yticklabels = []
elif isinstance(yticklabels, str) and yticklabels == "auto":
self.yticks = "auto"
self.yticklabels = _index_to_ticklabels(data.index)
else:
self.yticks, self.yticklabels = self._skip_ticks(yticklabels,
ytickevery)
# Get good names for the axis labels
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Sort out the annotations
if annot is None or annot is False:
annot = False
annot_data = None
else:
if isinstance(annot, bool):
annot_data = plot_data
else:
annot_data = np.asarray(annot)
if annot_data.shape != plot_data.shape:
err = "`data` and `annot` must have same shape."
raise ValueError(err)
annot = True
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.annot_data = annot_data
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws.copy()
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws.copy()
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
# plot_data is a np.ma.array instance
calc_data = plot_data.astype(float).filled(np.nan)
if vmin is None:
if robust:
vmin = np.nanpercentile(calc_data, 2)
else:
vmin = np.nanmin(calc_data)
if vmax is None:
if robust:
vmax = np.nanpercentile(calc_data, 98)
else:
vmax = np.nanmax(calc_data)
self.vmin, self.vmax = vmin, vmax
# Choose default colormaps if not provided
if cmap is None:
if center is None:
self.cmap = cm.rocket
else:
self.cmap = cm.icefire
elif isinstance(cmap, str):
self.cmap = get_colormap(cmap)
elif isinstance(cmap, list):
self.cmap = mpl.colors.ListedColormap(cmap)
else:
self.cmap = cmap
# Recenter a divergent colormap
if center is not None:
# Copy bad values
# in mpl<3.2 only masked values are honored with "bad" color spec
# (see https://github.com/matplotlib/matplotlib/pull/14257)
bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]
# under/over values are set for sure when cmap extremes
# do not map to the same color as +-inf
under = self.cmap(-np.inf)
over = self.cmap(np.inf)
under_set = under != self.cmap(0)
over_set = over != self.cmap(self.cmap.N - 1)
vrange = max(vmax - center, center - vmin)
normlize = mpl.colors.Normalize(center - vrange, center + vrange)
cmin, cmax = normlize([vmin, vmax])
cc = np.linspace(cmin, cmax, 256)
self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
self.cmap.set_bad(bad)
if under_set:
self.cmap.set_under(under)
if over_set:
self.cmap.set_over(over)
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
mesh.update_scalarmappable()
height, width = self.annot_data.shape
xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)
for x, y, m, color, val in zip(xpos.flat, ypos.flat,
mesh.get_array().flat, mesh.get_facecolors(),
self.annot_data.flat):
if m is not np.ma.masked:
lum = relative_luminance(color)
text_color = ".15" if lum > .408 else "w"
annotation = ("{:" + self.fmt + "}").format(val)
text_kwargs = dict(color=text_color, ha="center", va="center")
text_kwargs.update(self.annot_kws)
ax.text(x, y, annotation, **text_kwargs)
def _skip_ticks(self, labels, tickevery):
"""Return ticks and labels at evenly spaced intervals."""
n = len(labels)
if tickevery == 0:
ticks, labels = [], []
elif tickevery == 1:
ticks, labels = np.arange(n) + .5, labels
else:
start, end, step = 0, n, tickevery
ticks = np.arange(start, end, step) + .5
labels = labels[start:end:step]
return ticks, labels
def _auto_ticks(self, ax, labels, axis):
"""Determine ticks and ticklabels that minimize overlap."""
transform = ax.figure.dpi_scale_trans.inverted()
bbox = ax.get_window_extent().transformed(transform)
size = [bbox.width, bbox.height][axis]
axis = [ax.xaxis, ax.yaxis][axis]
tick, = axis.set_ticks([0])
fontsize = tick.label1.get_size()
max_ticks = int(size // (fontsize / 72))
if max_ticks < 1:
return [], []
tick_every = len(labels) // max_ticks + 1
tick_every = 1 if tick_every == 0 else tick_every
ticks, labels = self._skip_ticks(labels, tick_every)
return ticks, labels
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# setting vmin/vmax in addition to norm is deprecated
# so avoid setting if norm is set
if kws.get("norm") is None:
kws.setdefault("vmin", self.vmin)
kws.setdefault("vmax", self.vmax)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Invert the y axis to show the plot in matrix form
ax.invert_yaxis()
# Possibly add a colorbar
if self.cbar:
cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)
cb.outline.set_linewidth(0)
# If rasterized is passed to pcolormesh, also rasterize the
# colorbar to avoid white lines on the PDF rendering
if kws.get('rasterized', False):
cb.solids.set_rasterized(True)
# Add row and column labels
if isinstance(self.xticks, str) and self.xticks == "auto":
xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)
else:
xticks, xticklabels = self.xticks, self.xticklabels
if isinstance(self.yticks, str) and self.yticks == "auto":
yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)
else:
yticks, yticklabels = self.yticks, self.yticklabels
ax.set(xticks=xticks, yticks=yticks)
xtl = ax.set_xticklabels(xticklabels)
ytl = ax.set_yticklabels(yticklabels, rotation="vertical")
plt.setp(ytl, va="center") # GH2484
# Possibly rotate them if they overlap
_draw_figure(ax.figure)
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
def heatmap(
data, *,
vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=None, fmt=".2g", annot_kws=None,
linewidths=0, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, xticklabels="auto", yticklabels="auto",
mask=None, ax=None,
**kwargs
):
"""Plot rectangular data as a color-encoded matrix.
This is an Axes-level function and will draw the heatmap into the
currently-active Axes if none is provided to the ``ax`` argument. Part of
this Axes space will be taken and used to plot a colormap, unless ``cbar``
is False or a separate Axes is provided to ``cbar_ax``.
Parameters
----------
data : rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments.
cmap : matplotlib colormap name or object, or list of colors, optional
The mapping from data values to color space. If not provided, the
default will depend on whether ``center`` is set.
center : float, optional
The value at which to center the colormap when plotting divergent data.
Using this parameter will change the default ``cmap`` if none is
specified.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with robust quantiles instead of the extreme values.
annot : bool or rectangular dataset, optional
If True, write the data value in each cell. If an array-like with the
same shape as ``data``, then use this to annotate the heatmap instead
of the data. Note that DataFrames will match on position, not index.
fmt : str, optional
String formatting code to use when adding annotations.
annot_kws : dict of key, value mappings, optional
Keyword arguments for :meth:`matplotlib.axes.Axes.text` when ``annot``
is True.
linewidths : float, optional
Width of the lines that will divide each cell.
linecolor : color, optional
Color of the lines that will divide each cell.
cbar : bool, optional
Whether to draw a colorbar.
cbar_kws : dict of key, value mappings, optional
Keyword arguments for :meth:`matplotlib.figure.Figure.colorbar`.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar, otherwise take space from the
main Axes.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
xticklabels, yticklabels : "auto", bool, list-like, or int, optional
If True, plot the column names of the dataframe. If False, don't plot
the column names. If list-like, plot these alternate labels as the
xticklabels. If an integer, use the column names but plot only every
n label. If "auto", try to densely plot non-overlapping labels.
mask : bool array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
kwargs : other keyword arguments
All other keyword arguments are passed to
:meth:`matplotlib.axes.Axes.pcolormesh`.
Returns
-------
ax : matplotlib Axes
Axes object with the heatmap.
See Also
--------
clustermap : Plot a matrix using hierarchical clustering to arrange the
rows and columns.
Examples
--------
.. include:: ../docstrings/heatmap.rst
"""
# Initialize the plotter object
plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws, xticklabels,
yticklabels, mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
|
_HeatMapper
|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-ibm/tests/test_ibm.py
|
{
"start": 141,
"end": 3537
}
|
class ____:
TEST_URL = "https://us-south.ml.cloud.ibm.com"
TEST_APIKEY = "apikey"
TEST_PROJECT_ID = "project_id"
TEST_MODEL = "test_model"
def mock_embed_query(self) -> List[float]:
return [-0.053358648, -0.009175377, -0.025022397]
def mock_embed_texts(self) -> List[List[float]]:
return [
[-0.053358648, -0.009175377, -0.025022397],
[-0.053358648, -0.009175377, -0.025022397],
]
def test_initialization(self) -> None:
with pytest.raises(ValueError, match=r"^Did not find"):
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL, project_id=self.TEST_PROJECT_ID
)
# Cloud scenario
with pytest.raises(ValueError, match=r"^Did not find 'apikey' or 'token',"):
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
project_id=self.TEST_PROJECT_ID,
)
# CPD scenario with password and missing username
with pytest.raises(ValueError, match=r"^Did not find username"):
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
password="123",
url="cpd-instance",
project_id=self.TEST_PROJECT_ID,
)
# CPD scenario with apikey and missing username
with pytest.raises(ValueError, match=r"^Did not find username"):
_ = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
apikey="123",
url="cpd-instance",
project_id=self.TEST_PROJECT_ID,
)
@patch("llama_index.embeddings.ibm.base.Embeddings")
def test_get_query_embedding(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_query.return_value = self.mock_embed_query()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
assert embed.get_query_embedding(query="TEST") == self.mock_embed_query()
@patch("llama_index.embeddings.ibm.base.Embeddings")
def test_get_texts_embedding(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_documents.return_value = self.mock_embed_texts()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
assert (
embed.get_text_embedding_batch(texts=["TEST1", "TEST2"])
== self.mock_embed_texts()
)
@pytest.mark.asyncio
@patch("llama_index.embeddings.ibm.base.Embeddings")
async def test_get_query_embedding_async(self, MockEmbedding: MagicMock) -> None:
mock_instance = MockEmbedding.return_value
mock_instance.embed_query.return_value = self.mock_embed_query()
embed = WatsonxEmbeddings(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
apikey=self.TEST_APIKEY,
project_id=self.TEST_PROJECT_ID,
)
response = await embed.aget_text_embedding("TEST1")
assert response == self.mock_embed_query()
|
TestWasonxLLMInference
|
python
|
python__mypy
|
mypy/patterns.py
|
{
"start": 2457,
"end": 2857
}
|
class ____(Pattern):
# None corresponds to *_ in a list pattern. It will match multiple items but won't bind them to
# a name.
capture: NameExpr | None
def __init__(self, capture: NameExpr | None) -> None:
super().__init__()
self.capture = capture
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_starred_pattern(self)
|
StarredPattern
|
python
|
google__pytype
|
pytype/tests/test_paramspec.py
|
{
"start": 7468,
"end": 15560
}
|
class ____(test_base.BaseTest):
"""Tests for ParamSpec imported from pyi files."""
def test_decorator(self):
with self.DepTree([("foo.pyi", _DECORATOR_PYI)]):
ty, _ = self.InferWithErrors("""
import foo
class A:
pass
@foo.decorator
def h(a: A, b: str) -> int:
return 10
p = h(A(), b='2')
q = h(1, 2) # wrong-arg-types
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List, Any
p: List[int]
q: Any
class A: ...
def h(a: A, b: str) -> List[int]: ...
""",
)
def test_method_decoration(self):
with self.DepTree([("foo.pyi", _DECORATOR_PYI)]):
ty = self.Infer("""
import foo
class A:
pass
class B:
@foo.decorator
def h(a: 'B', b: str) -> int:
return 10
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List, Any
class A: ...
class B:
def h(a: B, b: str) -> List[int]: ...
""",
)
def test_multiple_decorators(self):
"""Check that we don't cache the decorator type params."""
with self.DepTree([("foo.pyi", _DECORATOR_PYI)]):
self.Check("""
import foo
@foo.decorator
def f(x) -> str:
return "a"
@foo.decorator
def g() -> int:
return 42
def h() -> list[str]:
return f(10)
def k() -> list[int]:
return g()
""")
def test_imported_paramspec(self):
with self.DepTree([("foo.pyi", _DECORATOR_PYI)]):
ty, _ = self.InferWithErrors("""
from foo import decorator
class A:
pass
@decorator
def h(a: A, b: str) -> int:
return 10
p = h(A(), b='2')
q = h(1, 2) # wrong-arg-types
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, List, ParamSpec, TypeVar, Any
p: List[int]
q: Any
P = ParamSpec('P')
T = TypeVar('T')
class A: ...
def decorator(fn: Callable[P, T]) -> Callable[P, List[T]]: ...
def h(a: A, b: str) -> List[int]: ...
""",
)
def test_concatenate(self):
# TODO(b/217789659):
# - Should change_arg preserve the name of the posarg?
# - Should paramspecs in error messages be changed to ...?
with self.DepTree([(
"foo.pyi",
"""
from typing import TypeVar, ParamSpec, Concatenate, Callable
T = TypeVar("T")
P = ParamSpec("P")
def change_arg(fn: Callable[Concatenate[int, P], T]) -> Callable[Concatenate[str, P], T]: ...
def drop_arg(fn: Callable[Concatenate[int, P], T]) -> Callable[P, T]: ...
def add_arg(fn: Callable[P, T]) -> Callable[Concatenate[int, P], T]: ...
def mismatched(fn: Callable[Concatenate[str, P], T]) -> Callable[Concatenate[str, P], T]: ...
""",
)]):
ty, err = self.InferWithErrors("""
import foo
@foo.change_arg
def f(a: int, b: str) -> int:
return 10
@foo.drop_arg
def g(a: int, b: str) -> int:
return 10
@foo.add_arg
def h(a: int, b: str) -> int:
return 10
@foo.mismatched # wrong-arg-types[e]>=3.11
def k(a: int, b: str) -> int: # wrong-arg-types[e]<3.11
return 10
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
k: Any
def f(_0: str, b: str) -> int: ...
def g(b: str) -> int: ...
def h(_0: int, /, a: int, b: str) -> int: ...
""",
)
self.assertErrorSequences(
err,
{
"e": [
"Expected",
"fn: Callable[Concatenate[str, P], Any]",
"Actual",
"fn: Callable[[int, str], int]",
]
},
)
def test_overloaded_argument(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import TypeVar, ParamSpec, Callable, List
T = TypeVar("T")
P = ParamSpec("P")
def decorator(fn: Callable[P, T]) -> Callable[P, List[T]]: ...
@overload
def f(x: str) -> int: ...
@overload
def f(x: str, *, y: int = 0) -> int: ...
""",
)]):
ty = self.Infer("""
import foo
f = foo.decorator(foo.f)
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List, overload
@overload
def f(x: str) -> List[int]: ...
@overload
def f(x: str, *, y: int = ...) -> List[int]: ...
""",
)
def test_starargs(self):
with self.DepTree([("foo.pyi", _DECORATOR_PYI)]):
ty = self.Infer("""
import foo
class A:
pass
class B:
@foo.decorator
def h(a: 'B', b: str, *args, **kwargs) -> int:
return 10
@foo.decorator
def s(*args) -> int:
return 10
@foo.decorator
def k(**kwargs) -> int:
return 10
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List, Any
class A: ...
class B:
def h(a: B, b: str, *args, **kwargs) -> List[int]: ...
def s(*args) -> List[int]: ...
def k(**kwargs) -> List[int]: ...
""",
)
def test_callable(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import TypeVar, ParamSpec, Concatenate, Callable
T = TypeVar("T")
P = ParamSpec("P")
def add_arg(fn: Callable[P, T]) -> Callable[Concatenate[int, P], T]: ...
""",
)]):
self.Check("""
import foo
from typing import Callable, List
def f(method: Callable[[int, str], bool]):
a = foo.add_arg(method)
b = a(1, 2, '3')
assert_type(b, bool)
""")
def test_match_callable(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Any, Callable, ParamSpec
P = ParamSpec('P')
def f(x: Callable[P, Any]) -> Callable[P, Any]: ...
""",
)]):
self.Check("""
import foo
# Any function should match `Callable[P, Any]`.
def f0():
pass
def f1(x):
pass
def f2(x1, x2):
pass
foo.f(f0)
foo.f(f1)
foo.f(f2)
class C0:
def __call__(self):
pass
class C1:
def __call__(self, x1):
pass
class C2:
def __call__(self, x1, x2):
pass
# Any class object should match.
foo.f(C0)
# Any class instance with a `__call__` method should match.
foo.f(C0())
foo.f(C1())
foo.f(C2())
""")
def test_callable_class_inference(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Any, Callable, ParamSpec
P = ParamSpec('P')
def f(x: Callable[P, Any]) -> Callable[P, Any]: ...
""",
)]):
ty = self.Infer("""
import foo
class C:
def __call__(self, x: int, y) -> str:
return str(x)
f = foo.f(C())
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
class C:
def __call__(self, x: int, y) -> str: ...
def f(x: int, y) -> Any: ...
""",
)
def test_paramspec_in_callable_as_param_not_fail(self):
self.Check("""
from typing import ParamSpec, Callable
_P = ParamSpec("_P")
CallbleWithParamSpec = Callable[_P, None]
class A:
def __init__(self, callable_1: CallbleWithParamSpec, callable_2: CallbleWithParamSpec):
pass
class B(A):
def __init__(self, callable_1: CallbleWithParamSpec, callable_2: CallbleWithParamSpec):
super().__init__(
callable_1,
callable_2,
)
""")
|
PyiParamSpecTest
|
python
|
astropy__astropy
|
astropy/extern/configobj/validate.py
|
{
"start": 14149,
"end": 14553
}
|
class ____(VdtValueError):
"""The value supplied was of the correct type, but was too long."""
def __init__(self, value):
"""
>>> raise VdtValueTooLongError('jedie')
Traceback (most recent call last):
VdtValueTooLongError: the value "jedie" is too long.
"""
ValidateError.__init__(self, 'the value "%s" is too long.' % (value,))
|
VdtValueTooLongError
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/agentql_web/base.py
|
{
"start": 369,
"end": 3021
}
|
class ____(BasePydanticReader):
"""
Scrape a URL with or without a agentql query and returns document in json format.
Args:
api_key (str): The AgentQL API key, get one at https://dev.agentql.com
params (dict): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
"""
api_key: str
params: Optional[dict]
def __init__(
self,
api_key: str,
params: Optional[dict] = None,
) -> None:
super().__init__(api_key=api_key, params=params)
def load_data(
self, url: str, query: Optional[str] = None, prompt: Optional[str] = None
) -> List[Document]:
"""
Load data from the input directory.
Args:
url (str): URL to scrape or crawl.
query (Optional[str]): AgentQL query used to specify the scraped data.
prompt (Optional[str]): Natural language description of the data you want to scrape.
Either query or prompt must be provided.
params (Optional[dict]): Additional parameters to pass to the AgentQL API. Visit https://docs.agentql.com/rest-api/api-reference for details.
Returns:
List[Document]: List of documents.
"""
payload = {"url": url, "query": query, "prompt": prompt, "params": self.params}
headers = {
"X-API-Key": f"{self.api_key}",
"Content-Type": "application/json",
"X-TF-Request-Origin": REQUEST_ORIGIN,
}
try:
response = httpx.post(
QUERY_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=API_TIMEOUT_SECONDS,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
response = e.response
if response.status_code in [401, 403]:
raise ValueError(
"Please, provide a valid API Key. You can create one at https://dev.agentql.com."
) from e
else:
try:
error_json = response.json()
msg = (
error_json["error_info"]
if "error_info" in error_json
else error_json["detail"]
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
else:
json = response.json()
return [Document(text=str(json["data"]), metadata=json["metadata"])]
|
AgentQLWebReader
|
python
|
allegroai__clearml
|
clearml/utilities/gpu/pynvml.py
|
{
"start": 41187,
"end": 41901
}
|
class ____(_PrintableStructure):
_fields_ = [
# Moved to the new busId location below
('busIdLegacy', c_char * NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE),
('domain', c_uint),
('bus', c_uint),
('device', c_uint),
('pciDeviceId', c_uint),
# Added in 2.285
('pciSubSystemId', c_uint),
# New busId replaced the long deprecated and reserved fields with a
# field of the same size in 9.0
('busId', c_char * NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE),
]
_fmt_ = {
'domain': "0x%08X",
'bus': "0x%02X",
'device': "0x%02X",
'pciDeviceId': "0x%08X",
'pciSubSystemId': "0x%08X",
}
|
nvmlPciInfo_t
|
python
|
walkccc__LeetCode
|
solutions/673. Number of Longest Increasing Subsequence/673.py
|
{
"start": 0,
"end": 794
}
|
class ____:
def findNumberOfLIS(self, nums: list[int]) -> int:
ans = 0
maxLength = 0
# length[i] := the length of the LIS ending in nums[i]
length = [1] * len(nums)
# count[i] := the number of LIS's ending in nums[i]
count = [1] * len(nums)
# Calculate the `length` and `count` arrays.
for i, num in enumerate(nums):
for j in range(i):
if nums[j] < num:
if length[i] < length[j] + 1:
length[i] = length[j] + 1
count[i] = count[j]
elif length[i] == length[j] + 1:
count[i] += count[j]
# Get the number of LIS.
for i, l in enumerate(length):
if l > maxLength:
maxLength = l
ans = count[i]
elif l == maxLength:
ans += count[i]
return ans
|
Solution
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/sensors/test_dataflow.py
|
{
"start": 14054,
"end": 20993
}
|
class ____:
@pytest.mark.parametrize(
("job_current_state", "fail_on_terminal_state"),
[
(DataflowJobStatus.JOB_STATE_RUNNING, True),
(DataflowJobStatus.JOB_STATE_RUNNING, False),
(DataflowJobStatus.JOB_STATE_DONE, False),
],
)
@mock.patch("airflow.providers.google.cloud.sensors.dataflow.DataflowHook")
def test_poke(self, mock_hook, job_current_state, fail_on_terminal_state):
mock_get_job = mock_hook.return_value.get_job
mock_fetch_job_messages_by_id = mock_hook.return_value.fetch_job_messages_by_id
callback = mock.MagicMock()
task = DataflowJobMessagesSensor(
task_id=TEST_TASK_ID,
job_id=TEST_JOB_ID,
callback=callback,
fail_on_terminal_state=fail_on_terminal_state,
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_get_job.return_value = {"id": TEST_JOB_ID, "currentState": job_current_state}
results = task.poke(mock.MagicMock())
assert callback.return_value == results.xcom_value
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_fetch_job_messages_by_id.assert_called_once_with(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
callback.assert_called_once_with(mock_fetch_job_messages_by_id.return_value)
@mock.patch("airflow.providers.google.cloud.sensors.dataflow.DataflowHook")
def test_poke_raise_exception(self, mock_hook):
mock_get_job = mock_hook.return_value.get_job
mock_fetch_job_messages_by_id = mock_hook.return_value.fetch_job_messages_by_id
callback = mock.MagicMock()
task = DataflowJobMessagesSensor(
task_id=TEST_TASK_ID,
job_id=TEST_JOB_ID,
callback=callback,
fail_on_terminal_state=True,
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_get_job.return_value = {"id": TEST_JOB_ID, "currentState": DataflowJobStatus.JOB_STATE_DONE}
with pytest.raises(
AirflowException,
match=f"Job with id '{TEST_JOB_ID}' is already in terminal state: "
f"{DataflowJobStatus.JOB_STATE_DONE}",
):
task.poke(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_fetch_job_messages_by_id.assert_not_called()
callback.assert_not_called()
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.AsyncDataflowHook")
def test_execute_enters_deferred_state(self, mock_hook):
"""
Tests that DataflowJobMessagesTrigger will be fired when the DataflowJobMessagesSensor
is executed and deferrable is set to True.
"""
task = DataflowJobMessagesSensor(
task_id=TEST_TASK_ID,
job_id=TEST_JOB_ID,
fail_on_terminal_state=False,
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
deferrable=True,
callback=None,
)
mock_hook.return_value.exists.return_value = False
with pytest.raises(TaskDeferred) as exc:
task.execute(None)
assert isinstance(exc.value.trigger, DataflowJobMessagesTrigger), (
"Trigger is not a DataflowJobMessagesTrigger"
)
def test_execute_complete_success_without_callback_function(self):
"""Tests that the trigger event contains expected values if no callback function is provided."""
expected_result = []
task = DataflowJobMessagesSensor(
task_id=TEST_TASK_ID,
job_id=TEST_JOB_ID,
fail_on_terminal_state=False,
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
deferrable=True,
callback=None,
)
actual_message = task.execute_complete(
context=None,
event={
"status": "success",
"message": f"Detected 2 job messages for job '{TEST_JOB_ID}'",
"result": [],
},
)
assert actual_message == expected_result
def test_execute_complete_success_with_callback_function(self):
"""Tests that the trigger event contains expected values if the callback function is provided."""
expected_result = [
{
"id": "1707695235850",
"time": "2024-02-06T23:47:15.850Z",
"message_text": "msg.",
"message_importance": 5,
},
{
"id": "1707695635401",
"time": "2024-02-06T23:53:55.401Z",
"message_text": "msg.",
"message_importance": 5,
},
]
task = DataflowJobMessagesSensor(
task_id=TEST_TASK_ID,
job_id=TEST_JOB_ID,
callback=lambda res: res,
fail_on_terminal_state=False,
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
deferrable=True,
)
actual_result = task.execute_complete(
context=None,
event={
"status": "success",
"message": f"Detected 2 job messages for job '{TEST_JOB_ID}'",
"result": expected_result,
},
)
assert actual_result == expected_result
def test_execute_complete_not_success_status_raises_exception(self):
"""Tests that AirflowException or AirflowSkipException is raised if the trigger event contains an error."""
task = DataflowJobMessagesSensor(
task_id=TEST_TASK_ID,
job_id=TEST_JOB_ID,
callback=None,
fail_on_terminal_state=False,
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
deferrable=True,
)
with pytest.raises(AirflowException):
task.execute_complete(
context=None, event={"status": "error", "message": "test error message", "result": None}
)
|
TestDataflowJobMessagesSensor
|
python
|
pytorch__pytorch
|
torch/nn/modules/pooling.py
|
{
"start": 1773,
"end": 4963
}
|
class ____(_MaxPoolNd):
r"""Applies a 1D max pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
and output :math:`(N, C, L_{out})` can be precisely described as:
.. math::
out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
input(N_i, C_j, stride \times k + m)
If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
for :attr:`padding` number of points. :attr:`dilation` is the stride between the elements within the
sliding window. This `link`_ has a nice visualization of the pooling parameters.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
Args:
kernel_size: The size of the sliding window, must be > 0.
stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`.
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.MaxUnpool1d` later
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`,
where ``ceil_mode = False``
.. math::
L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
\times (\text{kernel\_size} - 1) - 1}{\text{stride}}\right\rfloor + 1
where ``ceil_mode = True``
.. math::
L_{out} = \left\lceil \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
\times (\text{kernel\_size} - 1) - 1 + (stride - 1)}{\text{stride}}\right\rceil + 1
- Ensure that the last pooling starts inside the image, make :math:`L_{out} = L_{out} - 1`
when :math:`(L_{out} - 1) * \text{stride} >= L_{in} + \text{padding}`.
Examples::
>>> # pool of size=3, stride=2
>>> m = nn.MaxPool1d(3, stride=2)
>>> input = torch.randn(20, 16, 50)
>>> output = m(input)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
kernel_size: _size_1_t
stride: _size_1_t
padding: _size_1_t
dilation: _size_1_t
def forward(self, input: Tensor):
"""Runs the forward pass."""
return F.max_pool1d(
input,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
ceil_mode=self.ceil_mode,
return_indices=self.return_indices,
)
|
MaxPool1d
|
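A quick aside on the MaxPool1d record above: the documented output-length formula can be checked directly against the standard torch API. A minimal sketch (shapes chosen for illustration):

import torch
import torch.nn as nn

# Pool of size 3, stride 2, no padding or dilation, matching the docstring example.
m = nn.MaxPool1d(kernel_size=3, stride=2)
x = torch.randn(20, 16, 50)
y = m(x)

# L_out = floor((50 + 2*0 - 1*(3 - 1) - 1) / 2) + 1 = 24
assert y.shape == (20, 16, 24)

# return_indices=True also yields argmax positions, usable later with nn.MaxUnpool1d.
values, indices = nn.MaxPool1d(3, stride=2, return_indices=True)(x)
assert values.shape == indices.shape == (20, 16, 24)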
python
|
gevent__gevent
|
src/greentest/3.10/test_socket.py
|
{
"start": 22414,
"end": 22527
}
|
class ____(InetTestBase):
"""Base class for IPv6 socket tests."""
host = socket_helper.HOSTv6
|
Inet6TestBase
|
python
|
apache__airflow
|
airflow-core/src/airflow/example_dags/example_dynamic_task_mapping_with_no_taskflow_operators.py
|
{
"start": 1255,
"end": 2017
}
|
class ____(BaseOperator):
"""A custom operator that sums the input."""
template_fields = ("values",)
def __init__(self, values, **kwargs):
super().__init__(**kwargs)
self.values = values
def execute(self, context):
total = sum(self.values)
print(f"Total was {total}")
return total
with DAG(
dag_id="example_dynamic_task_mapping_with_no_taskflow_operators",
schedule=None,
start_date=datetime(2022, 3, 4),
catchup=False,
):
# map the task to a list of values
add_one_task = AddOneOperator.partial(task_id="add_one").expand(value=[1, 2, 3])
# aggregate (reduce) the mapped tasks results
sum_it_task = SumItOperator(task_id="sum_it", values=add_one_task.output)
|
SumItOperator
|
python
|
nedbat__coveragepy
|
coverage/multiproc.py
|
{
"start": 702,
"end": 2198
}
|
class ____(OriginalProcess): # pylint: disable=abstract-method
"""A replacement for multiprocess.Process that starts coverage."""
def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def]
"""Wrapper around _bootstrap to start coverage."""
debug: DebugControl | None = None
try:
from coverage import Coverage # avoid circular import
cov = Coverage(data_suffix=True, auto_data=True)
cov._warn_preimported_source = False
cov.start()
_debug = cov._debug
assert _debug is not None
if _debug.should("multiproc"):
debug = _debug
if debug:
debug.write("Calling multiprocessing bootstrap")
except Exception:
print("Exception during multiprocessing bootstrap init:", file=sys.stderr)
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
raise
try:
return original_bootstrap(self, *args, **kwargs)
finally:
if debug:
debug.write("Finished multiprocessing bootstrap")
try:
cov.stop()
cov.save()
except Exception as exc:
if debug:
debug.write("Exception during multiprocessing bootstrap cleanup", exc=exc)
raise
if debug:
debug.write("Saved multiprocessing data")
|
ProcessWithCoverage
|
python
|
PrefectHQ__prefect
|
src/prefect/events/filters.py
|
{
"start": 5922,
"end": 7120
}
|
class ____(EventDataFilter):
id: Optional[list[str]] = Field(
default=None, description="Only include events for resources with these IDs"
)
id_prefix: Optional[list[str]] = Field(
default=None,
description=(
"Only include events for resources with IDs starting with these prefixes"
),
)
labels: Optional[ResourceSpecification] = Field(
default=None,
description="Only include events for related resources with these labels",
)
def includes(self, event: Event) -> bool:
resources = [event.resource] + event.related
if not any(self._includes(resource) for resource in resources):
return False
return True
def _includes(self, resource: Resource) -> bool:
if self.id:
if not any(resource.id == resource_id for resource_id in self.id):
return False
if self.id_prefix:
if not any(resource.id.startswith(prefix) for prefix in self.id_prefix):
return False
if self.labels:
if not self.labels.matches(resource):
return False
return True
|
EventAnyResourceFilter
|
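The Prefect filter above reduces to a simple per-resource rule: the id filter, the id-prefix filter, and the label filter must all pass. A standalone sketch of just the id logic, using plain strings instead of Prefect's Resource model (illustrative names only):

def resource_matches(resource_id, ids=None, id_prefixes=None):
    # Mirrors the _includes() checks above, minus the label matching.
    if ids and resource_id not in ids:
        return False
    if id_prefixes and not any(resource_id.startswith(p) for p in id_prefixes):
        return False
    return True

assert resource_matches("prefect.flow-run.123", id_prefixes=["prefect.flow-run."])
assert not resource_matches("prefect.deployment.9", ids=["prefect.deployment.7"])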
python
|
python__mypy
|
mypy/test/teststubtest.py
|
{
"start": 3096,
"end": 3324
}
|
class ____(Sequence[T]): ...
def property(f: T) -> T: ...
def classmethod(f: T) -> T: ...
def staticmethod(f: T) -> T: ...
"""
stubtest_enum_stub = """
import sys
from typing import Any, TypeVar, Iterator
_T = TypeVar('_T')
|
list
|
python
|
pennersr__django-allauth
|
allauth/headless/account/response.py
|
{
"start": 283,
"end": 515
}
|
class ____(APIResponse):
def __init__(self, request, verification_sent):
super().__init__(
request, status=HTTPStatus.OK if verification_sent else HTTPStatus.FORBIDDEN
)
|
RequestEmailVerificationResponse
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/workers.py
|
{
"start": 74405,
"end": 74643
}
|
class ____(Response):
"""
Response of workers.register endpoint.
"""
_service = "workers"
_action = "register"
_version = "2.9"
_schema = {"definitions": {}, "properties": {}, "type": "object"}
|
RegisterResponse
|
python
|
davidhalter__jedi
|
test/completion/stdlib.py
|
{
"start": 4987,
"end": 5847
}
|
class ____(enum.Enum):
attr_x = 3
attr_y = 2.0
#? ['mro']
X.mro
#? ['attr_x', 'attr_y']
X.attr_
#? str()
X.attr_x.name
#? int()
X.attr_x.value
#? str()
X.attr_y.name
#? float()
X.attr_y.value
#? str()
X().name
#? float()
X().attr_x.attr_y.value
# -----------------
# functools
# -----------------
import functools
basetwo = functools.partial(int, base=2)
#? int()
basetwo()
def function(a, b):
return a, b
a = functools.partial(function, 0)
#? int()
a('')[0]
#? str()
a('')[1]
kw = functools.partial(function, b=1.0)
tup = kw(1)
#? int()
tup[0]
#? float()
tup[1]
def my_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwds):
return f(*args, **kwds)
return wrapper
@my_decorator
def example(a):
return a
#? str()
example('')
# From GH #1574
#? float()
functools.wraps(functools.partial(str, 1))(lambda: 1.0)()
|
X
|
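The jedi completion test above leans on how functools.partial and functools.wraps behave at runtime; a short runnable recap of those behaviours (values chosen for illustration):

import functools

basetwo = functools.partial(int, base=2)
assert basetwo("10010") == 18  # int("10010", base=2)

def function(a, b):
    return a, b

# Positional binding fixes `a`; keyword binding fixes `b`.
assert functools.partial(function, 0)("x") == (0, "x")
assert functools.partial(function, b=1.0)(1) == (1, 1.0)

@functools.wraps(function)
def wrapper(*args, **kwargs):
    return function(*args, **kwargs)

assert wrapper.__name__ == "function"  # wraps copies metadata from the wrapped callable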
python
|
django__django
|
tests/m2m_through_regress/models.py
|
{
"start": 351,
"end": 615
}
|
class ____(models.Model):
id = models.AutoField(db_column="usermembership_id", primary_key=True)
user = models.ForeignKey(User, models.CASCADE)
group = models.ForeignKey("Group", models.CASCADE)
price = models.IntegerField(default=100)
|
UserMembership
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/compute_log_manager.py
|
{
"start": 5555,
"end": 15103
}
|
class ____(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):
"""Abstract base class for capturing the unstructured logs (stdout/stderr) in the current
process, stored / retrieved with a provided log_key.
"""
@abstractmethod
@contextmanager
def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:
"""Context manager for capturing the stdout/stderr within the current process, and persisting
it under the given log key.
Args:
log_key (List[String]): The log key identifying the captured logs
"""
@abstractmethod
@contextmanager
def open_log_stream(
self, log_key: Sequence[str], io_type: ComputeIOType
) -> Iterator[Optional[IO[bytes]]]:
"""Context manager for providing an IO stream that enables the caller to write to a log stream
managed by the captured log manager, to be read later using the given log key.
Args:
log_key (List[String]): The log key identifying the captured logs
"""
@abstractmethod
def is_capture_complete(self, log_key: Sequence[str]) -> bool:
"""Flag indicating when the log capture for a given log key has completed.
Args:
log_key (List[String]): The log key identifying the captured logs
Returns:
Boolean
"""
@abstractmethod
def get_log_data_for_type(
self,
log_key: Sequence[str],
io_type: ComputeIOType,
offset: int,
max_bytes: Optional[int],
) -> tuple[Optional[bytes], int]:
"""Returns a chunk of the captured io_type logs for a given log key.
Args:
log_key (List[String]): The log key identifying the captured logs
io_type (ComputeIOType): stderr or stdout
offset (Optional[int]): An offset in to the log to start from
max_bytes (Optional[int]): A limit on the size of the log chunk to fetch
Returns:
Tuple[Optional[bytes], int]: The content read and offset in to the file
"""
@abstractmethod
def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:
"""Returns the metadata of the captured logs for a given log key, including
displayable information on where the logs are persisted.
Args:
log_key (List[String]): The log key identifying the captured logs
Returns:
CapturedLogMetadata
"""
@abstractmethod
def delete_logs(
self,
log_key: Optional[Sequence[str]] = None,
prefix: Optional[Sequence[str]] = None,
) -> None:
"""Deletes the captured logs for a given log key.
Args:
log_key(Optional[List[String]]): The log key of the logs to delete
prefix(Optional[List[String]]): The prefix of the log keys to delete
"""
@abstractmethod
def subscribe(
self, log_key: Sequence[str], cursor: Optional[str] = None
) -> CapturedLogSubscription:
"""Registers an observable object for log data.
Args:
log_key (List[String]): The log key identifying the captured logs
cursor (Optional[String]): The string cursor marking the position within the log stream
Returns:
CapturedLogSubscription
"""
def unsubscribe(self, subscription: CapturedLogSubscription) -> None:
"""Deregisters an observable object from receiving log updates.
Args:
subscription (CapturedLogSubscription): subscription object which manages when to send
back data to the subscriber
"""
pass
def dispose(self):
pass
def parse_cursor(self, cursor: Optional[str] = None) -> tuple[int, int]:
# Translates a string cursor into a set of byte offsets for stdout, stderr
if not cursor:
return 0, 0
parts = cursor.split(":")
if not parts or len(parts) != 2:
return 0, 0
stdout, stderr = [int(_) for _ in parts]
return stdout, stderr
def build_cursor(self, stdout_offset: int, stderr_offset: int) -> str:
return f"{stdout_offset}:{stderr_offset}"
def get_log_data(
self,
log_key: Sequence[str],
cursor: Optional[str] = None,
max_bytes: Optional[int] = None,
) -> CapturedLogData:
stdout_offset, stderr_offset = self.parse_cursor(cursor)
stdout, new_stdout_offset = self.get_log_data_for_type(
log_key,
ComputeIOType.STDOUT,
stdout_offset,
max_bytes,
)
stderr, new_stderr_offset = self.get_log_data_for_type(
log_key,
ComputeIOType.STDERR,
stderr_offset,
max_bytes,
)
return CapturedLogData(
log_key=log_key,
stdout=stdout,
stderr=stderr,
cursor=self.build_cursor(new_stdout_offset, new_stderr_offset),
)
def build_log_key_for_run(self, run_id: str, step_key: str) -> Sequence[str]:
"""Legacy adapter to translate run_id/key to captured log manager-based log_key."""
return [run_id, "compute_logs", step_key]
def get_log_keys_for_log_key_prefix(
self, log_key_prefix: Sequence[str], io_type: ComputeIOType
) -> Sequence[Sequence[str]]:
"""Returns the logs keys for a given log key prefix. This is determined by looking at the
directory defined by the log key prefix and creating a log_key for each file in the directory.
"""
raise NotImplementedError("Must implement get_log_keys_for_log_key_prefix")
def _get_log_lines_for_log_key(
self,
log_key: Sequence[str],
io_type: ComputeIOType,
) -> Sequence[str]:
"""For a log key, gets the corresponding file, and splits the file into lines."""
log_data, _ = self.get_log_data_for_type(
log_key,
io_type,
offset=0,
max_bytes=None,
)
raw_logs = log_data.decode("utf-8") if log_data else ""
log_lines = raw_logs.split("\n")
return log_lines
def read_log_lines_for_log_key_prefix(
self,
log_key_prefix: Sequence[str],
cursor: Optional[str],
io_type: ComputeIOType,
) -> tuple[Sequence[str], Optional[LogLineCursor]]:
"""For a given directory defined by log_key_prefix that contains files, read the logs from the files
as if they are a single continuous file. Reads env var DAGSTER_CAPTURED_LOG_CHUNK_SIZE lines at a time.
Returns the lines read and the next cursor.
Note that the has_more_now attribute of the cursor indicates if there are more logs that can be read immediately.
        If has_more_now is False, the process producing logs could still be running and dump more logs into the
directory at a later time.
"""
num_lines = int(os.getenv("DAGSTER_CAPTURED_LOG_CHUNK_SIZE", "1000"))
# find all of the log_keys to read from and sort them in the order to be read
log_keys = sorted(
self.get_log_keys_for_log_key_prefix(log_key_prefix, io_type=io_type),
key=lambda x: "/".join(x),
)
if len(log_keys) == 0:
return [], None
log_cursor = LogLineCursor.parse(cursor) if cursor else None
if log_cursor is None:
log_key_to_fetch_idx = 0
line_cursor = 0
else:
log_key_to_fetch_idx = log_keys.index(log_cursor.log_key)
line_cursor = log_cursor.line
if line_cursor == -1:
# line_cursor for -1 means the entirety of the file has been read, but the next file
# didn't exist yet. So we see if a new file has been added.
# if the next file doesn't exist yet, return
if log_key_to_fetch_idx + 1 >= len(log_keys):
return [], log_cursor
log_key_to_fetch_idx += 1
line_cursor = 0
log_lines = self._get_log_lines_for_log_key(log_keys[log_key_to_fetch_idx], io_type=io_type)
records = []
has_more = True
while len(records) < num_lines:
remaining_log_lines = log_lines[line_cursor:]
remaining_lines_to_fetch = num_lines - len(records)
if remaining_lines_to_fetch < len(remaining_log_lines):
records.extend(remaining_log_lines[:remaining_lines_to_fetch])
line_cursor += remaining_lines_to_fetch
else:
records.extend(remaining_log_lines)
line_cursor = -1
if line_cursor == -1:
# we've read the entirety of the file, update the cursor
if log_key_to_fetch_idx + 1 >= len(log_keys):
# no more files to process
has_more = False
break
log_key_to_fetch_idx += 1
line_cursor = 0
if len(records) < num_lines:
# we still need more records, so fetch the next file
log_lines = self._get_log_lines_for_log_key(
log_keys[log_key_to_fetch_idx], io_type=io_type
)
new_cursor = LogLineCursor(
log_key=log_keys[log_key_to_fetch_idx],
line=line_cursor,
has_more_now=has_more,
)
return records, new_cursor
|
ComputeLogManager
|
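One detail worth pulling out of the ComputeLogManager record above: the cursor is just the two byte offsets joined as "<stdout>:<stderr>". A standalone sketch of that encoding (helper names are illustrative, not the Dagster API):

from typing import Optional, Tuple

def build_cursor(stdout_offset: int, stderr_offset: int) -> str:
    return f"{stdout_offset}:{stderr_offset}"

def parse_cursor(cursor: Optional[str]) -> Tuple[int, int]:
    # A missing or wrongly-delimited cursor falls back to reading both streams from the start.
    if not cursor:
        return 0, 0
    parts = cursor.split(":")
    if len(parts) != 2:
        return 0, 0
    return int(parts[0]), int(parts[1])

assert parse_cursor(build_cursor(120, 45)) == (120, 45)
assert parse_cursor(None) == (0, 0)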
python
|
sqlalchemy__sqlalchemy
|
examples/space_invaders/space_invaders.py
|
{
"start": 7030,
"end": 7327
}
|
class ____(EnemyGlyph):
"""Describe the enemy saucer flying overhead."""
__mapper_args__ = {"polymorphic_identity": "saucer"}
def glyph_for_state(self, coord, state):
if state["flip"] == 0:
return self.alt_data
else:
return self.data
|
SaucerGlyph
|
python
|
chroma-core__chroma
|
chromadb/test/property/test_collections_with_database_tenant.py
|
{
"start": 523,
"end": 6047
}
|
class ____(CollectionStateMachine):
"""A collection state machine test that includes tenant and database information,
and switches between them."""
tenants: Bundle # [str]
databases: Bundle # [Tuple[str, str]] # database to tenant it belongs to
tenant_to_database_to_model: Dict[
str, Dict[str, Dict[str, Optional[types.CollectionMetadata]]]
]
admin_client: AdminAPI
curr_tenant: str
curr_database: str
tenants = Bundle("tenants")
databases = Bundle("databases")
def __init__(self, client_factories: ClientFactories):
client = client_factories.create_client()
super().__init__(client)
self.client = client
self.admin_client = client_factories.create_admin_client_from_system()
@initialize()
def initialize(self) -> None:
self.client.reset()
self.tenant_to_database_to_model = {}
self.curr_tenant = DEFAULT_TENANT
self.curr_database = DEFAULT_DATABASE
self.client.set_tenant(DEFAULT_TENANT, DEFAULT_DATABASE)
self.set_tenant_model(self.curr_tenant, {})
self.set_database_model_for_tenant(self.curr_tenant, self.curr_database, {})
@rule(target=tenants, name=strategies.tenant_database_name)
def create_tenant(self, name: str) -> MultipleResults: # [str]:
tenant = self.overwrite_tenant(name)
# Check if tenant already exists
if self.has_tenant(tenant):
with pytest.raises(Exception):
self.admin_client.create_tenant(tenant)
return multiple()
self.admin_client.create_tenant(tenant)
# When we create a tenant, create a default database for it just for testing
# since the state machine could call collection operations before creating a
# database
self.admin_client.create_database(DEFAULT_DATABASE, tenant=tenant)
self.set_tenant_model(tenant, {})
self.set_database_model_for_tenant(tenant, DEFAULT_DATABASE, {})
return multiple(tenant)
@rule(target=databases, name=strategies.tenant_database_name)
def create_database(self, name: str) -> MultipleResults: # [Tuple[str, str]]:
database = self.overwrite_database(name)
tenant = self.overwrite_tenant(self.curr_tenant)
# If database already exists in current tenant, raise an error
if self.has_database_for_tenant(tenant, database):
with pytest.raises(Exception):
self.admin_client.create_database(name=database, tenant=tenant)
return multiple()
self.admin_client.create_database(name=database, tenant=tenant)
self.set_database_model_for_tenant(
tenant=tenant, database=database, database_model={}
)
return multiple((database, tenant))
@rule(database=databases)
def set_database_and_tenant(self, database: Tuple[str, str]) -> None:
# Get a database and switch to the database and the tenant it belongs to
database_name = database[0]
tenant_name = database[1]
self.set_api_tenant_database(tenant_name, database_name)
self.curr_database = database_name
self.curr_tenant = tenant_name
@rule(tenant=tenants)
def set_tenant(self, tenant: str) -> None:
self.set_api_tenant_database(tenant, DEFAULT_DATABASE)
self.curr_tenant = tenant
self.curr_database = DEFAULT_DATABASE
# These methods allow other tests, namely
# test_collections_with_database_tenant_override.py, to swap out the model
# without needing to do a bunch of pythonic cleverness to fake a dict which
    # pretends to have every key.
def set_api_tenant_database(self, tenant: str, database: str) -> None:
self.client.set_tenant(tenant, database)
# For calls to create_database, and create_tenant we may want to override the tenant and database
    # This is a leaky abstraction that exists solely for the purpose of
# test_collections_with_database_tenant_override.py
def overwrite_tenant(self, tenant: str) -> str:
return tenant
def overwrite_database(self, database: str) -> str:
return database
def has_tenant(self, tenant: str) -> bool:
return tenant in self.tenant_to_database_to_model
def get_tenant_model(
self, tenant: str
) -> Dict[str, Dict[str, Optional[types.CollectionMetadata]]]:
return self.tenant_to_database_to_model[tenant]
def set_tenant_model(
self,
tenant: str,
model: Dict[str, Dict[str, Optional[types.CollectionMetadata]]],
) -> None:
self.tenant_to_database_to_model[tenant] = model
def has_database_for_tenant(self, tenant: str, database: str) -> bool:
return database in self.tenant_to_database_to_model[tenant]
def set_database_model_for_tenant(
self,
tenant: str,
database: str,
database_model: Dict[str, Optional[types.CollectionMetadata]],
) -> None:
self.tenant_to_database_to_model[tenant][database] = database_model
@property
def model(self) -> Dict[str, Optional[types.CollectionMetadata]]:
return self.tenant_to_database_to_model[self.curr_tenant][self.curr_database]
def test_collections(
caplog: pytest.LogCaptureFixture, client_factories: ClientFactories
) -> None:
caplog.set_level(logging.ERROR)
run_state_machine_as_test(lambda: TenantDatabaseCollectionStateMachine(client_factories)) # type: ignore
|
TenantDatabaseCollectionStateMachine
|
python
|
PrefectHQ__prefect
|
src/prefect/events/actions.py
|
{
"start": 2823,
"end": 2964
}
|
class ____(DeploymentAction):
"""Pauses the given Deployment"""
type: Literal["pause-deployment"] = "pause-deployment"
|
PauseDeployment
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/paramSpec4.py
|
{
"start": 2836,
"end": 3405
}
|
class ____(Generic[R, P]):
f: Callable[P, str]
prop: R
def __init__(self, f: Callable[P, str], prop: R) -> None:
self.f = f
self.prop = prop
def func10(q: int, /) -> str: ...
y1 = A(func10, 1)
assert_type(y1, A[int, [int]])
reveal_type(y1, expected_text="A[int, (q: int, /)]")
# This should generate an error because Concatenate is not
# allowed in this context.
def func11(func: Concatenate[int, ...]) -> None:
# This should generate an error because Concatenate is not
# allowed in this context.
x: Concatenate[int, ...]
|
A
|
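The pyright sample above types a class that is generic over both an ordinary type variable R and a ParamSpec P captured from a callable. A minimal runnable version of the same pattern (illustrative names; needs Python 3.10+ for typing.ParamSpec):

from typing import Callable, Generic, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")

class Wrapper(Generic[R, P]):
    def __init__(self, f: Callable[P, str], prop: R) -> None:
        self.f = f
        self.prop = prop

def func10(q: int, /) -> str:
    return f"got {q}"

w = Wrapper(func10, 1)  # a ParamSpec-aware checker infers Wrapper[int, (q: int, /)]
print(w.f(3), w.prop)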
python
|
miyuchina__mistletoe
|
mistletoe/span_token.py
|
{
"start": 6504,
"end": 7079
}
|
class ____(SpanToken):
"""
Escape sequence token. ("\\\\*")
This is an inline token with a single child of type RawText.
Attributes:
children (iterator): a single RawText node containing the escaped character.
"""
pattern = re.compile(r"\\([!\"#$%&'()*+,-./:;<=>?@\[\\\]^_`{|}~])")
parse_inner = False
precedence = 2
def __init__(self, match):
self.children = (RawText(match.group(self.parse_group)),)
@classmethod
def strip(cls, string):
return html.unescape(cls.pattern.sub(r'\1', string))
|
EscapeSequence
|
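For the mistletoe EscapeSequence record above, a quick standalone check of what the pattern captures and what strip() does (inputs chosen for illustration):

import html
import re

pattern = re.compile(r"\\([!\"#$%&'()*+,-./:;<=>?@\[\\\]^_`{|}~])")

# A backslash followed by ASCII punctuation is captured without the backslash.
assert pattern.match(r"\*").group(1) == "*"

# strip() drops the backslashes and HTML-unescapes the remainder.
assert html.unescape(pattern.sub(r"\1", r"a \* b \_ c")) == "a * b _ c"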
python
|
docker__docker-py
|
docker/types/networks.py
|
{
"start": 1738,
"end": 1901
}
|
class ____(dict):
def __init__(self, endpoints_config=None):
if endpoints_config:
self["EndpointsConfig"] = endpoints_config
|
NetworkingConfig
|
python
|
huggingface__transformers
|
tests/models/layoutlm/test_modeling_layoutlm.py
|
{
"start": 8812,
"end": 13837
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
LayoutLMModel,
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMForQuestionAnswering,
)
if is_torch_available()
else None
)
pipeline_model_mapping = (
{
"document-question-answering": LayoutLMForQuestionAnswering,
"feature-extraction": LayoutLMModel,
"fill-mask": LayoutLMForMaskedLM,
"text-classification": LayoutLMForSequenceClassification,
"token-classification": LayoutLMForTokenClassification,
"zero-shot": LayoutLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = LayoutLMModelTester(self)
self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]],device=torch_device) # noqa: E231
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],],device=torch_device) # noqa: E231
bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]],device=torch_device) # noqa: E231
token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]],device=torch_device) # noqa: E231
# these are sequence labels (i.e. at the token level)
labels = torch.tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]],device=torch_device) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_torch
|
LayoutLMModelTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/weak_tensor_np_math_ops_test.py
|
{
"start": 1889,
"end": 8143
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def setUp(self):
super(MathTest, self).setUp()
self.array_transforms = [
lambda x: x, # Identity,
_get_weak_tensor,
np_array_ops.array,
]
self.types = [np.int32, np.int64, np.float32, np.float64]
def _testUnaryOp(self, math_fun, np_fun, name, weak_result):
def run_test(a):
for fn in self.array_transforms:
arg1 = fn(a)
if weak_result and flexible_dtypes.result_type(arg1)[1]:
self.assertIsInstance(math_fun(arg1), weak_tensor.WeakTensor)
else:
self.assertIsInstance(math_fun(arg1), np_arrays.ndarray)
self.match(
math_fun(arg1), np_fun(arg1), msg='{}({})'.format(name, arg1))
run_test(5)
run_test([2, 3])
run_test([[2, -3], [-6, 7]])
def testLog(self):
self._testUnaryOp(np_math_ops.log, np.log, 'log', True)
def testExp(self):
self._testUnaryOp(np_math_ops.exp, np.exp, 'exp', True)
def testTanh(self):
self._testUnaryOp(np_math_ops.tanh, np.tanh, 'tanh', True)
def testSqrt(self):
self._testUnaryOp(np_math_ops.sqrt, np.sqrt, 'sqrt', True)
def match(self, actual, expected, msg='', check_dtype=True):
if check_dtype:
self.assertEqual(
actual.dtype,
_NP_to_TF_result_inferred_types[expected.dtype],
'Dtype mismatch.\nActual: {}\nExpected: {}\n{}'.format(
actual.dtype.as_numpy_dtype,
_NP_to_TF_result_inferred_types[expected.dtype],
msg,
),
)
self.assertEqual(
actual.shape, expected.shape,
'Shape mismatch.\nActual: {}\nExpected: {}\n{}'.format(
actual.shape, expected.shape, msg))
np.testing.assert_allclose(actual.tolist(), expected.tolist(), rtol=1e-6)
def testArgsort(self):
self._testUnaryOp(np_math_ops.argsort, np.argsort, 'argsort', False)
# Test stability
r = np.arange(100)
a = np.zeros(100)
np.testing.assert_equal(np_math_ops.argsort(a, kind='stable'), r)
def testArgMaxArgMin(self):
data = [
0,
5,
[1],
[1, 2, 3],
[[1, 2, 3]],
[[4, 6], [7, 8]],
[[[4, 6], [9, 10]], [[7, 8], [12, 34]]],
]
for fn, d in itertools.product(self.array_transforms, data):
arr = fn(d)
# argmax and argmin returns indices (int64 type).
self.match(np_math_ops.argmax(arr), np.argmax(arr), check_dtype=False)
self.match(np_math_ops.argmin(arr), np.argmin(arr), check_dtype=False)
if hasattr(arr, 'shape'):
ndims = len(arr.shape)
else:
ndims = np_array_ops.array(arr, copy=False).ndim
if ndims == 0:
# Numpy flattens the scalar ndarray and treats it as a 1-d array of
# size 1.
ndims = 1
for axis in range(-ndims, ndims):
self.match(
np_math_ops.argmax(arr, axis=axis),
np.argmax(arr, axis=axis),
check_dtype=False,
)
self.match(
np_math_ops.argmin(arr, axis=axis),
np.argmin(arr, axis=axis),
check_dtype=False,
)
def testAverageWrongShape(self):
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
np_math_ops.average(np.ones([2, 3]), weights=np.ones([2, 4]))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([2, 4]))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([]))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([5]))
def testPtp(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_math_ops.ptp(arg, *args, **kwargs),
np.ptp(arg, *args, **kwargs),
check_dtype=False,
)
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
# Test that enable_numpy_methods() gets called when weak_tensor_ops is
# imported.
@parameterized.parameters([
'T', 'ndim', 'size', 'data', '__pos__', '__round__', 'tolist', 'flatten',
'transpose', 'reshape', 'ravel', 'clip', 'astype', 'max', 'mean', 'min'])
def testNumpyMethodsOnTensor(self, np_method):
a = ops.convert_to_tensor([1, 2])
self.assertTrue(hasattr(a, np_method))
def testFlatten(self):
a1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
a2 = _get_weak_tensor(a1)
self.assertAllEqual(a1.flatten('C'), a2.flatten('C'))
self.assertAllEqual(a1.flatten('F'), a2.flatten('F'))
self.assertAllEqual(a1.flatten('C'), a2.flatten('A'))
self.assertAllEqual(a1.flatten('C'), a2.flatten('K'))
with self.assertRaises(ValueError):
a2.flatten('invalid')
def testIsInf(self):
x1 = _get_weak_tensor(-2147483648)
x2 = _get_weak_tensor(2147483647)
self.assertFalse(np_math_ops.isinf(x1))
self.assertFalse(np_math_ops.isinf(x2))
self.assertFalse(np_math_ops.isposinf(x1))
self.assertFalse(np_math_ops.isposinf(x2))
self.assertFalse(np_math_ops.isneginf(x1))
self.assertFalse(np_math_ops.isneginf(x2))
def testRandomOpsReturnFloat32(self):
x = np_random.rand(2, 50)
np_x = np.random.rand(2, 50)
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(np_x.shape, x.shape)
x = np_random.standard_normal(50)
np_x = np.random.standard_normal(50)
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(np_x.shape, x.shape)
x = np_random.uniform(low=-1, high=0, size=(50, 50))
np_x = np.random.uniform(low=-1, high=0, size=(50, 50))
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(np_x.shape, x.shape)
if __name__ == '__main__':
tensor.enable_tensor_equality()
np_math_ops.enable_numpy_methods_on_tensor()
ops.enable_eager_execution()
ops.set_dtype_conversion_mode('all')
googletest.main()
|
MathTest
|
python
|
ray-project__ray
|
python/ray/util/client/dataclient.py
|
{
"start": 4292,
"end": 7522
}
|
class ____:
"""
This object collects chunks from async get requests via __call__, and
calls the underlying callback when the object is fully received, or if an
exception while retrieving the object occurs.
This is not used in synchronous gets (synchronous gets interact with the
raylet servicer directly, not through the datapath).
__call__ returns true once the underlying call back has been called.
"""
def __init__(self, callback: ResponseCallable, request: ray_client_pb2.DataRequest):
# Bytearray containing data received so far
self.data = bytearray()
# The callback that will be called once all data is received
self.callback = callback
# The id of the last chunk we've received, or -1 if haven't seen any yet
self.last_seen_chunk = -1
# The GetRequest that initiated the transfer. start_chunk_id will be
# updated as chunks are received to avoid re-requesting chunks that
# we've already received.
self.request = request
def __call__(self, response: Union[ray_client_pb2.DataResponse, Exception]) -> bool:
if isinstance(response, Exception):
self.callback(response)
return True
get_resp = response.get
if not get_resp.valid:
self.callback(response)
return True
if get_resp.total_size > OBJECT_TRANSFER_WARNING_SIZE and log_once(
"client_object_transfer_size_warning"
):
size_gb = get_resp.total_size / 2**30
warnings.warn(
"Ray Client is attempting to retrieve a "
f"{size_gb:.2f} GiB object over the network, which may "
"be slow. Consider serializing the object to a file and "
"using rsync or S3 instead.",
UserWarning,
)
chunk_data = get_resp.data
chunk_id = get_resp.chunk_id
if chunk_id == self.last_seen_chunk + 1:
self.data.extend(chunk_data)
self.last_seen_chunk = chunk_id
# If we disconnect partway through, restart the get request
# at the first chunk we haven't seen
self.request.get.start_chunk_id = self.last_seen_chunk + 1
elif chunk_id > self.last_seen_chunk + 1:
# A chunk was skipped. This shouldn't happen in practice since
# grpc guarantees that chunks will arrive in order.
msg = (
f"Received chunk {chunk_id} when we expected "
f"{self.last_seen_chunk + 1} for request {response.req_id}"
)
logger.warning(msg)
self.callback(RuntimeError(msg))
return True
else:
            # We received a chunk we've already seen before. Ignore, since
# it should already be appended to self.data.
logger.debug(
f"Received a repeated chunk {chunk_id} "
f"from request {response.req_id}."
)
if get_resp.chunk_id == get_resp.total_chunks - 1:
self.callback(self.data)
return True
else:
# Not done yet
return False
|
ChunkCollector
|
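The ChunkCollector record above appends chunks strictly in order, ignores repeats, and treats a gap as an error. A minimal standalone sketch of that reassembly rule (not the Ray client API):

def assemble(chunks):
    # Reassemble (chunk_id, payload) pairs the way the collector above does.
    data = bytearray()
    last_seen = -1
    for chunk_id, payload in chunks:
        if chunk_id == last_seen + 1:    # next expected chunk: append it
            data.extend(payload)
            last_seen = chunk_id
        elif chunk_id <= last_seen:      # repeated chunk: already appended, skip
            continue
        else:                            # a chunk was skipped: ordering was violated
            raise RuntimeError(f"expected chunk {last_seen + 1}, got {chunk_id}")
    return bytes(data)

assert assemble([(0, b"he"), (1, b"llo"), (1, b"llo")]) == b"hello"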
python
|
fastai__fastai
|
fastai/data/load.py
|
{
"start": 2537,
"end": 3781
}
|
class ____(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
# %% ../../nbs/02_data.load.ipynb 15
def collate_error(e:Exception, batch):
"Raises error when the batch could not collate, stating what items in the batch are different sizes and their types"
err = f'Error when trying to collate the data into batches with fa_collate, at least two tensors in the batch are not the same size.\n\n'
# we need to iterate through the entire batch and find a mismatch
length = len(batch[0])
for idx in range(length): # for each type in the batch
for i, item in enumerate(batch):
if i == 0: shape_a, type_a = item[idx].shape, item[idx].__class__.__name__
elif item[idx].shape != shape_a:
shape_b = item[idx].shape
if shape_a != shape_b:
err += f'Mismatch found on axis {idx} of the batch and is of type `{type_a}`:\n\tItem at index 0 has shape: {shape_a}\n\tItem at index {i} has shape: {shape_b}\n\nPlease include a transform in `after_item` that ensures all data of type {type_a} is the same size'
e.args = [err]
raise
# %% ../../nbs/02_data.load.ipynb 18
@funcs_kwargs
|
SkipItemException
|
python
|
sympy__sympy
|
sympy/physics/quantum/gate.py
|
{
"start": 30464,
"end": 42588
}
|
class ____(TwoQubitGate):
"""Two qubit SWAP gate.
    This gate swaps the values of the two qubits.
Parameters
----------
label : tuple
A tuple of the form (target1, target2).
Examples
========
"""
is_hermitian = True
gate_name = 'SWAP'
gate_name_latex = r'\text{SWAP}'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('SWAP', format)
def decompose(self, **options):
"""Decompose the SWAP gate into CNOT gates."""
i, j = self.targets[0], self.targets[1]
g1 = CNotGate(i, j)
g2 = CNotGate(j, i)
return g1*g2*g1
def plot_gate(self, circ_plot, gate_idx):
min_wire = int(_min(self.targets))
max_wire = int(_max(self.targets))
circ_plot.control_line(gate_idx, min_wire, max_wire)
circ_plot.swap_point(gate_idx, min_wire)
circ_plot.swap_point(gate_idx, max_wire)
def _represent_ZGate(self, basis, **options):
"""Represent the SWAP gate in the computational basis.
The following representation is used to compute this:
SWAP = |1><1|x|1><1| + |0><0|x|0><0| + |1><0|x|0><1| + |0><1|x|1><0|
"""
format = options.get('format', 'sympy')
targets = [int(t) for t in self.targets]
min_target = _min(targets)
max_target = _max(targets)
nqubits = options.get('nqubits', self.min_qubits)
op01 = matrix_cache.get_matrix('op01', format)
op10 = matrix_cache.get_matrix('op10', format)
op11 = matrix_cache.get_matrix('op11', format)
op00 = matrix_cache.get_matrix('op00', format)
eye2 = matrix_cache.get_matrix('eye2', format)
result = None
for i, j in ((op01, op10), (op10, op01), (op00, op00), (op11, op11)):
product = nqubits*[eye2]
product[nqubits - min_target - 1] = i
product[nqubits - max_target - 1] = j
new_result = matrix_tensor_product(*product)
if result is None:
result = new_result
else:
result = result + new_result
return result
# Aliases for gate names.
CNOT = CNotGate
SWAP = SwapGate
def CPHASE(a,b): return CGateS((a,),Z(b))
#-----------------------------------------------------------------------------
# Represent
#-----------------------------------------------------------------------------
def represent_zbasis(controls, targets, target_matrix, nqubits, format='sympy'):
"""Represent a gate with controls, targets and target_matrix.
This function does the low-level work of representing gates as matrices
in the standard computational basis (ZGate). Currently, we support two
main cases:
1. One target qubit and no control qubits.
    2. One target qubit and multiple control qubits.
    For the case of multiple controls, we use the following expression [1]:
1_{2**n} + (|1><1|)^{(n-1)} x (target-matrix - 1_{2})
Parameters
----------
controls : list, tuple
A sequence of control qubits.
targets : list, tuple
A sequence of target qubits.
target_matrix : sympy.Matrix, numpy.matrix, scipy.sparse
The matrix form of the transformation to be performed on the target
qubits. The format of this matrix must match that passed into
the `format` argument.
nqubits : int
The total number of qubits used for the representation.
format : str
The format of the final matrix ('sympy', 'numpy', 'scipy.sparse').
Examples
========
References
----------
[1] http://www.johnlapeyre.com/qinf/qinf_html/node6.html.
"""
controls = [int(x) for x in controls]
targets = [int(x) for x in targets]
nqubits = int(nqubits)
# This checks for the format as well.
op11 = matrix_cache.get_matrix('op11', format)
eye2 = matrix_cache.get_matrix('eye2', format)
# Plain single qubit case
if len(controls) == 0 and len(targets) == 1:
product = []
bit = targets[0]
# Fill product with [I1,Gate,I2] such that the unitaries,
# I, cause the gate to be applied to the correct Qubit
if bit != nqubits - 1:
product.append(matrix_eye(2**(nqubits - bit - 1), format=format))
product.append(target_matrix)
if bit != 0:
product.append(matrix_eye(2**bit, format=format))
return matrix_tensor_product(*product)
# Single target, multiple controls.
elif len(targets) == 1 and len(controls) >= 1:
target = targets[0]
# Build the non-trivial part.
product2 = []
for i in range(nqubits):
product2.append(matrix_eye(2, format=format))
for control in controls:
product2[nqubits - 1 - control] = op11
product2[nqubits - 1 - target] = target_matrix - eye2
return matrix_eye(2**nqubits, format=format) + \
matrix_tensor_product(*product2)
# Multi-target, multi-control is not yet implemented.
else:
raise NotImplementedError(
'The representation of multi-target, multi-control gates '
'is not implemented.'
)
#-----------------------------------------------------------------------------
# Gate manipulation functions.
#-----------------------------------------------------------------------------
def gate_simp(circuit):
"""Simplifies gates symbolically
It first sorts gates using gate_sort. It then applies basic
simplification rules to the circuit, e.g., XGate**2 = Identity
"""
# Bubble sort out gates that commute.
circuit = gate_sort(circuit)
    # Do simplifications by substituting a simplification into the first element
    # which can be simplified. We recursively call gate_simp with the new circuit
    # as input while more simplifications exist.
if isinstance(circuit, Add):
return sum(gate_simp(t) for t in circuit.args)
elif isinstance(circuit, Mul):
circuit_args = circuit.args
elif isinstance(circuit, Pow):
b, e = circuit.as_base_exp()
circuit_args = (gate_simp(b)**e,)
else:
return circuit
# Iterate through each element in circuit, simplify if possible.
for i in range(len(circuit_args)):
# H,X,Y or Z squared is 1.
# T**2 = S, S**2 = Z
if isinstance(circuit_args[i], Pow):
if isinstance(circuit_args[i].base,
(HadamardGate, XGate, YGate, ZGate)) \
and isinstance(circuit_args[i].exp, Number):
# Build a new circuit taking replacing the
# H,X,Y,Z squared with one.
newargs = (circuit_args[:i] +
(circuit_args[i].base**(circuit_args[i].exp % 2),) +
circuit_args[i + 1:])
# Recursively simplify the new circuit.
circuit = gate_simp(Mul(*newargs))
break
elif isinstance(circuit_args[i].base, PhaseGate):
# Build a new circuit taking old circuit but splicing
# in simplification.
newargs = circuit_args[:i]
# Replace PhaseGate**2 with ZGate.
newargs = newargs + (ZGate(circuit_args[i].base.args[0])**
(Integer(circuit_args[i].exp/2)), circuit_args[i].base**
(circuit_args[i].exp % 2))
# Append the last elements.
newargs = newargs + circuit_args[i + 1:]
# Recursively simplify the new circuit.
circuit = gate_simp(Mul(*newargs))
break
elif isinstance(circuit_args[i].base, TGate):
# Build a new circuit taking all the old elements.
newargs = circuit_args[:i]
# Put an Phasegate in place of any TGate**2.
newargs = newargs + (PhaseGate(circuit_args[i].base.args[0])**
Integer(circuit_args[i].exp/2), circuit_args[i].base**
(circuit_args[i].exp % 2))
# Append the last elements.
newargs = newargs + circuit_args[i + 1:]
# Recursively simplify the new circuit.
circuit = gate_simp(Mul(*newargs))
break
return circuit
def gate_sort(circuit):
"""Sorts the gates while keeping track of commutation relations
This function uses a bubble sort to rearrange the order of gate
application. Keeps track of Quantum computations special commutation
relations (e.g. things that apply to the same Qubit do not commute with
each other)
circuit is the Mul of gates that are to be sorted.
"""
# Make sure we have an Add or Mul.
if isinstance(circuit, Add):
return sum(gate_sort(t) for t in circuit.args)
if isinstance(circuit, Pow):
return gate_sort(circuit.base)**circuit.exp
elif isinstance(circuit, Gate):
return circuit
if not isinstance(circuit, Mul):
return circuit
changes = True
while changes:
changes = False
circ_array = circuit.args
for i in range(len(circ_array) - 1):
# Go through each element and switch ones that are in wrong order
if isinstance(circ_array[i], (Gate, Pow)) and \
isinstance(circ_array[i + 1], (Gate, Pow)):
# If we have a Pow object, look at only the base
first_base, first_exp = circ_array[i].as_base_exp()
second_base, second_exp = circ_array[i + 1].as_base_exp()
# Use SymPy's hash based sorting. This is not mathematical
# sorting, but is rather based on comparing hashes of objects.
# See Basic.compare for details.
if first_base.compare(second_base) > 0:
if Commutator(first_base, second_base).doit() == 0:
new_args = (circuit.args[:i] + (circuit.args[i + 1],) +
(circuit.args[i],) + circuit.args[i + 2:])
circuit = Mul(*new_args)
changes = True
break
if AntiCommutator(first_base, second_base).doit() == 0:
new_args = (circuit.args[:i] + (circuit.args[i + 1],) +
(circuit.args[i],) + circuit.args[i + 2:])
sign = _S.NegativeOne**(first_exp*second_exp)
circuit = sign*Mul(*new_args)
changes = True
break
return circuit
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def random_circuit(ngates, nqubits, gate_space=(X, Y, Z, S, T, H, CNOT, SWAP)):
"""Return a random circuit of ngates and nqubits.
This uses an equally weighted sample of (X, Y, Z, S, T, H, CNOT, SWAP)
gates.
Parameters
----------
ngates : int
The number of gates in the circuit.
nqubits : int
The number of qubits in the circuit.
gate_space : tuple
A tuple of the gate classes that will be used in the circuit.
Repeating gate classes multiple times in this tuple will increase
the frequency they appear in the random circuit.
"""
qubit_space = range(nqubits)
result = []
for i in range(ngates):
g = random.choice(gate_space)
if g == CNotGate or g == SwapGate:
qubits = random.sample(qubit_space, 2)
g = g(*qubits)
else:
qubit = random.choice(qubit_space)
g = g(qubit)
result.append(g)
return Mul(*result)
def zx_basis_transform(self, format='sympy'):
"""Transformation matrix from Z to X basis."""
return matrix_cache.get_matrix('ZX', format)
def zy_basis_transform(self, format='sympy'):
"""Transformation matrix from Z to Y basis."""
return matrix_cache.get_matrix('ZY', format)
|
SwapGate
|
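To round off the SwapGate record above, a short check of the documented CNOT decomposition and of the 2-qubit Z-basis matrix, using the standard SymPy quantum API (small example only):

from sympy import Matrix
from sympy.physics.quantum.gate import CNotGate, SwapGate
from sympy.physics.quantum.represent import represent

swap = SwapGate(0, 1)

# SWAP(i, j) decomposes into CNOT(i, j) * CNOT(j, i) * CNOT(i, j).
assert swap.decompose() == CNotGate(0, 1) * CNotGate(1, 0) * CNotGate(0, 1)

# In the computational (Z) basis on two qubits it is the familiar 4x4 permutation.
assert represent(swap, nqubits=2) == Matrix([
    [1, 0, 0, 0],
    [0, 0, 1, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
])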