| language (string, 1 class) | repo (string, 346 values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/function8.py
|
{
"start": 249,
"end": 490
}
|
class ____(Generic[T]):
values: Sequence[float | T]
def create_container(values: Sequence[float | T]) -> Container[T]:
return Container(values)
arg: Sequence[float | int] = (1, 2.0)
x: Container[int] = create_container(arg)
|
Container
|
python
|
joke2k__faker
|
tests/providers/test_address.py
|
{
"start": 76428,
"end": 78472
}
|
class ____:
"""Test en_IN address provider methods"""
def test_city_name(self, faker, num_samples):
"""Tests `city names` are fetched correctly"""
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in EnInAddressProvider.cities
def test_state(self, faker, num_samples):
"""Tests `states` are fetched correctly"""
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in EnInAddressProvider.states
def test_union_territories(self, faker, num_samples):
"""Tests `union_territories` are fetched correctly"""
for _ in range(num_samples):
union_territory = faker.union_territory()
assert isinstance(union_territory, str)
assert (union_territory,) in EnInAddressProvider.union_territories
@pytest.mark.parametrize("pincodes", ["pincode_in_state", "zipcode_in_state", "postcode_in_state"])
def test_pincodes_in_state(self, faker, num_samples, pincodes):
"""Test `pincodes` for state and union territories"""
for _ in range(num_samples):
include_ut = faker.random_element([True, False])
pincode = getattr(faker, pincodes)(include_union_territories=include_ut)
assert isinstance(pincode, int)
assert len(str(pincode)) == 6
@pytest.mark.parametrize(
"pincodes",
[
("pincode_in_army"),
("zipcode_in_army"),
("postcode_in_army"),
("postcode_in_military"),
("zipcode_in_military"),
("pincode_in_military"),
],
)
def test_pincodes_in_military(self, faker, num_samples, pincodes):
"""Test `pincodes` for Army"""
for _ in range(num_samples):
pincode = getattr(faker, pincodes)()
assert isinstance(pincode, int)
assert len(str(pincode)) == 6
|
TestEnIn
|
python
|
getsentry__sentry
|
tests/sentry/stacktraces/test_in_app_normalization.py
|
{
"start": 1048,
"end": 2028
}
|
class ____(TestCase):
def test_sets_client_in_app(self) -> None:
event_data_with_client_values = make_event(
[
make_stacktrace(
frame_0_in_app=True,
frame_1_in_app=False,
)
]
)
normalize_stacktraces_for_grouping(event_data_with_client_values)
frames = event_data_with_client_values["exception"]["values"][0]["stacktrace"]["frames"]
assert frames[0]["data"]["client_in_app"] is True
assert frames[1]["data"]["client_in_app"] is False
event_data_no_client_values = make_event([make_stacktrace()])
normalize_stacktraces_for_grouping(event_data_no_client_values)
frames = event_data_no_client_values["exception"]["values"][0]["stacktrace"]["frames"]
assert frames[0]["data"].get("client_in_app") is None
assert frames[1]["data"].get("client_in_app") is None
|
NormalizeStacktracesFroGroupingTest
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/tests/test_param_validation.py
|
{
"start": 1113,
"end": 1518
}
|
class ____:
"""A class to test the _InstancesOf constraint and the validation of methods."""
@validate_params({"a": [Real]}, prefer_skip_nested_validation=True)
def _method(self, a):
"""A validated method"""
@deprecated()
@validate_params({"a": [Real]}, prefer_skip_nested_validation=True)
def _deprecated_method(self, a):
"""A deprecated validated method"""
|
_Class
|
python
|
pypa__setuptools
|
pkg_resources/__init__.py
|
{
"start": 75870,
"end": 76671
}
|
class ____(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path: str, egg_info: str) -> None:
self.module_path = path
self.egg_info = egg_info
|
PathMetadata
|
python
|
django__django
|
tests/auth_tests/test_models.py
|
{
"start": 1727,
"end": 2027
}
|
class ____(TestCase):
fixtures = ["regular.json"]
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username="my_username")
group = Group.objects.get(name="my_group")
self.assertEqual(group, user.groups.get())
|
LoadDataWithoutNaturalKeysTestCase
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/sparse_ops/sparse_xent_op_d9m_test.py
|
{
"start": 3004,
"end": 8337
}
|
class ____(
sparse_xent_op_test_base.SparseXentOpTestBase):
"""Test that SparseSoftmaxCrossEntropyWithLogits operates reproducibly.
Inheriting from sparse_xent_op_test_base.SparseXentOpTestBase ensures that
regular op functionality is correct when the deterministic code-path is
selected.
Note that because nn_ops.sparse_softmax_cross_entropy_with_logits_v2 calls
nn_ops.sparse_softmax_cross_entropy_with_logits directly, the focus of
testing is on the former in order to test both.
"""
def _randomInts(self, shape, high, dtype):
return constant_op.constant(
np.random.randint(low=0, high=high, size=shape).astype(dtype))
def _randomFloats(self, shape, dtype):
return constant_op.constant(
(2 * np.random.random_sample(shape) - 1).astype(dtype))
def _generateInputs(self, labels_dtype, logits_dtype, seed):
batch_size = 1024
classes_count = 1000
np.random.seed(seed)
labels_shape = (batch_size)
labels = self._randomInts(
labels_shape, high=classes_count, dtype=labels_dtype)
logits_shape = (batch_size, classes_count)
logits = self._randomFloats(logits_shape, logits_dtype)
return labels, logits
@test_util.run_in_graph_and_eager_modes
def testForward(self):
with self.cached_session():
for logits_dtype in [np.float16, np.float32, np.float64, \
dtypes.bfloat16.as_numpy_dtype]:
for labels_dtype in [np.int32, np.int64]:
for trial in range(5):
seed = 123 + trial
labels, logits = self._generateInputs(
labels_dtype, logits_dtype, seed=seed)
result_a = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits)
result_b = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits)
self.assertAllEqual(result_a, result_b)
@test_util.run_in_graph_and_eager_modes
def testBackward(self):
with self.cached_session():
for logits_dtype in [np.float16, np.float32, np.float64, \
dtypes.bfloat16.as_numpy_dtype]:
for labels_dtype in [np.int32, np.int64]:
labels, logits = self._generateInputs(
labels_dtype, logits_dtype, seed=456)
output_shape = labels.shape[0]
def gradients(seed):
np.random.seed(seed)
upstream_gradients = self._randomFloats(output_shape, logits_dtype)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(logits)
op_output = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits)
gradient_injector_output = op_output * upstream_gradients
return tape.gradient(gradient_injector_output, logits)
for trial in range(5):
seed = 456 + trial
result_a = gradients(seed=seed)
result_b = gradients(seed=seed)
self.assertAllEqual(result_a, result_b)
# Modifications to the parent class
# (sparse_xent_op_test_base.SparseXentOpTestBase) follow
def testInvalidLabelGPU(self):
"""Modified test for invalid labels on GPU.
When running on GPU, the pre-existing, nondeterministic implementation
produces NaN (in both the forward and backward directions) for results
associated with invalid labels (less than zero or greater than the number of
classes minus one). However, while the deterministic implementation also
produces NaN in the forward direction, it produces zeros in the backward
direction.
"""
self._testInvalidLabelGPU(invalid_label_gradient=0.0)
def testInvalidLabelCPU(self):
"""Modified test for invalid labels on CPU.
When running on CPU, the pre-existing, nondeterministic implementation
throws a custom exception when any of the label values are invalid (less
than zero or greater than the number of classes minus one). However, in the
deterministic implementation, tf.gather throws an exception instead.
"""
self._testInvalidLabelCPU(
expected_regex="indices\[0\] = 4 is not in \[0, 4\)")
def testLabelsPlaceholderScalar(self):
"""Test exception-throwing for non-statically-shaped, zero-rank labels.
The deterministic implementation cannot check for this case because it does
not have a specific implementation of SparseSoftmaxXentWithLogitsOp.
Instead tf.gather, which is used to create the deterministic implementation,
throws an error.
"""
self._testLabelsPlaceholderScalar(
expected_error_message="Expected batch_dims in the range \[0, 0\], " +
"but got 1")
def testScalarHandling(self):
"""Test exception-throwing for non-statically-shaped, zero-rank labels.
The deterministic implementation cannot check for this case because it does
not have a specific implementation of SparseSoftmaxXentWithLogitsOp.
Instead tf.gather, which is used to create the deterministic implementation,
throws an error.
"""
self._testScalarHandling(
expected_regex="Expected batch_dims in the range \[0, 0\], but got 1.*")
if __name__ == "__main__":
# TODO(reedwm): Merge this test with sparse_xent_op_test.py.
config.enable_op_determinism()
test.main()
|
SparseXentOpDeterministicTest
|
python
|
getsentry__sentry
|
src/sentry/replays/usecases/query/conditions/base.py
|
{
"start": 161,
"end": 1259
}
|
class ____:
"""Computed expression base column.
Computed expressions are not passed as arguments to the condition visitor methods. They are
computed on the fly within the visitor.
"""
@staticmethod
def visit_eq(value: Any) -> Condition:
not_supported()
@staticmethod
def visit_neq(value: Any) -> Condition:
not_supported()
@staticmethod
def visit_gt(value: Any) -> Condition:
not_supported()
@staticmethod
def visit_gte(value: Any) -> Condition:
not_supported()
@staticmethod
def visit_lt(value: Any) -> Condition:
not_supported()
@staticmethod
def visit_lte(value: Any) -> Condition:
not_supported()
@staticmethod
def visit_match(value: Any) -> Condition:
not_supported()
@staticmethod
def visit_not_match(value: Any) -> Condition:
not_supported()
@staticmethod
def visit_in(value: list[Any]) -> Condition:
not_supported()
@staticmethod
def visit_not_in(value: list[Any]) -> Condition:
not_supported()
|
ComputedBase
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_endpoint_hints.py
|
{
"start": 383,
"end": 5003
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'for_nodes': 'list[V1ForNode]',
'for_zones': 'list[V1ForZone]'
}
attribute_map = {
'for_nodes': 'forNodes',
'for_zones': 'forZones'
}
def __init__(self, for_nodes=None, for_zones=None, local_vars_configuration=None): # noqa: E501
"""V1EndpointHints - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._for_nodes = None
self._for_zones = None
self.discriminator = None
if for_nodes is not None:
self.for_nodes = for_nodes
if for_zones is not None:
self.for_zones = for_zones
@property
def for_nodes(self):
"""Gets the for_nodes of this V1EndpointHints. # noqa: E501
forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled. # noqa: E501
:return: The for_nodes of this V1EndpointHints. # noqa: E501
:rtype: list[V1ForNode]
"""
return self._for_nodes
@for_nodes.setter
def for_nodes(self, for_nodes):
"""Sets the for_nodes of this V1EndpointHints.
forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled. # noqa: E501
:param for_nodes: The for_nodes of this V1EndpointHints. # noqa: E501
:type: list[V1ForNode]
"""
self._for_nodes = for_nodes
@property
def for_zones(self):
"""Gets the for_zones of this V1EndpointHints. # noqa: E501
forZones indicates the zone(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. # noqa: E501
:return: The for_zones of this V1EndpointHints. # noqa: E501
:rtype: list[V1ForZone]
"""
return self._for_zones
@for_zones.setter
def for_zones(self, for_zones):
"""Sets the for_zones of this V1EndpointHints.
forZones indicates the zone(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. # noqa: E501
:param for_zones: The for_zones of this V1EndpointHints. # noqa: E501
:type: list[V1ForZone]
"""
self._for_zones = for_zones
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EndpointHints):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EndpointHints):
return True
return self.to_dict() != other.to_dict()
|
V1EndpointHints
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pynvml/pynvml.py
|
{
"start": 57535,
"end": 57702
}
|
class ____(_PrintableStructure):
_fields_ = [
('pci', nvmlPciInfo_t),
('uuid', c_char * NVML_DEVICE_UUID_BUFFER_SIZE)
]
|
c_nvmlExcludedDeviceInfo_t
|
python
|
FactoryBoy__factory_boy
|
tests/test_declarations.py
|
{
"start": 5775,
"end": 6899
}
|
class ____(unittest.TestCase):
def test_post_generation(self):
call_params = []
def foo(*args, **kwargs):
call_params.append(args)
call_params.append(kwargs)
helpers.build(
dict,
foo=declarations.PostGeneration(foo),
foo__bar=42,
blah=42,
blah__baz=1,
)
self.assertEqual(2, len(call_params))
self.assertEqual(3, len(call_params[0])) # instance, step, context.value
self.assertEqual({'bar': 42}, call_params[1])
def test_decorator_simple(self):
call_params = []
@helpers.post_generation
def foo(*args, **kwargs):
call_params.append(args)
call_params.append(kwargs)
helpers.build(
dict,
foo=foo,
foo__bar=42,
blah=42,
blah__baz=1,
)
self.assertEqual(2, len(call_params))
self.assertEqual(3, len(call_params[0])) # instance, step, context.value
self.assertEqual({'bar': 42}, call_params[1])
|
PostGenerationDeclarationTestCase
|
python
|
huggingface__transformers
|
src/transformers/models/vitdet/modeling_vitdet.py
|
{
"start": 13445,
"end": 14808
}
|
class ____(nn.Module):
"""
The standard bottleneck residual block without the last activation layer. It contains 3 conv layers with kernels
1x1, 3x3, 1x1.
"""
def __init__(self, config, in_channels, out_channels, bottleneck_channels):
"""
Args:
config (`VitDetConfig`):
Model configuration.
in_channels (`int`):
Number of input channels.
out_channels (`int`):
Number of output channels.
bottleneck_channels (`int`):
Number of output channels for the 3x3 "bottleneck" conv layers.
"""
super().__init__()
self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, 1, bias=False)
self.norm1 = VitDetLayerNorm(bottleneck_channels)
self.act1 = ACT2FN[config.hidden_act]
self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1, bias=False)
self.norm2 = VitDetLayerNorm(bottleneck_channels)
self.act2 = ACT2FN[config.hidden_act]
self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, 1, bias=False)
self.norm3 = VitDetLayerNorm(out_channels)
def forward(self, x):
out = x
for layer in self.children():
out = layer(out)
out = x + out
return out
|
VitDetResBottleneckBlock
|
python
|
geekcomputers__Python
|
Split_Circular_Linked_List.py
|
{
"start": 94,
"end": 1805
}
|
class ____:
def __init__(self):
self.head = None
def Push(self, data):
temp = Node(data)
temp.next = self.head
temp1 = self.head
if self.head is not None:
while temp1.next is not None:
temp1 = temp1.next
temp1.next = temp
else:
temp.next = temp
self.head = temp
def Split_List(self, head1, head2):
if self.head is None:
return
slow_ptr = self.head
fast_ptr = self.head
while fast_ptr.next != self.head and fast_ptr.next.next != self.head:
fast_ptr = fast_ptr.next.next
slow_ptr = slow_ptr.next.next
if fast_ptr.next.next == self.head:
fast_ptr = fast_ptr.next
head1 = self.head
slow_ptr.next = head1
if self.head.next != self.head:
head2.head = slow_ptr.next
fast_ptr.next = slow_ptr.next
def Display(self):
temp = self.head
if self.head is not None:
while temp:
print(temp.data, "->", end=" ")
temp = temp.next
if temp == self.head:
print(temp.data)
break
if __name__ == "__main__":
L_list = Circular_Linked_List()
head1 = Circular_Linked_List()
head2 = Circular_Linked_List()
L_list.Push(6)
L_list.Push(4)
L_list.Push(2)
L_list.Push(8)
L_list.Push(12)
L_list.Push(10)
L_list.Split_List(head1, head2)
print("Circular Linked List: ")
L_list.Display()
print("Firts Split Linked List: ")
head1.Display()
print("Second Split Linked List: ")
head2.Display()
|
Circular_Linked_List
|
python
|
python__mypy
|
mypy/types.py
|
{
"start": 106999,
"end": 113686
}
|
class ____(ProperType):
"""Type of TypedDict object {'k1': v1, ..., 'kn': vn}.
A TypedDict object is a dictionary with specific string (literal) keys. Each
key has a value with a distinct type that depends on the key. TypedDict objects
are normal dict objects at runtime.
A TypedDictType can be either named or anonymous. If it's anonymous, its
fallback will be typing_extensions._TypedDict (Instance). _TypedDict is a subclass
of Mapping[str, object] and defines all non-mapping dict methods that TypedDict
supports. Some dict methods are unsafe and not supported. _TypedDict isn't defined
at runtime.
If a TypedDict is named, its fallback will be an Instance of the named type
(ex: "Point") whose TypeInfo has a typeddict_type that is anonymous. This
is similar to how named tuples work.
TODO: The fallback structure is perhaps overly complicated.
"""
__slots__ = (
"items",
"required_keys",
"readonly_keys",
"fallback",
"extra_items_from",
"to_be_mutated",
)
items: dict[str, Type] # item_name -> item_type
required_keys: set[str]
readonly_keys: set[str]
fallback: Instance
extra_items_from: list[ProperType] # only used during semantic analysis
to_be_mutated: bool # only used in a plugin for `.update`, `|=`, etc
def __init__(
self,
items: dict[str, Type],
required_keys: set[str],
readonly_keys: set[str],
fallback: Instance,
line: int = -1,
column: int = -1,
) -> None:
super().__init__(line, column)
self.items = items
self.required_keys = required_keys
self.readonly_keys = readonly_keys
self.fallback = fallback
self.can_be_true = len(self.items) > 0
self.can_be_false = len(self.required_keys) == 0
self.extra_items_from = []
self.to_be_mutated = False
def accept(self, visitor: TypeVisitor[T]) -> T:
return visitor.visit_typeddict_type(self)
def __hash__(self) -> int:
return hash(
(
frozenset(self.items.items()),
self.fallback,
frozenset(self.required_keys),
frozenset(self.readonly_keys),
)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, TypedDictType):
return NotImplemented
if self is other:
return True
return (
frozenset(self.items.keys()) == frozenset(other.items.keys())
and all(
left_item_type == right_item_type
for (_, left_item_type, right_item_type) in self.zip(other)
)
and self.fallback == other.fallback
and self.required_keys == other.required_keys
and self.readonly_keys == other.readonly_keys
)
def serialize(self) -> JsonDict:
return {
".class": "TypedDictType",
"items": [[n, t.serialize()] for (n, t) in self.items.items()],
"required_keys": sorted(self.required_keys),
"readonly_keys": sorted(self.readonly_keys),
"fallback": self.fallback.serialize(),
}
@classmethod
def deserialize(cls, data: JsonDict) -> TypedDictType:
assert data[".class"] == "TypedDictType"
return TypedDictType(
{n: deserialize_type(t) for (n, t) in data["items"]},
set(data["required_keys"]),
set(data["readonly_keys"]),
Instance.deserialize(data["fallback"]),
)
def write(self, data: WriteBuffer) -> None:
write_tag(data, TYPED_DICT_TYPE)
self.fallback.write(data)
write_type_map(data, self.items)
write_str_list(data, sorted(self.required_keys))
write_str_list(data, sorted(self.readonly_keys))
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> TypedDictType:
assert read_tag(data) == INSTANCE
fallback = Instance.read(data)
ret = TypedDictType(
read_type_map(data), set(read_str_list(data)), set(read_str_list(data)), fallback
)
assert read_tag(data) == END_TAG
return ret
@property
def is_final(self) -> bool:
return self.fallback.type.is_final
def is_anonymous(self) -> bool:
return self.fallback.type.fullname in TPDICT_FB_NAMES
def as_anonymous(self) -> TypedDictType:
if self.is_anonymous():
return self
assert self.fallback.type.typeddict_type is not None
return self.fallback.type.typeddict_type.as_anonymous()
def copy_modified(
self,
*,
fallback: Instance | None = None,
item_types: list[Type] | None = None,
item_names: list[str] | None = None,
required_keys: set[str] | None = None,
readonly_keys: set[str] | None = None,
) -> TypedDictType:
if fallback is None:
fallback = self.fallback
if item_types is None:
items = self.items
else:
items = dict(zip(self.items, item_types))
if required_keys is None:
required_keys = self.required_keys
if readonly_keys is None:
readonly_keys = self.readonly_keys
if item_names is not None:
items = {k: v for (k, v) in items.items() if k in item_names}
required_keys &= set(item_names)
return TypedDictType(items, required_keys, readonly_keys, fallback, self.line, self.column)
def create_anonymous_fallback(self) -> Instance:
anonymous = self.as_anonymous()
return anonymous.fallback
def names_are_wider_than(self, other: TypedDictType) -> bool:
return len(other.items.keys() - self.items.keys()) == 0
def zip(self, right: TypedDictType) -> Iterable[tuple[str, Type, Type]]:
left = self
for item_name, left_item_type in left.items.items():
right_item_type = right.items.get(item_name)
if right_item_type is not None:
yield (item_name, left_item_type, right_item_type)
def zipall(self, right: TypedDictType) -> Iterable[tuple[str, Type | None, Type | None]]:
left = self
for item_name, left_item_type in left.items.items():
right_item_type = right.items.get(item_name)
yield (item_name, left_item_type, right_item_type)
for item_name, right_item_type in right.items.items():
if item_name in left.items:
continue
yield (item_name, None, right_item_type)
|
TypedDictType
|
python
|
jazzband__pip-tools
|
piptools/resolver.py
|
{
"start": 1616,
"end": 5001
}
|
class ____:
"""
Summary of a requirement's properties for comparison purposes.
"""
def __init__(self, ireq: InstallRequirement) -> None:
self.req = ireq.req
self.key = key_from_ireq(ireq)
self.extras = frozenset(ireq.extras)
self.specifier = ireq.specifier
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.key == other.key
and self.specifier == other.specifier
and self.extras == other.extras
)
def __hash__(self) -> int:
return hash((self.key, self.specifier, self.extras))
def __str__(self) -> str:
return repr((self.key, str(self.specifier), sorted(self.extras)))
def combine_install_requirements(
ireqs: Iterable[InstallRequirement],
) -> InstallRequirement:
"""
Return a single install requirement that reflects a combination of
all the inputs.
"""
# We will store the source ireqs in a _source_ireqs attribute;
# if any of the inputs have this, then use those sources directly.
source_ireqs: list[InstallRequirement] = []
for ireq in ireqs:
source_ireqs.extend(getattr(ireq, "_source_ireqs", [ireq]))
# Optimization. Don't bother with combination logic.
if len(source_ireqs) == 1:
return source_ireqs[0]
link_attrs = {
attr: getattr(source_ireqs[0], attr) for attr in ("link", "original_link")
}
constraint = source_ireqs[0].constraint
extras = set(source_ireqs[0].extras)
# deepcopy the accumulator req so as to not modify the inputs
req = copy.deepcopy(source_ireqs[0].req)
for ireq in source_ireqs[1:]:
# NOTE we may be losing some info on dropped reqs here
if req is not None and ireq.req is not None:
req.specifier &= ireq.req.specifier
constraint &= ireq.constraint
extras |= ireq.extras
if req is not None:
req.extras = set(extras)
for attr_name, attr_val in link_attrs.items():
link_attrs[attr_name] = attr_val or getattr(ireq, attr_name)
# InstallRequirements objects are assumed to come from only one source, and
# so they support only a single comes_from entry. This function breaks this
# model. As a workaround, we deterministically choose a single source for
# the comes_from entry, and add an extra _source_ireqs attribute to keep
# track of multiple sources for use within pip-tools.
if any(ireq.comes_from is None for ireq in source_ireqs):
# None indicates package was directly specified.
comes_from = None
else:
# Populate the comes_from field from one of the sources.
# Requirement input order is not stable, so we need to sort:
# We choose the shortest entry in order to keep the printed
# representation as concise as possible.
comes_from = min(
(ireq.comes_from for ireq in source_ireqs),
key=lambda x: (len(str(x)), str(x)),
)
combined_ireq = copy_install_requirement(
template=source_ireqs[0],
req=req,
comes_from=comes_from,
constraint=constraint,
extras=extras,
**link_attrs,
)
combined_ireq._source_ireqs = source_ireqs
return combined_ireq
|
RequirementSummary
|
python
|
ray-project__ray
|
rllib/offline/feature_importance.py
|
{
"start": 3944,
"end": 10680
}
|
class ____(OfflineEvaluator):
@override(OfflineEvaluator)
def __init__(
self,
policy: Policy,
repeat: int = 1,
limit_fraction: float = 1.0,
perturb_fn: Callable[[pd.DataFrame, int], pd.DataFrame] = _perturb_df,
):
"""Feature importance in a model inspection technique that can be used for any
fitted predictor when the data is tablular.
This implementation is also known as permutation importance that is defined to
be the variation of the model's prediction when a single feature value is
randomly shuffled. In RLlib it is implemented as a custom OffPolicyEstimator
which is used to evaluate RLlib policies without performing environment
interactions.
Example usage: In the example below the feature importance module is used to
evaluate the policy, and each feature's importance is computed after each
training iteration. The permutations are repeated `self.repeat` times and the
results are averaged across repeats.
```python
config = (
AlgorithmConfig()
.offline_data(
off_policy_estimation_methods=
{
"feature_importance": {
"type": FeatureImportance,
"repeat": 10,
"limit_fraction": 0.1,
}
}
)
)
algorithm = DQN(config=config)
results = algorithm.train()
```
Args:
policy: the policy to use for feature importance.
repeat: number of times to repeat the perturbation.
perturb_fn: function to perturb the features. By default reshuffle the
features within the batch.
limit_fraction: fraction of the dataset to use for feature importance
This is only used in estimate_on_dataset when the dataset is too large
to compute feature importance on.
"""
super().__init__(policy)
self.repeat = repeat
self.perturb_fn = perturb_fn
self.limit_fraction = limit_fraction
def estimate(self, batch: SampleBatchType) -> Dict[str, Any]:
"""Estimate the feature importance of the policy.
Given a batch of tabular observations, the importance of each feature is
computed by perturbing each feature and computing the difference between the
perturbed policy and the reference policy. The importance is computed for each
feature and each perturbation is repeated `self.repeat` times.
Args:
batch: the batch of data to use for feature importance.
Returns:
A dict mapping each feature index string to its importance.
"""
batch = convert_ma_batch_to_sample_batch(batch)
obs_batch = batch["obs"]
n_features = obs_batch.shape[-1]
importance = np.zeros((self.repeat, n_features))
ref_actions, _, _ = self.policy.compute_actions(obs_batch, explore=False)
for r in range(self.repeat):
for i in range(n_features):
copy_obs_batch = copy.deepcopy(obs_batch)
_perturb_fn(copy_obs_batch, index=i)
perturbed_actions, _, _ = self.policy.compute_actions(
copy_obs_batch, explore=False
)
importance[r, i] = np.mean(np.abs(perturbed_actions - ref_actions))
# take an average across repeats
importance = importance.mean(0)
metrics = {f"feature_{i}": importance[i] for i in range(len(importance))}
return metrics
@override(OfflineEvaluator)
def estimate_on_dataset(
self, dataset: Dataset, *, n_parallelism: int = ...
) -> Dict[str, Any]:
"""Estimate the feature importance of the policy given a dataset.
For each feature in the dataset, the importance is computed by applying
perturbations to each feature and computing the difference between the
perturbed prediction and the reference prediction. The importance
computation for each feature and each perturbation is repeated `self.repeat`
times. If dataset is large the user can initialize the estimator with a
`limit_fraction` to limit the dataset to a fraction of the original dataset.
The dataset should include a column named `obs` where each row is a vector of D
dimensions. The importance is computed for each dimension of the vector.
Note (Implementation detail): The computation across features are distributed
with ray workers since each feature is independent of each other.
Args:
dataset: the dataset to use for feature importance.
n_parallelism: number of parallel workers to use for feature importance.
Returns:
A dict mapping each feature index string to its importance.
"""
policy_state = self.policy.get_state()
# step 1: limit the dataset to a few first rows
ds = dataset.limit(int(self.limit_fraction * dataset.count()))
# step 2: compute the reference actions
bsize = max(1, ds.count() // n_parallelism)
actions_ds = ds.map_batches(
_compute_actions,
batch_size=bsize,
fn_kwargs={
"output_key": "ref_actions",
"policy_state": policy_state,
},
)
# step 3: compute the feature importance
n_features = ds.take(1)[0][SampleBatch.OBS].shape[-1]
importance = np.zeros((self.repeat, n_features))
for r in range(self.repeat):
# shuffle the entire dataset
shuffled_ds = actions_ds.random_shuffle()
bsize_per_task = max(1, (shuffled_ds.count() * n_features) // n_parallelism)
# for each index perturb the dataset and compute the feat importance score
remote_fns = [
get_feature_importance_on_index.remote(
dataset=shuffled_ds,
index=i,
perturb_fn=self.perturb_fn,
bsize=bsize_per_task,
policy_state=policy_state,
)
for i in range(n_features)
]
ds_w_fi_scores = ray.get(remote_fns)
importance[r] = np.array([d.mean("delta") for d in ds_w_fi_scores])
importance = importance.mean(0)
metrics = {f"feature_{i}": importance[i] for i in range(len(importance))}
return metrics
|
FeatureImportance
|
python
|
huggingface__transformers
|
src/transformers/models/qwen2_moe/modeling_qwen2_moe.py
|
{
"start": 28068,
"end": 32536
}
|
class ____(Qwen2MoePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Qwen2MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_experts
self.num_experts_per_tok = config.num_experts_per_tok
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Qwen2MoeForCausalLM
>>> model = Qwen2MoeForCausalLM.from_pretrained("mistralai/Qwen2Moe-8x7B-v0.1")
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Qwen2Moe-8x7B-v0.1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_router_logits=output_router_logits,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
|
Qwen2MoeForCausalLM
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/runnables/graph.py
|
{
"start": 731,
"end": 907
}
|
class ____(Protocol):
"""Protocol for objects that can be converted to a string."""
def __str__(self) -> str:
"""Convert the object to a string."""
|
Stringifiable
|
python
|
sphinx-doc__sphinx
|
sphinx/transforms/__init__.py
|
{
"start": 7824,
"end": 8164
}
|
class ____(SphinxTransform):
"""Update source and rawsource attributes"""
default_priority = 10
def apply(self, **kwargs: Any) -> None:
for node in self.document.findall():
if isinstance(node, (nodes.TextElement, nodes.image, nodes.topic)):
apply_source_workaround(node)
|
ApplySourceWorkaround
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_textbox25.py
|
{
"start": 315,
"end": 867
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox25.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"line": {"width": 3.25}})
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airlift/dagster_airlift/core/serialization/serialized_data.py
|
{
"start": 3092,
"end": 3229
}
|
class ____(NamedTuple):
dag_id: str
task_id: str
created_at: str
updated_at: str
@whitelist_for_serdes
|
DatasetProducingTask
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/decrepit/ciphers/algorithms.py
|
{
"start": 1065,
"end": 1097
}
|
class ____:
key_size = 64
|
_DES
|
python
|
django__django
|
django/forms/widgets.py
|
{
"start": 19415,
"end": 19690
}
|
class ____(DateTimeBaseInput):
format_key = "TIME_INPUT_FORMATS"
template_name = "django/forms/widgets/time.html"
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == "")
|
TimeInput
|
python
|
ray-project__ray
|
python/ray/tune/tests/test_soft_imports.py
|
{
"start": 29,
"end": 767
}
|
class ____(unittest.TestCase):
"""Tests whether it's possible to use Ray Tune without soft dependencies"""
def testSoftImports(self):
import ray.tune.schedulers # noqa: F401
from ray.tune.search import SEARCH_ALG_IMPORT
for name, import_func in SEARCH_ALG_IMPORT.items():
print(f"testing searcher {name}")
searcher = import_func()
# ensure that the dependencies aren't actually installed
if searcher and name not in ("variant_generator", "random"):
with self.assertRaises((AssertionError, ImportError)):
searcher()
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
TestSoftImports
|
python
|
charliermarsh__ruff
|
scripts/ty_benchmark/src/benchmark/projects.py
|
{
"start": 117,
"end": 10256
}
|
class ____(NamedTuple):
name: str
"""The name of the project to benchmark."""
repository: str
"""The git repository to clone."""
revision: str
install_arguments: list[str]
"""Arguments passed to `uv pip install`.
Dependencies are pinned using a `--exclude-newer` flag when installing them
into the virtual environment; see the `Venv.install()` method for details.
"""
python_version: Literal["3.13", "3.14", "3.12", "3.11", "3.10", "3.9", "3.8"]
skip: str | None = None
"""The project is skipped from benchmarking if not `None`."""
include: list[str] = []
"""The directories and files to check. If empty, checks the current directory"""
exclude: list[str] = []
"""The directories and files to exclude from checks."""
def clone(self, checkout_dir: Path) -> None:
# Skip cloning if the project has already been cloned (the script doesn't yet support updating)
if (checkout_dir / ".git").exists():
return
logging.debug(f"Cloning {self.repository} to {checkout_dir}")
try:
# git doesn't support cloning a specific revision.
# This is the closest that I found to a "shallow clone with a specific revision"
subprocess.run(
[
"git",
"init",
"--quiet",
],
env={"GIT_TERMINAL_PROMPT": "0"},
cwd=checkout_dir,
check=True,
capture_output=True,
text=True,
)
subprocess.run(
["git", "remote", "add", "origin", str(self.repository), "--no-fetch"],
env={"GIT_TERMINAL_PROMPT": "0"},
cwd=checkout_dir,
check=True,
capture_output=True,
text=True,
)
subprocess.run(
[
"git",
"fetch",
"origin",
self.revision,
"--quiet",
"--depth",
"1",
"--no-tags",
],
check=True,
cwd=checkout_dir,
capture_output=True,
text=True,
)
subprocess.run(
["git", "reset", "--hard", "FETCH_HEAD", "--quiet"],
check=True,
cwd=checkout_dir,
capture_output=True,
text=True,
)
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to clone {self.name}: {e.stderr}")
logging.info(f"Cloned {self.name} to {checkout_dir}.")
# Selection of projects taken from
# [mypy-primer](https://github.com/hauntsaninja/mypy_primer/blob/0ea6cc614b3e91084059b9a3acc58f94c066a211/mypy_primer/projects.py#L71).
# May require frequent updating, especially the dependencies list
ALL: Final = [
Project(
name="black",
repository="https://github.com/psf/black",
revision="45b4087976b7880db9dabacc992ee142f2d6c7c7",
python_version="3.10",
include=["src"],
install_arguments=[
"-r",
"pyproject.toml",
# All extras except jupyter because installing the jupyter optional results in a mypy typing error.
"--extra",
"colorama",
# uvloop is not supported on Windows
*(["--extra", "uvloop"] if sys.platform != "win32" else []),
"--extra",
"d",
],
),
Project(
name="discord.py",
repository="https://github.com/Rapptz/discord.py.git",
revision="9be91cb093402f54a44726c7dc4c04ff3b2c5a63",
python_version="3.8",
include=["discord"],
install_arguments=[
"-r",
"pyproject.toml",
"typing_extensions>=4.3,<5",
],
),
# Fairly chunky project, requires the pydantic mypy plugin.
#
# Pyrefly reports significantly more diagnostics than ty and, unlike ty, has partial pydantic support.
# Both could be the reason why Pyrefly is slower than ty (it's notable that it's mainly slower because it has a much higher system time)
Project(
name="homeassistant",
repository="https://github.com/home-assistant/core.git",
revision="10c12623bfc0b3a06ffaa88bf986f61818cfb8be",
python_version="3.13",
include=["homeassistant"],
skip="Missing dependencies on Windows" if sys.platform == "win32" else None,
install_arguments=[
"-r",
"requirements_test_all.txt",
"-r",
"requirements.txt",
],
),
Project(
name="isort",
repository="https://github.com/pycqa/isort",
revision="ed501f10cb5c1b17aad67358017af18cf533c166",
python_version="3.11",
include=["isort"],
install_arguments=["types-colorama", "colorama"],
),
Project(
name="jinja",
repository="https://github.com/pallets/jinja",
revision="5ef70112a1ff19c05324ff889dd30405b1002044",
python_version="3.10",
include=["src"],
install_arguments=["-r", "pyproject.toml"],
),
Project(
name="pandas",
repository="https://github.com/pandas-dev/pandas",
revision="4d8348341bc4de2f0f90782ecef1b092b9418a19",
include=["pandas", "typings"],
exclude=["pandas/tests"],
python_version="3.11",
install_arguments=[
"-r",
"requirements-dev.txt",
],
),
Project(
name="pandas-stubs",
repository="https://github.com/pandas-dev/pandas-stubs",
revision="ad8cae5bc1f0bc87ce22b4d445e0700976c9dfb4",
include=["pandas-stubs"],
python_version="3.10",
# Uses poetry :(
install_arguments=[
"types-pytz >=2022.1.1",
"types-python-dateutil>=2.8.19",
"numpy >=1.23.5",
"pyarrow >=10.0.1",
"matplotlib >=3.10.1",
"xarray>=22.6.0",
"SQLAlchemy>=2.0.39",
"odfpy >=1.4.1",
"pyxlsb >=1.0.10",
"jinja2 >=3.1",
"scipy >=1.9.1",
"scipy-stubs >=1.15.3.0",
],
),
Project(
name="prefect",
repository="https://github.com/PrefectHQ/prefect.git",
revision="a3db33d4f9ee7a665430ae6017c649d057139bd3",
# See https://github.com/PrefectHQ/prefect/blob/a3db33d4f9ee7a665430ae6017c649d057139bd3/.pre-commit-config.yaml#L33-L39
include=[
"src/prefect/server/models",
"src/prefect/concurrency",
"src/prefect/events",
"src/prefect/input",
],
python_version="3.10",
install_arguments=[
"-r",
"pyproject.toml",
"--group",
"dev",
],
),
Project(
name="pytorch",
repository="https://github.com/pytorch/pytorch.git",
revision="be33b7faf685560bb618561b44b751713a660337",
include=["torch", "caffe2"],
# see https://github.com/pytorch/pytorch/blob/c56655268b4ae575ee4c89c312fd93ca2f5b3ba9/pyrefly.toml#L23
exclude=[
"torch/_inductor/codegen/triton.py",
"tools/linter/adapters/test_device_bias_linter.py",
"tools/code_analyzer/gen_operators_yaml.py",
"torch/_inductor/runtime/triton_heuristics.py",
"torch/_inductor/runtime/triton_helpers.py",
"torch/_inductor/runtime/halide_helpers.py",
"torch/utils/tensorboard/summary.py",
"torch/distributed/flight_recorder/components/types.py",
"torch/linalg/__init__.py",
"torch/package/importer.py",
"torch/package/_package_pickler.py",
"torch/jit/annotations.py",
"torch/utils/data/datapipes/_typing.py",
"torch/nn/functional.py",
"torch/_export/utils.py",
"torch/fx/experimental/unification/multipledispatch/__init__.py",
"torch/nn/modules/__init__.py",
"torch/nn/modules/rnn.py",
"torch/_inductor/codecache.py",
"torch/distributed/elastic/metrics/__init__.py",
"torch/_inductor/fx_passes/bucketing.py",
"torch/onnx/_internal/exporter/_torchlib/ops/nn.py",
"torch/include/**",
"torch/csrc/**",
"torch/distributed/elastic/agent/server/api.py",
"torch/testing/_internal/**",
"torch/distributed/fsdp/fully_sharded_data_parallel.py",
"torch/ao/quantization/pt2e/_affine_quantization.py",
"torch/nn/modules/pooling.py",
"torch/nn/parallel/_functions.py",
"torch/_appdirs.py",
"torch/multiprocessing/pool.py",
"torch/overrides.py",
"*/__pycache__/**",
"*/.*",
],
# See https://github.com/pytorch/pytorch/blob/be33b7faf685560bb618561b44b751713a660337/.lintrunner.toml#L141
install_arguments=[
'numpy==1.26.4 ; python_version >= "3.10" and python_version <= "3.11"',
'numpy==2.1.0 ; python_version >= "3.12" and python_version <= "3.13"',
'numpy==2.3.4 ; python_version >= "3.14"',
"expecttest==0.3.0",
"pyrefly==0.36.2",
"sympy==1.13.3",
"types-requests==2.27.25",
"types-pyyaml==6.0.2",
"types-tabulate==0.8.8",
"types-protobuf==5.29.1.20250403",
"types-setuptools==79.0.0.20250422",
"types-jinja2==2.11.9",
"types-colorama==0.4.6",
"filelock==3.18.0",
"junitparser==2.1.1",
"rich==14.1.0",
"optree==0.17.0",
"types-openpyxl==3.1.5.20250919",
"types-python-dateutil==2.9.0.20251008",
"mypy==1.16.0", # pytorch pins mypy,
],
python_version="3.11",
),
]
|
Project
|
python
|
pytorch__pytorch
|
torch/fx/proxy.py
|
{
"start": 18165,
"end": 25463
}
|
class ____:
"""
``Proxy`` objects are ``Node`` wrappers that flow through the
program during symbolic tracing and record all the operations
(``torch`` function calls, method calls, operators) that they touch
into the growing FX Graph.
If you're doing graph transforms, you can wrap your own ``Proxy``
method around a raw ``Node`` so that you can use the overloaded
operators to add additional things to a ``Graph``.
``Proxy`` objects cannot be iterated. In other words, the symbolic
tracer will throw an error if a ``Proxy`` is used in a loop or as
an ``*args``/``**kwargs`` function argument.
There are two main ways around this:
1. Factor out the untraceable logic into a top-level function and
use ``fx.wrap`` on it.
2. If the control flow is static (i.e. the loop trip count is
based on some hyperparameter), the code can be kept in its original
position and refactored into something like::
for i in range(self.some_hyperparameter):
indexed_item = proxied_value[i]
For a more detailed description into the Proxy internals, check out
the "Proxy" section in `torch/fx/README.md`
"""
@compatibility(is_backward_compatible=True)
def __init__(self, node: Node, tracer: "Optional[TracerBase]" = None):
if tracer is None:
# This allows you to create a Proxy object around a raw Node
tracer = GraphAppendingTracer(node.graph)
self.tracer = tracer
self.node = node
def __repr__(self) -> str:
return f"Proxy({self.node.name})"
def __getattr__(self, k) -> "Attribute":
# note: not added to the graph yet, if this is a method call
# we peephole optimize to the method invocation
return Attribute(self, k)
def __getstate__(self) -> dict:
return self.__dict__
def __deepcopy__(self, memo) -> dict:
# We have to explicitly override this method, because otherwise deepcopy
# will go to __getattr__(self, "__deepcopy__") and return a
# Attribute(__deepcopy__), and may go into an infinite loop in some cases.
import copy
new_dict = {}
for k, v in self.__dict__.items():
try:
new_obj = copy.deepcopy(v, memo)
except Exception:
log.warning(
"Shallow copy %s of Proxy because it cannot be deepcopied. "
"Proxy is created for node %s",
k,
self.node.name,
)
new_obj = copy.copy(v)
new_dict[k] = new_obj
assert "node" in new_dict
assert "tracer" in new_dict
new_proxy = Proxy(new_dict["node"], new_dict["tracer"])
for k, v in new_dict.items():
new_proxy.__dict__[k] = v
return new_proxy
def __setstate__(self, d):
# This is called when being unpickled/loaded.
self.__dict__ = d
def __call__(self, *args, **kwargs) -> "Proxy":
return self.tracer.create_proxy(
"call_method", "__call__", (self,) + args, kwargs
)
def __iter__(self) -> Iterator["Proxy"]:
frame = inspect.currentframe()
assert frame is not None
calling_frame = frame.f_back
assert calling_frame is not None
inst_list = list(dis.get_instructions(calling_frame.f_code))
if sys.version_info >= (3, 11):
from bisect import bisect_left
inst_idx = bisect_left(
inst_list, calling_frame.f_lasti, key=lambda x: x.offset
)
else:
inst_idx = calling_frame.f_lasti // 2
inst = inst_list[inst_idx]
if inst.opname == "UNPACK_SEQUENCE":
return (self[i] for i in range(inst.argval)) # type: ignore[index]
return self.tracer.iter(self)
def __abs__(self):
return self.tracer.create_proxy("call_function", operator.abs, (self,), {})
def __bool__(self) -> bool:
if self.tracer.trace_asserts:
# check if this boolean is used in an assertion, bytecode pattern for assertions
# is pretty stable for Python 3.7--3.9
frame = inspect.currentframe()
assert frame is not None
calling_frame = frame.f_back
assert calling_frame is not None
insts = list(dis.get_instructions(calling_frame.f_code))
if sys.version_info >= (3, 11):
from bisect import bisect_left
cur = bisect_left(insts, calling_frame.f_lasti, key=lambda x: x.offset)
else:
cur = calling_frame.f_lasti // 2
inst = insts[cur]
if inst.opname == "POP_JUMP_IF_TRUE":
first = insts[cur + 1]
assert inst.arg is not None
last = insts[inst.arg // 2 - 1]
starts_with_assert = (
first.opname == "LOAD_GLOBAL"
and first.argval == "AssertionError"
or first.opname == "LOAD_ASSERTION_ERROR"
)
if starts_with_assert and last.opname == "RAISE_VARARGS":
self.tracer.create_proxy("call_function", assert_fn, (self,), {})
return True
return self.tracer.to_bool(self)
@compatibility(is_backward_compatible=True)
def keys(self):
return self.tracer.keys(self)
def __len__(self):
raise RuntimeError(
"'len' is not supported in symbolic tracing by default. If you want "
"this call to be recorded, please call torch.fx.wrap('len') at "
"module scope"
)
@classmethod
def __torch_function__(cls, orig_method, types, args=None, kwargs=None):
args = args if args else ()
kwargs = kwargs if kwargs else {}
tracers: dict[Any, None] = {}
def find_tracer(a):
if isinstance(a, cls):
tracers[a.tracer] = None
map_aggregate(args, find_tracer)
map_aggregate(kwargs, find_tracer)
if len(tracers) > 1:
raise RuntimeError(
f"Found multiple different tracers {list(tracers.keys())} while "
f"trying to trace operations {orig_method}"
)
tracer = next(iter(tracers.keys()))
if isinstance(orig_method, torch._C.ScriptMethod):
args = (orig_method.owner,) + args
return tracer.create_proxy("call_method", orig_method.name, args, kwargs)
if torch.overrides.is_tensor_method_or_property(orig_method):
return tracer.create_proxy(
"call_method", orig_method.__name__, args, kwargs
)
else:
if isinstance(orig_method, torch._ops.HigherOrderOperator):
# TODO: Define how to symbolically trace HigherOrderOperators
raise RuntimeError("Unable to symbolically trace HigherOrderOperators")
return tracer.create_proxy(
"call_function",
orig_method,
args,
kwargs,
name=tracer.graph._target_to_str(orig_method.__name__),
)
@compatibility(is_backward_compatible=False)
|
Proxy
|
python
|
ApeWorX__ape
|
src/ape/cli/arguments.py
|
{
"start": 1257,
"end": 6433
}
|
class ____:
"""
Helper callback class for handling CLI-given contract paths.
"""
def __init__(self, value, project: Optional["ProjectManager"] = None):
from ape.utils.basemodel import ManagerAccessMixin
self.value = value
self.missing_compilers: set[str] = set() # set of .ext
self.project = project or ManagerAccessMixin.local_project
@classmethod
def callback(cls, ctx, param, value) -> set[Path]:
"""
Use this for click.option / argument callbacks.
"""
project = ctx.params.get("project")
return cls(value, project=project).filtered_paths
@property
def filtered_paths(self) -> set[Path]:
"""
Get the filtered set of paths.
"""
value = self.value
contract_paths: Iterable[Path]
if value and isinstance(value, (Path, str)):
# Given single path.
contract_paths = (Path(value),)
elif not value or value == "*":
# We include missing compilers here in case the user isn't aware, and we can print a better message.
contract_paths = {
p for p in self.project.sources.get_source_paths(include_missing_compilers=True)
}
elif isinstance(value, Iterable):
contract_paths = value
else:
raise BadArgumentUsage(f"Not a path or iter[Path]: {value}")
# Convert source IDs or relative paths to absolute paths.
path_set = self.lookup(contract_paths)
# Handle missing compilers.
if self.missing_compilers:
# Craft a nice message for all missing compilers.
missing_ext = ", ".join(sorted(self.missing_compilers))
message = (
f"Missing compilers for the following file types: '{missing_ext}'. "
"Possibly, a compiler plugin is not installed or is "
"installed but not loading correctly."
)
if ".vy" in self.missing_compilers:
message = f"{message} Is 'ape-vyper' installed?"
if ".sol" in self.missing_compilers:
message = f"{message} Is 'ape-solidity' installed?"
logger.warning(message)
return path_set
@property
def exclude_patterns(self) -> set[str]:
from ape.utils.basemodel import ManagerAccessMixin as access
return access.config_manager.get_config("compile").exclude or set()
def do_exclude(self, path: Union[Path, str]) -> bool:
return self.project.sources.is_excluded(path)
def compiler_is_unknown(self, path: Union[Path, str]) -> bool:
from ape.utils.basemodel import ManagerAccessMixin
from ape.utils.os import get_full_extension
ext = get_full_extension(path)
unknown_compiler = (
ext and ext not in ManagerAccessMixin.compiler_manager.registered_compilers
)
if unknown_compiler and ext not in self.missing_compilers:
self.missing_compilers.add(ext)
return bool(unknown_compiler)
def lookup(self, path_iter: Iterable, path_set: Optional[set] = None) -> set[Path]:
path_set = path_set or set()
given_paths = [p for p in path_iter] # Handle iterators w/o losing it.
for path_id in given_paths:
path = Path(path_id)
contracts_folder = self.project.contracts_folder
if (
self.project.path / path.name
) == contracts_folder or path.name == contracts_folder.name:
# Was given the path to the contracts folder.
path_set = path_set.union({p for p in self.project.sources.paths})
elif (self.project.path / path).is_dir():
# Was given sub-dir in the project folder.
path_set = path_set.union(
self.lookup(
(p for p in (self.project.path / path).iterdir()), path_set=path_set
)
)
elif (contracts_folder / path.name).is_dir():
# Was given sub-dir in the contracts folder.
path_set = path_set.union(
self.lookup(
(p for p in (contracts_folder / path.name).iterdir()), path_set=path_set
)
)
elif resolved_path := self.project.sources.lookup(path):
# Check compiler missing.
if self.compiler_is_unknown(resolved_path):
# NOTE: ^ Also tracks.
continue
# We know here that the compiler is known.
path_set.add(resolved_path)
else:
raise BadArgumentUsage(f"Source file '{path.name}' not found.")
return path_set
def contract_file_paths_argument():
"""
A ``click.argument`` representing contract source file paths.
This argument takes 0-to-many values.
The return type from the callback is a flattened list of
source file-paths.
"""
return click.argument("file_paths", nargs=-1, callback=_ContractPaths.callback)
|
_ContractPaths
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_header_image13.py
|
{
"start": 315,
"end": 1189
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image13.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_header(
"&L&G&C&G&R&G",
{
"image_left": self.image_dir + "black_72.jpg",
"image_center": self.image_dir + "black_150.jpg",
"image_right": self.image_dir + "black_300.jpg",
},
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
getsentry__sentry
|
src/sentry/shared_integrations/exceptions/__init__.py
|
{
"start": 628,
"end": 2827
}
|
class ____(Exception):
"""
Base class for errors which arise while making outgoing requests to third-party APIs.
"""
code: int | None = None
def __init__(
self,
text: str,
code: int | None = None,
url: str | None = None,
host: str | None = None,
path: str | None = None,
) -> None:
if code is not None:
self.code = code
self.text = text
self.url = url
# we allow `host` and `path` to be passed in separately from `url` in case
# either one is all we have
self.host = host
self.path = path
self.json: dict[str, Any] | None = None
self.xml: BeautifulSoup | None = None
# TODO(dcramer): pull in XML support from Jira
if text:
try:
self.json = json.loads(text)
except (json.JSONDecodeError, ValueError):
if self.text[:5] == "<?xml":
# perhaps it's XML?
self.xml = BeautifulSoup(self.text, "xml")
if url and not self.host:
try:
self.host = urlparse(url).netloc
except ValueError:
self.host = "[invalid URL]"
if url and not self.path:
try:
self.path = urlparse(url).path
except ValueError:
self.path = "[invalid URL]"
super().__init__(text[:1024])
def __str__(self) -> str:
return self.text
@classmethod
def from_response(cls, response: Response, url: str | None = None) -> ApiError:
if response.status_code == 401:
return ApiUnauthorized(response.text, url=url)
elif response.status_code == 429:
return ApiRateLimitedError(response.text, url=url)
elif response.status_code == 409:
return ApiConflictError(response.text, url=url)
elif response.status_code == 400:
return ApiInvalidRequestError(response.text, url=url)
elif response.status_code == 403:
return ApiForbiddenError(response.text, url=url)
return cls(response.text, response.status_code, url=url)
|
ApiError
|
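The `from_response` classmethod above fans a response's status code out to more specific exception subclasses. A hedged, self-contained sketch of the same dispatch pattern with hypothetical classes (not Sentry's own):

class RequestError(Exception):
    def __init__(self, text: str, code: int | None = None) -> None:
        super().__init__(text[:1024])
        self.text, self.code = text, code

    @classmethod
    def from_status(cls, status_code: int, text: str) -> "RequestError":
        # Well-known status codes map to dedicated subclasses; anything else falls back to cls.
        mapping = {401: UnauthorizedError, 403: ForbiddenError, 409: ConflictError, 429: RateLimitedError}
        return mapping.get(status_code, cls)(text, code=status_code)

class UnauthorizedError(RequestError): ...
class ForbiddenError(RequestError): ...
class ConflictError(RequestError): ...
class RateLimitedError(RequestError): ...

err = RequestError.from_status(429, "slow down")
assert isinstance(err, RateLimitedError) and err.code == 429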
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/memory/entity.py
|
{
"start": 1113,
"end": 1959
}
|
class ____(BaseModel, ABC):
"""Abstract base class for Entity store."""
@abstractmethod
def get(self, key: str, default: str | None = None) -> str | None:
"""Get entity value from store."""
@abstractmethod
def set(self, key: str, value: str | None) -> None:
"""Set entity value in store."""
@abstractmethod
def delete(self, key: str) -> None:
"""Delete entity value from store."""
@abstractmethod
def exists(self, key: str) -> bool:
"""Check if entity exists in store."""
@abstractmethod
def clear(self) -> None:
"""Delete all entities from store."""
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
|
BaseEntityStore
|
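A minimal dict-backed store exposing the same five methods as the abstract interface above; this is only an illustrative sketch, not LangChain's own in-memory implementation:

class InMemoryEntityStore:
    """Toy entity store: a plain dict behind the BaseEntityStore-style interface."""

    def __init__(self) -> None:
        self.store: dict[str, str | None] = {}

    def get(self, key: str, default: str | None = None) -> str | None:
        return self.store.get(key, default)

    def set(self, key: str, value: str | None) -> None:
        self.store[key] = value

    def delete(self, key: str) -> None:
        self.store.pop(key, None)

    def exists(self, key: str) -> bool:
        return key in self.store

    def clear(self) -> None:
        self.store.clear()

store = InMemoryEntityStore()
store.set("Alice", "Alice is a software engineer.")
assert store.exists("Alice") and store.get("Bob", "unknown") == "unknown"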
python
|
pytorch__pytorch
|
test/test_functionalization_of_rng_ops.py
|
{
"start": 816,
"end": 10980
}
|
class ____(TestCase):
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_rand_like(self, dtype, device):
def fn(x):
a = torch.rand_like(x) * x
a = torch.rand_like(x) * a
return a
x = torch.rand(10, device=device, dtype=dtype)
for seed in range(10):
torch.cuda.manual_seed(seed)
ref = fn(x)
torch.cuda.manual_seed(seed)
aot_fn = aot_function(fn, functools.partial(count_philox_rand, freq=2))
res = aot_fn(x)
self.assertEqual(ref, res)
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_rand_like_dynamic(self, dtype, device):
def fn(x):
a = torch.rand_like(x) * x
a = torch.rand_like(x) * a
return a
for seed in range(1, 10):
shape = (seed, seed)
x = torch.rand(shape, device=device, dtype=dtype)
torch.cuda.manual_seed(seed)
ref = fn(x)
torch.cuda.manual_seed(seed)
opt_fn = torch.compile(fn, backend="aot_eager", dynamic=True)
res = opt_fn(x)
self.assertEqual(ref, res)
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_rand_like_dynamic_bwd(self, dtype, device):
def fn(x):
a = torch.rand_like(x) * x
a = torch.rand_like(x) * a
return a
for seed in range(1, 10):
shape = (seed, seed)
x = torch.rand(shape, device=device, dtype=dtype, requires_grad=True)
torch.cuda.manual_seed(seed)
ref = fn(x)
ref.sum().backward()
torch.cuda.manual_seed(seed)
opt_fn = torch.compile(fn, backend="aot_eager", dynamic=True)
res = opt_fn(x)
res.sum().backward()
self.assertEqual(ref, res)
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_rand(self, dtype, device):
shape = (10,)
def fn(x):
a = torch.rand(*shape, device=device, dtype=dtype) * x
a = torch.rand(*shape, device=device, dtype=dtype) * a
return a
x = torch.rand(*shape, device=device, dtype=dtype)
for seed in range(10):
torch.cuda.manual_seed(seed)
ref = fn(x)
torch.cuda.manual_seed(seed)
aot_fn = aot_function(fn, functools.partial(count_philox_rand, freq=2))
res = aot_fn(x)
self.assertEqual(ref, res)
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_autograd_function(self, dtype, device):
shape = (16, 16)
class Custom(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
a = torch.rand_like(x) * x
a = torch.rand_like(x) * a
return a
@staticmethod
def backward(ctx, grad_out):
(x,) = ctx.saved_tensors
return grad_out * torch.rand_like(grad_out) * torch.cos(x)
custom = Custom.apply
x = torch.rand(*shape, device=device, dtype=dtype, requires_grad=True)
x_clone = x.detach().clone().requires_grad_(True)
torch.cuda.manual_seed(123)
ref = custom(x)
ref.sum().backward()
torch.cuda.manual_seed(123)
fwd_compiler = functools.partial(count_philox_rand, freq=2)
bwd_compiler = functools.partial(count_philox_rand, freq=1)
aot_custom = aot_function(custom, fwd_compiler, bwd_compiler)
res = aot_custom(x_clone)
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_multiple_subgraphs(self, dtype, device):
# Checks that rng state is maintained when there are multiple aot traced
# graphs.
shape = (16, 16)
class CustomOp1(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
a = torch.rand_like(x) * x
a = torch.rand_like(x) * a
return a
@staticmethod
def backward(ctx, grad_out):
(x,) = ctx.saved_tensors
return grad_out * torch.rand_like(grad_out) * torch.cos(x)
class CustomOp2(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
a = torch.rand_like(x) * x
return a
@staticmethod
def backward(ctx, grad_out):
(x,) = ctx.saved_tensors
return grad_out * torch.rand_like(grad_out) * torch.rand_like(x)
custom_op1 = CustomOp1.apply
custom_op2 = CustomOp2.apply
def fn(x):
a = custom_op1(x)
b = a.sin()
return custom_op2(b)
fwd_compiler = functools.partial(count_philox_rand, freq=2)
bwd_compiler = functools.partial(count_philox_rand, freq=1)
aot_custom_op1 = aot_function(custom_op1, fwd_compiler, bwd_compiler)
fwd_compiler = functools.partial(count_philox_rand, freq=1)
bwd_compiler = functools.partial(count_philox_rand, freq=2)
aot_custom_op2 = aot_function(custom_op2, fwd_compiler, bwd_compiler)
def aot_fn(x):
a = aot_custom_op1(x)
b = a.sin()
return aot_custom_op2(b)
for seed in range(10):
torch.cuda.manual_seed(seed)
x = torch.rand(*shape, device=device, dtype=dtype, requires_grad=True)
x_clone = x.detach().clone().requires_grad_(True)
torch.cuda.manual_seed(seed)
ref = fn(x)
ref.sum().backward()
torch.cuda.manual_seed(seed)
res = aot_fn(x_clone)
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_set_get_rng_state(self, dtype, device):
def fn(x):
a = torch.rand_like(x) * x
state = torch.cuda.get_rng_state()
a = torch.rand_like(x) * a
torch.cuda.set_rng_state(state)
a = torch.rand_like(x) * a
return a
x = torch.rand(10, device=device, dtype=dtype)
for seed in range(10):
torch.cuda.manual_seed(seed)
ref = fn(x)
torch.cuda.manual_seed(seed)
fwd_compiler = functools.partial(count_philox_rand, freq=3)
aot_fn = aot_function(fn, fwd_compiler)
res = aot_fn(x)
self.assertEqual(ref, res)
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_min_cut_partitioner(self, dtype, device):
# Checks that the calling convention is maintained
shape = (16, 16)
def fn(x):
a = torch.rand_like(x) * x
a = torch.rand_like(x) * a
a = torch.sin(a)
a = torch.sin(a)
a = torch.sin(a)
return a
x = torch.rand(*shape, device=device, dtype=dtype, requires_grad=True)
x_clone = x.detach().clone().requires_grad_(True)
torch.cuda.manual_seed(123)
ref = fn(x)
ref.sum().backward()
torch.cuda.manual_seed(123)
fwd_compiler = functools.partial(count_philox_rand, freq=2)
bwd_compiler = functools.partial(count_philox_rand, freq=0)
aot_custom = aot_function(
fn,
fwd_compiler,
bwd_compiler,
partition_fn=min_cut_rematerialization_partition,
)
# aot_custom = aot_function(fn, fwd_compiler, bwd_compiler)
res = aot_custom(x_clone)
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
# TODO - Dropout needs more work because of offset calculation
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
@dtypes(torch.float32)
def test_checkpoint(self, dtype, device):
def g(x, y):
return torch.nn.functional.dropout(x, 0.6)
def fn(x, y):
return torch.utils.checkpoint.checkpoint(g, x, y, use_reentrant=False)
# x = torch.rand(2, 2, device="cuda", requires_grad=True)
x = torch.ones(2, 2, device="cuda", requires_grad=True)
y = torch.rand(2, 2, device="cuda", requires_grad=True)
torch.cuda.manual_seed(123)
fn(x, y)
# With checkpointing we should recompute dropout in bwd, and philox_rand is passed from fwd
fwd_compiler = functools.partial(count_philox_rand, freq=1)
bwd_compiler = functools.partial(count_philox_rand, freq=0)
aot_fn = aot_function(fn, fwd_compiler, bwd_compiler)
        # We can't check accuracy here because rand_like generates different random numbers than dropout
res = aot_fn(x, y)
res.sum().backward()
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_dropout_decomp(self, dtype, device):
def fn(x):
return torch.nn.functional.dropout(x, 0.6) * x
x = torch.rand(10, device=device, dtype=dtype)
# Ensure the decomp is happening
aot_fn = aot_function(fn, functools.partial(count_philox_rand, freq=1))
        # We can't check accuracy here because rand_like generates different random numbers than dropout
aot_fn(x)
only_for = ("cuda",)
instantiate_device_type_tests(TestFunctionalizationRngOps, globals(), only_for=only_for)
|
TestFunctionalizationRngOps
|
python
|
PrefectHQ__prefect
|
src/prefect/settings/sources.py
|
{
"start": 11394,
"end": 13545
}
|
class ____(TomlConfigSettingsSourceBase):
"""Custom pydantic settings source to load settings from a pyproject.toml file"""
def __init__(
self,
settings_cls: Type[BaseSettings],
):
super().__init__(settings_cls)
self.toml_file_path: Path = Path("pyproject.toml")
self.toml_data: dict[str, Any] = self._read_files(self.toml_file_path)
self.toml_table_header: tuple[str, ...] = settings_cls.model_config.get(
"pyproject_toml_table_header", ("tool", "prefect")
)
for key in self.toml_table_header:
self.toml_data: dict[str, Any] = self.toml_data.get(key, {})
def _is_test_mode() -> bool:
"""Check if the current process is in test mode."""
return bool(
os.getenv("PREFECT_TEST_MODE")
or os.getenv("PREFECT_UNIT_TEST_MODE")
or os.getenv("PREFECT_TESTING_UNIT_TEST_MODE")
or os.getenv("PREFECT_TESTING_TEST_MODE")
)
def _get_profiles_path() -> Path:
"""Helper to get the profiles path"""
if _is_test_mode():
return DEFAULT_PROFILES_PATH
if env_path := os.getenv("PREFECT_PROFILES_PATH"):
return Path(env_path)
if dotenv_path := dotenv.dotenv_values(".env").get("PREFECT_PROFILES_PATH"):
return Path(dotenv_path)
if toml_path := _get_profiles_path_from_toml("prefect.toml", ["profiles_path"]):
return Path(toml_path)
if pyproject_path := _get_profiles_path_from_toml(
"pyproject.toml", ["tool", "prefect", "profiles_path"]
):
return Path(pyproject_path)
if os.environ.get("PREFECT_HOME"):
return Path(os.environ["PREFECT_HOME"]) / "profiles.toml"
if not (DEFAULT_PREFECT_HOME / "profiles.toml").exists():
return DEFAULT_PROFILES_PATH
return DEFAULT_PREFECT_HOME / "profiles.toml"
def _get_profiles_path_from_toml(path: str, keys: List[str]) -> Optional[str]:
"""Helper to get the profiles path from a toml file."""
try:
toml_data = _read_toml_file(Path(path))
except FileNotFoundError:
return None
return get_from_dict(toml_data, keys)
|
PyprojectTomlConfigSettingsSource
|
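A stdlib-only sketch of walking a nested table header the way the settings source above walks ("tool", "prefect"); this is not Prefect's API and assumes Python 3.11+ for `tomllib`:

import tomllib
from pathlib import Path
from typing import Any

def read_nested_table(path: Path, header: tuple[str, ...]) -> dict[str, Any]:
    # Return the table at e.g. [tool.prefect], or {} when the file or any level is missing.
    try:
        data: dict[str, Any] = tomllib.loads(path.read_text())
    except FileNotFoundError:
        return {}
    for key in header:
        data = data.get(key, {})  # descend one level per header component
    return data

settings = read_nested_table(Path("pyproject.toml"), ("tool", "prefect"))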
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/frame_methods.py
|
{
"start": 16531,
"end": 17211
}
|
class ____:
def setup(self):
N = 10000
# this is the worst case, where every column has NaNs.
arr = np.random.randn(N, 100)
arr[::2] = np.nan
self.df = DataFrame(arr)
self.df2 = DataFrame(
{
"A": np.arange(0, N),
"B": np.random.randint(0, 100, N),
"C": np.random.randn(N),
"D": np.random.randn(N),
}
)
self.df2.loc[1::5, "A"] = np.nan
self.df2.loc[1::5, "C"] = np.nan
def time_interpolate(self):
self.df.interpolate()
def time_interpolate_some_good(self):
self.df2.interpolate()
|
Interpolate
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_api.py
|
{
"start": 3104,
"end": 28092
}
|
class ____(TestCase):
fixtures = ["eric.json", "test_data.json"]
def setUp(self):
self.user = User.objects.get(username="eric")
self.project = get(Project, users=[self.user])
self.version = self.project.versions.get(slug=LATEST)
def test_healthcheck(self):
# Build cloning state
build = get(
Build,
project=self.project,
version=self.version,
state=BUILD_STATE_CLONING,
builder="build-a1b2c3",
success=False,
)
self.assertIsNone(build.healthcheck)
client = APIClient()
r = client.post(reverse("build-healthcheck", args=(build.pk,), query={"builder": "build-a1b2c3"}))
build.refresh_from_db()
self.assertEqual(r.status_code, 204)
self.assertIsNotNone(build.healthcheck)
# Build invalid builder
build.healthcheck = None
build.save()
client = APIClient()
r = client.post(reverse("build-healthcheck", args=(build.pk,), query={"builder": "build-invalid"}))
build.refresh_from_db()
self.assertEqual(r.status_code, 404)
self.assertIsNone(build.healthcheck)
# Build finished state
build.state = BUILD_STATE_FINISHED
build.healthcheck = None
build.save()
client = APIClient()
r = client.post(reverse("build-healthcheck", args=(build.pk,), query={"builder": "build-a1b2c3"}))
build.refresh_from_db()
self.assertEqual(r.status_code, 404)
self.assertIsNone(build.healthcheck)
def test_reset_build(self):
build = get(
Build,
project=self.project,
version=self.version,
state=BUILD_STATE_CLONING,
success=False,
output="Output",
error="Error",
exit_code=9,
builder="Builder",
cold_storage=True,
)
command = get(
BuildCommandResult,
build=build,
)
build.commands.add(command)
Notification.objects.add(
attached_to=build,
message_id=BuildCancelled.SKIPPED_EXIT_CODE_183,
)
self.assertEqual(build.commands.count(), 1)
self.assertEqual(build.notifications.count(), 1)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(self.project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
r = client.post(reverse("build-reset", args=(build.pk,)))
self.assertEqual(r.status_code, 204)
build.refresh_from_db()
self.assertEqual(build.project, self.project)
self.assertEqual(build.version, self.version)
self.assertEqual(build.state, BUILD_STATE_TRIGGERED)
self.assertEqual(build.status, "")
self.assertTrue(build.success)
self.assertEqual(build.output, "")
self.assertEqual(build.error, "")
self.assertIsNone(build.exit_code)
self.assertEqual(build.builder, "")
self.assertFalse(build.cold_storage)
self.assertEqual(build.commands.count(), 0)
self.assertEqual(build.notifications.count(), 0)
@mock.patch("readthedocs.api.v2.views.model_views.get_s3_build_tools_scoped_credentials")
@mock.patch("readthedocs.api.v2.views.model_views.get_s3_build_media_scoped_credentials")
def test_get_temporary_credentials_for_build(self, get_s3_build_media_scoped_credentials, get_s3_build_tools_scoped_credentials):
build = get(
Build,
project=self.project,
version=self.version,
state=BUILD_STATE_UPLOADING,
success=False,
output="Output",
error="Error",
exit_code=0,
builder="Builder",
cold_storage=True,
)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(self.project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
get_s3_build_media_scoped_credentials.return_value = AWSS3TemporaryCredentials(
access_key_id="access_key_id",
secret_access_key="secret_access_key",
session_token="session_token",
region_name="us-east-1",
bucket_name="readthedocs-media",
)
r = client.post(reverse("build-credentials-for-storage", args=(build.pk,)), {"type": "build_media"})
assert r.status_code == 200
assert r.data == {
"s3": {
"access_key_id": "access_key_id",
"secret_access_key": "secret_access_key",
"session_token": "session_token",
"region_name": "us-east-1",
"bucket_name": "readthedocs-media",
}
}
get_s3_build_media_scoped_credentials.assert_called_once_with(
build=build,
duration=60 * 30,
)
get_s3_build_tools_scoped_credentials.return_value = AWSS3TemporaryCredentials(
access_key_id="access_key_id",
secret_access_key="secret_access_key",
session_token="session_token",
region_name="us-east-1",
bucket_name="readthedocs-build-tools",
)
r = client.post(reverse("build-credentials-for-storage", args=(build.pk,)), {"type": "build_tools"})
assert r.status_code == 200
assert r.data == {
"s3": {
"access_key_id": "access_key_id",
"secret_access_key": "secret_access_key",
"session_token": "session_token",
"region_name": "us-east-1",
"bucket_name": "readthedocs-build-tools",
}
}
get_s3_build_tools_scoped_credentials.assert_called_once_with(
build=build,
duration=60 * 30,
)
def test_api_does_not_have_private_config_key_superuser(self):
client = APIClient()
client.login(username="super", password="test")
project = Project.objects.get(pk=1)
version = project.versions.first()
build = Build.objects.create(project=project, version=version)
resp = client.get("/api/v2/build/{}/".format(build.pk))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertIn("config", resp.data)
self.assertNotIn("_config", resp.data)
def test_api_does_not_have_private_config_key_normal_user(self):
client = APIClient()
project = Project.objects.get(pk=1)
version = project.versions.first()
build = Build.objects.create(project=project, version=version)
resp = client.get("/api/v2/build/{}/".format(build.pk))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertIn("config", resp.data)
self.assertNotIn("_config", resp.data)
def test_save_same_config_using_patch(self):
project = Project.objects.get(pk=1)
version = project.versions.first()
build_one = Build.objects.create(project=project, version=version)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
resp = client.patch(
"/api/v2/build/{}/".format(build_one.pk),
{"config": {"one": "two"}},
format="json",
)
self.assertEqual(resp.data["config"], {"one": "two"})
build_two = Build.objects.create(project=project, version=version)
resp = client.patch(
"/api/v2/build/{}/".format(build_two.pk),
{"config": {"one": "two"}},
format="json",
)
self.assertEqual(resp.data["config"], {"one": "two"})
resp = client.get("/api/v2/build/{}/".format(build_one.pk))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
build = resp.data
self.assertEqual(build["config"], {"one": "two"})
# Checking the values from the db, just to be sure the
# api isn't lying.
self.assertEqual(
Build.objects.get(pk=build_one.pk)._config,
{"one": "two"},
)
self.assertEqual(
Build.objects.get(pk=build_two.pk)._config,
{Build.CONFIG_KEY: build_one.pk},
)
def test_response_building(self):
"""The ``view docs`` attr should return a link to the dashboard."""
client = APIClient()
client.login(username="super", password="test")
project = get(
Project,
language="en",
main_language_project=None,
)
version = get(
Version,
project=project,
built=False,
uploaded=False,
)
build = get(
Build,
project=project,
version=version,
state="cloning",
exit_code=0,
)
resp = client.get("/api/v2/build/{build}/".format(build=build.pk))
self.assertEqual(resp.status_code, 200)
dashboard_url = reverse(
"project_version_detail",
kwargs={
"project_slug": project.slug,
"version_slug": version.slug,
},
)
build = resp.data
self.assertEqual(build["state"], "cloning")
self.assertEqual(build["error"], "")
self.assertEqual(build["exit_code"], 0)
self.assertEqual(build["success"], True)
self.assertTrue(build["docs_url"].endswith(dashboard_url))
self.assertTrue(build["docs_url"].startswith("https://"))
@override_settings(DOCROOT="/home/docs/checkouts/readthedocs.org/user_builds")
def test_response_finished_and_success(self):
"""The ``view docs`` attr should return a link to the docs."""
client = APIClient()
client.login(username="super", password="test")
project = get(
Project,
language="en",
slug="myproject",
main_language_project=None,
)
version = get(
Version,
slug="myversion",
project=project,
built=True,
uploaded=True,
)
build = get(
Build,
project=project,
version=version,
state="finished",
exit_code=0,
)
buildcommandresult = get(
BuildCommandResult,
build=build,
command="python -m pip install --upgrade --no-cache-dir pip setuptools<58.3.0",
exit_code=0,
)
resp = client.get("/api/v2/build/{build}/".format(build=build.pk))
self.assertEqual(resp.status_code, 200)
build = resp.data
docs_url = f"http://{project.slug}.readthedocs.io/en/{version.slug}/"
self.assertEqual(build["state"], "finished")
self.assertEqual(build["error"], "")
self.assertEqual(build["exit_code"], 0)
self.assertEqual(build["success"], True)
self.assertEqual(build["docs_url"], docs_url)
# Verify the path is trimmed
self.assertEqual(
build["commands"][0]["command"],
"python -m pip install --upgrade --no-cache-dir pip setuptools<58.3.0",
)
def test_response_finished_and_fail(self):
"""The ``view docs`` attr should return a link to the dashboard."""
client = APIClient()
client.login(username="super", password="test")
project = get(
Project,
language="en",
main_language_project=None,
)
version = get(
Version,
project=project,
built=False,
uploaded=False,
)
build = get(
Build,
project=project,
version=version,
state="finished",
success=False,
exit_code=1,
)
resp = client.get("/api/v2/build/{build}/".format(build=build.pk))
self.assertEqual(resp.status_code, 200)
dashboard_url = reverse(
"project_version_detail",
kwargs={
"project_slug": project.slug,
"version_slug": version.slug,
},
)
build = resp.data
self.assertEqual(build["state"], "finished")
self.assertEqual(build["error"], "")
self.assertEqual(build["exit_code"], 1)
self.assertEqual(build["success"], False)
self.assertTrue(build["docs_url"].endswith(dashboard_url))
self.assertTrue(build["docs_url"].startswith("https://"))
def test_make_build_without_permission(self):
"""Ensure anonymous/non-staff users cannot write the build endpoint."""
client = APIClient()
def _try_post():
resp = client.post(
"/api/v2/build/",
{
"project": 1,
"version": 1,
"success": True,
"output": "Test Output",
"error": "Test Error",
},
format="json",
)
self.assertEqual(resp.status_code, 403)
_try_post()
api_user = get(User, is_staff=False, password="test")
assert api_user.is_staff is False
client.force_authenticate(user=api_user)
_try_post()
def test_update_build_without_permission(self):
"""Ensure anonymous/non-staff users cannot update build endpoints."""
client = APIClient()
api_user = get(User, is_staff=False, password="test")
client.force_authenticate(user=api_user)
project = Project.objects.get(pk=1)
version = project.versions.first()
build = get(Build, project=project, version=version, state="cloning")
resp = client.put(
"/api/v2/build/{}/".format(build.pk),
{
"project": 1,
"version": 1,
"state": "finished",
},
format="json",
)
self.assertEqual(resp.status_code, 403)
def test_make_build_protected_fields(self):
"""
Ensure build api view delegates correct serializer.
Build API keys should be able to read/write the `builder` property, but we
don't expose this to end users via the API
"""
project = Project.objects.get(pk=1)
version = project.versions.first()
build = get(Build, project=project, version=version, builder="foo")
client = APIClient()
api_user = get(User, is_staff=False, password="test")
client.force_authenticate(user=api_user)
resp = client.get("/api/v2/build/{}/".format(build.pk), format="json")
self.assertEqual(resp.status_code, 200)
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
resp = client.get("/api/v2/build/{}/".format(build.pk), format="json")
self.assertEqual(resp.status_code, 200)
self.assertIn("builder", resp.data)
def test_make_build_commands(self):
"""Create build commands."""
_, build_api_key = BuildAPIKey.objects.create_key(self.project)
client = APIClient()
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
build = get(Build, project=self.project, version=self.version, success=True)
now = timezone.now()
start_time = now - datetime.timedelta(seconds=5)
end_time = now
resp = client.post(
"/api/v2/command/",
{
"build": build.pk,
"command": "$CONDA_ENVS_PATH/$CONDA_DEFAULT_ENV/bin/python -m sphinx",
"description": "Conda and Sphinx command",
"exit_code": 0,
"start_time": start_time,
"end_time": end_time,
},
format="json",
)
resp = client.post(
"/api/v2/command/",
{
"build": build.pk,
"command": "$READTHEDOCS_VIRTUALENV_PATH/bin/python -m sphinx",
"description": "Python and Sphinx command",
"exit_code": 0,
"start_time": start_time + datetime.timedelta(seconds=1),
"end_time": end_time,
},
format="json",
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
resp = client.get(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 200)
build = resp.data
self.assertEqual(len(build["commands"]), 2)
self.assertEqual(build["commands"][0]["command"], "python -m sphinx")
self.assertEqual(build["commands"][0]["run_time"], 5)
self.assertEqual(
build["commands"][0]["description"], "Conda and Sphinx command"
)
self.assertEqual(build["commands"][0]["exit_code"], 0)
self.assertEqual(
dateutil.parser.parse(build["commands"][0]["start_time"]), start_time
)
self.assertEqual(
dateutil.parser.parse(build["commands"][0]["end_time"]), end_time
)
self.assertEqual(build["commands"][1]["command"], "python -m sphinx")
self.assertEqual(
build["commands"][1]["description"], "Python and Sphinx command"
)
def test_get_raw_log_success(self):
project = Project.objects.get(pk=1)
version = project.versions.first()
build = get(
Build,
project=project,
version=version,
builder="foo",
state=BUILD_STATE_FINISHED,
)
get(
BuildCommandResult,
build=build,
command="python setup.py install",
output="Installing dependencies...",
)
get(
BuildCommandResult,
build=build,
command="git checkout master",
output='Switched to branch "master"',
)
client = APIClient()
api_user = get(User)
client.force_authenticate(user=api_user)
resp = client.get("/api/v2/build/{}.txt".format(build.pk))
self.assertEqual(resp.status_code, 200)
self.assertIn("Read the Docs build information", resp.content.decode())
self.assertIn("Build id: {}".format(build.id), resp.content.decode())
self.assertIn("Project: {}".format(build.project.slug), resp.content.decode())
self.assertIn("Version: {}".format(build.version.slug), resp.content.decode())
self.assertIn("Commit: {}".format(build.commit), resp.content.decode())
self.assertIn("Date: ", resp.content.decode())
self.assertIn("State: finished", resp.content.decode())
self.assertIn("Success: True", resp.content.decode())
self.assertIn("[rtd-command-info]", resp.content.decode())
self.assertIn(
"python setup.py install\nInstalling dependencies...",
resp.content.decode(),
)
self.assertIn(
'git checkout master\nSwitched to branch "master"',
resp.content.decode(),
)
def test_get_raw_log_building(self):
project = Project.objects.get(pk=1)
version = project.versions.first()
build = get(
Build,
project=project,
version=version,
builder="foo",
success=False,
exit_code=1,
state="building",
)
get(
BuildCommandResult,
build=build,
command="python setup.py install",
output="Installing dependencies...",
exit_code=1,
)
get(
BuildCommandResult,
build=build,
command="git checkout master",
output='Switched to branch "master"',
)
client = APIClient()
api_user = get(User)
client.force_authenticate(user=api_user)
resp = client.get("/api/v2/build/{}.txt".format(build.pk))
self.assertEqual(resp.status_code, 200)
self.assertIn("Read the Docs build information", resp.content.decode())
self.assertIn("Build id: {}".format(build.id), resp.content.decode())
self.assertIn("Project: {}".format(build.project.slug), resp.content.decode())
self.assertIn("Version: {}".format(build.version.slug), resp.content.decode())
self.assertIn("Commit: {}".format(build.commit), resp.content.decode())
self.assertIn("Date: ", resp.content.decode())
self.assertIn("State: building", resp.content.decode())
self.assertIn("Success: Unknow", resp.content.decode())
self.assertIn("[rtd-command-info]", resp.content.decode())
self.assertIn(
"python setup.py install\nInstalling dependencies...",
resp.content.decode(),
)
self.assertIn(
'git checkout master\nSwitched to branch "master"',
resp.content.decode(),
)
def test_get_raw_log_failure(self):
project = Project.objects.get(pk=1)
version = project.versions.first()
build = get(
Build,
project=project,
version=version,
builder="foo",
success=False,
exit_code=1,
state=BUILD_STATE_FINISHED,
)
get(
BuildCommandResult,
build=build,
command="python setup.py install",
output="Installing dependencies...",
exit_code=1,
)
get(
BuildCommandResult,
build=build,
command="git checkout master",
output='Switched to branch "master"',
)
client = APIClient()
api_user = get(User)
client.force_authenticate(user=api_user)
resp = client.get("/api/v2/build/{}.txt".format(build.pk))
self.assertEqual(resp.status_code, 200)
self.assertIn("Read the Docs build information", resp.content.decode())
self.assertIn("Build id: {}".format(build.id), resp.content.decode())
self.assertIn("Project: {}".format(build.project.slug), resp.content.decode())
self.assertIn("Version: {}".format(build.version.slug), resp.content.decode())
self.assertIn("Commit: {}".format(build.commit), resp.content.decode())
self.assertIn("Date: ", resp.content.decode())
self.assertIn("State: finished", resp.content.decode())
self.assertIn("Success: False", resp.content.decode())
self.assertIn("[rtd-command-info]", resp.content.decode())
self.assertIn(
"python setup.py install\nInstalling dependencies...",
resp.content.decode(),
)
self.assertIn(
'git checkout master\nSwitched to branch "master"',
resp.content.decode(),
)
def test_get_invalid_raw_log(self):
client = APIClient()
api_user = get(User)
client.force_authenticate(user=api_user)
resp = client.get("/api/v2/build/{}.txt".format(404))
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_build_filter_by_commit(self):
"""
        Create a build with a commit.
        Should return the list of builds matching the commit query param.
"""
project1 = Project.objects.get(pk=1)
project2 = Project.objects.get(pk=2)
version1 = project1.versions.first()
version2 = project2.versions.first()
get(Build, project=project1, version=version1, builder="foo", commit="test")
get(Build, project=project2, version=version2, builder="foo", commit="other")
client = APIClient()
api_user = get(User, is_staff=False, password="test")
client.force_authenticate(user=api_user)
resp = client.get("/api/v2/build/", {"commit": "test"}, format="json")
self.assertEqual(resp.status_code, 200)
build = resp.data
self.assertEqual(len(build["results"]), 1)
def test_build_without_version(self):
build = get(
Build,
project=self.project,
version=None,
state=BUILD_STATE_FINISHED,
exit_code=0,
)
command = "python -m pip install --upgrade --no-cache-dir pip setuptools<58.3.0"
get(
BuildCommandResult,
build=build,
command=command,
output="Running...",
exit_code=0,
)
client = APIClient()
client.force_authenticate(user=self.user)
r = client.get(reverse("build-detail", args=(build.pk,)))
assert r.status_code == 200
assert r.data["version"] is None
assert r.data["commands"][0]["command"] == command
|
APIBuildTests
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 266181,
"end": 266663
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of PublishSponsorsTier"""
__schema__ = github_schema
__field_names__ = ("tier_id", "client_mutation_id")
tier_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="tierId")
"""The ID of the draft tier to publish."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
PublishSponsorsTierInput
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_doughnut03.py
|
{
"start": 315,
"end": 1237
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_doughnut03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "doughnut"})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$3",
"values": "=Sheet1!$B$1:$B$3",
}
)
chart.set_hole_size(90)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
apache__airflow
|
providers/apache/hive/tests/unit/apache/hive/transfers/test_hive_to_mysql.py
|
{
"start": 1229,
"end": 8232
}
|
class ____(TestHiveEnvironment):
def setup_method(self, method):
self.kwargs = dict(
sql="sql",
mysql_table="table",
hiveserver2_conn_id="hiveserver2_default",
mysql_conn_id="mysql_default",
task_id="test_hive_to_mysql",
)
super().setup_method(method)
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook")
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook")
def test_execute(self, mock_hive_hook, mock_mysql_hook):
HiveToMySqlOperator(**self.kwargs).execute(context={})
mock_hive_hook.assert_called_once_with(hiveserver2_conn_id=self.kwargs["hiveserver2_conn_id"])
mock_hive_hook.return_value.get_records.assert_called_once_with("sql", parameters={})
mock_mysql_hook.assert_called_once_with(
mysql_conn_id=self.kwargs["mysql_conn_id"], local_infile=False
)
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table=self.kwargs["mysql_table"], rows=mock_hive_hook.return_value.get_records.return_value
)
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook")
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook")
def test_execute_mysql_preoperator(self, mock_hive_hook, mock_mysql_hook):
self.kwargs.update(dict(mysql_preoperator="preoperator"))
HiveToMySqlOperator(**self.kwargs).execute(context={})
mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs["mysql_preoperator"])
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook")
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook")
def test_execute_with_mysql_postoperator(self, mock_hive_hook, mock_mysql_hook):
self.kwargs.update(dict(mysql_postoperator="postoperator"))
HiveToMySqlOperator(**self.kwargs).execute(context={})
mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs["mysql_postoperator"])
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook")
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.NamedTemporaryFile")
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook")
def test_execute_bulk_load(self, mock_hive_hook, mock_tmp_file_context, mock_mysql_hook):
mock_tmp_file = MagicMock()
mock_tmp_file.name = "tmp_file"
mock_tmp_file_context.return_value.__enter__.return_value = mock_tmp_file
context = {}
self.kwargs.update(dict(bulk_load=True))
HiveToMySqlOperator(**self.kwargs).execute(context=context)
mock_mysql_hook.assert_called_once_with(mysql_conn_id=self.kwargs["mysql_conn_id"], local_infile=True)
mock_tmp_file_context.assert_called_once_with()
mock_hive_hook.return_value.to_csv.assert_called_once_with(
self.kwargs["sql"],
"tmp_file",
delimiter="\t",
lineterminator="\n",
output_header=False,
hive_conf=context_to_airflow_vars(context),
)
mock_mysql_hook.return_value.bulk_load.assert_called_once_with(
table=self.kwargs["mysql_table"], tmp_file="tmp_file"
)
mock_tmp_file_context.return_value.__exit__.assert_called_once_with(None, None, None)
@patch("airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook")
def test_execute_with_hive_conf(self, mock_mysql_hook):
context = {}
mock_hive_hook = MockHiveServer2Hook()
mock_hive_hook.get_records = MagicMock(return_value="test_hive_results")
self.kwargs.update(dict(hive_conf={"mapreduce.job.queuename": "fake_queue"}))
with patch(
"airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook",
return_value=mock_hive_hook,
):
HiveToMySqlOperator(**self.kwargs).execute(context=context)
hive_conf = context_to_airflow_vars(context)
hive_conf.update(self.kwargs["hive_conf"])
mock_hive_hook.get_records.assert_called_once_with(self.kwargs["sql"], parameters=hive_conf)
@pytest.mark.skipif(
"AIRFLOW_RUNALL_TESTS" not in os.environ, reason="Skipped because AIRFLOW_RUNALL_TESTS is not set"
)
def test_hive_to_mysql(self):
test_hive_results = "test_hive_results"
mock_hive_hook = MockHiveServer2Hook()
mock_hive_hook.get_records = MagicMock(return_value=test_hive_results)
mock_mysql_hook = MockMySqlHook()
mock_mysql_hook.run = MagicMock()
mock_mysql_hook.insert_rows = MagicMock()
with patch(
"airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook",
return_value=mock_hive_hook,
):
with patch(
"airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook",
return_value=mock_mysql_hook,
):
op = HiveToMySqlOperator(
mysql_conn_id="airflow_db",
task_id="hive_to_mysql_check",
sql="""
SELECT name
FROM airflow.static_babynames
LIMIT 100
""",
mysql_table="test_static_babynames",
mysql_preoperator=[
"DROP TABLE IF EXISTS test_static_babynames;",
"CREATE TABLE test_static_babynames (name VARCHAR(500))",
],
dag=self.dag,
)
op.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
raw_select_name_query = mock_hive_hook.get_records.call_args_list[0][0][0]
actual_select_name_query = re.sub(r"\s{2,}", " ", raw_select_name_query).strip()
expected_select_name_query = "SELECT name FROM airflow.static_babynames LIMIT 100"
assert expected_select_name_query == actual_select_name_query
actual_hive_conf = mock_hive_hook.get_records.call_args_list[0][1]["hive_conf"]
expected_hive_conf = {
"airflow.ctx.dag_owner": "airflow",
"airflow.ctx.dag_id": "test_dag_id",
"airflow.ctx.task_id": "hive_to_mysql_check",
"airflow.ctx.logical_date": "2015-01-01T00:00:00+00:00",
}
assert expected_hive_conf == actual_hive_conf
expected_mysql_preoperator = [
"DROP TABLE IF EXISTS test_static_babynames;",
"CREATE TABLE test_static_babynames (name VARCHAR(500))",
]
mock_mysql_hook.run.assert_called_with(expected_mysql_preoperator)
mock_mysql_hook.insert_rows.assert_called_with(table="test_static_babynames", rows=test_hive_results)
|
TestHiveToMySqlTransfer
|
python
|
wandb__wandb
|
wandb/sdk/internal/sender.py
|
{
"start": 3911,
"end": 4697
}
|
class ____:
resumed: bool
step: int
history: int
events: int
output: int
runtime: float
wandb_runtime: Optional[int]
summary: Optional[Dict[str, Any]]
config: Optional[Dict[str, Any]]
tags: Optional[List[str]]
def __init__(self) -> None:
self.resumed = False
self.step = 0
self.history = 0
self.events = 0
self.output = 0
self.runtime = 0
# wandb_runtime is the canonical runtime (stored in summary._wandb.runtime)
self.wandb_runtime = None
self.summary = None
self.config = None
self.tags = None
def __str__(self) -> str:
obj = ",".join(map(lambda it: f"{it[0]}={it[1]}", vars(self).items()))
return f"ResumeState({obj})"
|
ResumeState
|
python
|
tiangolo__fastapi
|
docs_src/authentication_error_status_code/tutorial001_an.py
|
{
"start": 189,
"end": 628
}
|
class ____(HTTPBearer):
def make_not_authenticated_error(self) -> HTTPException:
return HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail="Not authenticated"
)
CredentialsDep = Annotated[HTTPAuthorizationCredentials, Depends(HTTPBearer403())]
@app.get("/me")
def read_me(credentials: CredentialsDep):
return {"message": "You are authenticated", "token": credentials.credentials}
|
HTTPBearer403
|
python
|
ray-project__ray
|
python/ray/experimental/channel/common.py
|
{
"start": 5625,
"end": 8587
}
|
class ____:
"""
Abstraction for a transport between a writer actor and some number of
reader actors.
"""
def __init__(
self,
writer: Optional[ray.actor.ActorHandle],
readers: List[Optional[ray.actor.ActorHandle]],
typ: Optional["ChannelOutputType"],
):
"""
Create a channel that can be read and written by a Ray driver or actor.
Args:
writer: The actor that may write to the channel. None signifies the driver.
readers: The actors that may read from the channel. None signifies
the driver.
typ: Type information about the values passed through the channel.
"""
pass
def ensure_registered_as_writer(self):
"""
Check whether the process is a valid writer. This method must be idempotent.
"""
raise NotImplementedError
def ensure_registered_as_reader(self):
"""
Check whether the process is a valid reader. This method must be idempotent.
"""
raise NotImplementedError
def write(self, value: Any, timeout: Optional[float] = None) -> None:
"""
Write a value to the channel.
Blocks if there are still pending readers for the previous value. The
writer may not write again until the specified number of readers have
read the value.
Args:
value: The value to write.
timeout: The maximum time in seconds to wait to write the value.
None means using default timeout, 0 means immediate timeout
(immediate success or timeout without blocking), -1 means
infinite timeout (block indefinitely).
"""
raise NotImplementedError
def read(self, timeout: Optional[float] = None) -> Any:
"""
Read the latest value from the channel. This call will block until a
value is available to read.
Subsequent calls to read() may *block* if the deserialized object is
zero-copy (e.g., bytes or a numpy array) *and* the object is still in scope.
Args:
timeout: The maximum time in seconds to wait to read the value.
None means using default timeout, 0 means immediate timeout
(immediate success or timeout without blocking), -1 means
infinite timeout (block indefinitely).
Returns:
Any: The deserialized value. If the deserialized value is an
Exception, it will be returned directly instead of being raised.
"""
raise NotImplementedError
def close(self) -> None:
"""
Close this channel. This method must not block and it must be made
idempotent. Any existing values in the channel may be lost after the
channel is closed.
"""
raise NotImplementedError
# Interfaces for channel I/O.
@DeveloperAPI
|
ChannelInterface
|
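To make the timeout convention in the docstrings above concrete (None = default, 0 = non-blocking, -1 = block forever), here is a hedged, single-process sketch backed by `queue.Queue`. It is not Ray's implementation; real channels coordinate between actors and handle serialization.

import queue
from typing import Any, Optional

DEFAULT_TIMEOUT = 10.0  # assumption: stand-in for whatever default the real channel uses

class LocalChannel:
    def __init__(self) -> None:
        self._queue: "queue.Queue[Any]" = queue.Queue(maxsize=1)

    @staticmethod
    def _translate(timeout: Optional[float]) -> Optional[float]:
        if timeout is None:
            return DEFAULT_TIMEOUT
        if timeout == -1:
            return None  # queue.Queue spells "block indefinitely" as timeout=None
        return timeout

    def write(self, value: Any, timeout: Optional[float] = None) -> None:
        t = self._translate(timeout)
        self._queue.put(value, block=(t != 0), timeout=t or None)

    def read(self, timeout: Optional[float] = None) -> Any:
        t = self._translate(timeout)
        return self._queue.get(block=(t != 0), timeout=t or None)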
python
|
walkccc__LeetCode
|
solutions/2192. All Ancestors of a Node in a Directed Acyclic Graph/2192.py
|
{
"start": 0,
"end": 481
}
|
class ____:
def getAncestors(self, n: int, edges: list[list[int]]) -> list[list[int]]:
ans = [[] for _ in range(n)]
graph = [[] for _ in range(n)]
for u, v in edges:
graph[u].append(v)
def dfs(u: int, ancestor: int, seen: set[int]) -> None:
seen.add(u)
for v in graph[u]:
if v in seen:
continue
ans[v].append(ancestor)
dfs(v, ancestor, seen)
for i in range(n):
dfs(i, i, set())
return ans
|
Solution
|
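A quick check of the solution above on LeetCode's first example; it assumes the `Solution` class defined above is in scope. Running one DFS per source node costs O(n * (n + e)) overall.

edges = [[0, 3], [0, 4], [1, 3], [2, 4], [2, 7], [3, 5], [3, 6], [3, 7], [4, 6]]
expected = [[], [], [], [0, 1], [0, 2], [0, 1, 3], [0, 1, 2, 3, 4], [0, 1, 2, 3]]
assert Solution().getAncestors(8, edges) == expected  # ancestors arrive in increasing order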
python
|
protocolbuffers__protobuf
|
python/google/protobuf/text_format.py
|
{
"start": 30833,
"end": 49135
}
|
class ____(object):
"""Text format parser for protocol message."""
def __init__(self,
allow_unknown_extension=False,
allow_field_number=False,
descriptor_pool=None,
allow_unknown_field=False):
self.allow_unknown_extension = allow_unknown_extension
self.allow_field_number = allow_field_number
self.descriptor_pool = descriptor_pool
self.allow_unknown_field = allow_unknown_field
def ParseLines(self, lines, message):
"""Parses a text representation of a protocol message into a message."""
self._allow_multiple_scalars = False
self._ParseOrMerge(lines, message)
return message
def MergeLines(self, lines, message):
"""Merges a text representation of a protocol message into a message."""
self._allow_multiple_scalars = True
self._ParseOrMerge(lines, message)
return message
def _ParseOrMerge(self, lines, message):
"""Converts a text representation of a protocol message into a message.
Args:
lines: Lines of a message's text representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On text parsing problems.
"""
# Tokenize expects native str lines.
try:
str_lines = (
line if isinstance(line, str) else line.decode('utf-8')
for line in lines)
tokenizer = Tokenizer(str_lines)
except UnicodeDecodeError as e:
raise ParseError from e
if message:
self.root_type = message.DESCRIPTOR.full_name
while not tokenizer.AtEnd():
self._MergeField(tokenizer, message)
def _MergeField(self, tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of text parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if (message_descriptor.full_name == _ANY_FULL_TYPE_NAME and
tokenizer.TryConsume('[')):
type_url_prefix, packed_type_name = self._ConsumeAnyTypeUrl(tokenizer)
tokenizer.Consume(']')
tokenizer.TryConsume(':')
self._DetectSilentMarker(tokenizer, message_descriptor.full_name,
type_url_prefix + '/' + packed_type_name)
if tokenizer.TryConsume('<'):
expanded_any_end_token = '>'
else:
tokenizer.Consume('{')
expanded_any_end_token = '}'
expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name,
self.descriptor_pool)
# Direct comparison with None is used instead of implicit bool conversion
# to avoid false positives with falsy initial values, e.g. for
# google.protobuf.ListValue.
if expanded_any_sub_message is None:
raise ParseError('Type %s not found in descriptor pool' %
packed_type_name)
while not tokenizer.TryConsume(expanded_any_end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' %
(expanded_any_end_token,))
self._MergeField(tokenizer, expanded_any_sub_message)
deterministic = False
message.Pack(expanded_any_sub_message,
type_url_prefix=type_url_prefix,
deterministic=deterministic)
return
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
# pylint: disable=protected-access
field = message.Extensions._FindExtensionByName(name)
# pylint: enable=protected-access
if not field:
if self.allow_unknown_extension:
field = None
else:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered. '
'Did you import the _pb2 module which defines it? '
'If you are trying to place the extension in the MessageSet '
'field of another message that is in an Any or MessageSet field, '
'that message\'s _pb2 module must be imported as well' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' %
(name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifierOrNumber()
if self.allow_field_number and name.isdigit():
number = ParseInteger(name, True, True)
field = message_descriptor.fields_by_number.get(number, None)
if not field and message_descriptor.is_extendable:
field = message.Extensions._FindExtensionByNumber(number)
else:
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and not _IsGroupLike(field):
field = None
if field and field.message_type.name != name:
field = None
if not field and not self.allow_unknown_field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' %
(message_descriptor.full_name, name))
if field:
if not self._allow_multiple_scalars and field.containing_oneof:
# Check if there's a different field set in this oneof.
# Note that we ignore the case if the same field was set before, and we
# apply _allow_multiple_scalars to non-scalar fields as well.
which_oneof = message.WhichOneof(field.containing_oneof.name)
if which_oneof is not None and which_oneof != field.name:
raise tokenizer.ParseErrorPreviousToken(
'Field "%s" is specified along with field "%s", another member '
'of oneof "%s" for message type "%s".' %
(field.name, which_oneof, field.containing_oneof.name,
message_descriptor.full_name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
self._DetectSilentMarker(tokenizer, message_descriptor.full_name,
field.full_name)
merger = self._MergeMessageField
else:
tokenizer.Consume(':')
self._DetectSilentMarker(tokenizer, message_descriptor.full_name,
field.full_name)
merger = self._MergeScalarField
if (field.is_repeated and
tokenizer.TryConsume('[')):
# Short repeated format, e.g. "foo: [1, 2, 3]"
if not tokenizer.TryConsume(']'):
while True:
merger(tokenizer, message, field)
if tokenizer.TryConsume(']'):
break
tokenizer.Consume(',')
else:
merger(tokenizer, message, field)
else: # Proto field is unknown.
assert (self.allow_unknown_extension or self.allow_unknown_field)
self._SkipFieldContents(tokenizer, name, message_descriptor.full_name)
# For historical reasons, fields may optionally be separated by commas or
# semicolons.
if not tokenizer.TryConsume(','):
tokenizer.TryConsume(';')
def _LogSilentMarker(self, immediate_message_type, field_name):
pass
def _DetectSilentMarker(self, tokenizer, immediate_message_type, field_name):
if tokenizer.contains_silent_marker_before_current_token:
self._LogSilentMarker(immediate_message_type, field_name)
def _ConsumeAnyTypeUrl(self, tokenizer):
"""Consumes a google.protobuf.Any type URL and returns the type name."""
# Consume "type.googleapis.com/".
prefix = [tokenizer.ConsumeIdentifier()]
tokenizer.Consume('.')
prefix.append(tokenizer.ConsumeIdentifier())
tokenizer.Consume('.')
prefix.append(tokenizer.ConsumeIdentifier())
tokenizer.Consume('/')
# Consume the fully-qualified type name.
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
return '.'.join(prefix), '.'.join(name)
def _MergeMessageField(self, tokenizer, message, field):
"""Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: The message of which field is a member.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
"""
is_map_entry = _IsMapEntry(field)
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if field.is_repeated:
if field.is_extension:
sub_message = message.Extensions[field].add()
elif is_map_entry:
sub_message = getattr(message, field.name).GetEntryClass()()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
if (not self._allow_multiple_scalars and
message.HasExtension(field)):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" extensions.' %
(message.DESCRIPTOR.full_name, field.full_name))
sub_message = message.Extensions[field]
else:
# Also apply _allow_multiple_scalars to message field.
# TODO: Change to _allow_singular_overwrites.
if (not self._allow_multiple_scalars and
message.HasField(field.name)):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" fields.' %
(message.DESCRIPTOR.full_name, field.name))
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,))
self._MergeField(tokenizer, sub_message)
if is_map_entry:
value_cpptype = field.message_type.fields_by_name['value'].cpp_type
if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
value = getattr(message, field.name)[sub_message.key]
value.CopyFrom(sub_message.value)
else:
getattr(message, field.name)[sub_message.key] = sub_message.value
def _MergeScalarField(self, tokenizer, message, field):
"""Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
RuntimeError: On runtime errors.
"""
_ = self.allow_unknown_extension
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = _ConsumeInt32(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = _ConsumeInt64(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = _ConsumeUint32(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = _ConsumeUint64(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
value = tokenizer.ConsumeEnum(field)
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.is_repeated:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
if (not self._allow_multiple_scalars and
field.has_presence and
message.HasExtension(field)):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" extensions.' %
(message.DESCRIPTOR.full_name, field.full_name))
else:
message.Extensions[field] = value
else:
duplicate_error = False
if not self._allow_multiple_scalars:
if field.has_presence:
duplicate_error = message.HasField(field.name)
else:
            # For a field that doesn't track presence, make a best-effort check
            # for multiple scalars by comparing against the default value.
duplicate_error = not decoder.IsDefaultScalarValue(
getattr(message, field.name)
)
if duplicate_error:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" fields.' %
(message.DESCRIPTOR.full_name, field.name))
else:
setattr(message, field.name, value)
def _SkipFieldContents(self, tokenizer, field_name, immediate_message_type):
"""Skips over contents (value or message) of a field.
Args:
tokenizer: A tokenizer to parse the field name and values.
field_name: The field name currently being parsed.
immediate_message_type: The type of the message immediately containing
the silent marker.
"""
# Try to guess the type of this field.
# If this field is not a message, there should be a ":" between the
# field name and the field value and also the field value should not
# start with "{" or "<" which indicates the beginning of a message body.
# If there is no ":" or there is a "{" or "<" after ":", this field has
# to be a message or the input is ill-formed.
if tokenizer.TryConsume(
':') and not tokenizer.LookingAt('{') and not tokenizer.LookingAt('<'):
self._DetectSilentMarker(tokenizer, immediate_message_type, field_name)
if tokenizer.LookingAt('['):
self._SkipRepeatedFieldValue(tokenizer, immediate_message_type)
else:
self._SkipFieldValue(tokenizer)
else:
self._DetectSilentMarker(tokenizer, immediate_message_type, field_name)
self._SkipFieldMessage(tokenizer, immediate_message_type)
def _SkipField(self, tokenizer, immediate_message_type):
"""Skips over a complete field (name and value/message).
Args:
tokenizer: A tokenizer to parse the field name and values.
immediate_message_type: The type of the message immediately containing
the silent marker.
"""
field_name = ''
if tokenizer.TryConsume('['):
# Consume extension or google.protobuf.Any type URL
field_name += '[' + tokenizer.ConsumeIdentifier()
num_identifiers = 1
while tokenizer.TryConsume('.'):
field_name += '.' + tokenizer.ConsumeIdentifier()
num_identifiers += 1
# This is possibly a type URL for an Any message.
if num_identifiers == 3 and tokenizer.TryConsume('/'):
field_name += '/' + tokenizer.ConsumeIdentifier()
while tokenizer.TryConsume('.'):
field_name += '.' + tokenizer.ConsumeIdentifier()
tokenizer.Consume(']')
field_name += ']'
else:
field_name += tokenizer.ConsumeIdentifierOrNumber()
self._SkipFieldContents(tokenizer, field_name, immediate_message_type)
# For historical reasons, fields may optionally be separated by commas or
# semicolons.
if not tokenizer.TryConsume(','):
tokenizer.TryConsume(';')
def _SkipFieldMessage(self, tokenizer, immediate_message_type):
"""Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values.
immediate_message_type: The type of the message immediately containing
the silent marker.
"""
if tokenizer.TryConsume('<'):
delimiter = '>'
else:
tokenizer.Consume('{')
delimiter = '}'
while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):
self._SkipField(tokenizer, immediate_message_type)
tokenizer.Consume(delimiter)
def _SkipFieldValue(self, tokenizer):
"""Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found.
"""
if (not tokenizer.TryConsumeByteString() and
not tokenizer.TryConsumeIdentifier() and
not _TryConsumeInt64(tokenizer) and
not _TryConsumeUint64(tokenizer) and
not tokenizer.TryConsumeFloat()):
raise ParseError('Invalid field value: ' + tokenizer.token)
def _SkipRepeatedFieldValue(self, tokenizer, immediate_message_type):
"""Skips over a repeated field value.
Args:
tokenizer: A tokenizer to parse the field value.
immediate_message_type: The type of the message immediately containing
the silent marker.
"""
tokenizer.Consume('[')
if not tokenizer.TryConsume(']'):
while True:
if tokenizer.LookingAt('<') or tokenizer.LookingAt('{'):
self._SkipFieldMessage(tokenizer, immediate_message_type)
else:
self._SkipFieldValue(tokenizer)
if tokenizer.TryConsume(']'):
break
tokenizer.Consume(',')
|
_Parser
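A minimal usage sketch of the public text_format entry points that drive the merging and skipping logic above, assuming the standard protobuf runtime is installed; FileDescriptorProto is used only as a convenient well-known message type.
# Sketch only: parse scalar fields from text format and serialize them back.
from google.protobuf import descriptor_pb2, text_format
msg = descriptor_pb2.FileDescriptorProto()
text_format.Parse('name: "example.proto" package: "demo"', msg)
print(text_format.MessageToString(msg).strip())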
|
python
|
openai__openai-python
|
src/openai/resources/beta/realtime/realtime.py
|
{
"start": 32186,
"end": 32699
}
|
class ____(BaseRealtimeConnectionResource):
def update(
self, *, session: transcription_session_update_param.Session, event_id: str | NotGiven = NOT_GIVEN
) -> None:
"""Send this event to update a transcription session."""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "transcription_session.update", "session": session, "event_id": event_id}),
)
)
|
RealtimeTranscriptionSessionResource
|
python
|
rq__rq
|
rq/job.py
|
{
"start": 1318,
"end": 1916
}
|
class ____(str, Enum):
"""The Status of Job within its lifecycle at any given time."""
CREATED = 'created'
QUEUED = 'queued'
FINISHED = 'finished'
FAILED = 'failed'
STARTED = 'started'
DEFERRED = 'deferred'
SCHEDULED = 'scheduled'
STOPPED = 'stopped'
CANCELED = 'canceled'
def parse_job_id(job_or_execution_id: str) -> str:
"""Parse a string and returns job ID. This function supports both job ID and execution composite key."""
if ':' in job_or_execution_id:
return job_or_execution_id.split(':')[0]
return job_or_execution_id
|
JobStatus
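An illustrative check of parse_job_id above (not part of the rq sources): a composite "job_id:execution_id" key reduces to the bare job id, while a plain id passes through unchanged.
# Follows directly from the split-on-":" logic above; the ids are placeholders.
assert parse_job_id("d9f1a2:execution-7") == "d9f1a2"
assert parse_job_id("d9f1a2") == "d9f1a2"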
|
python
|
getsentry__sentry
|
tests/snuba/rules/conditions/test_event_frequency.py
|
{
"start": 31122,
"end": 31733
}
|
class ____(StandardIntervalTestBase):
__test__ = Abstract(__module__, __qualname__)
rule_cls = EventFrequencyCondition
def increment(self, event, count, environment=None, timestamp=None):
timestamp = timestamp if timestamp else before_now(minutes=1)
data = {"fingerprint": event.data["fingerprint"]}
if environment:
data["environment"] = environment
for _ in range(count):
self.add_event(
data=data,
project_id=self.project.id,
timestamp=timestamp,
)
|
EventFrequencyConditionTestCase
|
python
|
ipython__ipython
|
IPython/terminal/ipapp.py
|
{
"start": 2137,
"end": 6248
}
|
class ____(CrashHandler):
"""sys.excepthook for IPython itself, leaves a detailed report on disk."""
def __init__(self, app):
contact_name = release.author
contact_email = release.author_email
bug_tracker = 'https://github.com/ipython/ipython/issues'
super(IPAppCrashHandler,self).__init__(
app, contact_name, contact_email, bug_tracker
)
def make_report(self,traceback):
"""Return a string containing a crash report."""
sec_sep = self.section_sep
# Start with parent report
report = [super(IPAppCrashHandler, self).make_report(traceback)]
# Add interactive-specific info we may have
rpt_add = report.append
try:
rpt_add(sec_sep+"History of session input:")
for line in self.app.shell.user_ns['_ih']:
rpt_add(line)
rpt_add('\n*** Last line of input (may not be in above history):\n')
rpt_add(self.app.shell._last_input_line+'\n')
except:
pass
return ''.join(report)
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags.update(shell_flags)
frontend_flags = {}
addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
'Turn on auto editing of files with syntax errors.',
'Turn off auto editing of files with syntax errors.'
)
addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',
"Force simple minimal prompt using `raw_input`",
"Use a rich interactive prompt with prompt_toolkit",
)
addflag('banner', 'TerminalIPythonApp.display_banner',
"Display a banner upon starting IPython.",
"Don't display a banner upon starting IPython."
)
addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
"""Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
"Don't prompt the user when exiting."
)
addflag(
"tip",
"TerminalInteractiveShell.enable_tip",
"""Shows a tip when IPython starts.""",
"Don't show tip when IPython starts.",
)
addflag('term-title', 'TerminalInteractiveShell.term_title',
"Enable auto setting the terminal title.",
"Disable auto setting the terminal title."
)
classic_config = Config()
classic_config.InteractiveShell.cache_size = 0
classic_config.PlainTextFormatter.pprint = False
classic_config.TerminalInteractiveShell.prompts_class = (
"IPython.terminal.prompts.ClassicPrompts"
)
classic_config.InteractiveShell.separate_in = ""
classic_config.InteractiveShell.separate_out = ""
classic_config.InteractiveShell.separate_out2 = ""
classic_config.InteractiveShell.colors = "nocolor"
classic_config.InteractiveShell.xmode = "Plain"
frontend_flags['classic']=(
classic_config,
"Gives IPython a similar feel to the classic Python prompt."
)
# # log doesn't make so much sense this way anymore
# paa('--log','-l',
# action='store_true', dest='InteractiveShell.logstart',
# help="Start logging to the default log file (./ipython_log.py).")
#
# # quick is harder to implement
frontend_flags['quick']=(
{'TerminalIPythonApp' : {'quick' : True}},
"Enable quick startup with no config files."
)
frontend_flags['i'] = (
{'TerminalIPythonApp' : {'force_interact' : True}},
"""If running code from the command line, become interactive afterwards.
It is often useful to follow this with `--` to treat remaining flags as
script arguments.
"""
)
flags.update(frontend_flags)
aliases = dict(base_aliases)
aliases.update(shell_aliases) # type: ignore[arg-type]
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
|
IPAppCrashHandler
|
python
|
pytorch__pytorch
|
torch/distributed/fsdp/_fully_shard/_fsdp_common.py
|
{
"start": 1857,
"end": 2328
}
|
class ____(DataParallelMeshInfo):
def __post_init__(self):
super().__post_init__()
if self.replicate_mesh_dim is None:
raise AssertionError("Expects non-None replicate_mesh_dim")
self.replicate_mesh_size: int = self.mesh.size(self.replicate_mesh_dim)
self.replicate_process_group = self.mesh.get_group(self.replicate_mesh_dim)
self.replicate_mesh_rank: int = self.replicate_process_group.rank()
@dataclass
|
DDPMeshInfo
|
python
|
pytorch__pytorch
|
torch/distributed/checkpoint/_pg_transport.py
|
{
"start": 1167,
"end": 1486
}
|
class ____:
"""
This is the metadata for a DTensor that is used to transfer checkpoints.
It contains the metadata for the local tensor and the spec of the DTensor.
This must be pickleable so that it can be sent over the wire.
"""
local: _TensorMeta
spec: _DTensorSpec
@dataclass
|
_DTensorMeta
|
python
|
huggingface__transformers
|
utils/models_to_deprecate.py
|
{
"start": 5082,
"end": 14131
}
|
class ____:
"""
Utility for getting models from the hub based on tags. Handles errors without crashing the script.
"""
def __init__(self, tags):
self.tags = tags
self.model_list = api.list_models(filter=tags)
def __iter__(self):
try:
yield from self.model_list
except Exception as e:
print(f"Error: {e}")
return
def _extract_commit_hash(commits):
for commit in commits:
if commit.startswith("commit "):
return commit.split(" ")[1]
return ""
def get_list_of_repo_model_paths(models_dir):
# Get list of all models in the library
models = glob.glob(os.path.join(models_dir, "*/modeling_*.py"))
# Get list of all deprecated models in the library
deprecated_models = glob.glob(os.path.join(models_dir, "deprecated", "*"))
# For each deprecated model, remove the deprecated models from the list of all models as well as the symlink path
for deprecated_model in deprecated_models:
deprecated_model_name = "/" + deprecated_model.split("/")[-1] + "/"
models = [model for model in models if deprecated_model_name not in model]
# Remove deprecated models
models = [model for model in models if "/deprecated" not in model]
# Remove auto
models = [model for model in models if "/auto/" not in model]
return models
def get_list_of_models_to_deprecate(
thresh_num_downloads=5_000,
thresh_date=None,
use_cache=False,
save_model_info=False,
max_num_models=-1,
):
if thresh_date is None:
thresh_date = datetime.now(timezone.utc).replace(year=datetime.now(timezone.utc).year - 1)
else:
thresh_date = datetime.strptime(thresh_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
models_dir = PATH_TO_REPO / "src/transformers/models"
model_paths = get_list_of_repo_model_paths(models_dir=models_dir)
if use_cache and os.path.exists("models_info.json"):
with open("models_info.json", "r") as f:
models_info = json.load(f)
# Convert datetimes back to datetime objects
for model, info in models_info.items():
info["first_commit_datetime"] = datetime.fromisoformat(info["first_commit_datetime"])
else:
print("Building a dictionary of basic model info...")
models_info = defaultdict(dict)
for i, model_path in enumerate(tqdm(sorted(model_paths))):
if max_num_models != -1 and i > max_num_models:
break
model = model_path.split("/")[-2]
if model in models_info:
continue
commits = repo.git.log("--diff-filter=A", "--", model_path).split("\n")
commit_hash = _extract_commit_hash(commits)
commit_obj = repo.commit(commit_hash)
committed_datetime = commit_obj.committed_datetime
models_info[model]["commit_hash"] = commit_hash
models_info[model]["first_commit_datetime"] = committed_datetime
models_info[model]["model_path"] = model_path
models_info[model]["downloads"] = 0
models_info[model]["tags"] = [model]
# The keys in the dictionary above are the model folder names. In some cases, the model tag on the hub does not
# match the model folder name. We replace the key and append the expected tag.
for folder_name, expected_tag in MODEL_FOLDER_NAME_TO_TAG_MAPPING.items():
if folder_name in models_info:
models_info[expected_tag] = models_info[folder_name]
models_info[expected_tag]["tags"] = [expected_tag]
del models_info[folder_name]
# Some models have multiple tags on the hub. We add the expected tag to the list of tags.
for model_name, extra_tags in EXTRA_TAGS_MAPPING.items():
if model_name in models_info:
models_info[model_name]["tags"].extend(extra_tags)
# Sanity check for the case with all models: the model tags must match the keys in the MODEL_NAMES_MAPPING
# (= actual model tags on the hub)
if max_num_models == -1:
all_model_tags = set()
for model_name in models_info:
all_model_tags.update(models_info[model_name]["tags"])
non_deprecated_model_tags = (
set(MODEL_NAMES_MAPPING.keys()) - set(DEPRECATED_MODELS_TAGS) - set(DEPRECATED_MODELS)
)
if all_model_tags != non_deprecated_model_tags:
raise ValueError(
"The tags of the `models_info` dictionary must match the keys in the `MODEL_NAMES_MAPPING`!"
"\nMissing tags in `model_info`: "
+ str(sorted(non_deprecated_model_tags - all_model_tags))
+ "\nExtra tags in `model_info`: "
+ str(sorted(all_model_tags - non_deprecated_model_tags))
+ "\n\nYou need to update one or more of the following: `MODEL_NAMES_MAPPING`, "
"`EXTRA_TAGS_MAPPING` or `DEPRECATED_MODELS_TAGS`."
)
# Filter out models which were added less than a year ago
models_info = {
model: info for model, info in models_info.items() if info["first_commit_datetime"] < thresh_date
}
# We make successive calls to the hub, filtering based on the model tags
print("Making calls to the hub to find models below the threshold number of downloads...")
num_models = len(models_info)
for i, (model, model_info) in enumerate(models_info.items()):
print(f"{i + 1}/{num_models}: getting hub downloads for model='{model}' (tags={model_info['tags']})")
for model_tag in model_info["tags"]:
if model_info["downloads"] > thresh_num_downloads:
break
model_list = HubModelLister(tags=model_tag)
for hub_model in model_list:
if hub_model.private:
continue
model_info["downloads"] += hub_model.downloads
# No need to make further hub calls, it's above the set threshold
if model_info["downloads"] > thresh_num_downloads:
break
if save_model_info and not (use_cache and os.path.exists("models_info.json")):
# Make datetimes serializable
for model, info in models_info.items():
info["first_commit_datetime"] = info["first_commit_datetime"].isoformat()
with open("models_info.json", "w") as f:
json.dump(models_info, f, indent=4)
print("\nFinding models to deprecate:")
n_models_to_deprecate = 0
models_to_deprecate = {}
for model, info in models_info.items():
n_downloads = info["downloads"]
if n_downloads < thresh_num_downloads:
n_models_to_deprecate += 1
models_to_deprecate[model] = info
print(f"\nModel: {model}")
print(f"Downloads: {n_downloads}")
print(f"Date: {info['first_commit_datetime']}")
# sort models to deprecate by downloads (lowest downloads first)
models_to_deprecate = sorted(models_to_deprecate.items(), key=lambda x: x[1]["downloads"])
print("\nModels to deprecate: ", "\n" + "\n".join([model[0] for model in models_to_deprecate]))
print(f"\nNumber of models to deprecate: {n_models_to_deprecate}")
print("Before deprecating make sure to verify the models, including if they're used as a module in other models.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save_model_info", action="store_true", help="Save the retrieved model info to a json file.")
parser.add_argument(
"--use_cache", action="store_true", help="Use the cached model info instead of calling the hub."
)
parser.add_argument(
"--thresh_num_downloads",
type=int,
default=5_000,
help=(
"Threshold number of downloads below which a model should be deprecated. Default is 5,000. If you are "
"considering a sweep and using a cache, set this to the highest number of the sweep."
),
)
parser.add_argument(
"--thresh_date",
type=str,
default=None,
help=(
"Date to consider the first commit from. Format: YYYY-MM-DD. If unset, defaults to one year ago from "
"today."
),
)
parser.add_argument(
"--max_num_models",
type=int,
default=-1,
help="Maximum number of models architectures to consider. -1 means all models. Useful for testing.",
)
args = parser.parse_args()
models_to_deprecate = get_list_of_models_to_deprecate(
thresh_num_downloads=args.thresh_num_downloads,
thresh_date=args.thresh_date,
use_cache=args.use_cache,
save_model_info=args.save_model_info,
max_num_models=args.max_num_models,
)
|
HubModelLister
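A hedged sketch of driving the helper above directly instead of via the CLI; the threshold values are illustrative, the call needs network access to the Hub plus a git checkout of transformers, and the function prints its report rather than returning it.
# Illustrative thresholds only; every keyword argument appears in the signature above.
get_list_of_models_to_deprecate(
    thresh_num_downloads=1_000,
    thresh_date="2023-01-01",
    use_cache=True,
    max_num_models=10,
)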
|
python
|
django__django
|
tests/admin_inlines/models.py
|
{
"start": 9368,
"end": 9606
}
|
class ____(models.Model):
collection = models.ForeignKey(
ProfileCollection, models.SET_NULL, blank=True, null=True
)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
|
Profile
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/internal/lambda_sources.py
|
{
"start": 3699,
"end": 16913
}
|
class ____:
# Opcodes, from dis.opmap. These may change between major versions.
NOP = 9
LOAD_FAST = 85
LOAD_FAST_LOAD_FAST = 88
LOAD_FAST_BORROW = 86
LOAD_FAST_BORROW_LOAD_FAST_BORROW = 87
def _normalize_code(f, l):
# A small selection of possible peephole code transformations, based on what
# is actually seen to differ between compilations in our test suite. Each
# entry contains two equivalent opcode sequences, plus a condition
# function called with their respective oparg sequences, which must return
# true for the transformation to be valid.
Checker = Callable[[list[int], list[int]], bool]
transforms: list[tuple[list[int], list[int], Checker | None]] = [
([_op.NOP], [], lambda a, b: True),
(
[_op.LOAD_FAST, _op.LOAD_FAST],
[_op.LOAD_FAST_LOAD_FAST],
lambda a, b: a == [b[0] >> 4, b[0] & 15],
),
(
[_op.LOAD_FAST_BORROW, _op.LOAD_FAST_BORROW],
[_op.LOAD_FAST_BORROW_LOAD_FAST_BORROW],
lambda a, b: a == [b[0] >> 4, b[0] & 15],
),
]
# augment with converse
transforms += [
(
ops_b,
ops_a,
condition and (lambda a, b, condition=condition: condition(b, a)),
)
for ops_a, ops_b, condition in transforms
]
# Normalize equivalent code. We assume that each bytecode op is 2 bytes,
# which is the case since Python 3.6. Since the opcodes values may change
# between version, there is a risk that a transform may not be equivalent
# -- even so, the risk of a bad transform producing a false positive is
# minuscule.
co_code = list(l.__code__.co_code)
f_code = list(f.__code__.co_code)
def alternating(code, i, n):
return code[i : i + 2 * n : 2]
i = 2
while i < max(len(co_code), len(f_code)):
# note that co_code is mutated in loop
if i < min(len(co_code), len(f_code)) and f_code[i] == co_code[i]:
i += 2
else:
for op1, op2, condition in transforms:
if (
op1 == alternating(f_code, i, len(op1))
and op2 == alternating(co_code, i, len(op2))
and condition(
alternating(f_code, i + 1, len(op1)),
alternating(co_code, i + 1, len(op2)),
)
):
break
else:
# no point in continuing since the bytecodes are different anyway
break
# Splice in the transform and continue
co_code = (
co_code[:i] + f_code[i : i + 2 * len(op1)] + co_code[i + 2 * len(op2) :]
)
i += 2 * len(op1)
# Normalize consts, in particular replace any lambda consts with the
# corresponding const from the template function, IFF they have the same
# source key.
f_consts = f.__code__.co_consts
l_consts = l.__code__.co_consts
if len(f_consts) == len(l_consts) and any(
inspect.iscode(l_const) for l_const in l_consts
):
normalized_consts = []
for f_const, l_const in zip(f_consts, l_consts, strict=True):
if (
inspect.iscode(l_const)
and inspect.iscode(f_const)
and _function_key(f_const) == _function_key(l_const)
):
# If the lambdas are compiled from the same source, make them be the
# same object so that the toplevel lambdas end up equal. Note that
# default arguments are not available on the code objects. But if the
# default arguments differ then the lambdas must also differ in other
# ways, since default arguments are set up from bytecode and constants.
# I.e., this appears to be safe wrt false positives.
normalized_consts.append(f_const)
else:
normalized_consts.append(l_const)
else:
normalized_consts = l_consts
return l.__code__.replace(
co_code=bytes(co_code),
co_consts=tuple(normalized_consts),
)
_module_map: dict[int, str] = {}
def _mimic_lambda_from_node(f, node):
# Compile the source (represented by an ast.Lambda node) in a context that
# as far as possible mimics the context that f was compiled in. If - and
# only if - this was the source of f then the result is indistinguishable
# from f itself (to a casual observer such as _function_key).
f_globals = f.__globals__.copy()
f_code = f.__code__
source = ast.unparse(node)
# Install values for non-literal argument defaults. Thankfully, these are
# always captured by value - so there is no interaction with the closure.
if f.__defaults__:
for f_default, l_default in zip(
f.__defaults__, node.args.defaults, strict=True
):
if isinstance(l_default, ast.Name):
f_globals[l_default.id] = f_default
if f.__kwdefaults__: # pragma: no cover
for l_default, l_varname in zip(
node.args.kw_defaults, node.args.kwonlyargs, strict=True
):
if isinstance(l_default, ast.Name):
f_globals[l_default.id] = f.__kwdefaults__[l_varname.arg]
# CPython's compiler treats known imports differently than normal globals,
# so check if we use attributes from globals that are modules (if so, we
# import them explicitly and redundantly in the exec below)
referenced_modules = [
(local_name, module)
for attr in extract_all_attributes(node)
if (
isinstance(attr.value, ast.Name)
and (local_name := attr.value.id)
and inspect.ismodule(module := f_globals.get(local_name))
)
]
if not f_code.co_freevars and not referenced_modules:
compiled = eval(source, f_globals)
else:
if f_code.co_freevars:
# We have to reconstruct a local closure. The closure will have
# the same values as the original function, although this is not
# required for source/bytecode equality.
f_globals |= {
f"__lc{i}": c.cell_contents for i, c in enumerate(f.__closure__)
}
captures = [f"{name}=__lc{i}" for i, name in enumerate(f_code.co_freevars)]
capture_str = ";".join(captures) + ";"
else:
capture_str = ""
if referenced_modules:
# We add import statements for all referenced modules, since that
# influences the compiled code. The assumption is that these modules
# were explicitly imported, not assigned, in the source - if not,
# this may/will give a different compilation result.
global _module_map
if len(_module_map) != len(sys.modules): # pragma: no branch
_module_map = {id(module): name for name, module in sys.modules.items()}
imports = [
(module_name, local_name)
for local_name, module in referenced_modules
if (module_name := _module_map.get(id(module))) is not None
]
import_fragments = [f"{name} as {asname}" for name, asname in set(imports)]
import_str = f"import {','.join(import_fragments)}\n"
else:
import_str = ""
exec_str = (
f"{import_str}def __construct_lambda(): {capture_str} return ({source})"
)
exec(exec_str, f_globals)
compiled = f_globals["__construct_lambda"]()
return compiled
def _lambda_code_matches_node(f, node):
try:
compiled = _mimic_lambda_from_node(f, node)
except (NameError, SyntaxError): # pragma: no cover # source is generated from ast
return False
if _function_key(f) == _function_key(compiled):
return True
# Try harder
compiled.__code__ = _normalize_code(f, compiled)
return _function_key(f) == _function_key(compiled)
def _check_unknown_perfectly_aligned_lambda(candidate):
# This is a monkeypatch point for our self-tests, to make unknown
# lambdas raise.
pass
def _lambda_description(f, leeway=50, *, fail_if_confused_with_perfect_candidate=False):
if hasattr(f, "__wrapped_target"):
f = f.__wrapped_target
# You might be wondering how a lambda can have a return-type annotation?
# The answer is that we add this at runtime, in new_given_signature(),
# and we do support strange choices such as applying @given() to a lambda.
sig = inspect.signature(f)
assert sig.return_annotation in (Parameter.empty, None), sig
# Using pytest-xdist on Python 3.13, there's an entry in the linecache for
# file "<string>", which then returns nonsense to getsource. Discard it.
linecache.cache.pop("<string>", None)
def format_lambda(body):
# The signature is more informative than the corresponding ast.unparse
# output in the case of default argument values, so add the signature
# to the unparsed body
return (
f"lambda {str(sig)[1:-1]}: {body}" if sig.parameters else f"lambda: {body}"
)
if_confused = format_lambda("<unknown>")
try:
source_lines, lineno0 = inspect.findsource(f)
source_lines = tuple(source_lines) # make it hashable
except OSError:
return if_confused
try:
all_lambdas = AST_LAMBDAS_CACHE[source_lines]
except KeyError:
# The source isn't already parsed, so we try to shortcut by parsing just
# the local block. If that fails to produce a code-identical lambda,
# fall through to the full parse.
local_lines = inspect.getblock(source_lines[lineno0:])
local_block = textwrap.dedent("".join(local_lines))
# The fairly common ".map(lambda x: ...)" case. This partial block
# isn't valid syntax, but it might be if we remove the leading ".".
local_block = local_block.removeprefix(".")
try:
local_tree = ast.parse(local_block)
except SyntaxError:
pass
else:
local_lambdas = extract_all_lambdas(local_tree)
for candidate in local_lambdas:
if reflection.ast_arguments_matches_signature(
candidate.args, sig
) and _lambda_code_matches_node(f, candidate):
return format_lambda(ast.unparse(candidate.body))
# Local parse failed or didn't produce a match, go ahead with the full parse
try:
tree = ast.parse("".join(source_lines))
except SyntaxError:
all_lambdas = []
else:
all_lambdas = extract_all_lambdas(tree)
AST_LAMBDAS_CACHE[source_lines] = all_lambdas
aligned_lambdas = []
for candidate in all_lambdas:
if (
candidate.lineno - leeway <= lineno0 + 1 <= candidate.lineno + leeway
and reflection.ast_arguments_matches_signature(candidate.args, sig)
):
aligned_lambdas.append(candidate)
aligned_lambdas.sort(key=lambda c: abs(lineno0 + 1 - c.lineno))
for candidate in aligned_lambdas:
if _lambda_code_matches_node(f, candidate):
return format_lambda(ast.unparse(candidate.body))
# None of the aligned lambdas match perfectly in generated code.
if aligned_lambdas and aligned_lambdas[0].lineno == lineno0 + 1:
_check_unknown_perfectly_aligned_lambda(aligned_lambdas[0])
return if_confused
def lambda_description(f):
"""
Returns a syntactically-valid expression describing `f`. This is often, but
not always, the exact lambda definition string which appears in the source code.
The difference comes from parsing the lambda ast into `tree` and then returning
the result of `ast.unparse(tree)`, which may differ in whitespace, double vs
single quotes, etc.
Returns a string indicating an unknown body if the parsing gets confused in any way.
"""
try:
return LAMBDA_DESCRIPTION_CACHE[f]
except KeyError:
pass
key = _function_key(f, bounded_size=True)
location = (f.__code__.co_filename, f.__code__.co_firstlineno)
try:
description, failed_locations = LAMBDA_DIGEST_DESCRIPTION_CACHE[key]
except KeyError:
failed_locations = set()
else:
# We got a hit in the digests cache, but only use it if either it has
# a good (known) description, or if it is unknown but we already tried
# to parse its exact source location before.
if "<unknown>" not in description or location in failed_locations:
# use the cached result
LAMBDA_DESCRIPTION_CACHE[f] = description
return description
description = _lambda_description(f)
LAMBDA_DESCRIPTION_CACHE[f] = description
if "<unknown>" in description:
failed_locations.add(location)
else:
failed_locations.clear() # we have a good description now
LAMBDA_DIGEST_DESCRIPTION_CACHE[key] = description, failed_locations
return description
|
_op
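Illustrative only, assuming the module above is importable: per its docstring, lambda_description returns a syntactically valid expression that usually matches the lambda's source text, with whitespace and quoting possibly normalized by ast.unparse and an "<unknown>" body when the parse gets confused.
print(lambda_description(lambda x: x + 1))  # typically prints: lambda x: x + 1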
|
python
|
doocs__leetcode
|
solution/1300-1399/1349.Maximum Students Taking Exam/Solution.py
|
{
"start": 0,
"end": 902
}
|
class ____:
def maxStudents(self, seats: List[List[str]]) -> int:
def f(seat: List[str]) -> int:
mask = 0
for i, c in enumerate(seat):
if c == '.':
mask |= 1 << i
return mask
@cache
def dfs(seat: int, i: int) -> int:
ans = 0
for mask in range(1 << n):
if (seat | mask) != seat or (mask & (mask << 1)):
continue
cnt = mask.bit_count()
if i == len(ss) - 1:
ans = max(ans, cnt)
else:
nxt = ss[i + 1]
nxt &= ~(mask << 1)
nxt &= ~(mask >> 1)
ans = max(ans, cnt + dfs(nxt, i + 1))
return ans
n = len(seats[0])
ss = [f(s) for s in seats]
return dfs(ss[0], 0)
|
Solution
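A quick sanity check with the sample layout from the LeetCode 1349 problem statement; it assumes functools.cache and typing.List are imported as in the original solution file.
seats = [["#", ".", "#", "#", ".", "#"],
         [".", "#", "#", "#", "#", "."],
         ["#", ".", "#", "#", ".", "#"]]
print(Solution().maxStudents(seats))  # expected output: 4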
|
python
|
PyCQA__pylint
|
pylint/reporters/ureports/nodes.py
|
{
"start": 1449,
"end": 2646
}
|
class ____(VNode):
"""Base container node.
attributes
* children : components in this table (i.e. the table's cells)
"""
def __init__(self, children: Iterable[Text | str] = ()) -> None:
super().__init__()
for child in children:
if isinstance(child, VNode):
self.append(child)
else:
self.add_text(child)
def append(self, child: VNode) -> None:
"""Add a node to children."""
assert child not in self.parents()
self.children.append(child)
child.parent = self
def insert(self, index: int, child: VNode) -> None:
"""Insert a child node."""
self.children.insert(index, child)
child.parent = self
def parents(self) -> list[BaseLayout]:
"""Return the ancestor nodes."""
assert self.parent is not self
if self.parent is None:
return []
return [self.parent, *self.parent.parents()]
def add_text(self, text: str) -> None:
"""Shortcut to add text data."""
self.children.append(Text(text))
# non container nodes #########################################################
|
BaseLayout
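An illustrative construction of the container API above, assuming the Text node class from the same pylint module; the strings are placeholders.
layout = BaseLayout(children=["pylint", Text("report")])
layout.add_text("1 message")
print(len(layout.children))  # 3 children: two from the constructor, one from add_text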
|
python
|
mlflow__mlflow
|
dev/set_matrix.py
|
{
"start": 2169,
"end": 2391
}
|
class ____(BaseModel):
model_config = ConfigDict(extra="forbid")
pip_release: str
install_dev: str | None = None
module_name: str | None = None
genai: bool = False
repo: str | None = None
|
PackageInfo
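A hypothetical instantiation of the pydantic model above; the package names are placeholders, and extra keys would be rejected because of extra="forbid".
info = PackageInfo(pip_release="scikit-learn", module_name="sklearn")
print(info.genai)  # False by default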
|
python
|
sphinx-doc__sphinx
|
sphinx/transforms/post_transforms/images.py
|
{
"start": 1280,
"end": 4388
}
|
class ____(BaseImageConverter):
default_priority = 100
def match(self, node: nodes.image) -> bool:
if not self.env._builder_cls.supported_image_types:
return False
if self.env._builder_cls.supported_remote_images:
return False
return '://' in node['uri']
def handle(self, node: nodes.image) -> None:
try:
basename = os.path.basename(node['uri'])
if '?' in basename:
basename = basename.split('?')[0]
if not basename or len(basename) > MAX_FILENAME_LEN:
filename, ext = os.path.splitext(node['uri'])
basename = (
sha1(filename.encode(), usedforsecurity=False).hexdigest() + ext
)
basename = CRITICAL_PATH_CHAR_RE.sub('_', basename)
uri_hash = sha1(node['uri'].encode(), usedforsecurity=False).hexdigest()
path = Path(self.imagedir, uri_hash, basename)
path.parent.mkdir(parents=True, exist_ok=True)
self._download_image(node, path)
except Exception as exc:
msg = __('Could not fetch remote image: %s [%s]')
logger.warning(msg, node['uri'], exc)
def _download_image(self, node: nodes.image, path: Path) -> None:
headers = {}
if path.exists():
timestamp: float = ceil(path.stat().st_mtime)
headers['If-Modified-Since'] = epoch_to_rfc1123(timestamp)
config = self.config
r = requests.get(
node['uri'],
headers=headers,
_user_agent=config.user_agent,
_tls_info=(config.tls_verify, config.tls_cacerts),
)
if r.status_code >= 400:
msg = __('Could not fetch remote image: %s [%d]')
logger.warning(msg, node['uri'], r.status_code)
else:
self.env.original_image_uri[_StrPath(path)] = node['uri']
if r.status_code == 200:
path.write_bytes(r.content)
if last_modified := r.headers.get('Last-Modified'):
timestamp = rfc1123_to_epoch(last_modified)
os.utime(path, (timestamp, timestamp))
self._process_image(node, path)
def _process_image(self, node: nodes.image, path: Path) -> None:
str_path = _StrPath(path)
self.env.original_image_uri[str_path] = node['uri']
mimetype = guess_mimetype(path, default='*')
if mimetype != '*' and not path.suffix:
# append a suffix if URI does not contain suffix
ext = get_image_extension(mimetype) or ''
with_ext = path.with_name(path.name + ext)
path.replace(with_ext)
self.env.original_image_uri.pop(str_path)
self.env.original_image_uri[_StrPath(with_ext)] = node['uri']
path = with_ext
path_str = str(path)
node['candidates'].pop('?')
node['candidates'][mimetype] = path_str
node['uri'] = path_str
self.env.images.add_file(self.env.current_document.docname, path_str)
|
ImageDownloader
|
python
|
pytorch__pytorch
|
benchmarks/transformer/attention_bias_benchmarks.py
|
{
"start": 2078,
"end": 7582
}
|
class ____(torch.nn.Module):
def __init__(self, num_heads, embed_dim, device=None, dtype=None):
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.head_dim = embed_dim // num_heads
self.embed_dim = embed_dim
assert self.head_dim * num_heads == self.embed_dim, (
"embed_dim must be divisible by num_heads"
)
self.q_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs)
)
self.k_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs)
)
self.v_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs)
)
self.out_proj = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
self.num_heads = num_heads
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: Union[torch.Tensor, CausalBias],
):
query_projected = F.linear(query, self.q_proj_weight)
key_projected = F.linear(key, self.k_proj_weight)
value_projected = F.linear(value, self.v_proj_weight)
query = query.view(
query_projected.size(0), -1, self.num_heads, self.head_dim
).transpose(1, 2)
key = key.view(
key_projected.size(0), -1, self.num_heads, self.head_dim
).transpose(1, 2)
value = value.view(
value_projected.size(0), -1, self.num_heads, self.head_dim
).transpose(1, 2)
attn = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=mask,
dropout_p=0.0,
)
attn = attn.transpose(1, 2).reshape(query.size(0), -1, self.embed_dim)
# Match return signature of nn.MHA
return F.linear(attn, self.out_proj)
def reset_parameters(self):
nn.init.xavier_uniform_(self.q_proj_weight)
nn.init.xavier_uniform_(self.k_proj_weight)
nn.init.xavier_uniform_(self.v_proj_weight)
nn.init.constant_(self.out_proj, 0.0)
def run_single_experiment(config: ExperimentConfig) -> ExperimentResults:
device = torch.device("cuda")
composite_mha = CompositeMHA(
config.num_heads, config.embed_dim, device, config.dtype
)
composite_mha.reset_parameters()
query, key, value = generate_inputs(
config.batch_size,
config.q_seq_len,
config.k_seq_len,
config.embed_dim,
config.dtype,
device,
)
attn_mask = CausalBias(
CausalVariant.LOWER_RIGHT, config.q_seq_len, config.k_seq_len
)
attn_mask_tensor = attn_mask._materialize(device)
materialized_mask_time = benchmark_torch_function_in_microseconds(
composite_mha, query, key, value, attn_mask_tensor
)
attn_mask_subclass_time = benchmark_torch_function_in_microseconds(
composite_mha, query, key, value, attn_mask
)
torch.testing.assert_close(
composite_mha(query, key, value, attn_mask_tensor),
composite_mha(query, key, value, attn_mask),
)
return ExperimentResults(
materialized_mask_time=materialized_mask_time,
attn_mask_subclass_time=attn_mask_subclass_time,
)
def generate_experiment_configs() -> list[ExperimentConfig]:
batch_sizes = [1, 8, 16, 128]
num_heads = [16, 32]
q_kv_seq_lens = [(128, 256), (256, 416), (512, 4097), (1024, 2048), (1, 2048)]
embed_dims = [2048, 4096]
dtypes = [
torch.bfloat16,
]
all_configs = []
for bsz, heads, (q_seq_len, kv_seq_len), embed_dim, dtype in itertools.product(
batch_sizes, num_heads, q_kv_seq_lens, embed_dims, dtypes
):
all_configs.append(
ExperimentConfig(
batch_size=bsz,
num_heads=heads,
q_seq_len=q_seq_len,
k_seq_len=kv_seq_len,
embed_dim=embed_dim,
dtype=dtype,
)
)
return all_configs
def calculate_speedup(results: ExperimentResults) -> float:
return results.materialized_mask_time / results.attn_mask_subclass_time
def print_results(results: list[Experiment]):
# Calculate speedups
speedups = [calculate_speedup(r.results) for r in results]
# Find indices of max and min speedups
max_speedup_index = np.argmax(speedups)
min_speedup_index = np.argmin(speedups)
# Get the config dictionaries
max_config_dict = results[max_speedup_index].config.asdict()
min_config_dict = results[min_speedup_index].config.asdict()
# Create table data
table_data = [
{
"Type": "Average",
"Speedup": np.mean(speedups),
**dict.fromkeys(max_config_dict),
},
{"Type": "Max", "Speedup": speedups[max_speedup_index], **max_config_dict},
{"Type": "Min", "Speedup": speedups[min_speedup_index], **min_config_dict},
]
# Print table
print(tabulate(table_data, headers="keys", tablefmt="pretty"))
def main():
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
results = []
# Run one timing experiment comparing nn_mha vs composite_mha
for config in tqdm(generate_experiment_configs()):
results.append(Experiment(config, run_single_experiment(config)))
print_results(results)
if __name__ == "__main__":
main()
|
CompositeMHA
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/indices/tree/tree_root_retriever.py
|
{
"start": 487,
"end": 1750
}
|
class ____(BaseRetriever):
"""
Tree root retriever.
This class directly retrieves the answer from the root nodes.
Unlike GPTTreeIndexLeafQuery, this class assumes the graph already stores
the answer (because it was constructed with a query_str), so it does not
attempt to parse information down the graph in order to synthesize an answer.
"""
def __init__(
self,
index: TreeIndex,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
self._index = index
self._index_struct = index.index_struct
self._docstore = index.docstore
super().__init__(
callback_manager=callback_manager, object_map=object_map, verbose=verbose
)
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Get nodes for response."""
logger.info(f"> Starting query: {query_bundle.query_str}")
root_nodes = self._docstore.get_node_dict(self._index_struct.root_nodes)
sorted_nodes = get_sorted_node_list(root_nodes)
return [NodeWithScore(node=node) for node in sorted_nodes]
|
TreeRootRetriever
|
python
|
wandb__wandb
|
wandb/sdk/lib/service/service_connection.py
|
{
"start": 780,
"end": 2198
}
|
class ____(Exception):
"""Failed to execute an API request to wandb-core."""
def connect_to_service(
asyncer: asyncio_manager.AsyncioManager,
settings: wandb_settings.Settings,
) -> ServiceConnection:
"""Connect to the service process, starting one up if necessary."""
token = service_token.from_env()
if token:
return ServiceConnection(
asyncer=asyncer,
client=token.connect(asyncer=asyncer),
proc=None,
)
else:
return _start_and_connect_service(asyncer, settings)
def _start_and_connect_service(
asyncer: asyncio_manager.AsyncioManager,
settings: wandb_settings.Settings,
) -> ServiceConnection:
"""Start a service process and returns a connection to it.
An atexit hook is registered to tear down the service process and wait for
it to complete. The hook does not run in processes started using the
multiprocessing module.
"""
proc = service_process.start(settings)
client = proc.token.connect(asyncer=asyncer)
proc.token.save_to_env()
hooks = ExitHooks()
hooks.hook()
def teardown_atexit():
conn.teardown(hooks.exit_code)
conn = ServiceConnection(
asyncer=asyncer,
client=client,
proc=proc,
cleanup=lambda: atexit.unregister(teardown_atexit),
)
atexit.register(teardown_atexit)
return conn
|
WandbApiFailedError
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/base.py
|
{
"start": 181,
"end": 3137
}
|
class ____(BaseReader):
"""
Image parser.
Extract tabular data from a chart or figure.
"""
def __init__(
self,
parser_config: Optional[Dict] = None,
keep_image: bool = False,
max_output_tokens=512,
prompt: str = "Generate underlying data table of the figure below:",
):
"""Init params."""
if parser_config is None:
try:
import torch
from PIL import Image # noqa: F401
from transformers import (
Pix2StructForConditionalGeneration,
Pix2StructProcessor,
)
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the ImageCaptionReader: "
"`pip install torch transformers Pillow`"
)
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
processor = Pix2StructProcessor.from_pretrained("google/deplot")
model = Pix2StructForConditionalGeneration.from_pretrained(
"google/deplot", torch_dtype=dtype
)
parser_config = {
"processor": processor,
"model": model,
"device": device,
"dtype": dtype,
}
self._parser_config = parser_config
self._keep_image = keep_image
self._max_output_tokens = max_output_tokens
self._prompt = prompt
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file."""
from llama_index.core.img_utils import img_2_b64
from PIL import Image
# load document image
image = Image.open(file)
if image.mode != "RGB":
image = image.convert("RGB")
# Encode image into base64 string and keep in document
image_str: Optional[str] = None
if self._keep_image:
image_str = img_2_b64(image)
# Parse image into text
model = self._parser_config["model"]
processor = self._parser_config["processor"]
device = self._parser_config["device"]
dtype = self._parser_config["dtype"]
model.to(device)
# unconditional image captioning
inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype)
out = model.generate(**inputs, max_new_tokens=self._max_output_tokens)
text_str = "Figure or chart with tabular data: " + processor.decode(
out[0], skip_special_tokens=True
)
return [
ImageDocument(
text=text_str,
image=image_str,
extra_info=extra_info or {},
)
]
|
ImageTabularChartReader
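A hedged usage sketch of the reader above; "chart.png" is a placeholder path, and the first call downloads the google/deplot weights, so torch, transformers and Pillow must be installed.
from pathlib import Path
reader = ImageTabularChartReader(keep_image=True)
docs = reader.load_data(Path("chart.png"))
print(docs[0].text)  # "Figure or chart with tabular data: ..." followed by the extracted table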
|
python
|
SmileyChris__easy-thumbnails
|
easy_thumbnails/tests/test_namers.py
|
{
"start": 2049,
"end": 2421
}
|
class ____(TestCase):
def test_basic(self):
filename = namers.source_hashed(
thumbnailer=FakeThumbnailer(),
prepared_options=['100x100', 'q80', 'crop', 'upscale'],
source_filename='source.jpg',
thumbnail_extension='jpg',
)
self.assertEqual(filename, '1xedFtqllFo9_100x100_QHCa6G1l.jpg')
|
SourceHashed
|
python
|
tornadoweb__tornado
|
tornado/test/autoreload_test.py
|
{
"start": 157,
"end": 9176
}
|
class ____(unittest.TestCase):
def setUp(self):
# When these tests fail the output sometimes exceeds the default maxDiff.
self.maxDiff = 1024
self.path = mkdtemp()
# Most test apps run themselves twice via autoreload. The first time it manually triggers
# a reload (could also do this by touching a file but this is faster since filesystem
# timestamps are not necessarily high resolution). The second time it exits directly
# so that the autoreload wrapper (if it is used) doesn't catch it.
#
# The last line of each such test's "main" program should be
# exec(open("run_twice_magic.py").read())
self.write_files(
{
"run_twice_magic.py": """
import os
import sys
import tornado.autoreload
sys.stdout.flush()
if "TESTAPP_STARTED" not in os.environ:
os.environ["TESTAPP_STARTED"] = "1"
tornado.autoreload._reload()
else:
os._exit(0)
"""
}
)
def tearDown(self):
try:
shutil.rmtree(self.path)
except OSError:
# Windows disallows deleting files that are in use by
# another process, and even though we've waited for our
# child process below, it appears that its lock on these
# files is not guaranteed to be released by this point.
# Sleep and try again (once).
time.sleep(1)
shutil.rmtree(self.path)
def write_files(self, tree, base_path=None):
"""Write a directory tree to self.path.
tree is a dictionary mapping file names to contents, or
sub-dictionaries representing subdirectories.
"""
if base_path is None:
base_path = self.path
for name, contents in tree.items():
if isinstance(contents, dict):
os.mkdir(os.path.join(base_path, name))
self.write_files(contents, os.path.join(base_path, name))
else:
with open(os.path.join(base_path, name), "w", encoding="utf-8") as f:
f.write(textwrap.dedent(contents))
def run_subprocess(self, args):
# Make sure the tornado module under test is available to the test
# application
parts = [os.getcwd()]
if "PYTHONPATH" in os.environ:
parts += [
os.path.join(os.getcwd(), part)
for part in os.environ["PYTHONPATH"].split(os.pathsep)
]
pythonpath = os.pathsep.join(parts)
p = Popen(
args,
stdout=subprocess.PIPE,
env=dict(os.environ, PYTHONPATH=pythonpath),
cwd=self.path,
universal_newlines=True,
encoding="utf-8",
)
# This timeout needs to be fairly generous for pypy due to jit
# warmup costs.
for i in range(40):
if p.poll() is not None:
break
time.sleep(0.1)
else:
p.kill()
raise Exception("subprocess failed to terminate")
out = p.communicate()[0]
self.assertEqual(p.returncode, 0)
return out
def test_reload(self):
main = """\
import sys
# In module mode, the path is set to the parent directory and we can import testapp.
try:
import testapp
except ImportError:
print("import testapp failed")
else:
print("import testapp succeeded")
spec = getattr(sys.modules[__name__], '__spec__', None)
print(f"Starting {__name__=}, __spec__.name={getattr(spec, 'name', None)}")
exec(open("run_twice_magic.py", encoding="utf-8").read())
"""
# Create temporary test application
self.write_files(
{
"testapp": {
"__init__.py": "",
"__main__.py": main,
},
}
)
# The autoreload wrapper should support all the same modes as the python interpreter.
# The wrapper itself should have no effect on this test so we try all modes with and
# without it.
for wrapper in [False, True]:
with self.subTest(wrapper=wrapper):
with self.subTest(mode="module"):
if wrapper:
base_args = [sys.executable, "-m", "tornado.autoreload"]
else:
base_args = [sys.executable]
# In module mode, the path is set to the parent directory and we can import
# testapp. Also, the __spec__.name is set to the fully qualified module name.
out = self.run_subprocess(base_args + ["-m", "testapp"])
self.assertEqual(
out,
(
"import testapp succeeded\n"
+ "Starting __name__='__main__', __spec__.name=testapp.__main__\n"
)
* 2,
)
with self.subTest(mode="file"):
out = self.run_subprocess(base_args + ["testapp/__main__.py"])
# In file mode, we do not expect the path to be set so we can import testapp,
# but when the wrapper is used the -m argument to the python interpreter
# does this for us.
expect_import = (
"import testapp succeeded"
if wrapper
else "import testapp failed"
)
# In file mode there is no qualified module spec.
self.assertEqual(
out,
f"{expect_import}\nStarting __name__='__main__', __spec__.name=None\n"
* 2,
)
with self.subTest(mode="directory"):
# Running as a directory finds __main__.py like a module. It does not manipulate
# sys.path but it does set a spec with a name of exactly __main__.
out = self.run_subprocess(base_args + ["testapp"])
expect_import = (
"import testapp succeeded"
if wrapper
else "import testapp failed"
)
self.assertEqual(
out,
f"{expect_import}\nStarting __name__='__main__', __spec__.name=__main__\n"
* 2,
)
def test_reload_wrapper_preservation(self):
# This test verifies that when `python -m tornado.autoreload`
# is used on an application that also has an internal
# autoreload, the reload wrapper is preserved on restart.
main = """\
import sys
# This import will fail if path is not set up correctly
import testapp
if 'tornado.autoreload' not in sys.modules:
raise Exception('started without autoreload wrapper')
print('Starting')
exec(open("run_twice_magic.py", encoding="utf-8").read())
"""
self.write_files(
{
"testapp": {
"__init__.py": "",
"__main__.py": main,
},
}
)
out = self.run_subprocess(
[sys.executable, "-m", "tornado.autoreload", "-m", "testapp"]
)
self.assertEqual(out, "Starting\n" * 2)
def test_reload_wrapper_args(self):
main = """\
import os
import sys
print(os.path.basename(sys.argv[0]))
print(f'argv={sys.argv[1:]}')
exec(open("run_twice_magic.py", encoding="utf-8").read())
"""
# Create temporary test application
self.write_files({"main.py": main})
# Make sure the tornado module under test is available to the test
# application
out = self.run_subprocess(
[
sys.executable,
"-m",
"tornado.autoreload",
"main.py",
"arg1",
"--arg2",
"-m",
"arg3",
],
)
self.assertEqual(out, "main.py\nargv=['arg1', '--arg2', '-m', 'arg3']\n" * 2)
def test_reload_wrapper_until_success(self):
main = """\
import os
import sys
if "TESTAPP_STARTED" in os.environ:
print("exiting cleanly")
sys.exit(0)
else:
print("reloading")
exec(open("run_twice_magic.py", encoding="utf-8").read())
"""
# Create temporary test application
self.write_files({"main.py": main})
out = self.run_subprocess(
[sys.executable, "-m", "tornado.autoreload", "--until-success", "main.py"]
)
self.assertEqual(out, "reloading\nexiting cleanly\n")
|
AutoreloadTest
|
python
|
arrow-py__arrow
|
arrow/locales.py
|
{
"start": 123744,
"end": 126374
}
|
class ____(Locale):
names = ["zu", "zu-za"]
past = "{0} edlule"
future = "{0} "
and_word = "futhi"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[Mapping[str, str], str]]] = {
"now": "manje",
"second": {"past": "umzuzwana", "future": "ngomzuzwana"},
"seconds": {"past": "{0} imizuzwana", "future": "{0} ngemizuzwana"},
"minute": {"past": "umzuzu", "future": "ngomzuzu"},
"minutes": {"past": "{0} imizuzu", "future": "{0} ngemizuzu"},
"hour": {"past": "ihora", "future": "ngehora"},
"hours": {"past": "{0} amahora", "future": "{0} emahoreni"},
"day": {"past": "usuku", "future": "ngosuku"},
"days": {"past": "{0} izinsuku", "future": "{0} ezinsukwini"},
"week": {"past": "isonto", "future": "ngesonto"},
"weeks": {"past": "{0} amasonto", "future": "{0} emasontweni"},
"month": {"past": "inyanga", "future": "ngenyanga"},
"months": {"past": "{0} izinyanga", "future": "{0} ezinyangeni"},
"year": {"past": "unyaka", "future": "ngonyak"},
"years": {"past": "{0} iminyaka", "future": "{0} eminyakeni"},
}
def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str:
"""Zulu aware time frame format function, takes into account
the differences between past and future forms."""
abs_delta = abs(delta)
form = self.timeframes[timeframe]
if isinstance(form, str):
return form.format(abs_delta)
if delta > 0:
key = "future"
else:
key = "past"
form = form[key]
return form.format(abs_delta)
month_names = [
"",
"uMasingane",
"uNhlolanja",
"uNdasa",
"UMbasa",
"UNhlaba",
"UNhlangulana",
"uNtulikazi",
"UNcwaba",
"uMandulo",
"uMfumfu",
"uLwezi",
"uZibandlela",
]
month_abbreviations = [
"",
"uMasingane",
"uNhlolanja",
"uNdasa",
"UMbasa",
"UNhlaba",
"UNhlangulana",
"uNtulikazi",
"UNcwaba",
"uMandulo",
"uMfumfu",
"uLwezi",
"uZibandlela",
]
day_names = [
"",
"uMsombuluko",
"uLwesibili",
"uLwesithathu",
"uLwesine",
"uLwesihlanu",
"uMgqibelo",
"iSonto",
]
day_abbreviations = [
"",
"uMsombuluko",
"uLwesibili",
"uLwesithathu",
"uLwesine",
"uLwesihlanu",
"uMgqibelo",
"iSonto",
]
|
ZuluLocale
|
python
|
yaml__pyyaml
|
lib/yaml/tokens.py
|
{
"start": 1677,
"end": 1720
}
|
class ____(Token):
id = ','
|
FlowEntryToken
|
python
|
getsentry__sentry
|
tests/sentry/deletions/tasks/test_nodestore.py
|
{
"start": 417,
"end": 3919
}
|
class ____(TestCase):
def create_n_events_with_group(self, n_events: int) -> list[Event]:
events = []
for _ in range(n_events):
event = self.store_event(
data={"fingerprint": [uuid4().hex]}, project_id=self.project.id
)
events.append(event)
return events
def fetch_events_from_eventstore(self, group_ids: list[int], dataset: Dataset) -> list[Event]:
return fetch_events_from_eventstore(
project_id=self.project.id,
group_ids=group_ids,
dataset=dataset,
referrer=Referrer.DELETIONS_GROUP.value,
tenant_ids={
"referrer": Referrer.DELETIONS_GROUP.value,
"organization_id": self.project.organization_id,
},
)
def test_simple_deletion_with_events(self) -> None:
"""Test nodestore deletion when events are found."""
events = self.create_n_events_with_group(n_events=5)
group_ids = [event.group_id for event in events if event.group_id is not None]
# Verify events exist in both eventstore and nodestore before deletion
events = self.fetch_events_from_eventstore(group_ids, dataset=Dataset.Events)
assert len(events) == 5
with self.tasks():
delete_events_for_groups_from_nodestore_and_eventstore.apply_async(
kwargs={
"organization_id": self.project.organization_id,
"project_id": self.project.id,
"group_ids": group_ids,
"times_seen": [1] * len(group_ids),
"transaction_id": uuid4().hex,
"dataset_str": Dataset.Events.value,
"referrer": "deletions.groups",
},
)
# Events should be deleted from eventstore after nodestore deletion
events_after = self.fetch_events_from_eventstore(group_ids, dataset=Dataset.Events)
assert len(events_after) == 0
def test_deletion_with_project_deleted(self) -> None:
"""Test nodestore deletion when project is deleted."""
events = self.create_n_events_with_group(n_events=5)
group_ids = [event.group_id for event in events if event.group_id is not None]
# Verify events exist in both eventstore and nodestore before deletion
events = self.fetch_events_from_eventstore(group_ids, dataset=Dataset.Events)
assert len(events) == 5
# Deleting the project will cause Snuba to raise an error when fetching the event IDs.
self.project.delete()
with self.tasks():
# To delete events from the nodestore we fetch the event IDs from the eventstore (Snuba),
# however, when we delete the project, Snuba will raise an error.
delete_events_for_groups_from_nodestore_and_eventstore.apply_async(
kwargs={
"organization_id": self.project.organization_id,
"project_id": self.project.id,
"group_ids": group_ids,
"times_seen": [1] * len(group_ids),
"transaction_id": uuid4().hex,
"dataset_str": Dataset.Events.value,
"referrer": "deletions.groups",
},
)
with pytest.raises(UnqualifiedQueryError):
self.fetch_events_from_eventstore(group_ids, dataset=Dataset.Events)
|
NodestoreDeletionTaskTest
|
python
|
milvus-io__pymilvus
|
pymilvus/orm/connections.py
|
{
"start": 1437,
"end": 1911
}
|
class ____(type):
instance = None
def __init__(cls, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def __call__(cls, *args, **kwargs):
if cls.instance:
return cls.instance
cls.instance = cls.__new__(cls)
cls.instance.__init__(*args, **kwargs)
return cls.instance
@synchronized
def __new__(cls, *args, **kwargs):
return super().__new__(cls, *args, **kwargs)
|
SingleInstanceMetaClass
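An illustrative behaviour check for the metaclass above; _Registry is a hypothetical class name, and the snippet assumes the synchronized decorator from the surrounding pymilvus module is available.
class _Registry(metaclass=SingleInstanceMetaClass):
    def __init__(self) -> None:
        self.items = []
assert _Registry() is _Registry()  # every call returns the one cached instance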
|
python
|
numba__numba
|
numba/cuda/cudadecl.py
|
{
"start": 2485,
"end": 2616
}
|
class ____(ConcreteTemplate):
key = cuda.threadfence_system
cases = [signature(types.none)]
@register
|
Cuda_threadfence_system
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-split-of-positive-even-integers.py
|
{
"start": 44,
"end": 423
}
|
class ____(object):
def maximumEvenSplit(self, finalSum):
"""
:type finalSum: int
:rtype: List[int]
"""
if finalSum%2:
return []
result = []
i = 2
while i <= finalSum:
result.append(i)
finalSum -= i
i += 2
result[-1] += finalSum
return result
|
Solution
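A worked example of the greedy split above: consume 2, 4, 6, ... while the remainder allows it, then fold any leftover into the last (largest) term so every value stays unique and even.
print(Solution().maximumEvenSplit(12))  # [2, 4, 6]
print(Solution().maximumEvenSplit(14))  # [2, 4, 8] -- leftover 2 folded into the last term
print(Solution().maximumEvenSplit(7))   # []  (odd totals cannot be split into even parts)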
|
python
|
huggingface__transformers
|
src/transformers/models/llava_next/modeling_llava_next.py
|
{
"start": 9522,
"end": 10684
}
|
class ____(PreTrainedModel):
config: LlavaNextConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["LlamaDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_flex_attn = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
std = getattr(self.config, "initializer_range", self.config.get_text_config().initializer_range)
if isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, LlavaNextModel):
embed_std = 1 / math.sqrt(self.config.text_config.hidden_size)
init.normal_(module.image_newline, mean=0.0, std=embed_std)
@auto_docstring(
custom_intro="""
The Llava-Next model which consists of a vision backbone and a language model without language modeling head.
"""
)
|
LlavaNextPreTrainedModel
|
python
|
django__django
|
django/contrib/postgres/lookups.py
|
{
"start": 1230,
"end": 1620
}
|
class ____(SearchVectorExact):
lookup_name = "search"
def process_lhs(self, qn, connection):
if not isinstance(self.lhs.output_field, SearchVectorField):
config = getattr(self.rhs, "config", None)
self.lhs = SearchVector(self.lhs, config=config)
lhs, lhs_params = super().process_lhs(qn, connection)
return lhs, lhs_params
|
SearchLookup
|
python
|
great-expectations__great_expectations
|
great_expectations/experimental/metric_repository/metrics.py
|
{
"start": 1547,
"end": 1798
}
|
class ____(MetricRepositoryBaseModel):
type: str = Field(description="Exception type if an exception is thrown")
message: str = Field(description="Exception message if an exception is thrown")
_ValueType = TypeVar("_ValueType")
|
MetricException
|
python
|
davidhalter__jedi
|
test/completion/pep0484_comments.py
|
{
"start": 544,
"end": 764
}
|
class ____:
class BB:
pass
def test(a):
# type: (AA.BB) -> None
#? AA.BB()
a
def test(a):
# type: (AA.BB,) -> None
#? AA.BB()
a
a,b = 1, 2 # type: str, float
#? str()
a
#? float()
b
|
AA
|
python
|
kamyu104__LeetCode-Solutions
|
Python/construct-binary-search-tree-from-preorder-traversal.py
|
{
"start": 191,
"end": 916
}
|
class ____(object):
def bstFromPreorder(self, preorder):
"""
:type preorder: List[int]
:rtype: TreeNode
"""
def bstFromPreorderHelper(preorder, left, right, index):
if index[0] == len(preorder) or \
preorder[index[0]] < left or \
preorder[index[0]] > right:
return None
root = TreeNode(preorder[index[0]])
index[0] += 1
root.left = bstFromPreorderHelper(preorder, left, root.val, index)
root.right = bstFromPreorderHelper(preorder, root.val, right, index)
return root
return bstFromPreorderHelper(preorder, float("-inf"), float("inf"), [0])
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-omni/dagster_omni/component.py
|
{
"start": 775,
"end": 7555
}
|
class ____(StateBackedComponent, dg.Model, dg.Resolvable):
"""Pulls in the contents of an Omni workspace into Dagster assets.
Example:
.. code-block:: yaml
# defs.yaml
type: dagster_omni.OmniComponent
attributes:
workspace:
base_url: https://your-company.omniapp.co
api_key: "{{ env.OMNI_API_KEY }}"
"""
workspace: OmniWorkspace = Field(
description="Defines configuration for interacting with an Omni instance.",
)
translation: Optional[ResolvedOmniTranslationFn] = Field(
default=None,
description="Defines how to translate an Omni object into an AssetSpec object.",
)
defs_state: ResolvedDefsStateConfig = DefsStateConfigArgs.versioned_state_storage()
@property
def defs_state_config(self) -> DefsStateConfig:
return DefsStateConfig.from_args(self.defs_state, default_key=self.__class__.__name__)
async def write_state_to_path(self, state_path: Path) -> None:
"""Fetch documents from Omni API and write state to path."""
state = await self.workspace.fetch_omni_state()
state_path.write_text(dg.serialize_value(state))
def load_state_from_path(self, state_path: Path) -> OmniWorkspaceData:
"""Load state from path using Dagster's deserialization system."""
return dg.deserialize_value(state_path.read_text(), OmniWorkspaceData)
def _get_default_omni_spec(
self, context: dg.ComponentLoadContext, data: OmniTranslatorData, workspace: OmniWorkspace
) -> Optional[dg.AssetSpec]:
"""Core function for converting an Omni document into an AssetSpec object."""
if isinstance(data.obj, OmniDocument):
doc = data.obj
maybe_deps = [
self.get_asset_spec(
context, OmniTranslatorData(obj=query, workspace_data=data.workspace_data)
)
for query in data.obj.queries
]
prefix = doc.folder.path.split("/") if doc.folder else []
user = data.workspace_data.get_user(doc.owner.id)
owner_email = user.primary_email if user else None
return dg.AssetSpec(
key=dg.AssetKey([*prefix, doc.name]),
group_name=prefix[0].replace("-", "_") if prefix else None,
tags={label.name: "" for label in doc.labels},
deps=list(filter(None, maybe_deps)),
metadata={
**OmniDocumentMetadataSet.from_document(workspace, doc),
TRANSLATOR_DATA_METADATA_KEY: data,
},
kinds={"omni"},
owners=[owner_email] if owner_email else None,
)
if isinstance(data.obj, OmniQuery):
return dg.AssetSpec(key=dg.AssetKey([data.obj.query_config.table]))
return None
@public
def get_asset_spec(
self, context: dg.ComponentLoadContext, data: OmniTranslatorData
) -> Optional[dg.AssetSpec]:
"""Generates an AssetSpec for a given Omni document.
This method can be overridden in a subclass to customize how Omni documents
(workbooks, queries) are converted to Dagster asset specs. By default, it applies
any configured translation function to the base asset spec.
Args:
context: The component load context provided by Dagster
data: The OmniTranslatorData containing information about the Omni document
Returns:
An AssetSpec that represents the Omni document as a Dagster asset, or None
if the document should not be represented as an asset
Example:
Override this method to add custom metadata based on document properties:
.. code-block:: python
from dagster_omni import OmniComponent
import dagster as dg
class CustomOmniComponent(OmniComponent):
def get_asset_spec(self, context, data):
base_spec = super().get_asset_spec(context, data)
if base_spec:
return base_spec.replace_attributes(
metadata={
**base_spec.metadata,
"omni_type": type(data.obj).__name__,
"workspace": data.workspace_data.workspace_id
}
)
return None
"""
base_asset_spec = self._get_default_omni_spec(context, data, self.workspace)
if self.translation and base_asset_spec:
return self.translation(base_asset_spec, data)
else:
return base_asset_spec
def _build_asset_specs(
self, context: dg.ComponentLoadContext, workspace_data: OmniWorkspaceData
) -> list[dg.AssetSpec]:
"""Invokes the `get_asset_spec` method on all objects in the provided `workspace_data`.
Filters out any cases where the asset_spec is `None`, and provides a helpful error
message in cases where keys overlap between different documents.
"""
maybe_specs = [
self.get_asset_spec(context, OmniTranslatorData(obj=doc, workspace_data=workspace_data))
for doc in workspace_data.documents
]
specs_by_key: dict[dg.AssetKey, list[dg.AssetSpec]] = defaultdict(list)
for spec in filter(None, maybe_specs):
specs_by_key[spec.key].append(spec)
for key, specs in specs_by_key.items():
if len(specs) == 1:
continue
ids = [OmniDocumentMetadataSet.extract(spec.metadata).url or spec for spec in specs]
ids_str = "\n\t".join(map(str, ids))
raise DagsterInvalidDefinitionError(
f"Multiple objects map to the same key {key}:"
f"\n\t{ids_str}\n"
"Please ensure that each object has a unique name by updating the `translation` function."
)
return list(itertools.chain.from_iterable(specs_by_key.values()))
def build_defs_from_workspace_data(
self, context: dg.ComponentLoadContext, workspace_data: OmniWorkspaceData
) -> dg.Definitions:
return dg.Definitions(assets=self._build_asset_specs(context, workspace_data))
def build_defs_from_state(
self, context: dg.ComponentLoadContext, state_path: Optional[Path]
) -> dg.Definitions:
if state_path is None:
return dg.Definitions()
state = self.load_state_from_path(state_path)
return self.build_defs_from_workspace_data(context, state)
|
OmniComponent
|
python
|
openai__openai-python
|
tests/api_resources/test_webhooks.py
|
{
"start": 1211,
"end": 8900
}
|
class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_unwrap_with_secret(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
unwrapped = client.webhooks.unwrap(TEST_PAYLOAD, headers, secret=TEST_SECRET)
assert unwrapped.id == "evt_685c059ae3a481909bdc86819b066fb6"
assert unwrapped.created_at == 1750861210
@parametrize
def test_unwrap_without_secret(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
with pytest.raises(ValueError, match="The webhook secret must either be set"):
client.webhooks.unwrap(TEST_PAYLOAD, headers)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_valid(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
# Should not raise - this is a truly valid signature for this timestamp
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
def test_verify_signature_invalid_secret_format(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
with pytest.raises(ValueError, match="The webhook secret must either be set"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=None)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_invalid(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret="invalid_secret")
@parametrize
def test_verify_signature_missing_webhook_signature_header(self, client: openai.OpenAI) -> None:
headers = create_test_headers(signature=None)
del headers["webhook-signature"]
with pytest.raises(ValueError, match="Could not find webhook-signature header"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
def test_verify_signature_missing_webhook_timestamp_header(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
del headers["webhook-timestamp"]
with pytest.raises(ValueError, match="Could not find webhook-timestamp header"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
def test_verify_signature_missing_webhook_id_header(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
del headers["webhook-id"]
with pytest.raises(ValueError, match="Could not find webhook-id header"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_payload_bytes(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
client.webhooks.verify_signature(TEST_PAYLOAD.encode("utf-8"), headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
def test_unwrap_with_client_secret(self) -> None:
test_client = openai.OpenAI(base_url=base_url, api_key="test-api-key", webhook_secret=TEST_SECRET)
headers = create_test_headers()
unwrapped = test_client.webhooks.unwrap(TEST_PAYLOAD, headers)
assert unwrapped.id == "evt_685c059ae3a481909bdc86819b066fb6"
assert unwrapped.created_at == 1750861210
@parametrize
def test_verify_signature_timestamp_too_old(self, client: openai.OpenAI) -> None:
# Use a timestamp that's older than 5 minutes from our test timestamp
old_timestamp = TEST_TIMESTAMP - 400 # 6 minutes 40 seconds ago
headers = create_test_headers(timestamp=old_timestamp, signature="v1,dummy_signature")
with pytest.raises(InvalidWebhookSignatureError, match="Webhook timestamp is too old"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_timestamp_too_new(self, client: openai.OpenAI) -> None:
# Use a timestamp that's in the future beyond tolerance from our test timestamp
future_timestamp = TEST_TIMESTAMP + 400 # 6 minutes 40 seconds in the future
headers = create_test_headers(timestamp=future_timestamp, signature="v1,dummy_signature")
with pytest.raises(InvalidWebhookSignatureError, match="Webhook timestamp is too new"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_custom_tolerance(self, client: openai.OpenAI) -> None:
# Use a timestamp that's older than default tolerance but within custom tolerance
old_timestamp = TEST_TIMESTAMP - 400 # 6 minutes 40 seconds ago from test timestamp
headers = create_test_headers(timestamp=old_timestamp, signature="v1,dummy_signature")
# Should fail with default tolerance
with pytest.raises(InvalidWebhookSignatureError, match="Webhook timestamp is too old"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
# Should also fail with custom tolerance of 10 minutes (signature won't match)
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET, tolerance=600)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_recent_timestamp_succeeds(self, client: openai.OpenAI) -> None:
# Use a recent timestamp with dummy signature
headers = create_test_headers(signature="v1,dummy_signature")
# Should fail on signature verification (not timestamp validation)
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_multiple_signatures_one_valid(self, client: openai.OpenAI) -> None:
# Test multiple signatures: one invalid, one valid
multiple_signatures = f"v1,invalid_signature {TEST_SIGNATURE}"
headers = create_test_headers(signature=multiple_signatures)
# Should not raise when at least one signature is valid
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_multiple_signatures_all_invalid(self, client: openai.OpenAI) -> None:
# Test multiple invalid signatures
multiple_invalid_signatures = "v1,invalid_signature1 v1,invalid_signature2"
headers = create_test_headers(signature=multiple_invalid_signatures)
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
|
TestWebhooks
|
python
|
pytest-dev__pytest
|
src/_pytest/terminal.py
|
{
"start": 10084,
"end": 11068
}
|
class ____:
"""Simple structure to hold warnings information captured by ``pytest_warning_recorded``.
:ivar str message:
User friendly message about the warning.
:ivar str|None nodeid:
nodeid that generated the warning (see ``get_location``).
:ivar tuple fslocation:
File system location of the source of the warning (see ``get_location``).
"""
message: str
nodeid: str | None = None
fslocation: tuple[str, int] | None = None
count_towards_summary: ClassVar = True
def get_location(self, config: Config) -> str | None:
"""Return the more user-friendly information about the location of a warning, or None."""
if self.nodeid:
return self.nodeid
if self.fslocation:
filename, linenum = self.fslocation
relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename))
return f"{relpath}:{linenum}"
return None
@final
|
WarningReport
|
python
|
pallets__werkzeug
|
src/werkzeug/debug/console.py
|
{
"start": 3310,
"end": 5516
}
|
class ____(code.InteractiveInterpreter):
locals: dict[str, t.Any]
def __init__(self, globals: dict[str, t.Any], locals: dict[str, t.Any]) -> None:
self.loader = _ConsoleLoader()
locals = {
**globals,
**locals,
"dump": dump,
"help": helper,
"__loader__": self.loader,
}
super().__init__(locals)
original_compile = self.compile
def compile(source: str, filename: str, symbol: str) -> CodeType | None:
code = original_compile(source, filename, symbol)
if code is not None:
self.loader.register(code, source)
return code
self.compile = compile # type: ignore[assignment]
self.more = False
self.buffer: list[str] = []
def runsource(self, source: str, **kwargs: t.Any) -> str: # type: ignore
source = f"{source.rstrip()}\n"
ThreadedStream.push()
prompt = "... " if self.more else ">>> "
try:
source_to_eval = "".join(self.buffer + [source])
if super().runsource(source_to_eval, "<debugger>", "single"):
self.more = True
self.buffer.append(source)
else:
self.more = False
del self.buffer[:]
finally:
output = ThreadedStream.fetch()
return f"{prompt}{escape(source)}{output}"
def runcode(self, code: CodeType) -> None:
try:
exec(code, self.locals)
except Exception:
self.showtraceback()
def showtraceback(self) -> None:
from .tbtools import DebugTraceback
exc = t.cast(BaseException, sys.exc_info()[1])
te = DebugTraceback(exc, skip=1)
sys.stdout._write(te.render_traceback_html()) # type: ignore
def showsyntaxerror(self, filename: str | None = None) -> None:
from .tbtools import DebugTraceback
exc = t.cast(BaseException, sys.exc_info()[1])
te = DebugTraceback(exc, skip=4)
sys.stdout._write(te.render_traceback_html()) # type: ignore
def write(self, data: str) -> None:
sys.stdout.write(data)
|
_InteractiveConsole
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_exceptions.py
|
{
"start": 2309,
"end": 2389
}
|
class ____(Exception):
def __init__(self, x):
self.x = x
|
NaiveException
|
python
|
python__mypy
|
mypy/constraints.py
|
{
"start": 27683,
"end": 27886
}
|
class ____(BoolTypeQuery):
def __init__(self) -> None:
super().__init__(ALL_STRATEGY)
def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
return False
|
CompleteTypeVisitor
|
python
|
numba__numba
|
numba/cuda/tests/cudadrv/test_deallocations.py
|
{
"start": 2564,
"end": 4740
}
|
class ____(CUDATestCase):
def test_basic(self):
harr = np.arange(5)
darr1 = cuda.to_device(harr)
deallocs = cuda.current_context().memory_manager.deallocations
deallocs.clear()
self.assertEqual(len(deallocs), 0)
with cuda.defer_cleanup():
darr2 = cuda.to_device(harr)
del darr1
self.assertEqual(len(deallocs), 1)
del darr2
self.assertEqual(len(deallocs), 2)
deallocs.clear()
self.assertEqual(len(deallocs), 2)
deallocs.clear()
self.assertEqual(len(deallocs), 0)
def test_nested(self):
harr = np.arange(5)
darr1 = cuda.to_device(harr)
deallocs = cuda.current_context().memory_manager.deallocations
deallocs.clear()
self.assertEqual(len(deallocs), 0)
with cuda.defer_cleanup():
with cuda.defer_cleanup():
darr2 = cuda.to_device(harr)
del darr1
self.assertEqual(len(deallocs), 1)
del darr2
self.assertEqual(len(deallocs), 2)
deallocs.clear()
self.assertEqual(len(deallocs), 2)
deallocs.clear()
self.assertEqual(len(deallocs), 2)
deallocs.clear()
self.assertEqual(len(deallocs), 0)
def test_exception(self):
harr = np.arange(5)
darr1 = cuda.to_device(harr)
deallocs = cuda.current_context().memory_manager.deallocations
deallocs.clear()
self.assertEqual(len(deallocs), 0)
class CustomError(Exception):
pass
with self.assertRaises(CustomError):
with cuda.defer_cleanup():
darr2 = cuda.to_device(harr)
del darr2
self.assertEqual(len(deallocs), 1)
deallocs.clear()
self.assertEqual(len(deallocs), 1)
raise CustomError
deallocs.clear()
self.assertEqual(len(deallocs), 0)
del darr1
self.assertEqual(len(deallocs), 1)
deallocs.clear()
self.assertEqual(len(deallocs), 0)
|
TestDeferCleanup
|
python
|
walkccc__LeetCode
|
solutions/1576. Replace All ?'s to Avoid Consecutive Repeating Characters/1576.py
|
{
"start": 0,
"end": 438
}
|
class ____:
def modifyString(self, s: str) -> str:
ans = []
def nextAvailable(ans: list[int], s: str, i: int) -> str:
c = 'a'
while ((i > 0 and ans[i - 1] == c) or
(i + 1 < len(s) and c == s[i + 1])):
c = chr(ord(c) + 1)
return c
for i, c in enumerate(s):
if c == '?':
ans.append(nextAvailable(ans, s, i))
else:
ans.append(c)
return ''.join(ans)
|
Solution
|
python
|
ApeWorX__ape
|
src/ape_ethereum/transactions.py
|
{
"start": 2028,
"end": 4753
}
|
class ____(TransactionAPI):
def serialize_transaction(self) -> bytes:
if not self.signature:
message = "The transaction is not signed."
if not self.sender:
message = (
f"{message} "
"Did you forget to add the `sender=` kwarg to the transaction function call?"
)
raise SignatureError(message, transaction=self)
txn_data = self.model_dump(by_alias=True, exclude={"sender", "type"})
# This messes up the signature
if txn_data.get("to") == ZERO_ADDRESS:
del txn_data["to"]
# Adjust bytes in the access list if necessary.
if "accessList" in txn_data:
adjusted_access_list = []
for item in txn_data["accessList"]:
adjusted_item = {**item}
storage_keys_corrected = [
to_hex(k) if isinstance(k, bytes) else k for k in item.get("storageKeys", [])
]
if storage_keys_corrected:
adjusted_item["storageKeys"] = storage_keys_corrected
adjusted_access_list.append(adjusted_item)
txn_data["accessList"] = adjusted_access_list
if "authorizationList" in txn_data:
adjusted_auth_list = []
for item in txn_data["authorizationList"]:
adjusted_item = {
k: to_hex(v) if isinstance(v, bytes) else v for k, v in item.items()
}
adjusted_auth_list.append(adjusted_item)
txn_data["authorizationList"] = adjusted_auth_list
unsigned_txn = serializable_unsigned_transaction_from_dict(txn_data)
signature = (self.signature.v, to_int(self.signature.r), to_int(self.signature.s))
signed_txn = encode_transaction(unsigned_txn, signature)
impersonated_accounts = self.account_manager.test_accounts._impersonated_accounts
# If this is a real sender (not impersonated), verify its signature.
if self.sender and self.sender not in impersonated_accounts:
recovered_signer = EthAccount.recover_transaction(signed_txn)
if recovered_signer != self.sender:
raise SignatureError(
f"Recovered signer '{recovered_signer}' doesn't match sender {self.sender}!",
transaction=self,
)
return signed_txn
# TODO: In 0.9, either use hex-str or hex-bytes between both this
# and ReceiptAPI (make consistent).
@property
def txn_hash(self) -> HexBytes:
txn_bytes = self.serialize_transaction()
return HexBytes(keccak(txn_bytes))
|
BaseTransaction
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/errors.py
|
{
"start": 6130,
"end": 6427
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "RepositoryLocationNotFound"
def __init__(self, location_name):
super().__init__()
self.message = f"Location {location_name} does not exist."
|
GrapheneRepositoryLocationNotFound
|
python
|
apache__airflow
|
providers/alibaba/tests/unit/alibaba/cloud/operators/test_analyticdb_spark.py
|
{
"start": 2849,
"end": 4784
}
|
class ____:
@mock.patch(ADB_SPARK_OPERATOR_STRING.format("AnalyticDBSparkHook"))
def test_execute(self, mock_hook):
"""Test submit AnalyticDB Spark Batch Application works as expected."""
operator = AnalyticDBSparkBatchOperator(
file=MOCK_FILE,
cluster_id=MOCK_CLUSTER_ID,
rg_name=MOCK_RG_NAME,
adb_spark_conn_id=MOCK_ADB_SPARK_CONN_ID,
region=MOCK_REGION,
task_id=MOCK_TASK_ID,
)
operator.execute(None)
mock_hook.assert_called_once_with(adb_spark_conn_id=MOCK_ADB_SPARK_CONN_ID, region=MOCK_REGION)
mock_hook.return_value.submit_spark_app.assert_called_once_with(
cluster_id=MOCK_CLUSTER_ID,
rg_name=MOCK_RG_NAME,
file=MOCK_FILE,
class_name=None,
args=None,
conf=None,
jars=None,
py_files=None,
files=None,
driver_resource_spec=None,
executor_resource_spec=None,
num_executors=None,
archives=None,
name=None,
)
@mock.patch(ADB_SPARK_OPERATOR_STRING.format("AnalyticDBSparkBaseOperator.hook"))
def test_execute_with_exception(self, mock_hook):
"""Test submit AnalyticDB Spark Batch Application raises ValueError with invalid parameter."""
# Given
mock_hook.submit_spark_app.side_effect = ValueError("List of strings expected")
# When
operator = AnalyticDBSparkBatchOperator(
file=MOCK_FILE,
args=(True, False),
cluster_id=MOCK_CLUSTER_ID,
rg_name=MOCK_RG_NAME,
adb_spark_conn_id=MOCK_ADB_SPARK_CONN_ID,
region=MOCK_REGION,
task_id=MOCK_TASK_ID,
)
with pytest.raises(ValueError, match="List of strings expected"):
operator.execute(None)
|
TestAnalyticDBSparkBatchOperator
|
python
|
google__pytype
|
pytype/tests/test_attributes2.py
|
{
"start": 80,
"end": 1060
}
|
class ____(test_base.BaseTest):
"""Tests for strict attribute checking on None."""
def test_explicit_none(self):
errors = self.CheckWithErrors("""
from typing import Optional
def f(x: Optional[str]):
return x.upper() # attribute-error[e]
""")
self.assertErrorRegexes(errors, {"e": r"upper.*None"})
def test_closure(self):
self.Check("""
from typing import Optional
d = ... # type: Optional[dict]
if d:
formatter = lambda x: d.get(x, '')
else:
formatter = lambda x: ''
formatter('key')
""")
def test_overwrite_global(self):
errors = self.CheckWithErrors("""
from typing import Optional
d = ... # type: Optional[dict]
if d:
formatter = lambda x: d.get(x, '') # attribute-error[e]
else:
formatter = lambda x: ''
d = None
formatter('key') # line 8
""")
self.assertErrorRegexes(errors, {"e": r"get.*None"})
|
TestStrictNone
|
python
|
dask__dask
|
dask/dataframe/dask_expr/io/parquet.py
|
{
"start": 7308,
"end": 7862
}
|
class ____(Expr):
_parameters = [
"frame",
"path",
"fs",
"fmd",
"engine",
"offset",
"partition_on",
"write_metadata_file",
"name_function",
"write_kwargs",
"append",
]
@property
def _meta(self):
return None
def _divisions(self):
return (None, None)
def _lower(self):
return ToParquetBarrier(
ToParquetData(
*self.operands,
),
*self.operands[1:],
)
|
ToParquet
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 25291,
"end": 25564
}
|
class ____(sgqlc.types.Enum):
"""The privacy of a Gist
Enumeration Choices:
* `ALL`: Gists that are public and secret
* `PUBLIC`: Public
* `SECRET`: Secret
"""
__schema__ = github_schema
__choices__ = ("ALL", "PUBLIC", "SECRET")
|
GistPrivacy
|
python
|
wireservice__csvkit
|
csvkit/utilities/csvsql.py
|
{
"start": 422,
"end": 13430
}
|
class ____(CSVKitUtility):
description = 'Generate SQL statements for one or more CSV files, or execute those statements directly on a ' \
'database, and execute one or more SQL queries.'
# Override 'f' because the utility accepts multiple files.
override_flags = ['f']
def add_arguments(self):
self.argparser.add_argument(
metavar='FILE', nargs='*', dest='input_paths', default=['-'],
help='The CSV file(s) to operate on. If omitted, will accept input as piped data via STDIN.')
self.argparser.add_argument(
'-i', '--dialect', dest='dialect', choices=DIALECTS,
help='Dialect of SQL to generate. Cannot be used with --db.')
self.argparser.add_argument(
'--db', dest='connection_string',
help='If present, a SQLAlchemy connection string to use to directly execute generated SQL on a database.')
self.argparser.add_argument(
'--engine-option', dest='engine_option', nargs=2, action='append', default=[],
help="A keyword argument to SQLAlchemy's create_engine(), as a space-separated pair. "
"This option can be specified multiple times. For example: thick_mode True")
self.argparser.add_argument(
'--query', dest='queries', action='append',
help='Execute one or more SQL queries delimited by --sql-delimiter, and output the result of the last '
'query as CSV. QUERY may be a filename. --query may be specified multiple times.')
self.argparser.add_argument(
'--insert', dest='insert', action='store_true',
help='Insert the data into the table. Requires --db.')
self.argparser.add_argument(
'--prefix', action='append', default=[],
help='Add an expression following the INSERT keyword, like OR IGNORE or OR REPLACE.')
self.argparser.add_argument(
'--before-insert', dest='before_insert',
help='Before the INSERT command, execute one or more SQL queries delimited by --sql-delimiter. '
'Requires --insert.')
self.argparser.add_argument(
'--after-insert', dest='after_insert',
help='After the INSERT command, execute one or more SQL queries delimited by --sql-delimiter. '
'Requires --insert.')
self.argparser.add_argument(
'--sql-delimiter', dest='sql_delimiter', default=';',
help='Delimiter separating SQL queries in --query, --before-insert, and --after-insert.')
self.argparser.add_argument(
'--tables', dest='table_names',
help='A comma-separated list of names of tables to be created. By default, the tables will be named after '
'the filenames without extensions or "stdin".')
self.argparser.add_argument(
'--no-constraints', dest='no_constraints', action='store_true',
help='Generate a schema without length limits or null checks. Useful when sampling big tables.')
self.argparser.add_argument(
'--unique-constraint', dest='unique_constraint',
help='A column-separated list of names of columns to include in a UNIQUE constraint.')
self.argparser.add_argument(
'--no-create', dest='no_create', action='store_true',
help='Skip creating the table. Requires --insert.')
self.argparser.add_argument(
'--create-if-not-exists', dest='create_if_not_exists', action='store_true',
help='Create the table if it does not exist, otherwise keep going. Requires --insert.')
self.argparser.add_argument(
'--overwrite', dest='overwrite', action='store_true',
help='Drop the table if it already exists. Requires --insert. Cannot be used with --no-create.')
self.argparser.add_argument(
'--db-schema', dest='db_schema',
help='Optional name of database schema to create table(s) in.')
self.argparser.add_argument(
'-y', '--snifflimit', dest='sniff_limit', type=int, default=1024,
help='Limit CSV dialect sniffing to the specified number of bytes. '
'Specify "0" to disable sniffing entirely, or "-1" to sniff the entire file.')
self.argparser.add_argument(
'-I', '--no-inference', dest='no_inference', action='store_true',
help='Disable type inference (and --locale, --date-format, --datetime-format, --no-leading-zeroes) '
'when parsing the input.')
self.argparser.add_argument(
'--chunk-size', dest='chunk_size', type=int,
help='Chunk size for batch insert into the table. Requires --insert.')
self.argparser.add_argument(
'--min-col-len', dest='min_col_len', type=int, default=1,
help='The minimum length of text columns.')
self.argparser.add_argument(
'--col-len-multiplier', dest='col_len_multiplier', type=int, default=1,
help='Multiply the maximum column length by this multiplier to accommodate larger values in later runs.')
def main(self):
if isatty(sys.stdin) and self.args.input_paths == ['-']:
self.argparser.error('You must provide an input file or piped data.')
self.input_files = []
self.connection = None
self.table_names = []
self.unique_constraint = []
if self.args.table_names:
self.table_names = self.args.table_names.split(',')
if self.args.unique_constraint:
self.unique_constraint = self.args.unique_constraint.split(',')
# Create a SQLite database in memory if no connection string is specified
if self.args.queries and not self.args.connection_string:
self.args.connection_string = "sqlite:///:memory:"
self.args.insert = True
if self.args.dialect and self.args.connection_string:
self.argparser.error('The --dialect option is only valid when neither --db nor --query are specified.')
if self.args.insert and not self.args.connection_string:
self.argparser.error('The --insert option is only valid when either --db or --query is specified.')
if self.args.no_create and not self.args.insert:
self.argparser.error('The --no-create option is only valid if --insert is also specified.')
if self.args.create_if_not_exists and not self.args.insert:
self.argparser.error('The --create-if-not-exists option is only valid if --insert is also specified.')
if self.args.overwrite and not self.args.insert:
self.argparser.error('The --overwrite option is only valid if --insert is also specified.')
if self.args.overwrite and self.args.no_create:
self.argparser.error('The --overwrite option is only valid if --no-create is not specified.')
if self.args.before_insert and not self.args.insert:
self.argparser.error('The --before-insert option is only valid if --insert is also specified.')
if self.args.after_insert and not self.args.insert:
self.argparser.error('The --after-insert option is only valid if --insert is also specified.')
if self.args.chunk_size and not self.args.insert:
self.argparser.error('The --chunk-size option is only valid if --insert is also specified.')
if self.args.no_create and self.args.create_if_not_exists:
self.argparser.error('The --no-create and --create-if-not-exists options are mutually exclusive.')
# Lazy open files
for path in self.args.input_paths:
self.input_files.append(self._open_input_file(path))
# Establish database validity before reading CSV files
if self.args.connection_string:
try:
engine = create_engine(self.args.connection_string, **parse_list(self.args.engine_option))
except ImportError as e:
raise ImportError(
"You don't appear to have the necessary database backend installed for connection string you're "
"trying to use. Available backends include:\n\nPostgreSQL:\tpip install psycopg2\nMySQL:\t\tpip "
"install mysql-connector-python OR pip install mysqlclient\n\nFor details on connection strings "
"and other backends, please see the SQLAlchemy documentation on dialects at:\n\n"
"https://www.sqlalchemy.org/docs/dialects/"
) from e
self.connection = engine.connect()
try:
self._failsafe_main()
finally:
for f in self.input_files:
f.close()
if self.connection:
self.connection.close()
engine.dispose()
def _failsafe_main(self):
"""
Inner main function. If anything fails in here, file handles and
database connections will be safely closed.
"""
if self.connection:
transaction = self.connection.begin()
for f in self.input_files:
try:
# Try to use name specified via --tables
table_name = self.table_names.pop(0)
except IndexError:
if f == sys.stdin:
table_name = "stdin"
else:
# Use filename as table name
table_name = os.path.splitext(os.path.basename(f.name))[0]
table = None
sniff_limit = self.args.sniff_limit if self.args.sniff_limit != -1 else None
try:
table = agate.Table.from_csv(
f,
skip_lines=self.args.skip_lines,
sniff_limit=sniff_limit,
column_types=self.get_column_types(),
**self.reader_kwargs,
)
except StopIteration:
# Catch cases where no table data was provided and fall through
# to query logic
continue
if table:
if self.connection:
if self.args.before_insert:
for query in self.args.before_insert.split(self.args.sql_delimiter):
self.connection.exec_driver_sql(query)
table.to_sql(
self.connection,
table_name,
overwrite=self.args.overwrite,
create=not self.args.no_create,
create_if_not_exists=self.args.create_if_not_exists,
insert=self.args.insert and len(table.rows) > 0,
prefixes=self.args.prefix,
db_schema=self.args.db_schema,
constraints=not self.args.no_constraints,
unique_constraint=self.unique_constraint,
chunk_size=self.args.chunk_size,
min_col_len=self.args.min_col_len,
col_len_multiplier=self.args.col_len_multiplier,
)
if self.args.after_insert:
for query in self.args.after_insert.split(self.args.sql_delimiter):
self.connection.exec_driver_sql(query)
# Output SQL statements
else:
statement = table.to_sql_create_statement(
table_name,
dialect=self.args.dialect,
db_schema=self.args.db_schema,
constraints=not self.args.no_constraints,
unique_constraint=self.unique_constraint,
)
self.output_file.write(f'{statement}\n')
if self.connection:
if self.args.queries:
queries = []
for query in self.args.queries:
if os.path.exists(query):
with open(query) as f:
query = f.read()
queries += query.split(self.args.sql_delimiter)
# Execute the specified SQL queries.
rows = None
for query in queries:
if query.strip():
rows = self.connection.exec_driver_sql(query)
# Output the result of the last query as CSV
if rows.returns_rows:
output = agate.csv.writer(self.output_file, **self.writer_kwargs)
output.writerow(rows._metadata.keys)
for row in rows:
output.writerow(row)
transaction.commit()
def launch_new_instance():
utility = CSVSQL()
utility.run()
if __name__ == '__main__':
launch_new_instance()
|
CSVSQL
|
python
|
ethereum__web3.py
|
tests/core/utilities/test_attach_modules.py
|
{
"start": 488,
"end": 5204
}
|
class ____(Module):
def start_ws(self):
return True
def test_attach_modules():
mods = {
"geth": (
MockGeth,
{
"admin": MockGethAdmin,
},
),
"eth": MockEth,
}
w3 = Web3(EthereumTesterProvider(), modules={})
attach_modules(w3, mods)
assert w3.eth.block_number() == 42
assert w3.geth.admin.start_ws() is True
def test_attach_single_module_as_tuple():
w3 = Web3(EthereumTesterProvider(), modules={"eth": (MockEth,)})
assert w3.eth.block_number() == 42
def test_attach_modules_multiple_levels_deep(module1):
mods = {
"eth": MockEth,
"geth": (
MockGeth,
{
"module1": (
module1,
{
"admin": MockGethAdmin,
},
),
},
),
}
w3 = Web3(EthereumTesterProvider(), modules={})
attach_modules(w3, mods)
assert w3.eth.block_number() == 42
assert w3.geth.module1.admin.start_ws() is True
def test_attach_modules_with_wrong_module_format():
mods = {"eth": (MockEth, MockEth, MockEth)}
w3 = Web3(EthereumTesterProvider(), modules={})
with pytest.raises(
Web3ValidationError, match="Module definitions can only have 1 or 2 elements"
):
attach_modules(w3, mods)
def test_attach_modules_with_existing_modules():
mods = {
"eth": MockEth,
}
w3 = Web3(EthereumTesterProvider(), modules=mods)
with pytest.raises(
Web3AttributeError,
match=("The web3 object already has an attribute with that name"),
):
attach_modules(w3, mods)
def test_attach_external_modules_multiple_levels_deep(
module1, module2, module3, module4
):
w3 = Web3(
EthereumTesterProvider(),
external_modules={
"module1": module1,
"module2": (
module2,
{
"submodule1": (
module3,
{
"submodule2": module4,
},
),
},
),
},
)
assert w3.is_connected()
# assert instantiated with default modules
assert hasattr(w3, "geth")
assert hasattr(w3, "eth")
assert is_integer(w3.eth.chain_id)
# assert instantiated with module1
assert hasattr(w3, "module1")
assert w3.module1.a == "a"
assert w3.module1.b == "b"
# assert instantiated with module2 + submodules
assert hasattr(w3, "module2")
assert w3.module2.c == "c"
assert w3.module2.d() == "d"
assert hasattr(w3.module2, "submodule1")
assert w3.module2.submodule1.e == "e"
assert hasattr(w3.module2.submodule1, "submodule2")
assert w3.module2.submodule1.submodule2.f == "f"
def test_attach_external_modules_that_do_not_inherit_from_module_class(
module1_unique,
module2_unique,
module3_unique,
module4_unique,
):
w3 = Web3(
EthereumTesterProvider(),
external_modules={
"module1": module1_unique,
"module2": (
module2_unique,
{
"submodule1": (
module3_unique,
{
"submodule2": module4_unique,
},
),
},
),
},
)
# assert module1 attached
assert hasattr(w3, "module1")
assert w3.module1.a == "a"
assert w3.module1.b() == "b"
assert w3.module1.return_eth_chain_id == w3.eth.chain_id
# assert module2 + submodules attached
assert hasattr(w3, "module2")
assert w3.module2.c == "c"
assert w3.module2.d() == "d"
assert hasattr(w3.module2, "submodule1")
assert w3.module2.submodule1.e == "e"
assert hasattr(w3.module2.submodule1, "submodule2")
assert w3.module2.submodule1.submodule2.f == "f"
# assert default modules intact
assert hasattr(w3, "geth")
assert hasattr(w3, "eth")
assert is_integer(w3.eth.chain_id)
def test_attach_modules_for_module_with_more_than_one_init_argument(
module_many_init_args,
):
with pytest.raises(
UnsupportedOperation,
match=(
"A module class may accept a single `Web3` instance as "
"the first argument of its __init__\\(\\) method. More "
"than one argument found for ModuleManyArgs: \\['a', 'b']"
),
):
Web3(
EthereumTesterProvider(),
external_modules={"module_should_fail": module_many_init_args},
)
|
MockGethAdmin
|
python
|
encode__django-rest-framework
|
rest_framework/utils/encoders.py
|
{
"start": 347,
"end": 2826
}
|
class ____(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time/timedelta,
decimal types, generators and other basic python objects.
"""
def default(self, obj):
# For Date Time string spec, see ECMA 262
# https://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15
if isinstance(obj, Promise):
return force_str(obj)
elif isinstance(obj, datetime.datetime):
representation = obj.isoformat()
if representation.endswith('+00:00'):
representation = representation[:-6] + 'Z'
return representation
elif isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, datetime.time):
if timezone and timezone.is_aware(obj):
raise ValueError("JSON can't represent timezone-aware times.")
representation = obj.isoformat()
return representation
elif isinstance(obj, datetime.timedelta):
return str(obj.total_seconds())
elif isinstance(obj, decimal.Decimal):
# Serializers will coerce decimals to strings by default.
return float(obj)
elif isinstance(obj, uuid.UUID):
return str(obj)
elif isinstance(obj, (
ipaddress.IPv4Address,
ipaddress.IPv6Address,
ipaddress.IPv4Network,
ipaddress.IPv6Network,
ipaddress.IPv4Interface,
ipaddress.IPv6Interface)
):
return str(obj)
elif isinstance(obj, QuerySet):
return tuple(obj)
elif isinstance(obj, bytes):
# Best-effort for binary blobs. See #4187.
return obj.decode()
elif hasattr(obj, 'tolist'):
# Numpy arrays and array scalars.
return obj.tolist()
elif (coreapi is not None) and isinstance(obj, (coreapi.Document, coreapi.Error)):
raise RuntimeError(
'Cannot return a coreapi object from a JSON view. '
'You should be using a schema renderer instead for this view.'
)
elif hasattr(obj, '__getitem__'):
cls = (list if isinstance(obj, (list, tuple)) else dict)
with contextlib.suppress(Exception):
return cls(obj)
elif hasattr(obj, '__iter__'):
return tuple(item for item in obj)
return super().default(obj)
|
JSONEncoder
|
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/events.py
|
{
"start": 5245,
"end": 5314
}
|
class ____(CollectionStartEvent):
__slots__ = ()
|
SequenceStartEvent
|
python
|
google__jax
|
tests/pallas/tpu_pallas_test.py
|
{
"start": 93940,
"end": 95279
}
|
class ____(PallasBaseTest):
def test_scratch_input_vmap(self):
"""Test that vmapp-ing a kernel with scratch inputs works correctly."""
# Scratch inputs are only available for PallasTPU. This is why this test
# does not live with the other vmap tests in:
# jax/tests/pallas/pallas_test.py
def add_one_with_scratch(x_ref, o_ref, scratch_ref):
scratch_ref[...] = jnp.ones_like(scratch_ref[...])
o_ref[...] = x_ref[...] + scratch_ref[...]
tile_size = 128
tile_shape = (tile_size, tile_size)
array_shape = (2 * tile_size, 2 * tile_size)
vmapped_add_one_with_scratch = jax.vmap(
pl.pallas_call(
add_one_with_scratch,
out_shape=jax.ShapeDtypeStruct(array_shape, jnp.int32),
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
in_specs=[pl.BlockSpec(tile_shape, lambda i, j: (i, j))],
out_specs=pl.BlockSpec(tile_shape, lambda i, j: (i, j)),
scratch_shapes=[pltpu.VMEM(tile_shape, dtype=jnp.int32)],
grid=(2, 2),
),
)
)
x = jnp.broadcast_to(jnp.arange(array_shape[0]), (10, *array_shape))
out = vmapped_add_one_with_scratch(x)
out_ref = x + 1
np.testing.assert_array_equal(out, out_ref, strict=True)
|
PallasCallVmapTest
|
python
|
ray-project__ray
|
python/ray/autoscaler/_private/node_launcher.py
|
{
"start": 695,
"end": 6841
}
|
class ____:
"""Launches Ray nodes in the main thread using
`BaseNodeLauncher.launch_node()`.
This is a superclass of NodeLauncher, which launches nodes asynchronously
in the background.
By default, the subclass NodeLauncher is used to launch nodes in subthreads.
That behavior can be flagged off in the provider config by setting
`foreground_node_launch: True`; the autoscaler will then make blocking calls to
BaseNodeLauncher.launch_node() in the main thread.
"""
def __init__(
self,
provider,
pending,
event_summarizer,
node_provider_availability_tracker: NodeProviderAvailabilityTracker,
session_name: Optional[str] = None,
prom_metrics=None,
node_types=None,
index=None,
*args,
**kwargs,
):
self.pending = pending
self.event_summarizer = event_summarizer
self.node_provider_availability_tracker = node_provider_availability_tracker
self.prom_metrics = prom_metrics or AutoscalerPrometheusMetrics(
session_name=session_name
)
self.provider = provider
self.node_types = node_types
self.index = str(index) if index is not None else ""
def launch_node(
self, config: Dict[str, Any], count: int, node_type: str
) -> Optional[Dict]:
self.log("Got {} nodes to launch.".format(count))
created_nodes = self._launch_node(config, count, node_type)
self.pending.dec(node_type, count)
return created_nodes
def _launch_node(
self, config: Dict[str, Any], count: int, node_type: str
) -> Optional[Dict]:
if self.node_types:
assert node_type, node_type
# The `worker_nodes` field is deprecated in favor of per-node-type
# node_configs. We allow it for backwards-compatibility.
launch_config = copy.deepcopy(config.get("worker_nodes", {}))
if node_type:
launch_config.update(
config["available_node_types"][node_type]["node_config"]
)
resources = copy.deepcopy(
config["available_node_types"][node_type]["resources"]
)
labels = copy.deepcopy(
config["available_node_types"][node_type].get("labels", {})
)
launch_hash = hash_launch_conf(launch_config, config["auth"])
node_config = copy.deepcopy(config.get("worker_nodes", {}))
node_tags = {
TAG_RAY_NODE_NAME: "ray-{}-worker".format(config["cluster_name"]),
TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
TAG_RAY_NODE_STATUS: STATUS_UNINITIALIZED,
TAG_RAY_LAUNCH_CONFIG: launch_hash,
}
# A custom node type is specified; set the tag in this case, and also
# merge the configs. We merge the configs instead of overriding, so
# that the bootstrapped per-cloud properties are preserved.
# TODO(ekl) this logic is duplicated in commands.py (keep in sync)
if node_type:
node_tags[TAG_RAY_USER_NODE_TYPE] = node_type
node_config.update(launch_config)
node_launch_start_time = time.time()
error_msg = None
full_exception = None
created_nodes = {}
try:
created_nodes = self.provider.create_node_with_resources_and_labels(
node_config, node_tags, count, resources, labels
)
except NodeLaunchException as node_launch_exception:
self.node_provider_availability_tracker.update_node_availability(
node_type, int(node_launch_start_time), node_launch_exception
)
if node_launch_exception.src_exc_info is not None:
full_exception = "\n".join(
traceback.format_exception(*node_launch_exception.src_exc_info)
)
error_msg = (
f"Failed to launch {{}} node(s) of type {node_type}. "
f"({node_launch_exception.category}): "
f"{node_launch_exception.description}"
)
except Exception:
error_msg = f"Failed to launch {{}} node(s) of type {node_type}."
full_exception = traceback.format_exc()
else:
# Record some metrics/observability information when a node is launched.
launch_time = time.time() - node_launch_start_time
for _ in range(count):
# Note: when launching multiple nodes we observe the time it
# took all nodes to launch for each node. For example, if 4
# nodes were created in 25 seconds, we would observe the 25
# second create time 4 times.
self.prom_metrics.worker_create_node_time.observe(launch_time)
self.prom_metrics.started_nodes.inc(count)
self.node_provider_availability_tracker.update_node_availability(
node_type=node_type,
timestamp=int(node_launch_start_time),
node_launch_exception=None,
)
if error_msg is not None:
self.event_summarizer.add(
error_msg,
quantity=count,
aggregate=operator.add,
)
self.log(error_msg)
self.prom_metrics.node_launch_exceptions.inc()
self.prom_metrics.failed_create_nodes.inc(count)
else:
self.log("Launching {} nodes, type {}.".format(count, node_type))
self.event_summarizer.add(
"Adding {} node(s) of type " + str(node_type) + ".",
quantity=count,
aggregate=operator.add,
)
if full_exception is not None:
self.log(full_exception)
return created_nodes
def log(self, statement):
# launcher_class is "BaseNodeLauncher", or "NodeLauncher" if called
# from that subclass.
launcher_class: str = type(self).__name__
prefix = "{}{}:".format(launcher_class, self.index)
logger.info(prefix + " {}".format(statement))
|
BaseNodeLauncher
|
python
|
getsentry__sentry
|
src/sentry/api/authentication.py
|
{
"start": 5822,
"end": 6829
}
|
class ____(BasicAuthentication):
def authenticate_header(self, request: Request) -> str:
return 'xBasic realm="%s"' % self.www_authenticate_realm
def transform_auth(
self,
user: int | User | RpcUser | None | AnonymousUser,
request_auth: Any,
entity_id_tag: str | None = None,
**tags,
) -> tuple[RpcUser | AnonymousUser, AuthenticatedToken | None]:
if isinstance(user, int):
user = user_service.get_user(user_id=user)
elif isinstance(user, User):
user = user_service.get_user(user_id=user.id)
if user is None:
user = AnonymousUser()
auth_token = AuthenticatedToken.from_token(request_auth)
if auth_token and entity_id_tag:
scope = sentry_sdk.get_isolation_scope()
scope.set_tag(entity_id_tag, auth_token.entity_id)
for k, v in tags.items():
scope.set_tag(k, v)
return (user, auth_token)
|
QuietBasicAuthentication
|
python
|
huggingface__transformers
|
src/transformers/models/cohere/configuration_cohere.py
|
{
"start": 1138,
"end": 8664
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate an Cohere
model according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`CohereModel`]
hidden_size (`int`, *optional*, defaults to 8192):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 22528):
Dimension of the MLP representations.
logit_scale (`float`, *optional*, defaults to 0.0625):
The scaling factor for the output logits.
num_hidden_layers (`int`, *optional*, defaults to 40):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 64):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 5):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 255001):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
use_qk_norm (`bool`, *optional*, defaults to `False`):
Whether to use query-key normalization in the attention
```python
>>> from transformers import CohereModel, CohereConfig
>>> # Initializing a Cohere model configuration
>>> configuration = CohereConfig()
>>> # Initializing a model from the Cohere configuration
>>> model = CohereModel(configuration) # doctest: +SKIP
>>> # Accessing the model configuration
>>> configuration = model.config # doctest: +SKIP
```"""
model_type = "cohere"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 500000.0
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 256000,
hidden_size: Optional[int] = 8192,
intermediate_size: Optional[int] = 22528,
logit_scale: Optional[float] = 0.0625,
num_hidden_layers: Optional[int] = 40,
num_attention_heads: Optional[int] = 64,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 8192,
initializer_range: Optional[float] = 0.02,
layer_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 0,
bos_token_id: Optional[int] = 5,
eos_token_id: Optional[int] = 255001,
tie_word_embeddings: Optional[bool] = True,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
use_qk_norm: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.logit_scale = logit_scale
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.use_qk_norm = use_qk_norm
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["CohereConfig"]
|
CohereConfig
|
python
|
google__flatbuffers
|
tests/monster_test_generated.py
|
{
"start": 18985,
"end": 20767
}
|
class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Stat()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsStat(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def StatBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
# Stat
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Stat
def Id(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Stat
def Val(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Stat
def Count(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
return 0
def StatStart(builder):
builder.StartObject(3)
def StatAddId(builder, id):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(id), 0)
def StatAddVal(builder, val):
builder.PrependInt64Slot(1, val, 0)
def StatAddCount(builder, count):
builder.PrependUint16Slot(2, count, 0)
def StatEnd(builder):
return builder.EndObject()
|
Stat
|
python
|
PyCQA__pylint
|
tests/functional/a/assignment/assignment_from_no_return.py
|
{
"start": 677,
"end": 950
}
|
class ____:
"""Parent class"""
def compute(self):
"""This isn't supported by all child classes"""
raise ValueError('Not supported for this object')
def test(self):
"""Test"""
result = self.compute()
return result
|
Parent
|
python
|
astropy__astropy
|
astropy/modeling/spline.py
|
{
"start": 21928,
"end": 24128
}
|
class ____(_SplineFitter):
"""
Fit a spline using least-squares regression.
"""
def __call__(self, model, x, y, **kwargs):
"""
Fit a spline to data using least-squares with exact knots.
Parameters
----------
model : `Spline1D`
The spline model to fit.
x : array-like
The x data values.
y : array-like
The y data values.
**kwargs : dict, optional
Additional keyword arguments:
- ``t`` : array-like, optional
Interior knots for the spline. If not provided, the
model's existing interior knots (``t_interior``) are
used if available. See
`scipy.interpolate.LSQUnivariateSpline` for details.
- ``weights`` : array-like, optional
Weights for the data points.
- ``bbox`` : array-like, optional
The bounding box limits as ``[xmin, xmax]``. Default is
``[None, None]``.
Returns
-------
fitted_model : `Spline1D`
A copy of the input model with fitted parameters.
"""
return super().__call__(model, x, y, **kwargs)
def _fit_method(self, model, x, y, **kwargs):
t = kwargs.pop("t", None)
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if t is not None:
if model.user_knots:
warnings.warn(
"The user-specified knots from the input model "
"will be overwritten by knots passed into this "
"function",
AstropyUserWarning,
)
else:
if model.user_knots:
t = model.t_interior
else:
raise RuntimeError("No knots have been provided")
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import LSQUnivariateSpline
spline = LSQUnivariateSpline(x, y, t, w=weights, bbox=bbox, k=model.degree)
model.tck = spline._eval_args
return spline
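# Hedged usage sketch (added for illustration, not part of astropy): fitting a
# cubic spline with user-chosen interior knots. The import locations of
# Spline1D and this fitter are assumptions based on the surrounding module.
#
#     import numpy as np
#     from astropy.modeling.models import Spline1D
#     from astropy.modeling.fitting import SplineExactKnotsFitter
#
#     x = np.linspace(0, 10, 50)
#     y = np.sin(x)
#     fitter = SplineExactKnotsFitter()
#     fit = fitter(Spline1D(degree=3), x, y, t=[2.5, 5.0, 7.5])  # interior knots
#     y_model = fit(x)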
|
SplineExactKnotsFitter
|
python
|
mitmproxy__pdoc
|
pdoc/doc.py
|
{
"start": 33528,
"end": 39786
}
|
class ____(Doc[types.FunctionType]):
"""
Representation of a function's documentation.
This class covers all "flavors" of functions, for example it also
supports `@classmethod`s or `@staticmethod`s.
"""
kind = "function"
wrapped: WrappedFunction
"""The original wrapped function (e.g., `staticmethod(func)`)"""
obj: types.FunctionType
"""The unwrapped "real" function."""
def __init__(
self,
modulename: str,
qualname: str,
func: WrappedFunction,
taken_from: tuple[str, str],
):
"""Initialize a function's documentation object."""
unwrapped: types.FunctionType
if isinstance(func, (classmethod, staticmethod)):
unwrapped = func.__func__ # type: ignore
elif isinstance(func, singledispatchmethod):
unwrapped = func.func # type: ignore
elif hasattr(func, "__wrapped__"):
unwrapped = func.__wrapped__
else:
unwrapped = func
super().__init__(modulename, qualname, unwrapped, taken_from)
self.wrapped = func # type: ignore
@cache
@_include_fullname_in_traceback
def __repr__(self):
if self.is_classmethod:
t = "class"
elif self.is_staticmethod:
t = "static"
elif self.qualname != _safe_getattr(self.obj, "__name__", None):
t = "method"
else:
t = "function"
return f"<{_decorators(self)}{t} {self.funcdef} {self.name}{self.signature}: ...{_docstr(self)}>"
@cached_property
def docstring(self) -> str:
doc = Doc.docstring.__get__(self) # type: ignore
if not doc:
# inspect.getdoc fails for inherited @classmethods and unbound @property descriptors.
# We now do an ugly dance to obtain the bound object instead, one that
# somewhat resembles what inspect._findclass is doing.
cls = sys.modules.get(_safe_getattr(self.obj, "__module__", None), None)
for name in _safe_getattr(self.obj, "__qualname__", "").split(".")[:-1]:
cls = _safe_getattr(cls, name, None)
unbound = _safe_getattr(cls, "__dict__", {}).get(self.name)
is_classmethod_property = isinstance(unbound, classmethod) and isinstance(
unbound.__func__, (property, cached_property)
)
if not is_classmethod_property:
# We choke on @classmethod @property, but that's okay because it's been deprecated with Python 3.11.
# Directly accessing them would give us the return value, which has the wrong docstring.
doc = _safe_getdoc(_safe_getattr(cls, self.name, None))
if doc == object.__init__.__doc__:
# inspect.getdoc(Foo.__init__) returns the docstring of object.__init__ if Foo does not define __init__...
return ""
else:
return doc
@cached_property
def is_classmethod(self) -> bool:
"""
`True` if this function is a `@classmethod`, `False` otherwise.
"""
return isinstance(self.wrapped, classmethod)
@cached_property
def is_staticmethod(self) -> bool:
"""
`True` if this function is a `@staticmethod`, `False` otherwise.
"""
return isinstance(self.wrapped, staticmethod)
@cached_property
def decorators(self) -> list[str]:
"""A list of all decorators the function is decorated with."""
decorators = []
obj: types.FunctionType = self.obj # type: ignore
for t in doc_ast.parse(obj).decorator_list:
decorators.append(f"@{doc_ast.unparse(t)}")
return decorators
@cached_property
def funcdef(self) -> str:
"""
The string of keywords used to define the function, i.e. `"def"` or `"async def"`.
"""
if inspect.iscoroutinefunction(self.obj) or inspect.isasyncgenfunction(
self.obj
):
return "async def"
else:
return "def"
@cached_property
def signature(self) -> inspect.Signature:
"""
The function's signature.
This usually returns an instance of `_PrettySignature`, a subclass of `inspect.Signature`
that contains pdoc-specific optimizations. For example, long argument lists are split over multiple lines
in repr(). Additionally, all types are already resolved.
If the signature cannot be determined, a placeholder Signature object is returned.
"""
if self.obj is object.__init__:
# there is a weird edge case where inspect.signature returns a confusing (self, /, *args, **kwargs)
# signature for the default __init__ method.
return inspect.Signature()
try:
sig = _PrettySignature.from_callable(self.obj)
except Exception:
return inspect.Signature(
[inspect.Parameter("unknown", inspect.Parameter.POSITIONAL_OR_KEYWORD)]
)
mod = inspect.getmodule(self.obj)
globalns = _safe_getattr(mod, "__dict__", {})
localns = globalns
for parent_cls_name in self.qualname.split(".")[:-1]:
parent_cls = localns.get(parent_cls_name, object)
localns = _safe_getattr(parent_cls, "__dict__", None)
if localns is None:
break # pragma: no cover
if self.name == "__init__":
sig = sig.replace(return_annotation=empty)
else:
sig = sig.replace(
return_annotation=safe_eval_type(
sig.return_annotation, globalns, localns, mod, self.fullname
)
)
for p in sig.parameters.values():
p._annotation = safe_eval_type( # type: ignore
p.annotation, globalns, localns, mod, self.fullname
)
return sig
@cached_property
def signature_without_self(self) -> inspect.Signature:
"""Like `signature`, but without the first argument.
This is useful to display constructors.
"""
return self.signature.replace(
parameters=list(self.signature.parameters.values())[1:]
)
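# Hedged usage sketch (added for illustration, not part of pdoc): how a resolved
# signature might be inspected through the documented API; the target module
# below is arbitrary.
#
#     import textwrap
#     import pdoc.doc
#
#     mod = pdoc.doc.Module(textwrap)           # wrap an already-imported module
#     fn = mod.members["dedent"]                # a Function instance
#     print(fn.funcdef, fn.name, fn.signature)  # roughly: def dedent (text)
#     print(fn.docstring.splitlines()[0])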
|
Function
|
python
|
ansible__ansible
|
lib/ansible/executor/module_common.py
|
{
"start": 41550,
"end": 60406
}
|
class ____:
"""Cached Python module created by AnsiballZ."""
# FIXME: switch this to use a locked down pickle config or don't use pickle- easy to mess up and reach objects that shouldn't be pickled
zip_data: bytes
metadata: ModuleMetadata
source_mapping: dict[str, str]
"""A mapping of controller absolute source locations to target relative source locations within the AnsiballZ payload."""
def dump(self, path: str) -> None:
temp_path = pathlib.Path(path + '-part')
with temp_path.open('wb') as cache_file:
pickle.dump(self, cache_file)
temp_path.rename(path)
@classmethod
def load(cls, path: str) -> t.Self:
with pathlib.Path(path).open('rb') as cache_file:
return pickle.load(cache_file)
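# The dump/load pair above uses a write-to-temp-then-rename step so that
# concurrent readers never observe a partially written cache entry. A generic
# sketch of the same pattern (hypothetical names, not ansible API):
#
#     import pathlib, pickle
#
#     def atomic_pickle_dump(obj, path: str) -> None:
#         tmp = pathlib.Path(path + '-part')
#         with tmp.open('wb') as f:
#             pickle.dump(obj, f)
#         tmp.rename(path)  # rename within one filesystem is atomic on POSIX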
def _find_module_utils(
*,
module_name: str,
b_module_data: bytes,
module_path: str,
module_args: dict[object, object],
task_vars: dict[str, object],
templar: Templar,
module_compression: str,
async_timeout: int,
become_plugin: BecomeBase | None,
environment: dict[str, str],
remote_is_local: bool = False
) -> _BuiltModule:
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle: t.Literal['binary', 'jsonargs', 'non_native_want_json', 'old', 'powershell', 'python']
module_style: t.Literal['binary', 'new', 'non_native_want_json', 'old']
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
# Do REPLACER before from ansible.module_utils because we need make sure
# we substitute "from ansible.module_utils basic" for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif NEW_STYLE_PYTHON_MODULE_RE.search(b_module_data):
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#AnsibleRequires -PowerShell Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -(OSVersion|PowerShell|CSharpUtil|Wrapper)', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return _BuiltModule(
b_module_data=b_module_data,
module_style=module_style,
shebang=shebang,
serialization_profile='legacy',
)
output = BytesIO()
try:
remote_module_fqn = _get_ansible_module_fqn(module_path)
except ValueError:
# Modules in roles currently are not found by the fqn heuristic, so we
# fall back to this. This means that relative imports inside a module from
# a role may fail. Absolute imports should be used for future-proofing.
# People should start writing collections instead of modules in roles, so we
# may never fix this.
display.debug('ANSIBALLZ: Could not determine module FQN')
# FIXME: add integration test to validate that builtins and legacy modules with the same name are tracked separately by the caching mechanism
# FIXME: surrogate FQN should be unique per source path- role-packaged modules with name collisions can still be aliased
remote_module_fqn = 'ansible.legacy.%s' % module_name
if module_substyle == 'python':
date_time = datetime.datetime.now(datetime.timezone.utc)
if date_time.year < 1980:
raise AnsibleError(f'Cannot create zipfile due to pre-1980 configured date: {date_time}')
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
extension_manager = _builder.ExtensionManager.create(task_vars=task_vars)
extension_key = '~'.join(extension_manager.extension_names) if extension_manager.extension_names else 'none'
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache') # type: ignore[attr-defined]
cached_module_filename = os.path.join(lookup_path, '-'.join((remote_module_fqn, module_compression, extension_key)))
os.makedirs(os.path.dirname(cached_module_filename), exist_ok=True)
cached_module: _CachedModule | None = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
cached_module = _CachedModule.load(cached_module_filename)
else:
display.debug('ANSIBALLZ: Acquiring lock')
lock_path = f'{cached_module_filename}.lock'
with _locking.named_mutex(lock_path):
display.debug(f'ANSIBALLZ: Lock acquired: {lock_path}')
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# walk the module imports, looking for module_utils to send- they'll be added to the zipfile
module_metadata = recursive_finder(
module_name,
remote_module_fqn,
Origin(path=module_path).tag(b_module_data),
zf,
date_time,
extension_manager,
)
display.debug('ANSIBALLZ: Writing module into payload')
_add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data, module_path, extension_manager)
zf.close()
zip_data = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
os.makedirs(lookup_path, exist_ok=True)
display.debug('ANSIBALLZ: Writing module')
cached_module = _CachedModule(zip_data=zip_data, metadata=module_metadata, source_mapping=extension_manager.source_mapping)
cached_module.dump(cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if not cached_module:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
cached_module = _CachedModule.load(cached_module_filename)
except OSError as ex:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.') from ex
o_interpreter, o_args = _extract_interpreter(b_module_data)
if o_interpreter is None:
o_interpreter = u'/usr/bin/python'
shebang, interpreter = _get_shebang(o_interpreter, task_vars, templar, o_args, remote_is_local=remote_is_local)
# FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
if not isinstance(rlimit_nofile, int):
rlimit_nofile = int(templar._engine.template(rlimit_nofile, options=TemplateOptions(value_for_omit=0)))
if not isinstance(cached_module.metadata, ModuleMetadataV1):
raise NotImplementedError()
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
encoder = get_module_encoder(cached_module.metadata.serialization_profile, Direction.CONTROLLER_TO_MODULE)
try:
encoded_params = json.dumps(params, cls=encoder)
except TypeError as ex:
raise AnsibleError(f'Failed to serialize arguments for the {module_name!r} module.') from ex
extension_manager.source_mapping = cached_module.source_mapping
code = _get_ansiballz_code(shebang)
args = dict(
ansible_module=module_name,
module_fqn=remote_module_fqn,
profile=cached_module.metadata.serialization_profile,
date_time=date_time,
rlimit_nofile=rlimit_nofile,
params=encoded_params,
extensions=extension_manager.get_extensions(),
zip_data=to_text(cached_module.zip_data),
)
args_string = '\n'.join(f'{key}={value!r},' for key, value in args.items())
wrapper = f"""{code}
if __name__ == "__main__":
_ansiballz_main(
{args_string}
)
"""
output.write(to_bytes(wrapper))
module_metadata = cached_module.metadata
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
module_metadata = ModuleMetadataV1(serialization_profile='legacy') # DTFIX-FUTURE: support serialization profiles for PowerShell modules
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = '#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
name=remote_module_fqn,
module_data=b_module_data,
module_path=module_path,
module_args=module_args,
environment=environment,
async_timeout=async_timeout,
become_plugin=become_plugin,
substyle=module_substyle,
task_vars=task_vars,
profile=module_metadata.serialization_profile,
)
elif module_substyle == 'jsonargs':
encoder = get_module_encoder('legacy', Direction.CONTROLLER_TO_MODULE)
module_args_json = to_bytes(json.dumps(module_args, cls=encoder))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(
REPLACER_SELINUX,
to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS))) # type: ignore[attr-defined]
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
syslog_facility = task_vars.get(
'ansible_syslog_facility',
C.DEFAULT_SYSLOG_FACILITY) # type: ignore[attr-defined]
facility = b'syslog.' + to_bytes(syslog_facility, errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
module_metadata = ModuleMetadataV1(serialization_profile='legacy')
else:
module_metadata = ModuleMetadataV1(serialization_profile='legacy')
if not isinstance(module_metadata, ModuleMetadataV1):
raise NotImplementedError(type(module_metadata))
return _BuiltModule(
b_module_data=b_module_data,
module_style=module_style,
shebang=shebang,
serialization_profile=module_metadata.serialization_profile,
)
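# Hedged summary of the detection above (illustrative only; the exact outcomes
# depend on the regexes and markers defined earlier in this module, and the
# sample payloads are made up):
def _example_module_style_detection():
    samples = {
        b"#!/usr/bin/python\nfrom ansible.module_utils.basic import AnsibleModule\n": ("new", "python"),
        b"#!powershell\n#AnsibleRequires -CSharpUtil Ansible.Basic\n": ("new", "powershell"),
        b"#!/bin/sh\necho hello\n": ("old", "old"),
    }
    return samples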
def _extract_interpreter(b_module_data):
"""
Used to extract shebang expression from binary module data and return a text
string with the shebang, or None if no shebang is detected.
"""
interpreter = None
args = []
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split needs text on Python 3
cli_split = shlex.split(to_text(b_shebang[2:], errors='surrogate_or_strict'))
# convert args to text
cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split]
interpreter = cli_split[0]
args = cli_split[1:]
return interpreter, args
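# Hedged illustration (hypothetical helper, not part of ansible): expected
# behaviour of _extract_interpreter on typical inputs.
def _example_extract_interpreter():
    interpreter, args = _extract_interpreter(b"#!/usr/bin/env python3 -u\nprint('hi')\n")
    # -> ('/usr/bin/env', ['python3', '-u'])
    no_shebang = _extract_interpreter(b"print('no shebang')\n")
    # -> (None, [])
    return interpreter, args, no_shebang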
def modify_module(
*,
module_name: str,
module_path,
module_args,
templar,
task_vars=None,
module_compression='ZIP_STORED',
async_timeout=0,
become_plugin=None,
environment=None,
remote_is_local=False,
) -> _BuiltModule:
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
module_bits = _find_module_utils(
module_name=module_name,
b_module_data=b_module_data,
module_path=module_path,
module_args=module_args,
task_vars=task_vars,
templar=templar,
module_compression=module_compression,
async_timeout=async_timeout,
become_plugin=become_plugin,
environment=environment,
remote_is_local=remote_is_local,
)
b_module_data = module_bits.b_module_data
shebang = module_bits.shebang
if module_bits.module_style == 'binary':
return _BuiltModule(
b_module_data=module_bits.b_module_data,
module_style=module_bits.module_style,
shebang=to_text(module_bits.shebang, nonstring='passthru'),
serialization_profile=module_bits.serialization_profile,
)
elif shebang is None:
interpreter, args = _extract_interpreter(b_module_data)
# No interpreter/shebang, assume a binary module?
if interpreter is not None:
shebang, new_interpreter = _get_shebang(interpreter, task_vars, templar, args, remote_is_local=remote_is_local)
# update shebang
b_lines = b_module_data.split(b"\n", 1)
if interpreter != new_interpreter:
b_lines[0] = to_bytes(shebang, errors='surrogate_or_strict', nonstring='passthru')
b_module_data = b"\n".join(b_lines)
return _BuiltModule(
b_module_data=b_module_data,
module_style=module_bits.module_style,
shebang=shebang,
serialization_profile=module_bits.serialization_profile,
)
def _get_action_arg_defaults(action: str, task: Task, templar: TemplateEngine) -> dict[str, t.Any]:
action_groups = task._parent._play._action_groups
defaults = task.module_defaults
# Get the list of groups that contain this action
if action_groups is None:
msg = (
"Finding module_defaults for action %s. "
"The caller has not passed the action_groups, so any "
"that may include this action will be ignored."
)
display.warning(msg=msg)
group_names = []
else:
group_names = action_groups.get(action, [])
tmp_args: dict[str, t.Any] = {}
module_defaults = {}
# Merge latest defaults into dict, since they are a list of dicts
if isinstance(defaults, list):
for default in defaults:
module_defaults.update(default)
for default in module_defaults:
if default.startswith('group/'):
group_name = default.split('group/')[-1]
if group_name in group_names:
tmp_args.update(templar.resolve_to_container(module_defaults.get(f'group/{group_name}', {})))
# handle specific action defaults
tmp_args.update(templar.resolve_to_container(module_defaults.get(action, {})))
return tmp_args
def _apply_action_arg_defaults(action: str, task: Task, action_args: dict[str, t.Any], templar: Templar) -> dict[str, t.Any]:
args = _get_action_arg_defaults(action, task, templar._engine)
args.update(action_args)
return args
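# Hedged sketch of the merge order implemented above, using plain dicts and
# made-up values (no templating involved): group defaults are applied first,
# then action-specific defaults, then the task's own arguments win.
def _example_module_defaults_precedence():
    group_defaults = {"region": "us-east-1", "profile": "ci"}
    action_defaults = {"region": "eu-west-1"}
    task_args = {"instance_type": "t3.micro"}
    merged = {**group_defaults, **action_defaults, **task_args}
    assert merged == {"region": "eu-west-1", "profile": "ci", "instance_type": "t3.micro"}
    return merged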
|
_CachedModule
|
python
|
sympy__sympy
|
sympy/polys/matrices/sdm.py
|
{
"start": 509,
"end": 65339
}
|
class ____(dict):
r"""Sparse matrix based on polys domain elements
This is a dict subclass and is a wrapper for a dict of dicts that supports
basic matrix arithmetic +, -, *, **.
In order to create a new :py:class:`~.SDM`, a dict
of dicts mapping non-zero elements to their
corresponding row and column in the matrix is needed.
We also need to specify the shape and :py:class:`~.Domain`
of our :py:class:`~.SDM` object.
We declare a 2x2 :py:class:`~.SDM` matrix belonging
to QQ domain as shown below.
The 2x2 Matrix in the example is
.. math::
A = \left[\begin{array}{cc}
0 & \frac{1}{2} \\
0 & 0 \end{array} \right]
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> elemsdict = {0:{1:QQ(1, 2)}}
>>> A = SDM(elemsdict, (2, 2), QQ)
>>> A
{0: {1: 1/2}}
We can manipulate :py:class:`~.SDM` the same way
as a Matrix class
>>> from sympy import ZZ
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> B = SDM({0:{0: ZZ(3)}, 1:{1:ZZ(4)}}, (2, 2), ZZ)
>>> A + B
{0: {0: 3, 1: 2}, 1: {0: 1, 1: 4}}
Multiplication
>>> A*B
{0: {1: 8}, 1: {0: 3}}
>>> A*ZZ(2)
{0: {1: 4}, 1: {0: 2}}
"""
fmt = 'sparse'
is_DFM = False
is_DDM = False
def __init__(self, elemsdict, shape, domain):
super().__init__(elemsdict)
self.shape = self.rows, self.cols = m, n = shape
self.domain = domain
if not all(0 <= r < m for r in self):
raise DMBadInputError("Row out of range")
if not all(0 <= c < n for row in self.values() for c in row):
raise DMBadInputError("Column out of range")
def getitem(self, i, j):
try:
return self[i][j]
except KeyError:
m, n = self.shape
if -m <= i < m and -n <= j < n:
try:
return self[i % m][j % n]
except KeyError:
return self.domain.zero
else:
raise IndexError("index out of range")
def setitem(self, i, j, value):
m, n = self.shape
if not (-m <= i < m and -n <= j < n):
raise IndexError("index out of range")
i, j = i % m, j % n
if value:
try:
self[i][j] = value
except KeyError:
self[i] = {j: value}
else:
rowi = self.get(i, None)
if rowi is not None:
try:
del rowi[j]
except KeyError:
pass
else:
if not rowi:
del self[i]
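# Illustrative sketch of getitem/setitem (added comment, in the doctest style
# used by the documented methods of this class):
#
#     >>> from sympy.polys.matrices.sdm import SDM
#     >>> from sympy import QQ
#     >>> A = SDM({0: {1: QQ(2)}}, (2, 2), QQ)
#     >>> A.getitem(1, 0) == QQ(0)   # unstored entries read as the domain zero
#     True
#     >>> A.setitem(1, 0, QQ(5))
#     >>> A
#     {0: {1: 2}, 1: {0: 5}}
#     >>> A.setitem(0, 1, QQ(0))     # assigning zero removes the stored entry
#     >>> A
#     {1: {0: 5}}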
def extract_slice(self, slice1, slice2):
m, n = self.shape
ri = range(m)[slice1]
ci = range(n)[slice2]
sdm = {}
for i, row in self.items():
if i in ri:
row = {ci.index(j): e for j, e in row.items() if j in ci}
if row:
sdm[ri.index(i)] = row
return self.new(sdm, (len(ri), len(ci)), self.domain)
def extract(self, rows, cols):
if not (self and rows and cols):
return self.zeros((len(rows), len(cols)), self.domain)
m, n = self.shape
if not (-m <= min(rows) <= max(rows) < m):
raise IndexError('Row index out of range')
if not (-n <= min(cols) <= max(cols) < n):
raise IndexError('Column index out of range')
# rows and cols can contain duplicates e.g. M[[1, 2, 2], [0, 1]]
# Build a map from row/col in self to list of rows/cols in output
rowmap = defaultdict(list)
colmap = defaultdict(list)
for i2, i1 in enumerate(rows):
rowmap[i1 % m].append(i2)
for j2, j1 in enumerate(cols):
colmap[j1 % n].append(j2)
# Used to efficiently skip zero rows/cols
rowset = set(rowmap)
colset = set(colmap)
sdm1 = self
sdm2 = {}
for i1 in rowset & sdm1.keys():
row1 = sdm1[i1]
row2 = {}
for j1 in colset & row1.keys():
row1_j1 = row1[j1]
for j2 in colmap[j1]:
row2[j2] = row1_j1
if row2:
for i2 in rowmap[i1]:
sdm2[i2] = row2.copy()
return self.new(sdm2, (len(rows), len(cols)), self.domain)
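# Illustrative sketch of extract with repeated row indices (added comment, in
# the doctest style used elsewhere in this class):
#
#     >>> from sympy import ZZ
#     >>> from sympy.polys.matrices.sdm import SDM
#     >>> A = SDM({0: {0: ZZ(1), 1: ZZ(2)}, 1: {0: ZZ(3), 1: ZZ(4)}}, (2, 2), ZZ)
#     >>> A.extract([1, 1], [0])
#     {0: {0: 3}, 1: {0: 3}}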
def __str__(self):
rowsstr = []
for i, row in self.items():
elemsstr = ', '.join('%s: %s' % (j, elem) for j, elem in row.items())
rowsstr.append('%s: {%s}' % (i, elemsstr))
return '{%s}' % ', '.join(rowsstr)
def __repr__(self):
cls = type(self).__name__
rows = dict.__repr__(self)
return '%s(%s, %s, %s)' % (cls, rows, self.shape, self.domain)
@classmethod
def new(cls, sdm, shape, domain):
"""
Parameters
==========
sdm: A dict of dicts for non-zero elements in SDM
shape: tuple representing dimension of SDM
domain: Represents :py:class:`~.Domain` of SDM
Returns
=======
An :py:class:`~.SDM` object
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> elemsdict = {0:{1: QQ(2)}}
>>> A = SDM.new(elemsdict, (2, 2), QQ)
>>> A
{0: {1: 2}}
"""
return cls(sdm, shape, domain)
def copy(A):
"""
Returns the copy of a :py:class:`~.SDM` object
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> elemsdict = {0:{1:QQ(2)}, 1:{}}
>>> A = SDM(elemsdict, (2, 2), QQ)
>>> B = A.copy()
>>> B
{0: {1: 2}, 1: {}}
"""
Ac = {i: Ai.copy() for i, Ai in A.items()}
return A.new(Ac, A.shape, A.domain)
@classmethod
def from_list(cls, ddm, shape, domain):
"""
Create :py:class:`~.SDM` object from a list of lists.
Parameters
==========
ddm:
list of lists containing domain elements
shape:
Dimensions of :py:class:`~.SDM` matrix
domain:
Represents :py:class:`~.Domain` of :py:class:`~.SDM` object
Returns
=======
:py:class:`~.SDM` containing elements of ddm
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> ddm = [[QQ(1, 2), QQ(0)], [QQ(0), QQ(3, 4)]]
>>> A = SDM.from_list(ddm, (2, 2), QQ)
>>> A
{0: {0: 1/2}, 1: {1: 3/4}}
See Also
========
to_list
from_list_flat
from_dok
from_ddm
"""
m, n = shape
if not (len(ddm) == m and all(len(row) == n for row in ddm)):
raise DMBadInputError("Inconsistent row-list/shape")
getrow = lambda i: {j:ddm[i][j] for j in range(n) if ddm[i][j]}
irows = ((i, getrow(i)) for i in range(m))
sdm = {i: row for i, row in irows if row}
return cls(sdm, shape, domain)
@classmethod
def from_ddm(cls, ddm):
"""
Create :py:class:`~.SDM` from a :py:class:`~.DDM`.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> ddm = DDM( [[QQ(1, 2), 0], [0, QQ(3, 4)]], (2, 2), QQ)
>>> A = SDM.from_ddm(ddm)
>>> A
{0: {0: 1/2}, 1: {1: 3/4}}
>>> SDM.from_ddm(ddm).to_ddm() == ddm
True
See Also
========
to_ddm
from_list
from_list_flat
from_dok
"""
return cls.from_list(ddm, ddm.shape, ddm.domain)
def to_list(M):
"""
Convert a :py:class:`~.SDM` object to a list of lists.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> elemsdict = {0:{1:QQ(2)}, 1:{}}
>>> A = SDM(elemsdict, (2, 2), QQ)
>>> A.to_list()
[[0, 2], [0, 0]]
"""
m, n = M.shape
zero = M.domain.zero
ddm = [[zero] * n for _ in range(m)]
for i, row in M.items():
for j, e in row.items():
ddm[i][j] = e
return ddm
def to_list_flat(M):
"""
Convert :py:class:`~.SDM` to a flat list.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0:{1:QQ(2)}, 1:{0: QQ(3)}}, (2, 2), QQ)
>>> A.to_list_flat()
[0, 2, 3, 0]
>>> A == A.from_list_flat(A.to_list_flat(), A.shape, A.domain)
True
See Also
========
from_list_flat
to_list
to_dok
to_ddm
"""
m, n = M.shape
zero = M.domain.zero
flat = [zero] * (m * n)
for i, row in M.items():
for j, e in row.items():
flat[i*n + j] = e
return flat
@classmethod
def from_list_flat(cls, elements, shape, domain):
"""
Create :py:class:`~.SDM` from a flat list of elements.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM.from_list_flat([QQ(0), QQ(2), QQ(0), QQ(0)], (2, 2), QQ)
>>> A
{0: {1: 2}}
>>> A == A.from_list_flat(A.to_list_flat(), A.shape, A.domain)
True
See Also
========
to_list_flat
from_list
from_dok
from_ddm
"""
m, n = shape
if len(elements) != m * n:
raise DMBadInputError("Inconsistent flat-list shape")
sdm = defaultdict(dict)
for inj, element in enumerate(elements):
if element:
i, j = divmod(inj, n)
sdm[i][j] = element
return cls(sdm, shape, domain)
def to_flat_nz(M):
"""
Convert :class:`SDM` to a flat list of nonzero elements and data.
Explanation
===========
This is used to operate on a list of the elements of a matrix and then
reconstruct a modified matrix with elements in the same positions using
:meth:`from_flat_nz`. Zero elements are omitted from the list.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0:{1:QQ(2)}, 1:{0: QQ(3)}}, (2, 2), QQ)
>>> elements, data = A.to_flat_nz()
>>> elements
[2, 3]
>>> A == A.from_flat_nz(elements, data, A.domain)
True
See Also
========
from_flat_nz
to_list_flat
sympy.polys.matrices.ddm.DDM.to_flat_nz
sympy.polys.matrices.domainmatrix.DomainMatrix.to_flat_nz
"""
dok = M.to_dok()
indices = tuple(dok)
elements = list(dok.values())
data = (indices, M.shape)
return elements, data
@classmethod
def from_flat_nz(cls, elements, data, domain):
"""
Reconstruct a :class:`~.SDM` after calling :meth:`to_flat_nz`.
See :meth:`to_flat_nz` for explanation.
See Also
========
to_flat_nz
from_list_flat
sympy.polys.matrices.ddm.DDM.from_flat_nz
sympy.polys.matrices.domainmatrix.DomainMatrix.from_flat_nz
"""
indices, shape = data
dok = dict(zip(indices, elements))
return cls.from_dok(dok, shape, domain)
def to_dod(M):
"""
Convert to dictionary of dictionaries (dod) format.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0: {1: QQ(2)}, 1: {0: QQ(3)}}, (2, 2), QQ)
>>> A.to_dod()
{0: {1: 2}, 1: {0: 3}}
See Also
========
from_dod
sympy.polys.matrices.domainmatrix.DomainMatrix.to_dod
"""
return {i: row.copy() for i, row in M.items()}
@classmethod
def from_dod(cls, dod, shape, domain):
"""
Create :py:class:`~.SDM` from dictionary of dictionaries (dod) format.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> dod = {0: {1: QQ(2)}, 1: {0: QQ(3)}}
>>> A = SDM.from_dod(dod, (2, 2), QQ)
>>> A
{0: {1: 2}, 1: {0: 3}}
>>> A == SDM.from_dod(A.to_dod(), A.shape, A.domain)
True
See Also
========
to_dod
sympy.polys.matrices.domainmatrix.DomainMatrix.to_dod
"""
sdm = defaultdict(dict)
for i, row in dod.items():
for j, e in row.items():
if e:
sdm[i][j] = e
return cls(sdm, shape, domain)
def to_dok(M):
"""
Convert to dictionary of keys (dok) format.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0: {1: QQ(2)}, 1: {0: QQ(3)}}, (2, 2), QQ)
>>> A.to_dok()
{(0, 1): 2, (1, 0): 3}
See Also
========
from_dok
to_list
to_list_flat
to_ddm
"""
return {(i, j): e for i, row in M.items() for j, e in row.items()}
@classmethod
def from_dok(cls, dok, shape, domain):
"""
Create :py:class:`~.SDM` from dictionary of keys (dok) format.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> dok = {(0, 1): QQ(2), (1, 0): QQ(3)}
>>> A = SDM.from_dok(dok, (2, 2), QQ)
>>> A
{0: {1: 2}, 1: {0: 3}}
>>> A == SDM.from_dok(A.to_dok(), A.shape, A.domain)
True
See Also
========
to_dok
from_list
from_list_flat
from_ddm
"""
sdm = defaultdict(dict)
for (i, j), e in dok.items():
if e:
sdm[i][j] = e
return cls(sdm, shape, domain)
def iter_values(M):
"""
Iterate over the nonzero values of a :py:class:`~.SDM` matrix.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0: {1: QQ(2)}, 1: {0: QQ(3)}}, (2, 2), QQ)
>>> list(A.iter_values())
[2, 3]
"""
for row in M.values():
yield from row.values()
def iter_items(M):
"""
Iterate over indices and values of the nonzero elements.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0: {1: QQ(2)}, 1: {0: QQ(3)}}, (2, 2), QQ)
>>> list(A.iter_items())
[((0, 1), 2), ((1, 0), 3)]
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.iter_items
"""
for i, row in M.items():
for j, e in row.items():
yield (i, j), e
def to_ddm(M):
"""
Convert a :py:class:`~.SDM` object to a :py:class:`~.DDM` object
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0:{1:QQ(2)}, 1:{}}, (2, 2), QQ)
>>> A.to_ddm()
[[0, 2], [0, 0]]
"""
return DDM(M.to_list(), M.shape, M.domain)
def to_sdm(M):
"""
Convert to :py:class:`~.SDM` format (returns self).
"""
return M
@doctest_depends_on(ground_types=['flint'])
def to_dfm(M):
"""
Convert a :py:class:`~.SDM` object to a :py:class:`~.DFM` object
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0:{1:QQ(2)}, 1:{}}, (2, 2), QQ)
>>> A.to_dfm()
[[0, 2], [0, 0]]
See Also
========
to_ddm
to_dfm_or_ddm
sympy.polys.matrices.domainmatrix.DomainMatrix.to_dfm
"""
return M.to_ddm().to_dfm()
@doctest_depends_on(ground_types=['flint'])
def to_dfm_or_ddm(M):
"""
Convert to :py:class:`~.DFM` if possible, else :py:class:`~.DDM`.
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0:{1:QQ(2)}, 1:{}}, (2, 2), QQ)
>>> A.to_dfm_or_ddm()
[[0, 2], [0, 0]]
>>> type(A.to_dfm_or_ddm()) # depends on the ground types
<class 'sympy.polys.matrices._dfm.DFM'>
See Also
========
to_ddm
to_dfm
sympy.polys.matrices.domainmatrix.DomainMatrix.to_dfm_or_ddm
"""
return M.to_ddm().to_dfm_or_ddm()
@classmethod
def zeros(cls, shape, domain):
r"""
Returns a zero :py:class:`~.SDM` of the given shape,
belonging to the specified domain
In the example below we declare a matrix A where,
.. math::
A := \left[\begin{array}{ccc}
0 & 0 & 0 \\
0 & 0 & 0 \end{array} \right]
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM.zeros((2, 3), QQ)
>>> A
{}
"""
return cls({}, shape, domain)
@classmethod
def ones(cls, shape, domain):
one = domain.one
m, n = shape
row = dict(zip(range(n), [one]*n))
sdm = {i: row.copy() for i in range(m)}
return cls(sdm, shape, domain)
@classmethod
def eye(cls, shape, domain):
"""
Returns an identity :py:class:`~.SDM` matrix of the given shape
(or size x size if an int is passed), belonging to the specified domain
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> I = SDM.eye((2, 2), QQ)
>>> I
{0: {0: 1}, 1: {1: 1}}
"""
if isinstance(shape, int):
rows, cols = shape, shape
else:
rows, cols = shape
one = domain.one
sdm = {i: {i: one} for i in range(min(rows, cols))}
return cls(sdm, (rows, cols), domain)
@classmethod
def diag(cls, diagonal, domain, shape=None):
if shape is None:
shape = (len(diagonal), len(diagonal))
sdm = {i: {i: v} for i, v in enumerate(diagonal) if v}
return cls(sdm, shape, domain)
def transpose(M):
"""
Returns the transpose of a :py:class:`~.SDM` matrix
Examples
========
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy import QQ
>>> A = SDM({0:{1:QQ(2)}, 1:{}}, (2, 2), QQ)
>>> A.transpose()
{1: {0: 2}}
"""
MT = sdm_transpose(M)
return M.new(MT, M.shape[::-1], M.domain)
def __add__(A, B):
if not isinstance(B, SDM):
return NotImplemented
elif A.shape != B.shape:
raise DMShapeError("Matrix size mismatch: %s + %s" % (A.shape, B.shape))
return A.add(B)
def __sub__(A, B):
if not isinstance(B, SDM):
return NotImplemented
elif A.shape != B.shape:
raise DMShapeError("Matrix size mismatch: %s - %s" % (A.shape, B.shape))
return A.sub(B)
def __neg__(A):
return A.neg()
def __mul__(A, B):
"""A * B"""
if isinstance(B, SDM):
return A.matmul(B)
elif B in A.domain:
return A.mul(B)
else:
return NotImplemented
def __rmul__(a, b):
if b in a.domain:
return a.rmul(b)
else:
return NotImplemented
def matmul(A, B):
"""
Performs matrix multiplication of two SDM matrices
Parameters
==========
A, B: SDM to multiply
Returns
=======
SDM
SDM after multiplication
Raises
======
DomainError
If domain of A does not match
with that of B
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> B = SDM({0:{0:ZZ(2), 1:ZZ(3)}, 1:{0:ZZ(4)}}, (2, 2), ZZ)
>>> A.matmul(B)
{0: {0: 8}, 1: {0: 2, 1: 3}}
"""
if A.domain != B.domain:
raise DMDomainError
m, n = A.shape
n2, o = B.shape
if n != n2:
raise DMShapeError
C = sdm_matmul(A, B, A.domain, m, o)
return A.new(C, (m, o), A.domain)
def mul(A, b):
"""
Multiplies each element of A with a scalar b
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> A.mul(ZZ(3))
{0: {1: 6}, 1: {0: 3}}
"""
Csdm = sdm_scalar_mul(A, b, lambda x, y: x * y, A.domain)
return A.new(Csdm, A.shape, A.domain)
def rmul(A, b):
Csdm = sdm_scalar_mul(A, b, lambda x, y: y * x, A.domain)
return A.new(Csdm, A.shape, A.domain)
def mul_elementwise(A, B):
if A.domain != B.domain:
raise DMDomainError
if A.shape != B.shape:
raise DMShapeError
K = A.domain
zero = K.zero
if K.is_EXRAW:
fmul_zero_a = lambda e: e * zero
fmul_zero_b = lambda e: zero * e
Csdm = binop_dict(A, B, mul, fmul_zero_a, fmul_zero_b)
else:
fzero = lambda e: zero
Csdm = binop_dict(A, B, mul, fzero, fzero)
return A.new(Csdm, A.shape, A.domain)
def add(A, B):
"""
Adds two :py:class:`~.SDM` matrices
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> B = SDM({0:{0: ZZ(3)}, 1:{1:ZZ(4)}}, (2, 2), ZZ)
>>> A.add(B)
{0: {0: 3, 1: 2}, 1: {0: 1, 1: 4}}
"""
Csdm = binop_dict(A, B, add, pos, pos)
return A.new(Csdm, A.shape, A.domain)
def sub(A, B):
"""
Subtracts two :py:class:`~.SDM` matrices
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> B = SDM({0:{0: ZZ(3)}, 1:{1:ZZ(4)}}, (2, 2), ZZ)
>>> A.sub(B)
{0: {0: -3, 1: 2}, 1: {0: 1, 1: -4}}
"""
Csdm = binop_dict(A, B, sub, pos, neg)
return A.new(Csdm, A.shape, A.domain)
def neg(A):
"""
Returns the negative of a :py:class:`~.SDM` matrix
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> A.neg()
{0: {1: -2}, 1: {0: -1}}
"""
Csdm = unop_dict(A, neg)
return A.new(Csdm, A.shape, A.domain)
def convert_to(A, K):
"""
Converts the :py:class:`~.Domain` of a :py:class:`~.SDM` matrix to K
Examples
========
>>> from sympy import ZZ, QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> A.convert_to(QQ)
{0: {1: 2}, 1: {0: 1}}
"""
Kold = A.domain
if K == Kold:
return A.copy()
Ak = unop_dict(A, lambda e: K.convert_from(e, Kold))
return A.new(Ak, A.shape, K)
def nnz(A):
"""Number of non-zero elements in the :py:class:`~.SDM` matrix.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> A.nnz()
2
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.nnz
"""
return sum(map(len, A.values()))
def scc(A):
"""Strongly connected components of a square matrix *A*.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0: ZZ(2)}, 1:{1:ZZ(1)}}, (2, 2), ZZ)
>>> A.scc()
[[0], [1]]
See also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.scc
"""
rows, cols = A.shape
assert rows == cols
V = range(rows)
Emap = {v: list(A.get(v, [])) for v in V}
return _strongly_connected_components(V, Emap)
def rref(A):
"""
Returns the reduced row echelon form and the list of pivots for the :py:class:`~.SDM`
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(2), 1:QQ(4)}}, (2, 2), QQ)
>>> A.rref()
({0: {0: 1, 1: 2}}, [0])
"""
B, pivots, _ = sdm_irref(A)
return A.new(B, A.shape, A.domain), pivots
def rref_den(A):
"""
Returns the reduced row echelon form (RREF) with denominator and pivots.
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(2), 1:QQ(4)}}, (2, 2), QQ)
>>> A.rref_den()
({0: {0: 1, 1: 2}}, 1, [0])
"""
K = A.domain
A_rref_sdm, denom, pivots = sdm_rref_den(A, K)
A_rref = A.new(A_rref_sdm, A.shape, A.domain)
return A_rref, denom, pivots
def inv(A):
"""
Returns inverse of a matrix A
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.inv()
{0: {0: -2, 1: 1}, 1: {0: 3/2, 1: -1/2}}
"""
return A.to_dfm_or_ddm().inv().to_sdm()
def det(A):
"""
Returns determinant of A
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.det()
-2
"""
# It would be better to have a sparse implementation of det for use
# with very sparse matrices. Extremely sparse matrices probably just
# have determinant zero and we could probably detect that very quickly.
# In the meantime, we convert to a dense matrix and use ddm_idet.
#
# If GROUND_TYPES=flint though then we will use Flint's implementation
# if possible (dfm).
return A.to_dfm_or_ddm().det()
def lu(A):
"""
Returns LU decomposition for a matrix A
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.lu()
({0: {0: 1}, 1: {0: 3, 1: 1}}, {0: {0: 1, 1: 2}, 1: {1: -2}}, [])
"""
L, U, swaps = A.to_ddm().lu()
return A.from_ddm(L), A.from_ddm(U), swaps
def qr(self):
"""
QR decomposition for SDM (Sparse Domain Matrix).
Returns
=======
Q : SDM
Orthogonal matrix.
R : SDM
Upper triangular matrix.
"""
ddm_q, ddm_r = self.to_ddm().qr()
Q = ddm_q.to_sdm()
R = ddm_r.to_sdm()
return Q, R
def lu_solve(A, b):
"""
Uses LU decomposition to solve Ax = b,
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> b = SDM({0:{0:QQ(1)}, 1:{0:QQ(2)}}, (2, 1), QQ)
>>> A.lu_solve(b)
{1: {0: 1/2}}
"""
return A.from_ddm(A.to_ddm().lu_solve(b.to_ddm()))
def fflu(self):
"""
Fraction free LU decomposition of SDM.
Uses DDM implementation.
See Also
========
sympy.polys.matrices.ddm.DDM.fflu
"""
ddm_p, ddm_l, ddm_d, ddm_u = self.to_dfm_or_ddm().fflu()
P = ddm_p.to_sdm()
L = ddm_l.to_sdm()
D = ddm_d.to_sdm()
U = ddm_u.to_sdm()
return P, L, D, U
def nullspace(A):
"""
Nullspace of a :py:class:`~.SDM` matrix A.
The domain of the matrix must be a field.
It is better to use the :meth:`~.DomainMatrix.nullspace` method rather
than this one, which is otherwise no longer used.
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0: QQ(2), 1: QQ(4)}}, (2, 2), QQ)
>>> A.nullspace()
({0: {0: -2, 1: 1}}, [1])
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.nullspace
The preferred way to get the nullspace of a matrix.
"""
ncols = A.shape[1]
one = A.domain.one
B, pivots, nzcols = sdm_irref(A)
K, nonpivots = sdm_nullspace_from_rref(B, one, ncols, pivots, nzcols)
K = dict(enumerate(K))
shape = (len(K), ncols)
return A.new(K, shape, A.domain), nonpivots
def nullspace_from_rref(A, pivots=None):
"""
Returns nullspace for a :py:class:`~.SDM` matrix ``A`` in RREF.
The domain of the matrix can be any domain.
The matrix must already be in reduced row echelon form (RREF).
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0: QQ(2), 1: QQ(4)}}, (2, 2), QQ)
>>> A_rref, pivots = A.rref()
>>> A_null, nonpivots = A_rref.nullspace_from_rref(pivots)
>>> A_null
{0: {0: -2, 1: 1}}
>>> pivots
[0]
>>> nonpivots
[1]
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.nullspace
The higher-level function that would usually be called instead of
calling this one directly.
sympy.polys.matrices.domainmatrix.DomainMatrix.nullspace_from_rref
The higher-level direct equivalent of this function.
sympy.polys.matrices.ddm.DDM.nullspace_from_rref
The equivalent function for dense :py:class:`~.DDM` matrices.
"""
m, n = A.shape
K = A.domain
if pivots is None:
pivots = sorted(map(min, A.values()))
if not pivots:
return A.eye((n, n), K), list(range(n))
elif len(pivots) == n:
return A.zeros((0, n), K), []
# In fraction-free RREF the nonzero entry inserted for the pivots is
# not necessarily 1.
pivot_val = A[0][pivots[0]]
assert not K.is_zero(pivot_val)
pivots_set = set(pivots)
# Loop once over all nonzero entries making a map from column indices
# to the nonzero entries in that column along with the row index of the
# nonzero entry. This is basically the transpose of the matrix.
nonzero_cols = defaultdict(list)
for i, Ai in A.items():
for j, Aij in Ai.items():
nonzero_cols[j].append((i, Aij))
# Usually in SDM we want to avoid looping over the dimensions of the
# matrix because it is optimised to support extremely sparse matrices.
# Here in nullspace though every zero column becomes a nonzero column
# so we need to loop once over the columns at least (range(n)) rather
# than just the nonzero entries of the matrix. We can still avoid
# an inner loop over the rows though by using the nonzero_cols map.
basis = []
nonpivots = []
for j in range(n):
if j in pivots_set:
continue
nonpivots.append(j)
vec = {j: pivot_val}
for ip, Aij in nonzero_cols[j]:
vec[pivots[ip]] = -Aij
basis.append(vec)
sdm = dict(enumerate(basis))
A_null = A.new(sdm, (len(basis), n), K)
return (A_null, nonpivots)
def particular(A):
ncols = A.shape[1]
B, pivots, nzcols = sdm_irref(A)
P = sdm_particular_from_rref(B, ncols, pivots)
rep = {0:P} if P else {}
return A.new(rep, (1, ncols-1), A.domain)
def hstack(A, *B):
"""Horizontally stacks :py:class:`~.SDM` matrices.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0: {0: ZZ(1), 1: ZZ(2)}, 1: {0: ZZ(3), 1: ZZ(4)}}, (2, 2), ZZ)
>>> B = SDM({0: {0: ZZ(5), 1: ZZ(6)}, 1: {0: ZZ(7), 1: ZZ(8)}}, (2, 2), ZZ)
>>> A.hstack(B)
{0: {0: 1, 1: 2, 2: 5, 3: 6}, 1: {0: 3, 1: 4, 2: 7, 3: 8}}
>>> C = SDM({0: {0: ZZ(9), 1: ZZ(10)}, 1: {0: ZZ(11), 1: ZZ(12)}}, (2, 2), ZZ)
>>> A.hstack(B, C)
{0: {0: 1, 1: 2, 2: 5, 3: 6, 4: 9, 5: 10}, 1: {0: 3, 1: 4, 2: 7, 3: 8, 4: 11, 5: 12}}
"""
Anew = dict(A.copy())
rows, cols = A.shape
domain = A.domain
for Bk in B:
Bkrows, Bkcols = Bk.shape
assert Bkrows == rows
assert Bk.domain == domain
for i, Bki in Bk.items():
Ai = Anew.get(i, None)
if Ai is None:
Anew[i] = Ai = {}
for j, Bkij in Bki.items():
Ai[j + cols] = Bkij
cols += Bkcols
return A.new(Anew, (rows, cols), A.domain)
def vstack(A, *B):
"""Vertically stacks :py:class:`~.SDM` matrices.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0: {0: ZZ(1), 1: ZZ(2)}, 1: {0: ZZ(3), 1: ZZ(4)}}, (2, 2), ZZ)
>>> B = SDM({0: {0: ZZ(5), 1: ZZ(6)}, 1: {0: ZZ(7), 1: ZZ(8)}}, (2, 2), ZZ)
>>> A.vstack(B)
{0: {0: 1, 1: 2}, 1: {0: 3, 1: 4}, 2: {0: 5, 1: 6}, 3: {0: 7, 1: 8}}
>>> C = SDM({0: {0: ZZ(9), 1: ZZ(10)}, 1: {0: ZZ(11), 1: ZZ(12)}}, (2, 2), ZZ)
>>> A.vstack(B, C)
{0: {0: 1, 1: 2}, 1: {0: 3, 1: 4}, 2: {0: 5, 1: 6}, 3: {0: 7, 1: 8}, 4: {0: 9, 1: 10}, 5: {0: 11, 1: 12}}
"""
Anew = dict(A.copy())
rows, cols = A.shape
domain = A.domain
for Bk in B:
Bkrows, Bkcols = Bk.shape
assert Bkcols == cols
assert Bk.domain == domain
for i, Bki in Bk.items():
Anew[i + rows] = Bki
rows += Bkrows
return A.new(Anew, (rows, cols), A.domain)
def applyfunc(self, func, domain):
sdm = {i: {j: func(e) for j, e in row.items()} for i, row in self.items()}
return self.new(sdm, self.shape, domain)
def charpoly(A):
"""
Returns the coefficients of the characteristic polynomial
of the :py:class:`~.SDM` matrix. These elements will be domain elements.
The domain of the elements will be the same as the domain of the :py:class:`~.SDM`.
Examples
========
>>> from sympy import QQ, Symbol
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy.polys import Poly
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.charpoly()
[1, -5, -2]
We can create a polynomial using the
coefficients using :py:class:`~.Poly`
>>> x = Symbol('x')
>>> p = Poly(A.charpoly(), x, domain=A.domain)
>>> p
Poly(x**2 - 5*x - 2, x, domain='QQ')
"""
K = A.domain
n, _ = A.shape
pdict = sdm_berk(A, n, K)
plist = [K.zero] * (n + 1)
for i, pi in pdict.items():
plist[i] = pi
return plist
def is_zero_matrix(self):
"""
Says whether this matrix has all zero entries.
"""
return not self
def is_upper(self):
"""
Says whether this matrix is upper-triangular. True can be returned
even if the matrix is not square.
"""
return all(i <= j for i, row in self.items() for j in row)
def is_lower(self):
"""
Says whether this matrix is lower-triangular. True can be returned
even if the matrix is not square.
"""
return all(i >= j for i, row in self.items() for j in row)
def is_diagonal(self):
"""
Says whether this matrix is diagonal. True can be returned
even if the matrix is not square.
"""
return all(i == j for i, row in self.items() for j in row)
def diagonal(self):
"""
Returns the diagonal of the matrix as a list.
"""
m, n = self.shape
zero = self.domain.zero
return [row.get(i, zero) for i, row in self.items() if i < n]
def lll(A, delta=QQ(3, 4)):
"""
Returns the LLL-reduced basis for the :py:class:`~.SDM` matrix.
"""
return A.to_dfm_or_ddm().lll(delta=delta).to_sdm()
def lll_transform(A, delta=QQ(3, 4)):
"""
Returns the LLL-reduced basis and transformation matrix.
"""
reduced, transform = A.to_dfm_or_ddm().lll_transform(delta=delta)
return reduced.to_sdm(), transform.to_sdm()
def binop_dict(A, B, fab, fa, fb):
Anz, Bnz = set(A), set(B)
C = {}
for i in Anz & Bnz:
Ai, Bi = A[i], B[i]
Ci = {}
Anzi, Bnzi = set(Ai), set(Bi)
for j in Anzi & Bnzi:
Cij = fab(Ai[j], Bi[j])
if Cij:
Ci[j] = Cij
for j in Anzi - Bnzi:
Cij = fa(Ai[j])
if Cij:
Ci[j] = Cij
for j in Bnzi - Anzi:
Cij = fb(Bi[j])
if Cij:
Ci[j] = Cij
if Ci:
C[i] = Ci
for i in Anz - Bnz:
Ai = A[i]
Ci = {}
for j, Aij in Ai.items():
Cij = fa(Aij)
if Cij:
Ci[j] = Cij
if Ci:
C[i] = Ci
for i in Bnz - Anz:
Bi = B[i]
Ci = {}
for j, Bij in Bi.items():
Cij = fb(Bij)
if Cij:
Ci[j] = Cij
if Ci:
C[i] = Ci
return C
def unop_dict(A, f):
B = {}
for i, Ai in A.items():
Bi = {}
for j, Aij in Ai.items():
Bij = f(Aij)
if Bij:
Bi[j] = Bij
if Bi:
B[i] = Bi
return B
def sdm_transpose(M):
MT = {}
for i, Mi in M.items():
for j, Mij in Mi.items():
try:
MT[j][i] = Mij
except KeyError:
MT[j] = {i: Mij}
return MT
def sdm_dotvec(A, B, K):
return K.sum(A[j] * B[j] for j in A.keys() & B.keys())
def sdm_matvecmul(A, B, K):
C = {}
for i, Ai in A.items():
Ci = sdm_dotvec(Ai, B, K)
if Ci:
C[i] = Ci
return C
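# Hedged example (added for illustration): the "vectors" here are sparse dicts
# mapping column index -> value, so the dot product only visits indices stored
# in both operands.
def _example_sdm_dotvec_matvecmul():
    from sympy import ZZ
    a = {0: ZZ(2), 3: ZZ(5)}
    b = {3: ZZ(4), 7: ZZ(1)}
    d = sdm_dotvec(a, b, ZZ)                         # only index 3 overlaps -> 20
    v = sdm_matvecmul({0: a, 2: b}, {3: ZZ(1)}, ZZ)  # -> {0: 5, 2: 4}
    return d, v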
def sdm_matmul(A, B, K, m, o):
#
# Should be fast if A and B are very sparse.
# Consider e.g. A = B = eye(1000).
#
# The idea here is that we compute C = A*B in terms of the rows of C and
# B since the dict of dicts representation naturally stores the matrix as
# rows. The ith row of C (Ci) is equal to the sum of Aik * Bk where Bk is
# the kth row of B. The algorithm below loops over each nonzero element
# Aik of A and if the corresponding row Bj is nonzero then we do
# Ci += Aik * Bk.
# To make this more efficient we don't need to loop over all elements Aik.
# Instead for each row Ai we compute the intersection of the nonzero
# columns in Ai with the nonzero rows in B. That gives the k such that
# Aik and Bk are both nonzero. In Python the intersection of two sets
# of int can be computed very efficiently.
#
if K.is_EXRAW:
return sdm_matmul_exraw(A, B, K, m, o)
C = {}
B_knz = set(B)
for i, Ai in A.items():
Ci = {}
Ai_knz = set(Ai)
for k in Ai_knz & B_knz:
Aik = Ai[k]
for j, Bkj in B[k].items():
Cij = Ci.get(j, None)
if Cij is not None:
Cij = Cij + Aik * Bkj
if Cij:
Ci[j] = Cij
else:
Ci.pop(j)
else:
Cij = Aik * Bkj
if Cij:
Ci[j] = Cij
if Ci:
C[i] = Ci
return C
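# Hedged worked example of the intersection trick described above: for each row
# Ai only the k in `set(Ai) & set(B)` contribute, so multiplying two identity
# matrices of size n touches only n stored entries.
def _example_sdm_matmul():
    from sympy import ZZ
    A = {0: {1: ZZ(2)}, 1: {0: ZZ(1)}}
    B = {0: {0: ZZ(2), 1: ZZ(3)}, 1: {0: ZZ(4)}}
    C = sdm_matmul(A, B, ZZ, 2, 2)
    assert C == {0: {0: ZZ(8)}, 1: {0: ZZ(2), 1: ZZ(3)}}
    return C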
def sdm_matmul_exraw(A, B, K, m, o):
#
# Like sdm_matmul above except that:
#
# - Handles cases like 0*oo -> nan (sdm_matmul skips multiplication by zero)
# - Uses K.sum (Add(*items)) for efficient addition of Expr
#
zero = K.zero
C = {}
B_knz = set(B)
for i, Ai in A.items():
Ci_list = defaultdict(list)
Ai_knz = set(Ai)
# Nonzero row/column pair
for k in Ai_knz & B_knz:
Aik = Ai[k]
if zero * Aik == zero:
# This is the main inner loop:
for j, Bkj in B[k].items():
Ci_list[j].append(Aik * Bkj)
else:
for j in range(o):
Ci_list[j].append(Aik * B[k].get(j, zero))
# Zero row in B, check for infinities in A
for k in Ai_knz - B_knz:
zAik = zero * Ai[k]
if zAik != zero:
for j in range(o):
Ci_list[j].append(zAik)
# Add terms using K.sum (Add(*terms)) for efficiency
Ci = {}
for j, Cij_list in Ci_list.items():
Cij = K.sum(Cij_list)
if Cij:
Ci[j] = Cij
if Ci:
C[i] = Ci
# Find all infinities in B
for k, Bk in B.items():
for j, Bkj in Bk.items():
if zero * Bkj != zero:
for i in range(m):
Aik = A.get(i, {}).get(k, zero)
# If Aik is not zero then this was handled above
if Aik == zero:
Ci = C.get(i, {})
Cij = Ci.get(j, zero) + Aik * Bkj
if Cij != zero:
Ci[j] = Cij
C[i] = Ci
else:
Ci.pop(j, None)
if Ci:
C[i] = Ci
else:
C.pop(i, None)
return C
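# Hedged illustration (added, not part of the upstream module): the extra passes
# above exist because implicit zeros in A can still produce nan when B contains
# an infinity. The import path for EXRAW below is an assumption.
def _example_sdm_matmul_exraw():
    from sympy import S, oo
    from sympy.polys.domains import EXRAW
    A = {0: {1: S(1)}}   # A[0][0] is an implicit (unstored) zero
    B = {0: {0: oo}}     # B has no row 1, i.e. that row is all zero
    # Mathematically C[0][0] = 0*oo + 1*0 = nan. The plain sparse routine
    # returns {} because the nonzero columns of A's row never meet the nonzero
    # rows of B, but the EXRAW-aware version keeps the nan:
    C = sdm_matmul_exraw(A, B, EXRAW, 1, 1)
    return C  # expected: {0: {0: nan}}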
def sdm_scalar_mul(A, b, op, K):
"""
Handles special cases like 0 * oo -> nan by creating a dense result
when necessary. For all other cases, it uses the fast sparse approach.
"""
zero = K.zero
if K.is_EXRAW and op(zero, b) != zero:
Csdm = sdm_scalar_mul_exraw(A, b, op, K)
else:
Csdm = unop_dict(A, lambda aij: op(aij, b))
return Csdm
def sdm_scalar_mul_exraw(A, b, op, K):
zero = K.zero
zero_prod = op(zero, b)
m, n = A.shape
Csdm = {i: dict.fromkeys(range(n), zero_prod) for i in range(m)}
for i, Ai in A.items():
Ci = Csdm[i]
for j, Aij in Ai.items():
Cij = op(Aij, b)
if Cij == zero:
del Ci[j]
else:
Ci[j] = Cij
return Csdm
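# Hedged illustration of why the dense fallback above exists: for ordinary
# domains scalar multiplication only touches stored entries, but in EXRAW a
# scalar like oo turns every implicit zero into zero*oo == nan, so the result
# has to be materialised densely.
def _example_sdm_scalar_mul():
    from sympy import ZZ
    A = SDM({0: {1: ZZ(2)}, 1: {0: ZZ(3)}}, (2, 2), ZZ)
    return A.mul(ZZ(5))  # fast sparse path -> {0: {1: 10}, 1: {0: 15}}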
def sdm_irref(A):
"""RREF and pivots of a sparse matrix *A*.
Compute the reduced row echelon form (RREF) of the matrix *A* and return a
list of the pivot columns. This routine does not work in place and leaves
the original matrix *A* unmodified.
The domain of the matrix must be a field.
Examples
========
This routine works with a dict of dicts sparse representation of a matrix:
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import sdm_irref
>>> A = {0: {0: QQ(1), 1: QQ(2)}, 1: {0: QQ(3), 1: QQ(4)}}
>>> Arref, pivots, _ = sdm_irref(A)
>>> Arref
{0: {0: 1}, 1: {1: 1}}
>>> pivots
[0, 1]
The analogous calculation with :py:class:`~.MutableDenseMatrix` would be
>>> from sympy import Matrix
>>> M = Matrix([[1, 2], [3, 4]])
>>> Mrref, pivots = M.rref()
>>> Mrref
Matrix([
[1, 0],
[0, 1]])
>>> pivots
(0, 1)
Notes
=====
The cost of this algorithm is determined purely by the nonzero elements of
the matrix. No part of the cost of any step in this algorithm depends on
the number of rows or columns in the matrix. No step depends even on the
number of nonzero rows apart from the primary loop over those rows. The
implementation is much faster than ddm_rref for sparse matrices. In fact,
at the time of writing, it is also (slightly) faster than the dense
implementation even when the input is a fully dense matrix, so it seems to be
faster in all cases.
The elements of the matrix should support exact division with ``/``. For
example elements of any domain that is a field (e.g. ``QQ``) should be
fine. No attempt is made to handle inexact arithmetic.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.rref
The higher-level function that would normally be used to call this
routine.
sympy.polys.matrices.dense.ddm_irref
The dense equivalent of this routine.
sdm_rref_den
Fraction-free version of this routine.
"""
#
# Any zeros in the matrix are not stored at all so an element is zero if
# its row dict has no index at that key. A row is entirely zero if its
# row index is not in the outer dict. Since rref reorders the rows and
# removes zero rows we can completely discard the row indices. The first
# step then copies the row dicts into a list sorted by the index of the
# first nonzero column in each row.
#
# The algorithm then processes each row Ai one at a time. Previously seen
# rows are used to cancel their pivot columns from Ai. Then a pivot from
# Ai is chosen and is cancelled from all previously seen rows. At this
# point Ai joins the previously seen rows. Once all rows are seen all
# elimination has occurred and the rows are sorted by pivot column index.
#
# The previously seen rows are stored in two separate groups. The reduced
# group consists of all rows that have been reduced to a single nonzero
# element (the pivot). There is no need to attempt any further reduction
# with these. Rows that still have other nonzeros need to be considered
# when Ai is cancelled from the previously seen rows.
#
# A dict nonzerocolumns is used to map from a column index to a set of
# previously seen rows that still have a nonzero element in that column.
# This means that we can cancel the pivot from Ai into the previously seen
# rows without needing to loop over each row that might have a zero in
# that column.
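# For example, nonzero_columns == {3: {0, 2}} would record that the rows
# whose pivots are columns 0 and 2 still have a nonzero entry in column 3.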
#
# Row dicts sorted by index of first nonzero column
# (Maybe sorting is not needed/useful.)
Arows = sorted((Ai.copy() for Ai in A.values()), key=min)
# Each processed row has an associated pivot column.
# pivot_row_map maps from the pivot column index to the row dict.
# This means that we can represent a set of rows purely as a set of their
# pivot indices.
pivot_row_map = {}
# Set of pivot indices for rows that are fully reduced to a single nonzero.
reduced_pivots = set()
# Set of pivot indices for rows not fully reduced
nonreduced_pivots = set()
# Map from column index to a set of pivot indices representing the rows
# that have a nonzero at that column.
nonzero_columns = defaultdict(set)
while Arows:
# Select pivot element and row
Ai = Arows.pop()
# Nonzero columns from fully reduced pivot rows can be removed
Ai = {j: Aij for j, Aij in Ai.items() if j not in reduced_pivots}
# Others require full row cancellation
for j in nonreduced_pivots & set(Ai):
Aj = pivot_row_map[j]
Aij = Ai[j]
Ainz = set(Ai)
Ajnz = set(Aj)
for k in Ajnz - Ainz:
Ai[k] = - Aij * Aj[k]
Ai.pop(j)
Ainz.remove(j)
for k in Ajnz & Ainz:
Aik = Ai[k] - Aij * Aj[k]
if Aik:
Ai[k] = Aik
else:
Ai.pop(k)
# We have now cancelled previously seen pivots from Ai.
# If it is zero then discard it.
if not Ai:
continue
# Choose a pivot from Ai:
j = min(Ai)
Aij = Ai[j]
pivot_row_map[j] = Ai
Ainz = set(Ai)
# Normalise the pivot row to make the pivot 1.
#
# This approach is slow for some domains. Cross cancellation might be
# better for e.g. QQ(x) with division delayed to the final steps.
Aijinv = Aij**-1
for l in Ai:
Ai[l] *= Aijinv
# Use Aij to cancel column j from all previously seen rows
for k in nonzero_columns.pop(j, ()):
Ak = pivot_row_map[k]
Akj = Ak[j]
Aknz = set(Ak)
for l in Ainz - Aknz:
Ak[l] = - Akj * Ai[l]
nonzero_columns[l].add(k)
Ak.pop(j)
Aknz.remove(j)
for l in Ainz & Aknz:
Akl = Ak[l] - Akj * Ai[l]
if Akl:
Ak[l] = Akl
else:
# The entry has cancelled to zero: drop it and update the column map.
Ak.pop(l)
if l != j:
nonzero_columns[l].remove(k)
if len(Ak) == 1:
reduced_pivots.add(k)
nonreduced_pivots.remove(k)
if len(Ai) == 1:
reduced_pivots.add(j)
else:
nonreduced_pivots.add(j)
for l in Ai:
if l != j:
nonzero_columns[l].add(j)
# All done!
pivots = sorted(reduced_pivots | nonreduced_pivots)
pivot2row = {p: n for n, p in enumerate(pivots)}
nonzero_columns = {c: {pivot2row[p] for p in s} for c, s in nonzero_columns.items()}
rows = [pivot_row_map[i] for i in pivots]
rref = dict(enumerate(rows))
return rref, pivots, nonzero_columns
def sdm_rref_den(A, K):
"""
Return the reduced row echelon form (RREF) of A with denominator.
The RREF is computed using fraction-free Gauss-Jordan elimination.
Explanation
===========
The algorithm used is the fraction-free version of Gauss-Jordan elimination
described as FFGJ in [1]_. Here it is modified to handle zero or missing
pivots and to avoid redundant arithmetic. This implementation is also
optimized for sparse matrices.
The domain $K$ must support exact division (``K.exquo``) but does not need
to be a field. This method is suitable for most exact rings and fields like
:ref:`ZZ`, :ref:`QQ` and :ref:`QQ(a)`. In the case of :ref:`QQ` or
:ref:`K(x)` it might be more efficient to clear denominators and use
:ref:`ZZ` or :ref:`K[x]` instead.
For inexact domains like :ref:`RR` and :ref:`CC` use ``ddm_irref`` instead.
Examples
========
>>> from sympy.polys.matrices.sdm import sdm_rref_den
>>> from sympy.polys.domains import ZZ
>>> A = {0: {0: ZZ(1), 1: ZZ(2)}, 1: {0: ZZ(3), 1: ZZ(4)}}
>>> A_rref, den, pivots = sdm_rref_den(A, ZZ)
>>> A_rref
{0: {0: -2}, 1: {1: -2}}
>>> den
-2
>>> pivots
[0, 1]
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.rref_den
Higher-level interface to ``sdm_rref_den`` that would usually be used
instead of calling this function directly.
sympy.polys.matrices.sdm.SDM.rref_den
The ``SDM`` method that uses this function.
sdm_irref
Computes RREF using field division.
ddm_irref_den
The dense version of this algorithm.
References
==========
.. [1] Fraction-free algorithms for linear and polynomial equations.
George C. Nakos , Peter R. Turner , Robert M. Williams.
https://dl.acm.org/doi/10.1145/271130.271133
"""
#
# We represent each row of the matrix as a dict mapping column indices to
# nonzero elements. We will build the RREF matrix starting from an empty
# matrix and appending one row at a time. At each step we will have the
# RREF of the rows we have processed so far.
#
# Our representation of the RREF divides it into three parts:
#
# 1. Fully reduced rows having only a single nonzero element (the pivot).
# 2. Partially reduced rows having nonzeros after the pivot.
# 3. The current denominator and divisor.
#
# For example, at some intermediate stage the RREF might be:
#
# [2, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# [0, 0, 2, 0, 0, 0, 7, 0, 0, 0]
# [0, 0, 0, 0, 0, 2, 0, 0, 0, 0]
# [0, 0, 0, 0, 0, 0, 0, 2, 0, 0]
# [0, 0, 0, 0, 0, 0, 0, 0, 2, 0]
#
# Here the second row is partially reduced and the other rows are fully
# reduced. The denominator would be 2 in this case. We distinguish the
# fully reduced rows because we can handle them more efficiently when
# adding a new row.
#
# When adding a new row we need to multiply it by the current denominator.
# Then we reduce the new row by cross cancellation with the previous rows.
# Then if it is not reduced to zero we take its leading entry as the new
# pivot, cross cancel the new row from the previous rows and update the
# denominator. In the fraction-free version this last step requires
# multiplying and dividing the whole matrix by the new pivot and the
# current divisor. The advantage of building the RREF one row at a time is
# that in the sparse case we only need to work with the relatively sparse
# upper rows of the matrix. The simple version of FFGJ in [1] would
# multiply and divide all the dense lower rows at each step.
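# In the code below the fully reduced rows are tracked via
# col_to_row_reduced, the partially reduced rows via col_to_row_unreduced,
# and the scalars denom and divisor make up the third part of this
# representation.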
# Handle the trivial cases.
if not A:
return ({}, K.one, [])
elif len(A) == 1:
Ai, = A.values()
j = min(Ai)
Aij = Ai[j]
return ({0: Ai.copy()}, Aij, [j])
# For inexact domains like RR[x] we use quo and discard the remainder.
# Maybe it would be better for K.exquo to do this automatically.
if K.is_Exact:
exquo = K.exquo
else:
exquo = K.quo
# Make sure we have the rows in order to make this deterministic from the
# outset.
_, rows_in_order = zip(*sorted(A.items()))
col_to_row_reduced = {}
col_to_row_unreduced = {}
reduced = col_to_row_reduced.keys()
unreduced = col_to_row_unreduced.keys()
# Our representation of the RREF so far.
A_rref_rows = []
denom = None
divisor = None
# The rows that remain to be added to the RREF. These are sorted by the
# column index of their leading entry. Note that sorted() is stable so the
# previous sort by unique row index is still needed to make this
# deterministic (there may be multiple rows with the same leading column).
A_rows = sorted(rows_in_order, key=min)
for Ai in A_rows:
# All fully reduced columns can be immediately discarded.
Ai = {j: Aij for j, Aij in Ai.items() if j not in reduced}
# We need to multiply the new row by the current denominator to bring
# it into the same scale as the previous rows and then cross-cancel to
# reduce it wrt the previous unreduced rows. All pivots in the previous
# rows are equal to denom so the coefficients we need to make a linear
# combination of the previous rows to cancel into the new row are just
# the ones that are already in the new row *before* we multiply by
# denom. We compute that linear combination first and then multiply the
# new row by denom before subtraction.
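# In symbols the reduced row is denom*Ai - sum_j Ai[j]*Aj, where j ranges
# over the unreduced pivot columns present in Ai and Aj is the stored row
# with pivot j (the pivot entries themselves, all equal to denom, are only
# filled in at the very end).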
Ai_cancel = {}
for j in unreduced & Ai.keys():
# Remove the pivot column from the new row since it would become
# zero anyway.
Aij = Ai.pop(j)
Aj = A_rref_rows[col_to_row_unreduced[j]]
for k, Ajk in Aj.items():
Aik_cancel = Ai_cancel.get(k)
if Aik_cancel is None:
Ai_cancel[k] = Aij * Ajk
else:
Aik_cancel = Aik_cancel + Aij * Ajk
if Aik_cancel:
Ai_cancel[k] = Aik_cancel
else:
Ai_cancel.pop(k)
# Multiply the new row by the current denominator and subtract.
Ai_nz = set(Ai)
Ai_cancel_nz = set(Ai_cancel)
d = denom or K.one
for k in Ai_cancel_nz - Ai_nz:
Ai[k] = -Ai_cancel[k]
for k in Ai_nz - Ai_cancel_nz:
Ai[k] = Ai[k] * d
for k in Ai_cancel_nz & Ai_nz:
Aik = Ai[k] * d - Ai_cancel[k]
if Aik:
Ai[k] = Aik
else:
Ai.pop(k)
# Now Ai has the same scale as the other rows and is reduced wrt the
# unreduced rows.
# If the row is reduced to zero then discard it.
if not Ai:
continue
# Choose a pivot for this row.
j = min(Ai)
Aij = Ai.pop(j)
# Cross cancel the unreduced rows by the new row.
# a[k][l] = (a[i][j]*a[k][l] - a[k][j]*a[i][l]) / divisor
for pk, k in list(col_to_row_unreduced.items()):
Ak = A_rref_rows[k]
if j not in Ak:
# This row is already reduced wrt the new row but we need to
# bring it to the same scale as the new denominator. This step
# is not needed in sdm_irref.
for l, Akl in Ak.items():
Akl = Akl * Aij
if divisor is not None:
Akl = exquo(Akl, divisor)
Ak[l] = Akl
continue
Akj = Ak.pop(j)
Ai_nz = set(Ai)
Ak_nz = set(Ak)
for l in Ai_nz - Ak_nz:
Ak[l] = - Akj * Ai[l]
if divisor is not None:
Ak[l] = exquo(Ak[l], divisor)
# This loop is also not needed in sdm_irref.
for l in Ak_nz - Ai_nz:
Ak[l] = Aij * Ak[l]
if divisor is not None:
Ak[l] = exquo(Ak[l], divisor)
for l in Ai_nz & Ak_nz:
Akl = Aij * Ak[l] - Akj * Ai[l]
if Akl:
if divisor is not None:
Akl = exquo(Akl, divisor)
Ak[l] = Akl
else:
Ak.pop(l)
if not Ak:
col_to_row_unreduced.pop(pk)
col_to_row_reduced[pk] = k
i = len(A_rref_rows)
A_rref_rows.append(Ai)
if Ai:
col_to_row_unreduced[j] = i
else:
col_to_row_reduced[j] = i
# Update the denominator.
if not K.is_one(Aij):
if denom is None:
denom = Aij
else:
denom *= Aij
if divisor is not None:
denom = exquo(denom, divisor)
# Update the divisor.
divisor = denom
if denom is None:
denom = K.one
# Sort the rows by their leading column index.
col_to_row = {**col_to_row_reduced, **col_to_row_unreduced}
row_to_col = {i: j for j, i in col_to_row.items()}
A_rref_rows_col = [(row_to_col[i], Ai) for i, Ai in enumerate(A_rref_rows)]
pivots, A_rref = zip(*sorted(A_rref_rows_col))
pivots = list(pivots)
# Insert the pivot values
for i, Ai in enumerate(A_rref):
Ai[pivots[i]] = denom
A_rref_sdm = dict(enumerate(A_rref))
return A_rref_sdm, denom, pivots
def sdm_nullspace_from_rref(A, one, ncols, pivots, nonzero_cols):
"""Get nullspace basis vectors from a matrix *A* that is already in RREF.
*pivots* and *nonzero_cols* are as returned by ``sdm_irref``; the result is
a list of basis vectors (as dicts) and the list of nonpivot columns.
"""
nonpivots = sorted(set(range(ncols)) - set(pivots))
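# A small worked example (traced by hand from the loop below): with
# A == {0: {0: 1, 1: 2}} over QQ, ncols == 2, pivots == [0] and
# nonzero_cols == {1: {0}}, the result is K == [{1: 1, 0: -2}] and
# nonpivots == [1].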
K = []
for j in nonpivots:
Kj = {j:one}
for i in nonzero_cols.get(j, ()):
Kj[pivots[i]] = -A[i][j]
K.append(Kj)
return K, nonpivots
def sdm_particular_from_rref(A, ncols, pivots):
"""Get a particular solution from an augmented matrix *A* in RREF.
The last of the *ncols* columns is treated as the right-hand side; the
result maps each pivot column to its solution value.
"""
P = {}
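# A small worked example (traced by hand from the loop below): with
# A == {0: {0: 1, 2: 5}, 1: {1: 1, 2: 7}} over QQ, ncols == 3 and
# pivots == [0, 1], the result is P == {0: 5, 1: 7}.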
for i, j in enumerate(pivots):
Ain = A[i].get(ncols-1, None)
if Ain is not None:
P[j] = Ain / A[i][j]
return P
def sdm_berk(M, n, K):
"""
Berkowitz algorithm for computing the characteristic polynomial.
Explanation
===========
The Berkowitz algorithm is a division-free algorithm for computing the
characteristic polynomial of a matrix over any commutative ring using only
arithmetic in the coefficient ring. This implementation is for sparse
matrices represented in a dict-of-dicts format (like :class:`SDM`).
Examples
========
>>> from sympy import Matrix
>>> from sympy.polys.matrices.sdm import sdm_berk
>>> from sympy.polys.domains import ZZ
>>> M = {0: {0: ZZ(1), 1:ZZ(2)}, 1: {0:ZZ(3), 1:ZZ(4)}}
>>> sdm_berk(M, 2, ZZ)
{0: 1, 1: -5, 2: -2}
>>> Matrix([[1, 2], [3, 4]]).charpoly()
PurePoly(lambda**2 - 5*lambda - 2, lambda, domain='ZZ')
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.charpoly
The high-level interface to this function.
sympy.polys.matrices.dense.ddm_berk
The dense version of this function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Samuelson%E2%80%93Berkowitz_algorithm
"""
zero = K.zero
one = K.one
if n == 0:
return {0: one}
elif n == 1:
pdict = {0: one}
if M00 := M.get(0, {}).get(0, zero):
pdict[1] = -M00
return pdict
# M = [[a, R],
# [C, A]]
a, R, C, A = K.zero, {}, {}, defaultdict(dict)
for i, Mi in M.items():
for j, Mij in Mi.items():
if i and j:
A[i-1][j-1] = Mij
elif i:
C[i-1] = Mij
elif j:
R[j-1] = Mij
else:
a = Mij
# T = [ 1, 0, 0, 0, 0, ... ]
# [ -a, 1, 0, 0, 0, ... ]
# [ -R*C, -a, 1, 0, 0, ... ]
# [ -R*A*C, -R*C, -a, 1, 0, ... ]
# [-R*A^2*C, -R*A*C, -R*C, -a, 1, ... ]
# [ ... ]
# T is (n+1) x n
#
# In the sparse case we might have A^m*C = 0 for some m making T banded
# rather than triangular so we just compute the nonzero entries of the
# first column rather than constructing the matrix explicitly.
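# For the 2x2 example in the docstring (a=1, R=[2], C=[3], A=[[4]]) this
# first column is [1, -a, -R*C] == [1, -1, -6], and the recursive call on A
# gives q == {0: 1, 1: -4}.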
AnC = C
RC = sdm_dotvec(R, C, K)
Tvals = [one, -a, -RC]
for i in range(3, n+1):
AnC = sdm_matvecmul(A, AnC, K)
if not AnC:
break
RAnC = sdm_dotvec(R, AnC, K)
Tvals.append(-RAnC)
# Strip trailing zeros
while Tvals and not Tvals[-1]:
Tvals.pop()
q = sdm_berk(A, n-1, K)
# This would be the explicit multiplication T*q but we can do better:
#
# T = {}
# for i in range(n+1):
# Ti = {}
# for j in range(max(0, i-len(Tvals)+1), min(i+1, n)):
# Ti[j] = Tvals[i-j]
# T[i] = Ti
# Tq = sdm_matvecmul(T, q, K)
#
# In the sparse case q might be mostly zero. We know that T[i,j] is nonzero
# only for j <= i < j + len(Tvals), so Tq[i] can only be nonzero if q has a
# nonzero entry with index in the range (i - len(Tvals), i]. We exploit this
# potential banded structure and the potential sparsity of q to compute Tq
# more efficiently.
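# Continuing the 2x2 example: q == {0: 1, 1: -4} and the loop below produces
# Tq == {0: 1, 1: -5, 2: -2}, matching the charpoly in the docstring.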
Tvals = Tvals[::-1]
Tq = {}
for i in range(min(q), min(max(q)+len(Tvals), n+1)):
Ti = dict(enumerate(Tvals, i-len(Tvals)+1))
if Tqi := sdm_dotvec(Ti, q, K):
Tq[i] = Tqi
return Tq
SDM