Dataset schema (column, feature type, value statistics):

column        type            stats
language      stringclasses   1 value
repo          stringclasses   346 values
path          stringlengths   6 to 201
class_span    dict
source        stringlengths   21 to 2.38M
target        stringlengths   1 to 96
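Each row pairs the source text of a Python class whose name has been masked as ____ with the masked-out name in target; class_span records where the class sits inside its original file. A minimal sketch of consuming such rows, assuming the split is available as a local JSON Lines file (the "train.jsonl" file name is hypothetical):

import json

# Iterate the rows and re-insert the masked class name.
with open("train.jsonl", encoding="utf-8") as f:  # hypothetical file name
    for line in f:
        row = json.loads(line)
        restored = row["source"].replace("____", row["target"], 1)
        print(row["repo"], row["path"], row["class_span"])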
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/conditionally_patch_dependency/package.py
{ "start": 218, "end": 630 }
class ____(Package):
    """Package that conditionally requries a patched version of a dependency."""

    homepage = "http://www.example.com"
    url = "http://www.example.com/patch-a-dependency-1.0.tar.gz"

    version("1.0", md5="0123456789abcdef0123456789abcdef")

    variant("jasper", default=False)
    depends_on("libelf@0.8.10", patches=[patch("uuid.patch")], when="+jasper")
ConditionallyPatchDependency
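As the row above suggests, class_span appears to give character offsets of the class within the original file (an assumption; the offsets could also be byte-based). A sketch of recovering the ground-truth snippet from a local checkout of the repository, with a hypothetical checkout path:

from pathlib import Path

repo = Path("checkouts/spack__spack")  # hypothetical checkout location
rel = "var/spack/test_repos/spack_repo/builtin_mock/packages/conditionally_patch_dependency/package.py"
text = (repo / rel).read_text()
snippet = text[218:630]  # class_span offsets for this row
# If the offsets are 0-based character positions, the snippet should be the
# class definition that `target` names once ____ is filled in.
print(snippet[:80])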
python
ray-project__ray
python/ray/tune/error.py
{ "start": 816, "end": 1115 }
class ____(_SubCategoryTuneError):
    """Error that happens when waiting to get the next event to handle
    from RayTrialExecutor.

    Note: RayTaskError will be raised by itself and will not be using
    this category. This category is for everything else."""

    pass
_TuneNoNextExecutorEventError
python
pytorch__pytorch
test/torch_np/numpy_tests/linalg/test_linalg.py
{ "start": 3126, "end": 10256 }
class ____:
    def __init__(self, name, a, b, tags=None):
        """
        A bundle of arguments to be passed to a test case, with an identifying
        name, the operands a and b, and a set of tags to filter the tests
        """
        if tags is None:
            tags = set()
        assert_(isinstance(name, str))
        self.name = name
        self.a = a
        self.b = b
        self.tags = frozenset(tags)  # prevent shared tags

    def check(self, do):
        """
        Run the function `do` on this test case, expanding arguments
        """
        do(self.a, self.b, tags=self.tags)

    def __repr__(self):
        return f"<LinalgCase: {self.name}>"


def apply_tag(tag, cases):
    """
    Add the given tag (a string) to each of the cases (a list of LinalgCase
    objects)
    """
    assert tag in all_tags, "Invalid tag"
    for case in cases:
        case.tags = case.tags | {tag}
    return cases


#
# Base test cases
#

np.random.seed(1234)

CASES = []

# square test cases
CASES += apply_tag(
    "square",
    [
        LinalgCase(
            "single",
            array([[1.0, 2.0], [3.0, 4.0]], dtype=single),
            array([2.0, 1.0], dtype=single),
        ),
        LinalgCase(
            "double",
            array([[1.0, 2.0], [3.0, 4.0]], dtype=double),
            array([2.0, 1.0], dtype=double),
        ),
        LinalgCase(
            "double_2",
            array([[1.0, 2.0], [3.0, 4.0]], dtype=double),
            array([[2.0, 1.0, 4.0], [3.0, 4.0, 6.0]], dtype=double),
        ),
        LinalgCase(
            "csingle",
            array([[1.0 + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
            array([2.0 + 1j, 1.0 + 2j], dtype=csingle),
        ),
        LinalgCase(
            "cdouble",
            array([[1.0 + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
            array([2.0 + 1j, 1.0 + 2j], dtype=cdouble),
        ),
        LinalgCase(
            "cdouble_2",
            array([[1.0 + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
            array(
                [[2.0 + 1j, 1.0 + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble
            ),
        ),
        LinalgCase(
            "0x0",
            np.empty((0, 0), dtype=double),
            np.empty((0,), dtype=double),
            tags={"size-0"},
        ),
        LinalgCase("8x8", np.random.rand(8, 8), np.random.rand(8)),
        LinalgCase("1x1", np.random.rand(1, 1), np.random.rand(1)),
        LinalgCase("nonarray", [[1, 2], [3, 4]], [2, 1]),
    ],
)

# non-square test-cases
CASES += apply_tag(
    "nonsquare",
    [
        LinalgCase(
            "single_nsq_1",
            array([[1.0, 2.0, 3.0], [3.0, 4.0, 6.0]], dtype=single),
            array([2.0, 1.0], dtype=single),
        ),
        LinalgCase(
            "single_nsq_2",
            array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=single),
            array([2.0, 1.0, 3.0], dtype=single),
        ),
        LinalgCase(
            "double_nsq_1",
            array([[1.0, 2.0, 3.0], [3.0, 4.0, 6.0]], dtype=double),
            array([2.0, 1.0], dtype=double),
        ),
        LinalgCase(
            "double_nsq_2",
            array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=double),
            array([2.0, 1.0, 3.0], dtype=double),
        ),
        LinalgCase(
            "csingle_nsq_1",
            array(
                [[1.0 + 1j, 2.0 + 2j, 3.0 - 3j], [3.0 - 5j, 4.0 + 9j, 6.0 + 2j]],
                dtype=csingle,
            ),
            array([2.0 + 1j, 1.0 + 2j], dtype=csingle),
        ),
        LinalgCase(
            "csingle_nsq_2",
            array(
                [[1.0 + 1j, 2.0 + 2j], [3.0 - 3j, 4.0 - 9j], [5.0 - 4j, 6.0 + 8j]],
                dtype=csingle,
            ),
            array([2.0 + 1j, 1.0 + 2j, 3.0 - 3j], dtype=csingle),
        ),
        LinalgCase(
            "cdouble_nsq_1",
            array(
                [[1.0 + 1j, 2.0 + 2j, 3.0 - 3j], [3.0 - 5j, 4.0 + 9j, 6.0 + 2j]],
                dtype=cdouble,
            ),
            array([2.0 + 1j, 1.0 + 2j], dtype=cdouble),
        ),
        LinalgCase(
            "cdouble_nsq_2",
            array(
                [[1.0 + 1j, 2.0 + 2j], [3.0 - 3j, 4.0 - 9j], [5.0 - 4j, 6.0 + 8j]],
                dtype=cdouble,
            ),
            array([2.0 + 1j, 1.0 + 2j, 3.0 - 3j], dtype=cdouble),
        ),
        LinalgCase(
            "cdouble_nsq_1_2",
            array(
                [[1.0 + 1j, 2.0 + 2j, 3.0 - 3j], [3.0 - 5j, 4.0 + 9j, 6.0 + 2j]],
                dtype=cdouble,
            ),
            array([[2.0 + 1j, 1.0 + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble),
        ),
        LinalgCase(
            "cdouble_nsq_2_2",
            array(
                [[1.0 + 1j, 2.0 + 2j], [3.0 - 3j, 4.0 - 9j], [5.0 - 4j, 6.0 + 8j]],
                dtype=cdouble,
            ),
            array(
                [[2.0 + 1j, 1.0 + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]],
                dtype=cdouble,
            ),
        ),
        LinalgCase("8x11", np.random.rand(8, 11), np.random.rand(8)),
        LinalgCase("1x5", np.random.rand(1, 5), np.random.rand(1)),
        LinalgCase("5x1", np.random.rand(5, 1), np.random.rand(5)),
        LinalgCase("0x4", np.random.rand(0, 4), np.random.rand(0), tags={"size-0"}),
        LinalgCase("4x0", np.random.rand(4, 0), np.random.rand(4), tags={"size-0"}),
    ],
)

# hermitian test-cases
CASES += apply_tag(
    "hermitian",
    [
        LinalgCase("hsingle", array([[1.0, 2.0], [2.0, 1.0]], dtype=single), None),
        LinalgCase("hdouble", array([[1.0, 2.0], [2.0, 1.0]], dtype=double), None),
        LinalgCase(
            "hcsingle", array([[1.0, 2 + 3j], [2 - 3j, 1]], dtype=csingle), None
        ),
        LinalgCase(
            "hcdouble", array([[1.0, 2 + 3j], [2 - 3j, 1]], dtype=cdouble), None
        ),
        LinalgCase("hempty", np.empty((0, 0), dtype=double), None, tags={"size-0"}),
        LinalgCase("hnonarray", [[1, 2], [2, 1]], None),
        LinalgCase("matrix_b_only", array([[1.0, 2.0], [2.0, 1.0]]), None),
        LinalgCase("hmatrix_1x1", np.random.rand(1, 1), None),
    ],
)


#
# Gufunc test cases
#
def _make_generalized_cases():
    new_cases = []

    for case in CASES:
        if not isinstance(case.a, np.ndarray):
            continue

        a = np.stack([case.a, 2 * case.a, 3 * case.a])
        if case.b is None:
            b = None
        else:
            b = np.stack([case.b, 7 * case.b, 6 * case.b])
        new_case = LinalgCase(
            case.name + "_tile3", a, b, tags=case.tags | {"generalized"}
        )
        new_cases.append(new_case)

        a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
        if case.b is None:
            b = None
        else:
            b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
        new_case = LinalgCase(
            case.name + "_tile213", a, b, tags=case.tags | {"generalized"}
        )
        new_cases.append(new_case)

    return new_cases


CASES += _make_generalized_cases()


#
# Test different routines against the above cases
#
LinalgCase
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_doi.py
{ "start": 1571, "end": 3865 }
class ____(ColumnMapExpectation):
    """Expect column values to be valid DOI format."""

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                "well_formed_doi": [
                    "10.1430/8105",
                    "10.1392/BC1.0",
                    "10.3207/2959859860",
                    "10.1038/nphys1170",
                    "10.1594/PANGAEA.726855",
                ],
                "malformed_doi": [
                    "",
                    "11.1038/nphys1170",
                    "10.103/nphys1170",
                    "10.1038.nphys1170",
                    "This is not a valid DOI",
                ],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "well_formed_doi"},
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "malformed_doi"},
                    "out": {"success": False},
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.valid_doi"

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",
        "tags": ["experimental", "hackathon", "typed-entities"],
        "contributors": [
            "@voidforall",
        ],
    }


if __name__ == "__main__":
    ExpectColumnValuesToBeValidDoi().print_diagnostic_checklist()
ExpectColumnValuesToBeValidDoi
python
streamlit__streamlit
lib/streamlit/runtime/caching/cache_resource_api.py
{ "start": 18084, "end": 20856 }
class ____(Cache[R]):
    """Manages cached values for a single st.cache_resource function."""

    def __init__(
        self,
        key: str,
        max_entries: float,
        ttl_seconds: float,
        validate: ValidateFunc | None,
        display_name: str,
    ) -> None:
        super().__init__()
        self.key = key
        self.display_name = display_name
        self._mem_cache: TTLCache[str, CachedResult[R]] = TTLCache(
            maxsize=max_entries, ttl=ttl_seconds, timer=cache_utils.TTLCACHE_TIMER
        )
        self._mem_cache_lock = threading.Lock()
        self.validate = validate

    @property
    def max_entries(self) -> float:
        return self._mem_cache.maxsize

    @property
    def ttl_seconds(self) -> float:
        return self._mem_cache.ttl

    def read_result(self, key: str) -> CachedResult[R]:
        """Read a value and associated messages from the cache.

        Raise `CacheKeyNotFoundError` if the value doesn't exist.
        """
        with self._mem_cache_lock:
            if key not in self._mem_cache:
                # key does not exist in cache.
                raise CacheKeyNotFoundError()

            result = self._mem_cache[key]

            if self.validate is not None and not self.validate(result.value):
                # Validate failed: delete the entry and raise an error.
                del self._mem_cache[key]
                raise CacheKeyNotFoundError()

            return result

    @gather_metrics("_cache_resource_object")
    def write_result(self, key: str, value: R, messages: list[MsgData]) -> None:
        """Write a value and associated messages to the cache."""
        main_id = st._main.id
        sidebar_id = st.sidebar.id

        with self._mem_cache_lock:
            self._mem_cache[key] = CachedResult(value, messages, main_id, sidebar_id)

    def _clear(self, key: str | None = None) -> None:
        with self._mem_cache_lock:
            if key is None:
                self._mem_cache.clear()
            elif key in self._mem_cache:
                del self._mem_cache[key]

    def get_stats(self) -> list[CacheStat]:
        # Shallow clone our cache. Computing item sizes is potentially
        # expensive, and we want to minimize the time we spend holding
        # the lock.
        with self._mem_cache_lock:
            cache_entries = list(self._mem_cache.values())

        # Lazy-load vendored package to prevent import of numpy
        from streamlit.vendor.pympler.asizeof import asizeof

        return [
            CacheStat(
                category_name="st_cache_resource",
                cache_name=self.display_name,
                byte_length=asizeof(entry),
            )
            for entry in cache_entries
        ]
ResourceCache
python
scipy__scipy
scipy/io/_idl.py
{ "start": 4208, "end": 4339 }
class ____:
    '''Class used to define pointers'''

    def __init__(self, index):
        self.index = index
        return
Pointer
python
protocolbuffers__protobuf
python/google/protobuf/internal/descriptor_pool_test.py
{ "start": 31311, "end": 32867 }
class ____(DescriptorPoolTestBase, unittest.TestCase):

  def setUp(self):
    self.pool = descriptor_pool.Default()
    self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
        factory_test1_pb2.DESCRIPTOR.serialized_pb)
    self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
        factory_test2_pb2.DESCRIPTOR.serialized_pb)

  def testFindMethods(self):
    self.assertIs(
        self.pool.FindFileByName('google/protobuf/unittest.proto'),
        unittest_pb2.DESCRIPTOR)
    self.assertIs(
        self.pool.FindMessageTypeByName('proto2_unittest.TestAllTypes'),
        unittest_pb2.TestAllTypes.DESCRIPTOR)
    self.assertIs(
        self.pool.FindFieldByName(
            'proto2_unittest.TestAllTypes.optional_int32'),
        unittest_pb2.TestAllTypes.DESCRIPTOR.fields_by_name['optional_int32'])
    self.assertIs(
        self.pool.FindEnumTypeByName('proto2_unittest.ForeignEnum'),
        unittest_pb2.ForeignEnum.DESCRIPTOR)
    self.assertIs(
        self.pool.FindExtensionByName(
            'proto2_unittest.optional_int32_extension'),
        unittest_pb2.DESCRIPTOR.extensions_by_name['optional_int32_extension'])
    self.assertIs(
        self.pool.FindOneofByName('proto2_unittest.TestAllTypes.oneof_field'),
        unittest_pb2.TestAllTypes.DESCRIPTOR.oneofs_by_name['oneof_field'])
    self.assertIs(
        self.pool.FindServiceByName('proto2_unittest.TestService'),
        unittest_pb2.DESCRIPTOR.services_by_name['TestService'])


@testing_refleaks.TestCase
DefaultDescriptorPoolTest
python
automl__auto-sklearn
test/test_pipeline/components/data_preprocessing/test_minority_coalescence.py
{ "start": 339, "end": 1045 }
class ____(unittest.TestCase):
    def test_data_type_consistency(self):
        X = np.random.randint(3, 6, (3, 4))
        Y = MinorityCoalescer().fit_transform(X)
        self.assertFalse(scipy.sparse.issparse(Y))

        X = scipy.sparse.csc_matrix(
            ([3, 6, 4, 5], ([0, 1, 2, 1], [3, 2, 1, 0])), shape=(3, 4)
        )
        Y = MinorityCoalescer().fit_transform(X)
        self.assertTrue(scipy.sparse.issparse(Y))

    def test_no_coalescence(self):
        X = np.random.randint(0, 255, (3, 4))
        Y = NoCoalescence().fit_transform(X)
        np.testing.assert_array_almost_equal(Y, X)
        # Assert no copies were made
        self.assertEqual(id(X), id(Y))
MinorityCoalescerTest
python
kubernetes-client__python
kubernetes/client/models/v1_binding.py
{ "start": 383, "end": 6673 }
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'target': 'V1ObjectReference'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'target': 'target'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, target=None, local_vars_configuration=None):  # noqa: E501
        """V1Binding - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._target = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        self.target = target

    @property
    def api_version(self):
        """Gets the api_version of this V1Binding.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1Binding.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1Binding.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1Binding.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1Binding.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1Binding.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1Binding.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1Binding.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1Binding.  # noqa: E501

        :return: The metadata of this V1Binding.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1Binding.

        :param metadata: The metadata of this V1Binding.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def target(self):
        """Gets the target of this V1Binding.  # noqa: E501

        :return: The target of this V1Binding.  # noqa: E501
        :rtype: V1ObjectReference
        """
        return self._target

    @target.setter
    def target(self, target):
        """Sets the target of this V1Binding.

        :param target: The target of this V1Binding.  # noqa: E501
        :type: V1ObjectReference
        """
        if self.local_vars_configuration.client_side_validation and target is None:  # noqa: E501
            raise ValueError("Invalid value for `target`, must not be `None`")  # noqa: E501

        self._target = target

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1Binding):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1Binding):
            return True

        return self.to_dict() != other.to_dict()
V1Binding
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/never2.py
{ "start": 522, "end": 657 }
class ____(Generic[T_contra]):
    def __init__(self, x: T_contra): ...


def func3(x: U) -> U | ClassC[Never]:
    return ClassC(x)
ClassC
python
getsentry__sentry
src/sentry/plugins/sentry_useragents/apps.py
{ "start": 36, "end": 344 }
class ____(AppConfig):
    name = "sentry.plugins.sentry_useragents"

    def ready(self) -> None:
        from sentry.plugins.base import register

        from .models import BrowserPlugin, DevicePlugin, OsPlugin

        register(BrowserPlugin)
        register(OsPlugin)
        register(DevicePlugin)
Config
python
dask__distributed
distributed/diagnostics/tests/test_nanny_plugin.py
{ "start": 6910, "end": 7988 }
class ____(NannyPlugin):
    def teardown(self, nanny):
        raise RuntimeError("test error")


@gen_cluster(client=True, nthreads=[("", 1)], Worker=Nanny)
async def test_unregister_nanny_plugin_with_broken_teardown_raises(c, s, a):
    await c.register_plugin(BrokenTeardownPlugin(), name="TestPlugin1")
    with pytest.raises(RuntimeError, match="test error"):
        with captured_logger("distributed.nanny", level=logging.ERROR) as caplog:
            await c.unregister_worker_plugin("TestPlugin1", nanny=True)
    logs = caplog.getvalue()
    assert "TestPlugin1 failed to teardown" in logs
    assert "test error" in logs


@gen_cluster(client=True, nthreads=[])
async def test_nanny_plugin_with_broken_teardown_logs_on_close(c, s):
    await c.register_plugin(BrokenTeardownPlugin(), name="TestPlugin1")
    with captured_logger("distributed.nanny", level=logging.ERROR) as caplog:
        async with Nanny(s.address):
            pass
    logs = caplog.getvalue()
    assert "TestPlugin1 failed to teardown" in logs
    assert "test error" in logs
BrokenTeardownPlugin
python
joerick__pyinstrument
test/low_level/test_frame_info.py
{ "start": 154, "end": 2671 }
class ____:
    def get_frame_info_for_a_method(self, getter_function, del_local):
        if del_local:
            del self
        frame = inspect.currentframe()
        assert frame
        return getter_function(frame)

    def get_frame_info_with_cell_variable(self, getter_function, del_local):
        def an_inner_function():
            # reference self to make it a cell variable
            if self:
                pass

        if del_local:
            del self
        frame = inspect.currentframe()
        assert frame
        return getter_function(frame)

    @classmethod
    def get_frame_info_for_a_class_method(cls, getter_function, del_local):
        if del_local:
            del cls
        frame = inspect.currentframe()
        assert frame
        return getter_function(frame)

    @classmethod
    def get_frame_info_for_a_class_method_where_cls_is_reassigned(cls, getter_function, del_local):
        cls = 1
        if del_local:
            del cls
        frame = inspect.currentframe()
        assert frame
        return getter_function(frame)


def test_frame_info():
    frame = inspect.currentframe()
    assert frame
    assert stat_profile_c.get_frame_info(frame) == stat_profile_python.get_frame_info(frame)


def test_frame_info_hide_true():
    __tracebackhide__ = True
    frame = inspect.currentframe()
    assert frame
    assert stat_profile_c.get_frame_info(frame) == stat_profile_python.get_frame_info(frame)


def test_frame_info_hide_false():
    """to avoid calling FastToLocals on the c side, __tracebackhide__ = True
    and __tracebackhide__ = False are treated the same. All that matters is
    that the var is defined
    """
    __tracebackhide__ = False
    frame = inspect.currentframe()
    assert frame
    assert stat_profile_c.get_frame_info(frame) == stat_profile_python.get_frame_info(frame)


instance = AClass()


@pytest.mark.parametrize(
    "test_function",
    [
        instance.get_frame_info_for_a_method,
        AClass.get_frame_info_for_a_class_method,
        instance.get_frame_info_with_cell_variable,
        AClass.get_frame_info_for_a_class_method_where_cls_is_reassigned,
    ],
)
@pytest.mark.parametrize("del_local", [True, False])
def test_frame_info_with_classes(test_function, del_local):
    c_frame_info = test_function(stat_profile_c.get_frame_info, del_local=del_local)
    py_frame_info = test_function(stat_profile_python.get_frame_info, del_local=del_local)

    assert c_frame_info == py_frame_info
AClass
python
PrefectHQ__prefect
tests/server/orchestration/api/test_flow_runs.py
{ "start": 87418, "end": 89923 }
class ____:
    @pytest.fixture
    def url(self) -> str:
        return "/flow_runs/lateness"

    @pytest.fixture()
    async def late_flow_runs(self, session, flow):
        flow_runs = []
        for i in range(5):
            one_minute_ago = now("UTC") - datetime.timedelta(minutes=1)
            flow_run = await models.flow_runs.create_flow_run(
                session=session,
                flow_run=schemas.core.FlowRun(
                    flow_id=flow.id,
                    state=schemas.states.Scheduled(scheduled_time=one_minute_ago),
                ),
            )
            await models.flow_runs.set_flow_run_state(
                session=session,
                flow_run_id=flow_run.id,
                state=schemas.states.Running(
                    timestamp=one_minute_ago + datetime.timedelta(seconds=i)
                ),
            )
            flow_runs.append(flow_run)

        await session.commit()
        return flow_runs

    async def test_average_lateness_no_flow_runs(self, url: str, client):
        response = await client.post(url)
        assert response.status_code == 200, response.text

        # If no flow runs match the filter, the average lateness is null.
        assert response.content == b"null"

    async def test_average_lateness(
        self,
        url: str,
        client,
        late_flow_runs,
    ):
        response = await client.post(url)
        assert response.status_code == 200, response.text

        # The flow runs in `late_flow_runs` are created in a loop and the
        # lateness is the iteration count of the loop. There are 5 flow runs,
        # so avg(0 + 1 + 2 + 3 + 4) == 2.0
        assert response.content == b"2.0"

    async def test_supports_filters(
        self,
        url: str,
        client,
        late_flow_runs,
    ):
        flow_run_ids = [flow_run.id for flow_run in late_flow_runs[-2:]]
        flow_run_filter = schemas.filters.FlowRunFilter(
            id=schemas.filters.FlowRunFilterId(any_=flow_run_ids)
        )
        response = await client.post(
            url, json={"flow_runs": flow_run_filter.model_dump(mode="json")}
        )
        assert response.status_code == 200, response.text

        # The flow runs in `late_flow_runs` are created in a loop and the
        # lateness is the iteration count of the loop. We're only looking at
        # the last two flow runs in that list so avg(3 + 4) == 3.5
        assert response.content == b"3.5"
TestFlowRunLateness
python
pytorch__pytorch
torch/fx/passes/net_min_base.py
{ "start": 1184, "end": 2297 }
class ____:
    """
    Args:
    `accumulate_error`: Instead of using a's input for both converted module
        to verify, use the previous outputs of each converted module as input
        to accumulate the errors.

    `traverse_method`: "sequential" or "binary" or "accumulate"
        Determine the way of traverse the nodes in FX module.

    `find_all`: Minimizer will go through the entire model and return all
        problematic nodes.

    `return_intermediate`: If true, when using `run_nodes()` function to run the
        model, intermediate results of all the ops will be returned as output.

    `all_outputs`: If true, when using `_run_and_compare()` function, all the
        output nodes in the subgraph will be used for comparison.
    """

    accumulate_error: bool = False
    traverse_method: str = "sequential"
    find_all: bool = False
    return_intermediate: bool = False
    all_outputs: bool = False

    def __str__(self):
        settings_str = "FX Minimizer Settings:\n"

        for k, v in vars(self).items():
            settings_str += f"\t{k}: {v}\n"

        return settings_str
_MinimizerSettingBase
python
getsentry__sentry
tests/apidocs/endpoints/teams/test_projects.py
{ "start": 136, "end": 807 }
class ____(APIDocsTestCase):
    def setUp(self) -> None:
        team = self.create_team(organization=self.organization)
        self.create_project(name="foo", organization=self.organization, teams=[team])
        self.url = reverse(
            "sentry-api-0-team-project-index",
            kwargs={
                "organization_id_or_slug": self.organization.slug,
                "team_id_or_slug": team.slug,
            },
        )

        self.login_as(user=self.user)

    def test_get(self) -> None:
        response = self.client.get(self.url)
        request = RequestFactory().get(self.url)

        self.validate_schema(request, response)
TeamsProjectsDocs
python
realpython__materials
python-unittest/skip_tests.py
{ "start": 29, "end": 684 }
class ____(unittest.TestCase):
    @unittest.skip("Unconditionally skipped test")
    def test_unimportant(self):
        self.fail("The test should be skipped")

    @unittest.skipIf(sys.version_info < (3, 12), "Requires Python >= 3.12")
    def test_using_calendar_constants(self):
        import calendar

        self.assertEqual(calendar.Month(10), calendar.OCTOBER)

    @unittest.skipUnless(sys.platform.startswith("win"), "Requires Windows")
    def test_windows_support(self):
        from ctypes import WinDLL, windll

        self.assertIsInstance(windll.kernel32, WinDLL)


if __name__ == "__main__":
    unittest.main(verbosity=2)
SkipTestExample
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/batch_test.py
{ "start": 2032, "end": 11937 }
class ____(test_base.DatasetTestBase, parameterized.TestCase):

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              count=[0, 28],
              batch_size=[14, 15],
              drop_remainder=[True, False],
              num_parallel_calls=[None, 1, 2, 4])))
  def testBasic(self, count, batch_size, drop_remainder, num_parallel_calls):
    """Tests the batch dataset logic for various input configurations.

    Args:
      count: the number of input elements
      batch_size: the batch size
      drop_remainder: whether a smaller batch size should be produced if batch
        size does not divide number of inputs evenly
      num_parallel_calls: the number batches to process asynchronously in
        parallel
    """
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count) -> BatchDataset(batch_size).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
        _map_fn).repeat(count).batch(batch_size, drop_remainder,
                                     num_parallel_calls)
    get_next = self.getNext(dataset)

    if drop_remainder:
      dim0 = batch_size
    else:
      dim0 = None
    self.assertEqual(
        [ts.as_list() for ts in nest.flatten(
            dataset_ops.get_legacy_output_shapes(dataset))],
        [[dim0] + list(c.shape[1:]) for c in components])

    num_full_batches = (count * 7) // batch_size
    for i in range(num_full_batches):
      result = self.evaluate(get_next())
      for component, result_component in zip(components, result):
        for j in range(batch_size):
          self.assertAllEqual(component[(i * batch_size + j) % 7]**2,
                              result_component[j])

    if not drop_remainder and (count * 7) % batch_size > 0:
      result = self.evaluate(get_next())
      for component, result_component in zip(components, result):
        for j in range((count * 7) % batch_size):
          self.assertAllEqual(
              component[(num_full_batches * batch_size + j) % 7]**2,
              result_component[j])
    with self.assertRaises(errors.OutOfRangeError):
      result = self.evaluate(get_next())

  @combinations.generate(test_base.default_test_combinations())
  def testInvalidBatchSize(self):
    with self.assertRaises(errors.InvalidArgumentError):
      dataset = (dataset_ops.Dataset.range(10).batch(0))
      self.evaluate(dataset._variant_tensor)

  @combinations.generate(test_base.default_test_combinations())
  def testDataset(self):

    def map_fn(i):
      return dataset_ops.Dataset.from_tensors(i)

    dataset = dataset_ops.Dataset.range(10).map(map_fn).batch(5)
    dataset = dataset.map(lambda x: x)
    dataset = dataset.unbatch().flat_map(lambda x: x)
    self.assertDatasetProduces(dataset, expected_output=range(10))

  def testSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5)
    expected_output = [
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
            dense_shape=[5, 1]) for i in range(2)
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testSparseWithDifferentDenseShapes(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=array_ops.expand_dims(
              math_ops.range(i, dtype=dtypes.int64), 1),
          values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
          dense_shape=[i])

    dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5)
    expected_output = []
    for i in range(2):
      expected_indices = []
      expected_outputs = []
      for j in range(5):
        for k in range(i * 5 + j):
          expected_indices.append([j, k])
          expected_outputs.append(i * 5 + j)
      expected_output.append(
          sparse_tensor.SparseTensorValue(
              indices=expected_indices,
              values=expected_outputs,
              dense_shape=[5, (i + 1) * 5 - 1]))
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testSparseNested(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5).batch(2)
    expected_output = [
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [0, 4, 0],
                     [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0], [1, 4, 0]],
            values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
            dense_shape=[2, 5, 1])
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testShapeError(self):

    def generator():
      yield [1.0, 2.0, 3.0]
      yield [4.0, 5.0, 6.0]
      yield [7.0, 8.0, 9.0, 10.0]

    dataset = (
        dataset_ops.Dataset.from_generator(
            generator, dtypes.float32, output_shapes=[None]).batch(3))
    self.assertDatasetProduces(
        dataset,
        expected_error=(
            errors.InvalidArgumentError,
            r"Cannot batch tensors with different shapes in component 0. First "
            r"element had shape \[3\] and element 2 had shape \[4\]."))

  @combinations.generate(test_base.default_test_combinations())
  def testRagged(self):

    def _ragged(i):
      return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])

    dataset = dataset_ops.Dataset.range(10).map(_ragged).batch(5)
    expected_output = [
        ragged_factory_ops.constant([[[0]], [[1]], [[2]], [[3]], [[4]]]),
        ragged_factory_ops.constant([[[5]], [[6]], [[7]], [[8]], [[9]]])
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testRaggedWithDifferentShapes(self):
    dataset = dataset_ops.Dataset.range(10).map(ragged_math_ops.range).batch(5)
    expected_output = [
        ragged_concat_ops.stack([ragged_math_ops.range(i) for i in range(5)]),
        ragged_concat_ops.stack(
            [ragged_math_ops.range(i) for i in range(5, 10)])
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testRaggedNested(self):

    def _ragged(i):
      return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])

    dataset = dataset_ops.Dataset.range(10).map(_ragged).batch(5).batch(2)
    expected_output = [
        ragged_factory_ops.constant([[[[0]], [[1]], [[2]], [[3]], [[4]]],
                                     [[[5]], [[6]], [[7]], [[8]], [[9]]]])
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testNoneComponent(self):
    dataset = dataset_ops.Dataset.range(10).map(lambda x: (x, None)).batch(
        10).map(lambda x, y: x)
    self.assertDatasetProduces(dataset, expected_output=[list(range(10))])

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              local_determinism=[None, True, False],
              global_determinism=[True, False])))
  def testDeterminismConfiguration(self, local_determinism, global_determinism):
    expect_determinism = local_determinism or (local_determinism is None and
                                               global_determinism)
    elements = list(range(100))

    def dataset_fn(delay_ms):

      def sleep(x):
        time.sleep(delay_ms / 1000)
        return x

      def map_function(x):
        if math_ops.equal(x, 0):
          return script_ops.py_func(sleep, [x], x.dtype)
        else:
          return x

      dataset = dataset_ops.Dataset.from_tensor_slices(elements)
      dataset = dataset.map(
          map_function, num_parallel_calls=2, deterministic=local_determinism)
      dataset = dataset.batch(
          batch_size=6, num_parallel_calls=2,
          deterministic=local_determinism).unbatch()
      opts = options_lib.Options()
      opts.deterministic = global_determinism
      dataset = dataset.with_options(opts)
      return dataset

    self.checkDeterminism(dataset_fn, expect_determinism, elements)

  @combinations.generate(test_base.eager_only_combinations())
  def testCheckpointLargeBatches(self):
    # Batches of size 512M
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.ones((64, 1024, 1024), dtype=dtypes.float32)).repeat()
    dataset = dataset.batch(2, num_parallel_calls=5)
    iterator = iter(dataset)
    next(iterator)  # request an element to fill the buffer
    ckpt = trackable_utils.Checkpoint(iterator=iterator)
    manager = checkpoint_management.CheckpointManager(
        ckpt, self.get_temp_dir(), max_to_keep=1)
    manager.save()

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 1])))
  def testName(self, num_parallel_calls):
    dataset = dataset_ops.Dataset.range(5).batch(
        5, num_parallel_calls=num_parallel_calls, name="batch")
    self.assertDatasetProduces(dataset, [list(range(5))])
BatchTest
python
allegroai__clearml
clearml/utilities/gpu/pyrsmi.py
{ "start": 3041, "end": 5095 }
class ____(c_int):
    RSMI_STATUS_SUCCESS = 0x0
    RSMI_STATUS_INVALID_ARGS = 0x1
    RSMI_STATUS_NOT_SUPPORTED = 0x2
    RSMI_STATUS_FILE_ERROR = 0x3
    RSMI_STATUS_PERMISSION = 0x4
    RSMI_STATUS_OUT_OF_RESOURCES = 0x5
    RSMI_STATUS_INTERNAL_EXCEPTION = 0x6
    RSMI_STATUS_INPUT_OUT_OF_BOUNDS = 0x7
    RSMI_STATUS_INIT_ERROR = 0x8
    RSMI_INITIALIZATION_ERROR = RSMI_STATUS_INIT_ERROR
    RSMI_STATUS_NOT_YET_IMPLEMENTED = 0x9
    RSMI_STATUS_NOT_FOUND = 0xA
    RSMI_STATUS_INSUFFICIENT_SIZE = 0xB
    RSMI_STATUS_INTERRUPT = 0xC
    RSMI_STATUS_UNEXPECTED_SIZE = 0xD
    RSMI_STATUS_NO_DATA = 0xE
    RSMI_STATUS_UNKNOWN_ERROR = 0xFFFFFFFF


# Dictionary of rsmi ret codes and it's verbose output
rsmi_status_verbose_err_out = {
    rsmi_status_t.RSMI_STATUS_SUCCESS: 'Operation was successful',
    rsmi_status_t.RSMI_STATUS_INVALID_ARGS: 'Invalid arguments provided',
    rsmi_status_t.RSMI_STATUS_NOT_SUPPORTED: 'Not supported on the given system',
    rsmi_status_t.RSMI_STATUS_FILE_ERROR: 'Problem accessing a file',
    rsmi_status_t.RSMI_STATUS_PERMISSION: 'Permission denied',
    rsmi_status_t.RSMI_STATUS_OUT_OF_RESOURCES: 'Unable to acquire memory or other resource',
    rsmi_status_t.RSMI_STATUS_INTERNAL_EXCEPTION: 'An internal exception was caught',
    rsmi_status_t.RSMI_STATUS_INPUT_OUT_OF_BOUNDS: 'Provided input is out of allowable or safe range',
    rsmi_status_t.RSMI_INITIALIZATION_ERROR: 'Error occured during rsmi initialization',
    rsmi_status_t.RSMI_STATUS_NOT_YET_IMPLEMENTED: 'Requested function is not implemented on this setup',
    rsmi_status_t.RSMI_STATUS_NOT_FOUND: 'Item searched for but not found',
    rsmi_status_t.RSMI_STATUS_INSUFFICIENT_SIZE: 'Insufficient resources available',
    rsmi_status_t.RSMI_STATUS_INTERRUPT: 'Interrupt occured during execution',
    rsmi_status_t.RSMI_STATUS_UNEXPECTED_SIZE: 'Unexpected amount of data read',
    rsmi_status_t.RSMI_STATUS_NO_DATA: 'No data found for the given input',
    rsmi_status_t.RSMI_STATUS_UNKNOWN_ERROR: 'Unknown error occured'
}
rsmi_status_t
python
psf__black
tests/data/cases/trailing_comma_optional_parens1.py
{ "start": 581, "end": 1127 }
class ____:
    def b(self):
        if self.connection.mysql_is_mariadb and (
            10,
            4,
            3,
        ) < self.connection.mysql_version < (10, 5, 2):
            pass


# output


if e1234123412341234.winerror not in (
    _winapi.ERROR_SEM_TIMEOUT,
    _winapi.ERROR_PIPE_BUSY,
) or _check_timeout(t):
    pass

if x:
    if y:
        new_id = (
            max(
                Vegetable.objects.order_by("-id")[0].id,
                Mineral.objects.order_by("-id")[0].id,
            )
            + 1
        )
A
python
pytorch__pytorch
torch/_subclasses/fake_tensor.py
{ "start": 45280, "end": 128572 }
class ____(TorchDispatchMode): cache: dict[_DispatchCacheKey, _DispatchCacheEntry] = {} cache_hits: int = 0 cache_misses: int = 0 cache_bypasses: dict[str, int] = defaultdict(int) # Every time you retrace using the same fake tensor mode, you should # advance the epoch so we don't reuse unbacked memos epoch: int = 0 in_kernel_invocation: bool = False static_shapes: bool shape_env: Optional[ShapeEnv] _stack: Optional[str] allow_meta: bool # NestedTensor uses a tensor_id_counter to uniquely identify offsets. # This counter is incremented when an offsets is used to create an NJT # for the first time. To avoid mutating eager state if we construct NJT # during tracing, we maintain a separate counter on the FakeTensorMode. # The initial count is set to the current eager tensor_id_counter value # upon initialization, and every time you retrace using the same fake tensor # mode, you should reset the counter to the initial count. nt_tensor_id_counter: int = -1 nt_tensor_id_initial_count: int = -1 def __init__( self, *, allow_fallback_kernels: bool = True, allow_non_fake_inputs: bool = False, shape_env: Optional[ShapeEnv] = None, static_shapes: Optional[bool] = None, # TODO: This is a temporary measure, see # https://github.com/pytorch/pytorch/pull/126245#discussion_r1604185748 # We're currently solely using this to impede population of # item_memo for 0d scalar tensor inputs when export, because this # causes things that used to be deferred runtime asserts to turn into # guards, and then the guards are just lost. We can potentially fix # this by ensuring guards also get put in the graph, but this is # pending a rework of how deferred runtime asserts in export. Once # that's done, we can remove this. export: bool = False, ) -> None: log.debug("create_mode 0x%x", id(self)) super().__init__() self.allow_fallback_kernels = allow_fallback_kernels import torch._dynamo.config import torch._functorch.config self.propagate_real_tensors = ( torch._functorch.config.fake_tensor_propagate_real_tensors ) self.fake_tensor_converter = FakeTensorConverter( copy_data=self.propagate_real_tensors, export=export, ) if static_shapes is not None: self.static_shapes = static_shapes else: self.static_shapes = shape_env is None # This is temporarily patched to True in Dynamo to grandfather in some # places where we unconditionally allow scalar outputs, TO BE REMOVED self.allow_scalar_outputs = False self._allow_unsafe_data_ptr_access = ( torch._functorch.config.fake_tensor_allow_unsafe_data_ptr_access ) self.allow_meta = torch._functorch.config.fake_tensor_allow_meta self.cache_enabled: bool = ( torch._dynamo.config.fake_tensor_cache_enabled and not self.propagate_real_tensors ) self.cache_crosscheck_enabled = ( torch._dynamo.config.fake_tensor_cache_crosscheck_enabled ) # A flag that controls, whether we want to invoke ops on mix of # real weights/global variables and fake inputs self.allow_non_fake_inputs = allow_non_fake_inputs # [in_kernel_invocation] # when FakeTensor is invoked in user code, .device should return # the fake_device of the tensor so that code such as as `if x.is_cuda` # or torch.zeros([10, 10], device=x.device) continues to execute as if # the FakeTensor were real. However, within kernel execution, we return # the `Meta` device because all computation within the kernels should # behave as if the Tensors are on meta devices. Kernels should allocate # new tensors on meta devices, and checks like `is_meta` should return true. 
# within python refs, we always return the real device by defining # the device property self.in_kernel_invocation = False # True if we enter'ed and actually enabled fake tensor mode, # false if it was a no-op. Not thread safe but neither is # in_kernel_invocation # If another fake mode was already active when we enter, we also stash it here. # That way when we exit, we know to re-enable the previous fake mode. self.enter_stack: list[ tuple[bool, Optional[TorchDispatchMode], Optional[bool]] ] = [] self.shape_env = shape_env self._stack_trace = traceback.extract_stack() self._stack = None # Indicates to our torch_dispatch dispatching infra that # this is an "infra" mode with lower dispatching precedence. self._mode_key = torch._C._TorchDispatchModeKey.FAKE import torch.nested._internal.nested_tensor self.nt_tensor_id_initial_count = ( torch.nested._internal.nested_tensor._tensor_id_counter ) self.nt_tensor_id_counter = self.nt_tensor_id_initial_count def reset_nt_tensor_id_counter(self) -> None: self.nt_tensor_id_counter = self.nt_tensor_id_initial_count # Typically, there is only one fake tensor mode and you test for it by # doing an isinstance test. However, in some situations, there might be # TWO fake tensor modes. The canonical example of this is exporting # a fake model: there is an outer fake mode created by the user, and # an inner fake mode created by Dynamo. The two phase process is required # because the outer fake mode typically won't have a ShapeEnv, even if # the user is interested in exporting with dynamic shapes (so the inner # fake mode will actually have a ShapeEnv and swap in symbolic sizes.) # # In this case, it's insufficient to test only one FakeTensor: you need # to distinguish between our fake tensor and other fake tensors. That's # what this function does. def is_our_fake(self, t: object) -> TypeGuard[FakeTensor]: return isinstance(t, FakeTensor) and t.fake_mode is self # If we should avoid device init. This changes the behavior of various APIs: # - We avoid constant-prop on Tensors with ops that move them to another device # - We change the torch.tensor ctor contract to never materialize # tensors on device # (see NOTE: [torch.tensor, lift_fresh, and device movement]) @property def avoid_device_init(self) -> bool: if torch.xpu._is_compiled(): assert not torch.cuda._is_compiled() return not torch.xpu.is_available() return not ( torch.cuda.is_available() or (hasattr(torch, "hpu") and torch.hpu.is_available()) ) @property def stack(self) -> str: if self._stack is None: self._stack = "".join(traceback.format_list(self._stack_trace)) return self._stack @count # pyrefly: ignore [bad-override] def __torch_dispatch__( self, func: OpOverload, types: Sequence[type], args: Sequence[object] = (), kwargs: Mapping[str, object] = immutable_dict(), ) -> object: # FakeTensorMode should not be set when we're inside of it. 
assert ( torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE) is None ), func try: return self.dispatch(func, types, args, kwargs) except TypeError: log.exception("fake tensor raised TypeError") raise # No-op if FakeTensorMode is already in use def __enter__(self) -> Self: import torch.nested._internal.nested_tensor prev_only_lift_cpu_tensors = None if self.avoid_device_init: # See NOTE: [torch.tensor, lift_fresh, and device movement] prev_only_lift_cpu_tensors = torch._C._only_lift_cpu_tensors() torch._C._set_only_lift_cpu_tensors(True) # In the case of CPU-only build or cuda device unavailable, # we patch the cuda device guard to use NoOpDeviceGuardImpl. # This enables us to trace over cuda kernels under FakeTensorMode. torch._C._ensureCUDADeviceGuardSet() maybe_prev_fake_mode = torch._C._unset_dispatch_mode(self._mode_key) if self is not maybe_prev_fake_mode: self.enter_stack.append( (True, maybe_prev_fake_mode, prev_only_lift_cpu_tensors) ) return super().__enter__() else: # no-op (still need to re-set the fake mode though since we unset it) torch._C._set_dispatch_mode(self) self.enter_stack.append((False, None, prev_only_lift_cpu_tensors)) return self def __exit__( self, a: Optional[type[BaseException]], b: Optional[BaseException], c: Optional[TracebackType], ) -> None: ( live, maybe_prev_fake_mode, maybe_prev_only_lift_cpu_tensors, ) = self.enter_stack.pop() if live: super().__exit__(a, b, c) # Re-enable the previous fake mode, if there was one. if maybe_prev_fake_mode is not None: torch._C._set_dispatch_mode(maybe_prev_fake_mode) if maybe_prev_only_lift_cpu_tensors is not None: torch._C._set_only_lift_cpu_tensors(maybe_prev_only_lift_cpu_tensors) @classmethod def is_infra_mode(cls) -> bool: return True @classmethod def cache_info(cls) -> DispatchCacheInfo: """ Query the state of the dispatch cache. """ return DispatchCacheInfo( FakeTensorMode.cache_hits, FakeTensorMode.cache_misses, dict(FakeTensorMode.cache_bypasses), len(FakeTensorMode.cache), ) @classmethod def cache_clear(cls) -> None: """ Clear the dispatch cache. """ cls.cache_hits = 0 cls.cache_misses = 0 cls.cache_bypasses.clear() cls.cache.clear() def _cached_dispatch_impl( self, func: OpOverload, types: Sequence[type], args: Sequence[object], kwargs: Mapping[str, object], ) -> object: """ Lookup a cache entry for the given arguments. If none exists, dispatch and cache the result (if the result is eligible for caching). """ state = None key = None try: state = _CacheKeyState(self.shape_env) key = self._cache_key(state, func, args, kwargs) except _BypassDispatchCache as e: # We couldn't create the cache key at all if ( isinstance(func, torch._ops.HigherOrderOperator) and func.name() == "invoke_subgraph" ): hc_log.debug( "Fake tensor cache failed: identifier = %s, reason = %s", args[1], e.reason, ) FakeTensorMode.cache_bypasses[e.reason] += 1 if key is None: # Do this dispatch outside the above except handler so if it # generates its own exception there won't be a __context__ caused by # the caching mechanism. 
# pyrefly: ignore [bad-argument-type] return self._dispatch_impl(func, types, args, kwargs) assert state is not None if state.cache_on_shape_env(): assert state.shape_env is not None cache = state.shape_env.fake_tensor_cache set_cache_key = _set_cache_key_for_shape_env else: cache = FakeTensorMode.cache set_cache_key = _set_cache_key entry = cache.get(key, None) if entry is not None: if isinstance(entry, _DispatchCacheBypassEntry): # This represents a negative cache entry - we already saw that the # output is uncachable. Compute it from first principals. FakeTensorMode.cache_bypasses[entry.reason] += 1 # pyrefly: ignore [bad-argument-type] return self._dispatch_impl(func, types, args, kwargs) # We have a cache entry. # pyrefly: ignore [bad-argument-type] output = self._output_from_cache_entry(state, entry, key, func, args) FakeTensorMode.cache_hits += 1 if self.cache_crosscheck_enabled: # For debugging / testing: Validate that the output synthesized # from the cache matches the output created by normal dispatch. with disable_fake_tensor_cache(self): # pyrefly: ignore [bad-argument-type] self._crosscheck_cache_output(output, func, types, args, kwargs) return output # We don't have a cache entry. # pyrefly: ignore [bad-argument-type] output = self._dispatch_impl(func, types, args, kwargs) try: # pyrefly: ignore [bad-argument-type] entry = self._make_cache_entry(state, key, func, args, kwargs, output) except _BypassDispatchCache as e: # We ran "extra" checks on the cache key and determined that it's no # good. Record the reason and mark it so we don't bother validating # again. if ( isinstance(func, torch._ops.HigherOrderOperator) and func.name() == "invoke_subgraph" ): hc_log.debug( "Fake tensor cache failed: identifier = %s, reason = %s", args[1], e.reason, ) FakeTensorMode.cache_bypasses[e.reason] += 1 set_cache_key(cache, key, _DispatchCacheBypassEntry(e.reason)) return output set_cache_key(cache, key, entry) FakeTensorMode.cache_misses += 1 return output def _cache_key( self, state: _CacheKeyState, func: OpOverload, args: Sequence[object], kwargs: Mapping[str, object], ) -> _DispatchCacheKey: """ Create a cache key given the dispatch args. Raises _BypassDispatchCache for any situation that precludes caching. """ is_tracing = torch.fx.experimental.proxy_tensor.get_proxy_mode() is not None key_values = [ func, # Capture the default_dtype mode since that can affect the output tensor, # e.g., when operating on constant float values. torch.get_default_dtype(), # Capture the current device to support, e.g., cache tensor creation, # where there isn't necessarily a tensor to take the device from. torch._C._get_default_device(), # We want to create tensors from cached metadata only when the inference # mode is the same. torch.is_inference_mode_enabled(), # Shape env settings could affect behavior. One example seen in the wild: # Disallowing dynamic shapes can introduce a DynamicOutputShapeException # where it wasn't seen on a previous instance of the same op. self.shape_env.settings if self.shape_env else None, # ProxyTorchDispatchMode needs to track how SymNodes are constructed # so we need to handle things a little different depending on # whether we're tracing or not. is_tracing, ] if state.known_symbols: # If there are symbols then include the epoch - this is really more # of a Shape env var which lives on the FakeTensorMode. 
# pyrefly: ignore [bad-argument-type] key_values.append(self.epoch) # Collect the id_hashed objects to attach a weakref finalize later id_hashed_objects: list[object] = [] # Translate any FakeTensor args to metadata. if args: # pyrefly: ignore [bad-argument-type] self._prep_args_for_hash(key_values, args, state, id_hashed_objects) if kwargs: # pyrefly: ignore [bad-argument-type] self._prep_args_for_hash(key_values, kwargs, state, id_hashed_objects) key = _DispatchCacheKey(tuple(key_values)) for id_hashed_obj in id_hashed_objects: weakref.finalize( id_hashed_obj, functools.partial(evict_fake_tensor_cache_key, key=key) ) id_hashed_objects.clear() return key def _validate_cache_key( self, func: OpOverload, args: Sequence[object], kwargs: Mapping[str, object], ) -> None: """ Validate that the cache key generated by _cache_key will be reasonable. """ from torch._higher_order_ops.utils import registered_hop_fake_fns # For hops, we perform the validity check in _make_cache_entry because we # need to have the output tensor. if ( isinstance(func, torch._ops.HigherOrderOperator) and func in registered_hop_fake_fns ): return # Avoid caching for any ops that would require a more sophisticated # caching implementation, e.g., data dependent ops or ops that modify # the inputs. if torch.Tag.data_dependent_output in func.tags: raise _BypassDispatchCache("data dependent output") if torch.Tag.dynamic_output_shape in func.tags: if func is aten.index.Tensor: _, new_kwargs = normalize_function( # type: ignore[misc] func, args=args, # type: ignore[arg-type] kwargs=kwargs, # type: ignore[arg-type] normalize_to_only_use_kwargs=True, ) for index in new_kwargs["indices"]: # index calls nonzero for bool or int8 tensors, and # therefore has a dynamic shape output. For other dtypes, # the output shape depends on the input shape (and not data) if isinstance(index, torch.Tensor) and index.dtype in ( torch.bool, torch.int8, ): raise _BypassDispatchCache("dynamic output shape") return raise _BypassDispatchCache("dynamic output shape") if torch.Tag.inplace_view in func.tags: raise _BypassDispatchCache("inplace view") if func is aten._unsafe_view.default: raise _BypassDispatchCache("unsafe view") if func in self.lift_fns: raise _BypassDispatchCache("lift") if func.name() == "inductor::resize_storage_bytes_": raise _BypassDispatchCache("inductor::resize_storage_bytes_") if not torch._library.utils.is_builtin(func): raise _BypassDispatchCache("non-builtin") # In order to handle storage aliasing, we need to establish the alias # for any view op on a cache hit. But CompositeImplicitAutograd ops may # or may not alias the input, so just punt on caching these. if func.is_view and torch._C._dispatch_has_kernel_for_dispatch_key( func.name(), torch._C.DispatchKey.CompositeImplicitAutograd ): raise _BypassDispatchCache("CompositeImplicitAutograd") def _prep_args_for_hash( self, result: list[object], args: Union[Mapping[str, object], Sequence[object], Iterable[object]], state: _CacheKeyState, id_hashed_objects: list[object], ) -> None: """ Translate the provided args into a form suitable for caching at FakeTensor dispatch, i.e., convert unhashable types like lists & dicts into tuples and convert FakeTensors into metadata. Raises _BypassDispatchCache to signal unsupported cases that should bypass caching. 
""" from torch._higher_order_ops.auto_functionalize import ( FunctionalCallableWithEpilogue, ) from torch._higher_order_ops.utils import FunctionalizeCtxWrapper if isinstance(args, (list, tuple, dict)): result.append(type(args)) result.append(f"length_{len(args)}") if isinstance(args, dict): self._prep_args_for_hash(result, args.keys(), state, id_hashed_objects) self._prep_args_for_hash(result, args.values(), state, id_hashed_objects) return for arg in args: if isinstance(arg, FakeTensor): if not self.is_our_fake(arg): raise _BypassDispatchCache("not our fake") if arg.constant is not None: raise _BypassDispatchCache("constant attribute") if is_sparse_any(arg): raise _BypassDispatchCache(f"{arg.layout} tensor") metadata = extract_tensor_metadata(arg) metadata._flatten_into(result, self, state) elif isinstance(arg, Tensor): raise _BypassDispatchCache("non-fake tensor") elif isinstance(arg, SymInt): state.convert_sym_int(result, arg) elif isinstance(arg, (SymBool, SymFloat)): raise _BypassDispatchCache("symbolic shape") elif isinstance(arg, (list, tuple, dict)): self._prep_args_for_hash(result, arg, state, id_hashed_objects) elif isinstance(arg, types.FunctionType): raise _BypassDispatchCache("function argument") elif isinstance(arg, torch.fx.GraphModule): # This is used for invoke_subgraph where id(graph_module) allows # us to cache fake outputs result.append(type(arg)) result.append(id(arg)) id_hashed_objects.append(arg) elif isinstance(arg, FunctionalizeCtxWrapper): # Special case for AOT Dispatcher first pass, where the fake # tensor is called on the functional wrapper of the subgraph. result.append(hash(arg)) # functional wrapper is destroyed after fake tensor prop. We # need to put the finalizer on the subgraph. id_hashed_objects.append(arg.subgraph) elif isinstance(arg, FunctionalCallableWithEpilogue): result.append(type(arg)) result.append(hash(arg)) id_hashed_objects.append(arg.orig_callable) else: # It's important to capture the type of the arg since, e.g., 1 and 1.0 # hash to the same value, but can produce different dtypes for the # output tensor. result.append(type(arg)) result.append(arg) def _validate_output_for_cache_entry( self, state: _CacheKeyState, key: _DispatchCacheKey, func: OpOverload, args: Sequence[object], kwargs: Mapping[str, object], output: Optional[FakeTensor], ) -> None: # Is this even possible? According to the signature this can be None but # not `int`. So either the signature is a lie or (part of) this line is # unnecessary... if isinstance(output, (int, type(None))): return # Check for symbolic content that should bypass caching - raises # _BypassDispatchCache if necessary. _validate_symbolic_output_for_caching(state, output) # Some ops return tuples of Tensors, but it's rare, so avoid # the complexity of caching other types. if not isinstance(output, FakeTensor): raise _BypassDispatchCache("non-FakeTensor output") # Avoid caching FakeTensors with constants attached since those # can be invalidated. if output.constant is not None: raise _BypassDispatchCache("constant attribute") # TODO: support caching sparse outputs? if output.is_sparse: raise _BypassDispatchCache("sparse output") if is_sparse_compressed(output): raise _BypassDispatchCache("sparse compressed output") # Can an in-place op really reference a kwarg? If so, then we need # to extend the implementation to handle it. 
        for kval in kwargs.values():
            if id(kval) == id(output):
                raise _BypassDispatchCache("kwarg aliases output")

    def _get_output_info_for_cache_entry(
        self,
        state: _CacheKeyState,
        key: _DispatchCacheKey,
        func: OpOverload,
        args: Sequence[object],
        kwargs: Mapping[str, object],
        output: FakeTensor,
    ) -> _DispatchCacheEntryOutputInfo:
        if isinstance(output, (int, torch.SymInt, type(None))):
            return _DispatchCacheEntryOutputInfo(
                inplace_idx=None, metadata=None, view_idx=None, constant_value=output
            )

        # If this is an in-place op, the entry records which input arg is aliased.
        for idx in range(len(args)):
            if id(args[idx]) == id(output):
                return _DispatchCacheEntryOutputInfo(
                    inplace_idx=idx, metadata=None, view_idx=None
                )

        # Otherwise, create an entry that records the output tensor's metadata.
        view_idx = None
        if isinstance(func, torch._ops.OpOverload) and func.is_view:
            idxs = [i for i, t in enumerate(args) if isinstance(t, Tensor)]
            assert len(idxs) == 1
            view_idx = idxs[0]

        metadata = extract_tensor_metadata(output)
        metadata.shape = tuple(state.convert_output(v) for v in metadata.shape)
        metadata.stride = tuple(state.convert_output(v) for v in metadata.stride)
        metadata.storage_offset = state.convert_output(metadata.storage_offset)
        metadata.storage_bytes = (
            None
            if metadata.storage_bytes is None
            else state.convert_output(metadata.storage_bytes)
        )

        entry = _DispatchCacheEntryOutputInfo(
            inplace_idx=None,
            metadata=metadata,
            view_idx=view_idx,
        )

        # N.B.: Some checks for bypassing the cache would be performed on the
        # output tensor synthesized from the cached metadata. As an optimization,
        # we can synthesize a tensor here and do the checks on that instance.
        # This approach keeps the (more frequent) cache-hit path as lightweight
        # as possible.
        entry_for_synth_output = _DispatchCacheValidEntry(
            output_infos=(entry,), is_output_tuple=False
        )

        from torch.fx.experimental.symbolic_shapes import GuardOnDataDependentSymNode

        try:
            synth_output = self._output_from_cache_entry(
                state, entry_for_synth_output, key, func, args
            )
        except GuardOnDataDependentSymNode:
            # This should probably never really happen. If it does, it means that
            # although the original call didn't get a data-dependent error, when
            # we tried to reconstruct the output we did - that's almost
            # certainly a bug.
            raise _BypassDispatchCache("data dependent symnode") from None

        # Make sure the dispatch_key_set from the synthesized output tensor will
        # be the same.
        synth_key_set = torch._C._dispatch_key_set(synth_output)
        key_set = torch._C._dispatch_key_set(output)
        if synth_key_set != key_set:
            raise _BypassDispatchCache("dispatch_key_set mismatch")

        return entry

    def _make_cache_entry(
        self,
        state: _CacheKeyState,
        key: _DispatchCacheKey,
        func: OpOverload,
        args: Sequence[object],
        kwargs: Mapping[str, object],
        output: Optional[FakeTensor],
    ) -> _DispatchCacheValidEntry:
        """
        Make a cache entry object for the given 'output' Tensor. Raises
        _BypassDispatchCache if the output tensor has characteristics that
        prevent caching it.
        """
        from torch._higher_order_ops.utils import registered_hop_fake_fns
        from torch.fx.experimental.symbolic_shapes import has_free_unbacked_symbols

        self._validate_cache_key(func, args, kwargs)

        # For hops, let's look at the output tensor to find any unbacked symints.
        # If there are none, then we rely on the existing checks to validate
        # caching.
        # NB: Note that the HOPs that stay alive till FakeTensor are functional;
        # once they support mutations, we will have to revisit this logic.
if ( isinstance(func, torch._ops.HigherOrderOperator) and func in registered_hop_fake_fns ): assert isinstance(output, tuple) non_cacheable = any( isinstance(o, (torch.Tensor, torch.SymInt)) and has_free_unbacked_symbols(o) for o in output ) if non_cacheable: raise _BypassDispatchCache(f"unbacked symbol in HOP {func} output") if isinstance(output, (int, torch.SymInt, type(None))): output_info = _DispatchCacheEntryOutputInfo( inplace_idx=None, metadata=None, view_idx=None, constant_value=output ) return _DispatchCacheValidEntry( output_infos=(output_info,), is_output_tuple=False ) if isinstance(output, tuple): for out_element in output: self._validate_output_for_cache_entry( state, key, # pyrefly: ignore [bad-argument-type] func, args, kwargs, out_element, ) else: self._validate_output_for_cache_entry( state, key, # pyrefly: ignore [bad-argument-type] func, args, kwargs, output, ) if isinstance(output, tuple): output_infos = [ self._get_output_info_for_cache_entry( state, key, # pyrefly: ignore [bad-argument-type] func, args, kwargs, out_elem, ) for out_elem in output ] return _DispatchCacheValidEntry( # pyrefly: ignore [bad-argument-type] output_infos=tuple(output_infos), is_output_tuple=True, ) else: output_info = self._get_output_info_for_cache_entry( state, key, # pyrefly: ignore [bad-argument-type] func, args, kwargs, output, ) return _DispatchCacheValidEntry( output_infos=(output_info,), is_output_tuple=False ) def _get_output_tensor_from_cache_entry( self, state: _CacheKeyState, entry: _DispatchCacheEntryOutputInfo, key: _DispatchCacheKey, func: OpOverload, args: Sequence[object], ) -> Optional[FakeTensor]: if ( entry.inplace_idx is None and entry.metadata is None and entry.view_idx is None ): assert entry.constant_value is not SingletonConstant return entry.constant_value if entry.inplace_idx is not None: # This is an in-place op; return the aliased arg. inplace_arg = args[entry.inplace_idx] assert isinstance(inplace_arg, FakeTensor) return inplace_arg # Synthesize a new FakeTensor with the cached metadata. metadata = entry.metadata if metadata is None: return None assert not is_sparse_any(metadata) def check_value( value: _MetadataIntLike, state: _CacheKeyState ) -> Union[IntLikeType]: if isinstance(value, _SymIntOutputStub): assert state.shape_env is not None return value.extract(key, state.shape_env) else: assert not isinstance(value, _PySymInputStub) return value shape = tuple(check_value(v, state) for v in metadata.shape) stride = tuple(check_value(v, state) for v in metadata.stride) storage_offset = check_value(metadata.storage_offset, state) if metadata.storage_bytes is not None: check_value(metadata.storage_bytes, state) maybe_suppress: Callable[[], typing.ContextManager] = contextlib.nullcontext if self.shape_env is not None: maybe_suppress = self.shape_env.suppress_guards with in_kernel_invocation_manager(self), maybe_suppress(): empty = torch.empty_strided( shape, stride, dtype=metadata.dtype, layout=metadata.layout, device="meta", requires_grad=metadata.requires_grad, ) if metadata.is_conj: torch._C._set_conj(empty, True) if metadata.is_neg: torch._C._set_neg(empty, True) if isinstance(func, torch._ops.OpOverload) and func.is_view: # For view ops, the storage should be the same as the tensor input. 
view_arg = args[cast(int, entry.view_idx)] assert isinstance(view_arg, FakeTensor) storage = view_arg.untyped_storage() with in_kernel_invocation_manager(self), maybe_suppress(): empty.set_(storage, storage_offset, shape, stride) return FakeTensor(self, empty, metadata.device) def _output_from_cache_entry( self, state: _CacheKeyState, entry: _DispatchCacheValidEntry, key: _DispatchCacheKey, func: OpOverload, args: Sequence[object], ) -> Union[Optional[FakeTensor], tuple[Optional[FakeTensor], ...]]: """ Create a new FakeTensor from the cache entry. """ if entry.is_output_tuple: outputs = [ self._get_output_tensor_from_cache_entry( state, output_info, key, func, args ) for output_info in entry.output_infos ] return tuple(outputs) else: return self._get_output_tensor_from_cache_entry( state, entry.output_infos[0], key, func, args ) def _crosscheck_cache_output( self, output: Union[Optional[FakeTensor], tuple[Optional[FakeTensor], ...]], func: OpOverload, types: Sequence[type], args: Sequence[object], kwargs: Mapping[str, object], ) -> None: """ Helper to validate that the output synthesized from the cache matches the output created by normal dispatch. """ def assert_helper(a: Any, b: Any) -> None: if isinstance(a, tuple): assert isinstance(b, tuple) assert len(a) == len(b) for l, r in zip(a, b): assert_helper(l, r) elif isinstance(a, int): assert isinstance(b, int) and a == b elif a is None: assert b is None elif isinstance(a, py_sym_types): assert type(a) is type(b) and a.node is b.node elif isinstance(a, torch.Tensor): assert isinstance(b, torch.Tensor) assert_metadata_eq(assert_eq, a, b) else: raise RuntimeError(f"Unsupported type {type(a)}") try: true_output = self._dispatch_impl(func, types, args, kwargs) except Exception as e: raise RuntimeError( f"FakeTensor cache crosscheck failure: func={func}, " f"args={args}, kwargs={kwargs}: Dispatch raised={e}" ) from e try: assert_helper(true_output, output) except Exception as e: raise RuntimeError( f"FakeTensor cache crosscheck failure: func={func}, " f"args={args}, kwargs={kwargs}" ) from e def dispatch( self, func: OpOverload, types: Sequence[type], args: Sequence[object] = (), kwargs: Mapping[str, object] = immutable_dict(), ) -> object: kwargs = kwargs or {} with no_dispatch(): log.debug("%s %s %s", func, args, kwargs) if func in _DISPATCH_META_HANDLERS: return _DISPATCH_META_HANDLERS[func](args) if log.getEffectiveLevel() <= logging.DEBUG: log.debug( "%sFakeTensorMode.__torch_dispatch__: %s", " " * RECURSION_COUNT, func ) # NOTE: incr is intentionally unused for a RAII pattern incr = IncrementRecursionCount() # noqa: F841 # Some attribute queries that can be serviced directly # See Note [is_coalesced is dispatched] if func in _DISPATCH_HANDLE_DIRECTLY: # NB: no_dispatch is ok here too, this func is very simple with in_kernel_invocation_manager(self): return func(*args, **kwargs) if self.cache_enabled: return self._cached_dispatch_impl(func, types, args, kwargs) else: return self._dispatch_impl(func, types, args, kwargs) def _maybe_infer_fake( self, func: OpOverload, path: KeyPath, fake: object, real: object ) -> tuple[Optional[object], bool]: """ Helper to cross-check fake/real output properties & values, and create new fake vals if mismatched. 
        Returns a tuple of the object and a boolean for whether or not it was
        overwritten
        """
        import sympy

        from torch._subclasses.fake_utils import _check_fake_real_tensors

        def _check_fake_real_vals(fake: Any, real: Any) -> None:
            # use real values + ShapeEnv to check mismatches between potentially symbolic values
            if isinstance(fake, (SymInt, SymFloat)):
                # symbolic expression, ask ShapeEnv to substitute known backed/unbacked values
                assert self.shape_env is not None
                if (
                    not fake.node.expr.free_symbols
                    - self.shape_env.var_to_val.keys()
                    - self.shape_env.unbacked_var_to_val.keys()
                ):
                    if (
                        self.shape_env._maybe_evaluate_static(
                            sympy.Eq(fake.node.expr, real), compute_hint=True
                        )
                        is not sympy.S.true
                    ):
                        raise MetadataMismatchError(
                            f"mismatch between fake value {fake} and real value {real} "
                        )
            elif isinstance(
                fake, (int, float, bool)
            ):  # concrete value, check direct equality
                if fake != real:
                    raise MetadataMismatchError(
                        f"mismatch between fake value {fake} and real value {real} "
                    )

        if isinstance(fake, torch.Tensor):
            try:
                _check_fake_real_tensors(
                    real,  # type: ignore[arg-type]
                    fake,  # type: ignore[arg-type]
                    context="Real tensor propagation found",
                    sizes=False,  # manual check below
                    strides=False,  # skip strides
                    storage_offset=True,
                    requires_grad=False,  # issues with FakeTensorConverter preserving requires_grad
                )
            except MetadataMismatchError as exc:
                if torch._functorch.config.generate_fake_kernels_from_real_mismatches:
                    dtrace_structured(
                        "mismatched_fake_kernel",
                        metadata_fn=lambda: {
                            "op": str(func),
                            "reason": exc.reason,  # noqa: F821
                        },
                    )
                    return _infer_fake_from_real_tensor(self, func, real), True  # type: ignore[arg-type]
                raise MetadataMismatchError(
                    f"Real tensor propagation found a metadata mismatch between "
                    f"fake tensor {fake} and real tensor {real}, "
                    f" at output{keystr(path)}, for func: {func}"
                ) from exc

            for j, (s_fake, s_real) in enumerate(zip(fake.size(), real.size())):  # type: ignore[attr-defined]
                try:
                    _check_fake_real_vals(s_fake, s_real)
                except MetadataMismatchError as exc:
                    if torch._functorch.config.generate_fake_kernels_from_real_mismatches:
                        dtrace_structured(
                            "mismatched_fake_kernel",
                            metadata_fn=lambda: {
                                "op": str(func),
                                "reason": exc.reason,  # noqa: F821
                            },
                        )
                        return _infer_fake_from_real_tensor(self, func, real), True  # type: ignore[arg-type]
                    raise MetadataMismatchError(
                        f"Real tensor propagation found an output size mismatch between "
                        f"fake shape {s_fake} and real shape {s_real}, "
                        f"at output{keystr(path)}.size({j}), for func: {func}"
                    ) from exc
        elif fake is None and real is not None:
            if torch._functorch.config.generate_fake_kernels_from_real_mismatches:
                dtrace_structured(
                    "mismatched_fake_kernel",
                    metadata_fn=lambda: {
                        "op": str(func),
                        "reason": f"mismatch between fake value {fake} and real value {real}",  # noqa: F821
                    },
                )
                return _infer_fake_from_real_tensor(self, func, real), True  # type: ignore[arg-type]
            raise MetadataMismatchError(
                f"Real tensor propagation found a metadata mismatch between "
                f"fake tensor {fake} and real tensor {real}, "
                f" at output{keystr(path)}, for func: {func}"
            )
        else:
            try:
                _check_fake_real_vals(fake, real)
            except MetadataMismatchError as exc:
                raise MetadataMismatchError(
                    f"Real tensor propagation found an output value mismatch between "
                    f"fake output value {fake} and real output value {real}, "
                    f"at output{keystr(path)}, for func: {func}"
                ) from exc
        return fake, False

    def _maybe_infer_fake_kernel_from_pytree_out(
        self,
        func: OpOverload,
        fake_in: object,
        real_in: object,
        fake_out: object,
        real_out: object,
    ) -> Optional[object]:
        """
        Helper to cross-check
fake/real output properties & values, and create new fake vals if mismatched, but at the kernel level. Means this handles pytree outputs & checks aliasing. """ from torch._subclasses.fake_utils import _check_alias_info # we might have to clear pending unbacked symbols, if we override the kernel pending_unbacked = None if self.shape_env: pending_unbacked = list(self.shape_env.pending_fresh_unbacked_symbols) def _clear_pending_unbacked() -> None: self.shape_env.pending_fresh_unbacked_symbols = list( # type: ignore[union-attr] set(self.shape_env.pending_fresh_unbacked_symbols).difference( # type: ignore[union-attr] pending_unbacked # type: ignore[arg-type] ) ) fake_paths_leaves, fake_spec = pytree.tree_flatten_with_path(fake_out) real_leaves, _ = pytree.tree_flatten(real_out) try: # catch aliasing mismatches between fake/real tensors _check_alias_info( "Real tensor propagation found", real_out, real_in, fake_out, fake_in ) except MetadataMismatchError as exc: # if mismatch found, optionally infer fake kernel if torch._functorch.config.generate_fake_kernels_from_real_mismatches: dtrace_structured( "mismatched_fake_kernel", metadata_fn=lambda: { "op": str(func), "reason": ( f"Mismatched aliasing spec between fake kernel and real kernel: {exc.reason}" # noqa: F821 ), }, ) # if aliasing mismatches are found, it's likely that the fake tensor impl # is incorrectly aliasing, since we don't support aliasing custom ops. # in this case we can default to inferring non-aliasing fake kernels from the real outputs. _clear_pending_unbacked() return tree_map( lambda x: _infer_fake_from_real_tensor(self, func, x), real_out ) else: raise MetadataMismatchError( f"Real tensor propagation found an aliasing mismatch between " f"fake output {fake_out} and real output {real_out}, " f" for func: {func}" ) from exc # if no errors raised, run cross checks on fake/real tensors, # optionally overriding individual fake tensors, if individual meta kernel output is incorrect. fake_leaves, overrides = zip( *[ self._maybe_infer_fake(func, _fake_path, _fake_out, _real_out) for (_fake_path, _fake_out), _real_out in zip( fake_paths_leaves, real_leaves ) ] ) if ( any(overrides) and pending_unbacked ): # only keep new pending unbacked symbols _clear_pending_unbacked() return pytree.tree_unflatten(fake_leaves, fake_spec) def _dispatch_impl( self, func: OpOverload, types: Sequence[type], args: Sequence[object], kwargs: Mapping[str, object], ) -> Optional[FakeTensor]: from torch._higher_order_ops.utils import registered_hop_fake_fns flat_args, args_spec = pytree.tree_flatten((args, kwargs)) # DO NOT PUT LOGIC BEFORE UNRECOGNIZED TYPE CHECKING # We must throw NotImplemented in case of unrecognized types to handle subclasses. # Throwing the exception will pass the control to the next __torch_dispatch__. # See [subclass inputs] below # NB: If you're seeing a mysterious infinite loop involving fake # tensor, it might be related to this line. Though I'm not sure # how you'll know to read this comment, as this line won't show up # in the stack trace. 
has_unrecognized_types = _check_for_subclass(flat_args) if has_unrecognized_types: unrecognized_types = [ type(x) for x in flat_args if _check_for_subclass_arg(x) ] not_implemented_log.debug( "FakeTensorMode unrecognized subclass(es): %s", unrecognized_types ) return NotImplemented flat_arg_fake_tensors = [t for t in flat_args if self.is_our_fake(t)] has_symbolic_sizes = any( i._has_symbolic_sizes_strides for i in flat_arg_fake_tensors ) or any(isinstance(a, SymInt) for a in flat_args) converter = self.fake_tensor_converter is_lift_func = func in self.lift_fns # If we are trying to avoid device init, then we need to avoid constant # prop on constant tensors for ops that change devices. avoiding_device_init = False if self.avoid_device_init: if ( func is torch.ops.aten._to_copy.default and "device" in kwargs and kwargs["device"].type != "cpu" # type: ignore[attr-defined] ): avoiding_device_init = True if func is torch.ops.prims.device_put.default: avoiding_device_init = True # skip const prop for aten._to_copy if # 1. input tensor is on "meta" device # 2. destination device is unavailable, captured by `avoiding_device_init` device_conversion_skip_const_prop = ( func is torch.ops.aten._to_copy.default and isinstance(args[0], torch.Tensor) and args[0].device.type == "meta" ) or avoiding_device_init # To constant propagate through these functions: # 1, If this is a lift due to a torch.tensor call, # the input tensor is guaranteed to be a # constant, so we keep a copy of the original argument along so # we can query it if we're asked to item() it at some later point. # (Note that you can always call a lift fn manually, so we do # have to check if there are any fake tensors!) # 2, Some functions that allow Python numbers to bind to Tensors, e.g, torch.div if (is_lift_func and not flat_arg_fake_tensors) or ( should_allow_numbers_as_tensors(func) and not has_symbolic_sizes and not flat_arg_fake_tensors and not device_conversion_skip_const_prop ): assert all(t.constant is not None for t in flat_arg_fake_tensors), ( f"{func} should not have fake inputs without constants" ) const_flat_args = [ a.constant if self.is_our_fake(a) else a for a in flat_args ] const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec) out = func(*const_args, **const_kwargs) if type(out) is Tensor and self.may_turn_const(out): # NB: not in_kernel_invocation_manager because we're doing real # compute here # NB: no_dispatch() here is VERY DANGEROUS (like, segfault # dangerous) if this is actually a wrapper subclass tensor, # therefore the exact type test above with no_dispatch(): out = out.clone() return converter.from_real_tensor(self, out, make_constant=True) # if we are in the dispatch mode, we will enter this function even if the inputs # are not FakeTensors. For now, throw if any non-Fake Tensor inputs # and just support constructors. 
# this is generated from torch.tensor(), which does not use the # dispatcher, to allow wrapper subclasses to wrap the new tensor if is_lift_func: assert len(kwargs) == 0 and len(args) == 1, f"{args} {kwargs}" if type(args[0]) is Tensor: return converter.from_real_tensor(self, args[0]) # Recompute flat_arg_fake_tensors here again in case some of the inputs # were real tensors and fakified in validate_and_convert_non_fake_tensors (flat_args, flat_arg_fake_tensors) = self.validate_and_convert_non_fake_tensors( func, converter, flat_args, args_spec ) del args, kwargs # Invalidated # The current constant handling only support tracing systems # (aot autograd, torchdynamo) where each operation is run consecutively. # Because each operation is run in order, we can trace out and support # sequences like: x = torch.tensor(0.); y = x.add_(1) # Whenever a constant is written to but with inputs that cannot be evaluated # statically, such as random_(), we invalidate all constants that alias the input # We will rely on functionalization for use of fake tensors constants as persistent # objects on an FX Graph. # We dispatch size/stride/numel on the FakeTensor not its constant, so bail on inplace_view all_constant = all(e.constant is not None for e in flat_arg_fake_tensors) if ( isinstance(func, torch._ops.OpOverload) and torch.Tag.nondeterministic_seeded not in func.tags and torch.Tag.inplace_view not in func.tags and all_constant and len(flat_arg_fake_tensors) != 0 and not has_symbolic_sizes and not avoiding_device_init and func is not aten._nested_tensor_from_tensor_list.default ): const_flat_args = [ a.constant if self.is_our_fake(a) else a for a in flat_args ] const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec) # NB: not in_kernel_invocation_manager(self) as we want to do REAL # compute with no_dispatch(): out = func(*const_args, **const_kwargs) flat_out = pytree.tree_leaves(out) flat_out_tensors = [t for t in flat_out if isinstance(t, Tensor)] all_constant = all(self.may_turn_const(t) for t in flat_out_tensors) if all_constant: return pytree.tree_map_only( Tensor, lambda t: converter.from_real_tensor(self, t, make_constant=True), out, ) # we weren't able to turn outputs to constants, # so invalidate all constants that might be aliases of the outputs for ten in flat_out_tensors: converter.invalidate_constant_aliases(ten) # we are falling through to running non constant tensors, any input constant that # is written to must be invalidated args, kwargs = pytree.tree_unflatten(flat_args, args_spec) if ( isinstance(func, torch._ops.HigherOrderOperator) and func in registered_hop_fake_fns ): # Reenable the fake tensor mode for the registered fake function maybe_ignore_fresh_unbacked_symbols = ( contextlib.nullcontext if self.shape_env is None else self.shape_env.ignore_fresh_unbacked_symbols ) with self, maybe_ignore_fresh_unbacked_symbols(): # pyrefly: ignore [index-error] return registered_hop_fake_fns[func](*args, **kwargs) self.invalidate_written_to_constants(func, flat_arg_fake_tensors, args, kwargs) def maybe_to_real_tensor( t: T, ) -> Optional[Union[T, Tensor, torch._C.ScriptObject]]: if isinstance(t, FakeTensor): return t.real_tensor elif isinstance(t, py_sym_types): assert self.shape_env is not None return t.node.pytype( t.node.expr.xreplace(self.shape_env.var_to_val).xreplace( self.shape_env.unbacked_var_to_val ) ) elif isinstance(t, FakeScriptObject): return t.real_obj else: return t from torch.fx.experimental.symbolic_shapes import ( compute_unbacked_bindings, 
free_unbacked_symbols, ) nil = object() real_out = nil if ( self.propagate_real_tensors and all(e.real_tensor is not None for e in flat_arg_fake_tensors) and not any( ( isinstance(a, py_sym_types) and (syms := free_unbacked_symbols(a)) and self.shape_env is not None and any(s not in self.shape_env.unbacked_var_to_val for s in syms) ) for a in flat_args ) ): log.debug("propagate_real_tensors %s", func) real_flat_args = [maybe_to_real_tensor(a) for a in flat_args] real_args, real_kwargs = pytree.tree_unflatten(real_flat_args, args_spec) is_builtin = library_utils.is_builtin(func) if not is_builtin: mutation_checker = library_utils.MutationChecker( func, real_flat_args, args_spec ) try: real_out = func(*real_args, **real_kwargs) except ZeroDivisionError as exc: # we shouldn't broadly catch all errors here; # some come from real-kernel mutation/aliasing checks we want to run. # add more exception types as needed. log.debug( # noqa: G200 "real-tensor fallback failed for %s: %s; silently ignoring", func, exc, ) if not is_builtin: mutation_checker.check() # type: ignore[possibly-undefined] library_utils.check_aliasing_constraint(func._name, flat_args, real_out) elif self.propagate_real_tensors: # This can happen occasionally legitimately, specifically when you # are inside the meta of a data dependent operation and you create # a tensor on an unbacked SymInt; at this point in time we don't # know what the unbacked SymInt is, but we will know later. # However, if there's a bug in the condition above, this condition # will also trigger. log.debug( "SKIPPED propagate_real_tensors %s(%s, %s) %s", func, flat_arg_fake_tensors, flat_args, self.shape_env.unbacked_var_to_val if self.shape_env else None, ) def maybe_propagate_real_tensors(fake_out: T) -> T: import sympy log.debug("maybe_propagate_real_tensors %s", func) def go(t: object, real_t: Tensor) -> None: if isinstance(t, FakeTensor): # NB: unconditionally overwrite log.debug( "maybe_propagate_real_tensors %s -> %s", id(t), id(real_t) ) t.real_tensor = real_t for s, real_s in zip(t.size(), real_t.size()): go(s, real_s) # type: ignore[arg-type] for s, real_s in zip(t.stride(), real_t.stride()): go(s, real_s) # type: ignore[arg-type] go(t.storage_offset(), real_t.storage_offset()) # type: ignore[arg-type] elif isinstance(t, py_sym_types) and free_unbacked_symbols(t): if isinstance(t.node.expr, sympy.Symbol): assert self.shape_env is not None self.shape_env.set_unbacked_var_to_val(t.node.expr, real_t) elif ( isinstance(s := t.node.expr, sympy.Eq) and isinstance(s.lhs, sympy.Symbol) and s.rhs == 1 ): assert self.shape_env is not None self.shape_env.set_unbacked_var_to_val(s, int(real_t)) if real_out is not nil: # cross check fake/real outputs, and optionally override fake kernel mismatches if not torch._functorch.config.generate_fake_kernels_from_real_mismatches: self._maybe_infer_fake_kernel_from_pytree_out( func, (args, kwargs), (real_args, real_kwargs), fake_out, real_out, ) else: # this can override the output only when the flag is True fake_out = self._maybe_infer_fake_kernel_from_pytree_out( # type: ignore[assignment] func, (args, kwargs), (real_args, real_kwargs), fake_out, real_out, ) # populate unbacked_var_to_val if ( not isinstance(fake_out, Tensor) and not isinstance(real_out, Tensor) and type(fake_out) is not type(real_out) ): # This can happen when decompositions have different return types, # e.g. namedtuple vs. tuple vs. list. 
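                    # Flattening both sides first lets tree_map_ walk the two
                    # outputs in lockstep even though their containers differ.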
                    tree_map_(
                        go,
                        tuple(pytree.tree_flatten(fake_out)),
                        tuple(pytree.tree_flatten(real_out)),
                    )
                else:
                    tree_map_(go, fake_out, real_out)

                # If a data-dependent op is used in a decomposition, we
                # may need to get the unbacked settings "early"
                # TODO: Is this really needed?
                compute_unbacked_bindings(self.shape_env, fake_out, peek=True)

            # pyrefly: ignore [bad-return]
            return fake_out

        # Try for fastpath
        if has_symbolic_sizes:
            fast_impl = get_fast_op_impls().get(func)
            if fast_impl is not None:
                return maybe_propagate_real_tensors(fast_impl(self, *args, **kwargs))

        # If there's a Python meta, prefer that over the decomposition
        from torch._decomp import meta_table

        if (
            func not in meta_table
            and not self.cpp_meta_supports_symint(func)
            and not (
                has_symbolic_sizes
                and func in self._unbacked_special_fake_handling_ops
            )
        ):
            from torch._decomp import decomposition_table

            # Prefer Python decompositions over C++ ones
            if func in decomposition_table and (
                has_symbolic_sizes
                or (
                    # TODO: Remove these exclusions, so that we can remove
                    # this leg entirely
                    torch_decomp_decompositions(func)
                    and all(not is_sparse_any(e) for e in flat_arg_fake_tensors)
                )
            ):
                with self:
                    return maybe_propagate_real_tensors(
                        decomposition_table[func](*args, **kwargs)
                    )

            with self:
                # Decomposes CompositeImplicitAutograd ops
                r = func.decompose(*args, **kwargs)
                if r is not NotImplemented:
                    return maybe_propagate_real_tensors(r)

        # prims already wrap FakeTensor inputs to FakeTensor outputs
        # and do device logic; we don't need to do anything but run them
        # and ensure that Meta kernels are dispatched to
        # (see Fake Tensor Dispatch Keys)
        # TODO - we should use the prim aten impl
        # TODO - fix prims complex ops
        if (
            "prims::" in func._schema.name
            and hasattr(func, "prim_meta_impl")
            and not stride_incorrect_op(func)
        ):
            with self:
                return maybe_propagate_real_tensors(
                    func.prim_meta_impl(*args, **kwargs)
                )

        profiles = torch._dynamo.config._custom_ops_profile
        if profiles is not None:
            if func in profiles.data:
                return profiles.generic_fake_kernel(func, self, *args, **kwargs)

        if (
            self.propagate_real_tensors
            and real_out is not nil
            and not library_utils.is_builtin(func)
            and self.shape_env is not None
        ):
            # Automatically infer a Fake kernel if there isn't one.
            if not library_utils.has_fake_kernel(func):
                result = inferred_fake_kernel_from_real_out(self, func, real_out)

                dtrace_structured(
                    "missing_fake_kernel",
                    metadata_fn=lambda: {
                        "op": str(func),
                    },
                )
                return maybe_propagate_real_tensors(result)

        # Users can register FakeTensor rules for custom operators
        # Call them if they exist.
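        # (e.g. fake impls registered via torch.library.register_fake)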
        maybe_fake_impl = torch._library.simple_registry.singleton.find(
            func.name()
        ).fake_impl.kernel
        if maybe_fake_impl:
            try:
                ctx = torch._library.fake_impl.FakeImplCtx(self, func)
                with torch._library.fake_impl.set_ctx_getter(lambda: ctx), self:
                    result = maybe_fake_impl(*args, **kwargs)
                    return maybe_propagate_real_tensors(result)
            except MissingOpProfile as e:
                # If we have a fake kernel registered that was generated from
                # OpProfiles, but there doesn't exist a profile for the existing
                # inputs, and we are in real-tensor propagation mode, fall back
                # to inferring the fake kernel from the real output.
                if (
                    self.propagate_real_tensors
                    and real_out is not nil
                    and not library_utils.is_builtin(func)
                    and self.shape_env is not None
                ):
                    result = inferred_fake_kernel_from_real_out(self, func, real_out)

                    dtrace_structured(
                        "missing_fake_kernel",
                        metadata_fn=lambda: {
                            "op": str(func),
                        },
                    )
                    return maybe_propagate_real_tensors(result)
                else:
                    raise e

        # special handling for funcs registered through `register_op_impl`,
        # e.g., manipulating args on constructor calls to construct meta tensors
        # and then afterwards wrapping them to a FakeTensor
        for run_impl_check, op_impl in op_implementations_checks:
            if run_impl_check(func):
                op_impl_out = op_impl(self, func, *args, **kwargs)
                if op_impl_out is not NotImplemented:
                    return maybe_propagate_real_tensors(op_impl_out)

        def maybe_run_unsafe_fallback(
            error: Optional[RuntimeError] = None,
        ) -> Optional[FakeTensor]:
            # We infer the meta of custom ops that return None to just
            # return None. Custom ops are not allowed to mutate metadata
            # of their inputs, so this is safe.
            if torch._library.utils.can_generate_trivial_fake_impl(func):
                return None
            # no meta kernel registered, fallback to kernel for the device
            if has_symbolic_sizes or not self.can_run_unsafe_fallback(func):
                raise UnsupportedOperatorException(func)
            if error is None:
                error = UnsupportedOperatorException(func)
            return run_fallback_kernel(self, func, flat_args, args_spec, error)

        # Optimization: If there is no Meta kernel, it takes a surprisingly long
        # amount of time to catch the NotImplementedError, so we check it here.
        if not has_meta(func):
            fallback = maybe_run_unsafe_fallback()
            return maybe_propagate_real_tensors(fallback)

        # run kernel registered to meta for func, which includes
        # python meta registrations, prims, decomps, and c++ meta fns (structured kernels)
        # It's possible that the kernel will return NotImplementedError
        try:
            with in_kernel_invocation_manager(self):
                r = func(*args, **kwargs)
        except NotImplementedError as not_implemented_error:
            return maybe_run_unsafe_fallback(not_implemented_error)
        except Exception:
            log.exception("failed while attempting to run meta for %s", func)
            raise

        return maybe_propagate_real_tensors(
            self.wrap_meta_outputs_with_default_device_logic(
                r, func, flat_args, device=kwargs.get("device")
            )
        )

    # WARNING: DO NOT add any additional namespaces/operators here if they refer to operators
    # outside of the pytorch/pytorch library! Any pre-existing things here
    # are either in the pytorch/pytorch library or have been grandfathered in.
    # The fallback does not always work and MAY CRASH and emit unreadable error messages
    # so it should not be allowed by default.
    _can_run_unsafe_fallback_allowed_namespaces = ordered_set(
        "debugprims",
        "prims",
        "aten",
        "xla",
        "vision",
        "torchtext",
        "torchaudio",
        "quantized",
    )

    def can_run_unsafe_fallback(self, func: OpOverload) -> bool:
        if not self.allow_fallback_kernels:
            return False
        # It's OK to try the fallback for built-in ops (e.g.
aten, prims) # because we control and test these but the fallback leads to unexpected behavior # in user-defined custom ops return ( func.namespace in self._can_run_unsafe_fallback_allowed_namespaces or func.name() == "fbgemm::gmm" ) def validate_and_convert_non_fake_tensors( self, func: OpOverload, converter: FakeTensorConverter, flat_args: Sequence[object], args_spec: TreeSpec, ) -> tuple[list[object], list[FakeTensor]]: """ Checks if the list of tensors are fake tensors. If not, try to convert them to fake tensors. Returns the original args, kwargs, and a flattened list of (args, kwargs) that are fake tensors. """ flat_arg_fake_tensors: list[FakeTensor] = [] def validate(x: T) -> Union[T, FakeTensor]: if not isinstance(x, Tensor): return x nonlocal flat_arg_fake_tensors if not self.is_our_fake(x): if hasattr(func, "tags") and torch.Tag.inplace_view in func.tags: args, kwargs = pytree.tree_unflatten(flat_args, args_spec) raise AssertionError( f"Can't call metadata mutating ops on non-Fake Tensor inputs. Found in {render_call(func, args, kwargs)}" ) allow_non_fake_inputs = ( self.allow_non_fake_inputs if fake_tensor_tls.allow_non_fake_inputs_override is None else fake_tensor_tls.allow_non_fake_inputs_override ) if not allow_non_fake_inputs: if isinstance(x, FakeTensor) and x.fake_mode is not self: raise AssertionError("Mixing fake modes NYI") args, kwargs = pytree.tree_unflatten(flat_args, args_spec) raise AssertionError( f"Please convert all Tensors to FakeTensors first or instantiate FakeTensorMode " f"with 'allow_non_fake_inputs'. Found in {render_call(func, args, kwargs)}" ) out = converter.from_real_tensor(self, x) else: out = x flat_arg_fake_tensors.append(out) return out validated_args = [validate(a) for a in flat_args] return validated_args, flat_arg_fake_tensors def wrap_meta_outputs_with_default_device_logic( self, r: object, func: OpOverload, flat_args: Sequence[object], device: torch.device, ) -> PyTree: converter = self.fake_tensor_converter # Lazily initialized, in case there are no tensor returns common_device = None has_scalar_only_inputs = False def wrap(e: T) -> Union[T, FakeTensor]: nonlocal common_device nonlocal has_scalar_only_inputs if not isinstance(e, Tensor): return e if common_device is None: ( common_device, has_scalar_only_inputs, ) = FakeTensor._find_common_device(func, flat_args) is_our_fake = self.is_our_fake(e) if is_our_fake: torch._check( e.device == common_device, lambda: f"FakeTensor is wrapped to wrong device, found {e.device}, expected {common_device}", ) return cast(T, e) elif converter is not None: if has_scalar_only_inputs: # Under FakeTensorMode, op accepts scalar only inputs, such as aten.add/sub/mul/div, # returns a real scalar tensor on CPU. See TensorMeta() in _prims/__init__.py for details. # We thus directly convert real tensor to fake tensor. 
                    return converter.from_real_tensor(self, e)
                else:
                    return converter.from_meta_and_device(
                        self, e, device or common_device
                    )
            else:
                # pyrefly: ignore [bad-return]
                return e

        return tree_map(wrap, r)

    def create_symbolic_nested_int(
        self, *, nt_tensor_id: Optional[int] = None
    ) -> torch.SymInt:
        # See Note: [Creating symbolic nested int]
        # Returned nested int always has coeff=1; multiply the result by coeff if needed
        import torch.nested._internal.nested_tensor
        from torch.nested._internal.nested_int import NestedIntNode

        if nt_tensor_id is None:
            nt_tensor_id = self.nt_tensor_id_counter
            assert self.enter_stack, (
                "should only be called while FakeTensorMode is active"
            )
            self.nt_tensor_id_counter += 1
        hint = torch.SymInt(NestedIntNode(nt_tensor_id, 1))

        src = torch._dynamo.source.EphemeralSource("intermediate_offsets_or_lengths")
        assert self.shape_env is not None
        ret = self.shape_env.create_symintnode(
            sym=self.shape_env.create_symbol(
                val=hint,
                source=src,
            ),
            hint=hint,
            source=src,
        )
        return ret

    _cpp_meta_supports_symint = ordered_set(
        aten.empty.memory_format,
        aten.empty_strided.default,
        aten.as_strided_scatter.default,
        aten.as_strided.default,
        aten.as_strided_.default,
        aten.zeros.default,
        aten.detach.default,
        aten.view_as_real.default,
        aten.view_as_complex.default,
        aten.set_.source_Storage_storage_offset,
        aten._sparse_coo_tensor_with_dims_and_tensors.default,
    )

    _unbacked_special_fake_handling_ops = ordered_set(
        aten.view.default,
        aten._unsafe_view.default,
        aten.slice.Tensor,
    )

    def cpp_meta_supports_symint(self, func: OpOverload) -> bool:
        if torch.Tag.view_copy in func.tags:
            return True
        return func in self._cpp_meta_supports_symint

    lift_fns = ordered_set(aten.lift_fresh.default, aten.lift_fresh_copy.default)

    def may_turn_const(self, t: Tensor) -> bool:
        return (
            t.numel() <= CONSTANT_NUMEL_LIMIT
            and not is_sparse_any(t)
            and not self.is_our_fake(t)
            and t.device.type != "meta"
        )

    def invalidate_written_to_constants(
        self,
        func: OpOverload,
        flat_arg_fake_tensors: Sequence[FakeTensor],
        args: Sequence[object],
        kwargs: Mapping[str, object],
    ) -> None:
        any_constant = any(e.constant is not None for e in flat_arg_fake_tensors)
        schema_info = get_schema_info(func)
        if any_constant and schema_info.is_mutable():
            _, new_kwargs = normalize_function(  # type: ignore[misc]
                func,
                args=args,  # type: ignore[arg-type]
                kwargs=kwargs,  # type: ignore[arg-type]
                normalize_to_only_use_kwargs=True,
            )
            for k, v in new_kwargs.items():
                k = k if (k != "input" or schema_info.has_argument(k)) else "self"
                if (
                    self.is_our_fake(v)
                    and schema_info.is_mutable(k)
                    and v.constant is not None
                ):
                    self.fake_tensor_converter.invalidate_constant_aliases(v.constant)

    def from_tensor(
        self,
        tensor: Tensor,
        *,
        static_shapes: Optional[bool] = None,
        source: Optional[Source] = None,
        symbolic_context: Optional[SymbolicContext] = None,
        trace: bool = True,
    ) -> FakeTensor:
        shape_env: Optional[ShapeEnv] = self.shape_env
        if static_shapes is None:
            static_shapes = self.static_shapes
        if static_shapes:
            assert symbolic_context is None, (
                "cannot set both static_shapes and symbolic_context"
            )
            shape_env = None
        return self.fake_tensor_converter.from_real_tensor(
            self,
            tensor,
            shape_env=shape_env,
            source=source,
            symbolic_context=symbolic_context,
            trace=trace,
        )


_StoragePointer = object


def _validate_symbolic_output_for_caching(
    state: _CacheKeyState, output: FakeTensor
) -> None:
    """
    Validate symbolic content in output and raise _BypassDispatchCache if
    caching should be bypassed.

    Args:
        state: Cache key state containing known symbols
        output: Output to validate

    Raises:
        _BypassDispatchCache: If output contains symbolic content that
            prevents caching

    Details:
    If our output contains any symbols that didn't appear in the input then we
    need to bypass. Usually this will be unbacked symbols which can't be
    properly reconstructed but there could be "weird" cases where backed
    symbols spontaneously appear (from non-input state)?

    If we're proxy (symbol) tracing and the output contains ANY symbols then we
    need to bypass. The problem is that ProxyTorchDispatchMode relies on
    SymNode object identity and being able to see the construction of SymNodes.

    We could improve the proxy tracing case in a few ways:

    1. If the output SymNodes are directly copied from inputs then this is
    actually fine - they're already tracked. This would probably be the
    biggest bang for the buck.

    2. If the output (tensors) are all direct copies of the inputs then this is
    also fine - since they're inputs they must be tracked. We already compute
    this we just don't plumb it around enough.

    3. If the output SymNodes are already tracked by the proxy then this is
    also actually fine - they're properly tracked. This probably wouldn't be
    common since for most outputs we use torch.empty_strided() and recompute
    strides.

    4. We could use the proxy to track "how" the SymNodes were computed and
    when using the cache we could "replay" them properly to teach the proxy
    how to build them.
    """
    from torch.fx.experimental.symbolic_shapes import _iterate_exprs, _iterate_nodes

    is_tracing = torch.fx.experimental.proxy_tensor.get_proxy_mode() is not None

    if is_tracing:
        # Check for SymNode types in PROXY mode - this should bypass caching
        # regardless of whether symbols are known or not
        for _ in _iterate_nodes(output):
            raise _BypassDispatchCache("Proxy mode with SymNode output")
    else:
        # Check for unrepresented symbols in tensor expressions
        for s in _iterate_exprs(output):
            for symbol in s.free_symbols:
                if symbol not in state.known_symbols:
                    raise _BypassDispatchCache("unrepresented symbol in output")


# NB: returns fake tensors
def run_fallback_kernel(
    fake_mode: FakeTensorMode,
    func: OpOverload,
    flat_args: Sequence[object],
    args_spec: PyTree,
    orig_not_implemented_exception: RuntimeError,
) -> FakeTensor:
    # these should all be supported, just to be safe
    # avoid fallback for operators which inplace modify metadata
    # because the input fake tensors would be unmodified
    if torch.Tag.inplace_view in func.tags:
        raise orig_not_implemented_exception

    inp_impls = {}

    # Don't use in_kernel_invocation_manager(fake_mode) as we want to do
    # REAL compute (not with meta device)
    with no_dispatch():

        def to_real_tensor(e: T) -> Union[T, Tensor]:
            if fake_mode.is_our_fake(e):
                out = torch.zeros_like(e, device=e.fake_device)
                if e.is_sparse:
                    out._coalesced_(e.is_coalesced())
                inp_impls[id(out)] = e
                return out
            return e

        flat_args = [to_real_tensor(a) for a in flat_args]
        args, kwargs = pytree.tree_unflatten(flat_args, args_spec)

        r = func(*args, **kwargs)

    storages: set[_StoragePointer] = set()

    for e in flat_args:
        if isinstance(e, Tensor):
            if not is_sparse_any(e):
                storages.add(e._typed_storage()._cdata)

    # TODO: also check metadata change on inputs
    # proper aliasing/metadata relationship between outputs and inputs will
    # not be set up, because of conversion to device, unless we can reuse an
    # input impl
    def map_out(e: T) -> Union[T, FakeTensor]:
        if id(e) not in inp_impls and (
            isinstance(e, Tensor)
            and not is_sparse_any(e)
and e._typed_storage()._cdata in storages ): raise orig_not_implemented_exception if isinstance(e, Tensor): if id(e) in inp_impls: return inp_impls[id(e)] else: return fake_mode.fake_tensor_converter.from_real_tensor(fake_mode, e) else: return e return pytree.tree_map(map_out, r) def _set_cache_key_for_shape_env( cache: dict[_DispatchCacheKey, _DispatchCacheEntry], key: _DispatchCacheKey, entry: _DispatchCacheEntry, ) -> None: key.strip_shape_env() cache[key] = entry def _set_cache_key( cache: dict[_DispatchCacheKey, _DispatchCacheEntry], key: _DispatchCacheKey, entry: _DispatchCacheEntry, ) -> None: cache[key] = entry # Just for use to allow copying a module to fake tensors, # does not apply elsewhere
FakeTensorMode
python
mlflow__mlflow
dev/pyproject.py
{ "start": 291, "end": 4968 }
class ____(Enum): SKINNY = "skinny" RELEASE = "release" DEV = "dev" TRACING = "tracing" def description(self) -> str: WARNING = "# Auto-generated by dev/pyproject.py. Do not edit manually." if self is PackageType.TRACING: return f"""{WARNING} # This file defines the package metadata of `mlflow-tracing`. """ if self is PackageType.SKINNY: return f"""{WARNING} # This file defines the package metadata of `mlflow-skinny`. """ if self is PackageType.RELEASE: return f"""{WARNING} # This file defines the package metadata of `mlflow`. `mlflow-skinny` and `mlflow-tracing` # are included in the requirements to prevent a version mismatch between `mlflow` and those # child packages. This file will replace `pyproject.toml` when releasing a new version. """ if self is PackageType.DEV: return f"""{WARNING} # This file defines the package metadata of `mlflow` **during development**. To install `mlflow` # from the source code, `mlflow-skinny` and `mlflow-tracing` are NOT included in the requirements. # This file will be replaced by `pyproject.release.toml` when releasing a new version. """ raise ValueError(f"Unreachable: {self}") SEPARATOR = """ # Package metadata: can't be updated manually, use dev/pyproject.py # ----------------------------------------------------------------- # Dev tool settings: can be updated manually """ SKINNY_README = """ <!-- Autogenerated by dev/pyproject.py. Do not edit manually. --> 📣 This is the `mlflow-skinny` package, a lightweight MLflow package without SQL storage, server, UI, or data science dependencies. Additional dependencies can be installed to leverage the full feature set of MLflow. For example: - To use the `mlflow.sklearn` component of MLflow Models, install `scikit-learn`, `numpy` and `pandas`. - To use SQL-based metadata storage, install `sqlalchemy`, `alembic`, and `sqlparse`. - To use serving-based features, install `flask` and `pandas`. --- <br> <br> """ # noqa: E501 # Tracing SDK should only include the minimum set of MLflow modules # to minimize the size of the package. 
TRACING_INCLUDE_FILES = [ "mlflow", # Flavors that we support auto tracing "mlflow.agno*", "mlflow.anthropic*", "mlflow.autogen*", "mlflow.bedrock*", "mlflow.crewai*", "mlflow.dspy*", "mlflow.gemini*", "mlflow.groq*", "mlflow.langchain*", "mlflow.litellm*", "mlflow.llama_index*", "mlflow.mistral*", "mlflow.openai*", "mlflow.strands*", "mlflow.haystack*", # Other necessary modules "mlflow.azure*", "mlflow.entities*", "mlflow.environment_variables", "mlflow.exceptions", "mlflow.legacy_databricks_cli*", "mlflow.prompt*", "mlflow.protos*", "mlflow.pydantic_ai*", "mlflow.smolagents*", "mlflow.store*", "mlflow.telemetry*", "mlflow.tracing*", "mlflow.tracking*", "mlflow.types*", "mlflow.utils*", "mlflow.version", ] TRACING_EXCLUDE_FILES = [ # Large proto files that are not needed in the package "mlflow/protos/databricks_artifacts_pb2.py", "mlflow/protos/databricks_filesystem_service_pb2.py", "mlflow/protos/databricks_uc_registry_messages_pb2.py", "mlflow/protos/databricks_uc_registry_service_pb2.py", "mlflow/protos/model_registry_pb2.py", "mlflow/protos/unity_catalog_oss_messages_pb2.py", "mlflow/protos/unity_catalog_oss_service_pb2.py", # Test files "tests", "tests.*", ] def find_duplicates(seq): counted = Counter(seq) return [item for item, count in counted.items() if count > 1] def write_file_if_changed(file_path: Path, new_content: str) -> None: if file_path.exists(): existing_content = file_path.read_text() if existing_content == new_content: print(f"No changes in {file_path}, skipping write.") return print(f"Writing changes to {file_path}.") file_path.write_text(new_content) def format_content_with_taplo(content: str) -> str: return ( subprocess.check_output( ["bin/taplo", "fmt", "-"], input=content, text=True, ).strip() + "\n" ) def write_toml_file_if_changed( file_path: Path, description: str, toml_data: dict[str, Any] ) -> None: """ Write a TOML file with description only if content has changed. Formats content with taplo before comparison. """ new_content = description + "\n" + toml.dumps(toml_data) formatted_content = format_content_with_taplo(new_content) write_file_if_changed(file_path, formatted_content)
PackageType
python
Pylons__pyramid
src/pyramid/authentication.py
{ "start": 22464, "end": 24227 }
class ____: """ This class represents an authentication token. You must pass in the shared secret, the userid, and the IP address. Optionally you can include tokens (a list of strings, representing role names), 'user_data', which is arbitrary data available for your own use in later scripts. Lastly, you can override the cookie name and timestamp. Once you provide all the arguments, use .cookie_value() to generate the appropriate authentication ticket. Usage:: token = AuthTicket('sharedsecret', 'username', os.environ['REMOTE_ADDR'], tokens=['admin']) val = token.cookie_value() """ def __init__( self, secret, userid, ip, tokens=(), user_data='', time=None, cookie_name='auth_tkt', secure=False, hashalg='md5', ): self.secret = secret self.userid = userid self.ip = ip self.tokens = ','.join(tokens) self.user_data = user_data if time is None: self.time = time_mod.time() else: self.time = time self.cookie_name = cookie_name self.secure = secure self.hashalg = hashalg def digest(self): return calculate_digest( self.ip, self.time, self.secret, self.userid, self.tokens, self.user_data, self.hashalg, ) def cookie_value(self): v = f'{self.digest()}{int(self.time):08x}{quote(self.userid)}!' if self.tokens: v += self.tokens + '!' v += self.user_data return v # this class licensed under the MIT license (stolen from Paste)
AuthTicket
python
django__django
tests/queries/models.py
{ "start": 6185, "end": 6499 }
class ____(models.Model): id = models.CharField(max_length=20, primary_key=True) custom_pk = models.ManyToManyField(CustomPk) tag = models.CharField(max_length=20) # An inter-related setup with a model subclass that has a nullable # path to another model, and a return path from that model.
CustomPkTag
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 1171661, "end": 1171875 }
class ____(VegaLiteSchema): """SelectionInit schema wrapper.""" _schema = {"$ref": "#/definitions/SelectionInit"} def __init__(self, *args, **kwds): super().__init__(*args, **kwds)
SelectionInit
python
sqlalchemy__sqlalchemy
test/orm/test_core_compilation.py
{ "start": 111531, "end": 112192 }
class ____(fixtures.DeclarativeMappedTest, _CoreCorrelateTest): @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class T1(Base): __tablename__ = "t1" a = Column(Integer, primary_key=True) @hybridproperty def c(self): return self class T2(Base): __tablename__ = "t2" a = Column(Integer, primary_key=True) @hybridproperty def c(self): return self def _fixture(self): t1, t2 = self.classes("T1", "T2") return t1, t2, select(t1).where(t1.c.a == t2.c.a)
CorrelateTest
python
ethereum__web3.py
web3/exceptions.py
{ "start": 8155, "end": 8342 }
class ____(TaskNotRunning): """ Raised to alert the subscription manager that an exception occurred in the subscription processing task. """
SubscriptionHandlerTaskException
python
kubernetes-client__python
kubernetes/client/models/v1_local_object_reference.py
{ "start": 383, "end": 3958 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'name': 'str' } attribute_map = { 'name': 'name' } def __init__(self, name=None, local_vars_configuration=None): # noqa: E501 """V1LocalObjectReference - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self.discriminator = None if name is not None: self.name = name @property def name(self): """Gets the name of this V1LocalObjectReference. # noqa: E501 Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501 :return: The name of this V1LocalObjectReference. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this V1LocalObjectReference. Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501 :param name: The name of this V1LocalObjectReference. # noqa: E501 :type: str """ self._name = name def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1LocalObjectReference): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1LocalObjectReference): return True return self.to_dict() != other.to_dict()
V1LocalObjectReference
python
getsentry__sentry
src/sentry/sentry_metrics/indexer/limiters/writes.py
{ "start": 7700, "end": 8682 }
class ____: """ The WritesLimiterFactory is in charge of initializing the WritesLimiter based on a configuration's namespace and options. Ideally this logic would live in the initialization of the backends (postgres etc) but since each backend supports multiple use cases dynamically we just keep the mapping of rate limiters in this factory. """ def __init__(self) -> None: self.rate_limiters: MutableMapping[str, WritesLimiter] = {} def get_ratelimiter(self, config: MetricsIngestConfiguration) -> WritesLimiter: namespace = config.writes_limiter_namespace if namespace not in self.rate_limiters: writes_rate_limiter: WritesLimiter = WritesLimiter( namespace, **config.writes_limiter_cluster_options ) self.rate_limiters[namespace] = writes_rate_limiter return self.rate_limiters[namespace] writes_limiter_factory = WritesLimiterFactory()
WritesLimiterFactory
python
bokeh__bokeh
src/bokeh/models/ui/ui_element.py
{ "start": 1835, "end": 3864 }
class ____(Model): """ A base class for DOM-based UI elements with configurable styling. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) html_attributes = Dict(String, String, default={}, help=""" Allows to configure HTML attributes on the underlying HTML element. """) html_id = Nullable(String, default=None, help=""" Sets the ``id`` attribute of the underlying HTML element. This is a shorthand for the common HTML ``id`` attribute. Alternatively the ``id`` can be set in the ``html_attributes`` dictionary. ``html_id`` takes precedence. """) css_classes = List(String, default=[], help=""" A list of additional CSS classes to add to the underlying DOM element. """).accepts(Seq(String), lambda x: list(x)) css_variables = Dict(String, Either(String, Instance(Node)), default={}, help=""" Allows to define dynamically computed CSS variables. This can be used, for example, to coordinate positioning and styling between canvas' renderers and/or visuals and HTML-based UI elements. Variables defined here are equivalent to setting the same variables under ``:host { ... }`` in a CSS stylesheet. .. note:: This property is experimental and may change at any point. """) styles = Either(Dict(String, Nullable(String)), Instance(Styles), default={}, help=""" Inline CSS styles applied to the underlying DOM element. """) stylesheets = List( Either( Instance(StyleSheet), String, Dict(String, Either(Dict(String, Nullable(String)), Instance(Styles)), ), ), help=""" Additional style-sheets to use for the underlying DOM element. Note that all bokeh's components use shadow DOM, thus any included style sheets must reflect that, e.g. use ``:host`` CSS pseudo selector to access the root DOM element. """) @abstract
StyledElement
python
pytorch__pytorch
test/quantization/pt2e/test_x86inductor_quantizer.py
{ "start": 22345, "end": 23808 }
class ____(QuantizationTestCase): def _test_quantizer( self, model, example_inputs, quantizer, expected_node_occurrence, expected_node_list=None, is_qat=False, debug=False, lower=False, ): m_eager = model.train() if is_qat else model.eval() # program capture m = copy.deepcopy(m_eager) m = export(m, example_inputs, strict=True).module() # QAT Model failed to deepcopy export_model = m if is_qat else copy.deepcopy(m) m = prepare_qat_pt2e(m, quantizer) if is_qat else prepare_pt2e(m, quantizer) # Calibrate m(*example_inputs) prepare_model = copy.deepcopy(m) m = convert_pt2e(m) convert_model = copy.deepcopy(m) if debug: convert_model.print_readable(True) if lower: m = lower_pt2e_quantized_to_x86(m, example_inputs) m(*example_inputs) node_occurrence = { ns.call_function(k): v for k, v in expected_node_occurrence.items() } if expected_node_list is None: expected_node_list = [] node_list = [ns.call_function(n) for n in expected_node_list] self.checkGraphModuleNodes( m, expected_node_occurrence=node_occurrence, expected_node_list=node_list ) return export_model, prepare_model, convert_model @skipIfNoInductorSupport
X86InductorQuantTestCase
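The export, prepare, calibrate, convert sequence that _test_quantizer drives corresponds to the public PT2E flow. A minimal sketch; the toy module is hypothetical and the import paths reflect recent PyTorch 2.x releases, so they may differ across versions:

import torch
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.x86_inductor_quantizer import (
    X86InductorQuantizer,
    get_default_x86_inductor_quantization_config,
)


class Toy(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x)


example_inputs = (torch.randn(1, 3, 8, 8),)
m = torch.export.export(Toy().eval(), example_inputs, strict=True).module()
quantizer = X86InductorQuantizer().set_global(
    get_default_x86_inductor_quantization_config()
)
m = prepare_pt2e(m, quantizer)  # insert observers
m(*example_inputs)              # calibrate on representative inputs
m = convert_pt2e(m)             # fold observers into quantize/dequantize ops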
python
astropy__astropy
astropy/visualization/stretch.py
{ "start": 7094, "end": 10264 }
class ____(BaseStretch): r""" A power stretch. The stretch is given by: .. math:: y = x^a Parameters ---------- a : float The power index (see the above formula). ``a`` must be greater than 0. Examples -------- .. plot:: :show-source-link: import numpy as np from astropy.visualization import PowerStretch from matplotlib import pyplot as plt fig, ax = plt.subplots(figsize=(5, 5)) x = np.linspace(0, 1, 100) a_vals = (0.3, 0.5, 0.7, 1, 1.5, 2, 3) for a in a_vals: stretch = PowerStretch(a) label = f'{a=}' ax.plot(x, stretch(x, clip=True), label=label) ax.axis('equal') ax.plot(x, x, ls='dotted', color='k', alpha=0.3) ax.set_xlim(0, 1) ax.set_ylim(0, 1) ax.set_xlabel('Input Value') ax.set_ylabel('Output Value') ax.set_title(stretch.__class__.__name__) ax.legend(loc='lower right', fontsize=8) """ @property def _supports_invalid_kw(self): return True def __init__(self, a): super().__init__() if a <= 0: raise ValueError("a must be > 0") self.a = a def __call__(self, values, clip=True, out=None, invalid=None): """ Transform values using this stretch. Parameters ---------- values : array-like The input values, which should already be normalized to the [0:1] range. clip : bool, optional If `True` (default), values outside the [0:1] range are clipped to the [0:1] range. out : ndarray, optional If specified, the output values will be placed in this array (typically used for in-place calculations). invalid : None or float, optional Value to assign NaN values generated by this class. NaNs in the input ``values`` array are not changed. This option is generally used with matplotlib normalization classes, where the ``invalid`` value should map to the matplotlib colormap "under" value (i.e., any finite value < 0). If `None`, then NaN values are not replaced. This keyword has no effect if ``clip=True``. Returns ------- result : ndarray The transformed values. """ values = _prepare(values, clip=clip, out=out) replace_invalid = ( not clip and invalid is not None and ((-1 < self.a < 0) or (0 < self.a < 1)) ) with np.errstate(invalid="ignore"): if replace_invalid: idx = values < 0 np.power(values, self.a, out=values) if replace_invalid: # Assign new NaN (i.e., NaN not in the original input # values, but generated by this class) to the invalid value. values[idx] = invalid return values @property def inverse(self): """A stretch object that performs the inverse operation.""" return PowerStretch(1.0 / self.a)
PowerStretch
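A quick numeric check of the y = x**a mapping through the public astropy.visualization API:

import numpy as np
from astropy.visualization import PowerStretch

stretch = PowerStretch(2)
x = np.linspace(0, 1, 5)
print(stretch(x))                    # [0.     0.0625 0.25   0.5625 1.    ]
print(stretch.inverse(stretch(x)))   # recovers x, since inverse uses 1/a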
python
redis__redis-py
redis/commands/policies.py
{ "start": 7349, "end": 8558 }
class ____(AsyncPolicyResolver): """ Async base class for policy resolvers. """ def __init__( self, policies: PolicyRecords, fallback: Optional[AsyncPolicyResolver] = None ) -> None: self._policies = policies self._fallback = fallback async def resolve(self, command_name: str) -> Optional[CommandPolicies]: parts = command_name.split(".") if len(parts) > 2: raise ValueError(f"Wrong command or module name: {command_name}") module, command = parts if len(parts) == 2 else ("core", parts[0]) if self._policies.get(module, None) is None: if self._fallback is not None: return await self._fallback.resolve(command_name) else: return None if self._policies.get(module).get(command, None) is None: if self._fallback is not None: return await self._fallback.resolve(command_name) else: return None return self._policies.get(module).get(command) @abstractmethod def with_fallback(self, fallback: "AsyncPolicyResolver") -> "AsyncPolicyResolver": pass
AsyncBasePolicyResolver
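The lookup falls through to the fallback resolver whenever the module or the command is missing locally. A simplified, self-contained sketch of that chaining; the nested module -> command -> policy dicts and the StaticResolver class are hypothetical stand-ins for the redis-py types:

import asyncio
from typing import Optional


class StaticResolver:
    def __init__(self, policies: dict, fallback: Optional["StaticResolver"] = None):
        self._policies = policies
        self._fallback = fallback

    async def resolve(self, command_name: str) -> Optional[str]:
        parts = command_name.split(".")
        module, command = parts if len(parts) == 2 else ("core", parts[0])
        policy = self._policies.get(module, {}).get(command)
        if policy is None and self._fallback is not None:
            return await self._fallback.resolve(command_name)
        return policy


defaults = StaticResolver({"core": {"get": "any_shard"}})
overrides = StaticResolver({"json": {"get": "all_shards"}}, fallback=defaults)
print(asyncio.run(overrides.resolve("get")))       # any_shard, via the fallback
print(asyncio.run(overrides.resolve("json.get")))  # all_shards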
python
eventlet__eventlet
tests/wsgi_test.py
{ "start": 4327, "end": 6572 }
class ____(Exception):
    pass


def send_expect_close(sock, buf):
    # Some tests will induce behavior that causes the remote end to
    # close the connection before all of the data has been written.
    # With small kernel buffer sizes, this can cause an EPIPE error.
    # Since the test expects an early close, this can be ignored.
    try:
        sock.sendall(buf)
    except OSError as exc:
        if support.get_errno(exc) != errno.EPIPE:
            raise


def read_http(sock):
    fd = sock.makefile('rb')
    try:
        response_line = bytes_to_str(fd.readline().rstrip(b'\r\n'))
    except OSError as exc:
        # TODO: find out whether errno 54 is ok here or not; it shows up when
        # running tests on Python 3
        if support.get_errno(exc) in (10053, 54):
            raise ConnectionClosed
        raise
    if not response_line:
        raise ConnectionClosed(response_line)

    header_lines = []
    while True:
        line = fd.readline()
        if line == b'\r\n':
            break
        else:
            header_lines.append(line)
    headers_original = {}
    headers_lower = {}
    for x in header_lines:
        x = x.strip()
        if not x:
            continue
        key, value = bytes_to_str(x, encoding='latin1').split(':', 1)
        key = key.rstrip()
        value = value.lstrip()
        key_lower = key.lower()
        # FIXME: Duplicate headers are allowed as per HTTP RFC standard;
        # the client and/or intermediate proxies are supposed to treat them
        # as a single header with values concatenated using a comma (',')
        # delimiter.
        assert key_lower not in headers_lower, "header duplicated: {}".format(key)
        headers_original[key] = value
        headers_lower[key_lower] = value

    content_length_str = headers_lower.get(CONTENT_LENGTH.lower(), '')
    if content_length_str:
        num = int(content_length_str)
        body = fd.read(num)
    elif response_line.split()[1] in ('204', '304'):
        body = ''
    else:
        # read until EOF
        body = fd.read()

    result = HttpReadResult(
        status=response_line,
        headers_lower=headers_lower,
        body=body,
        headers_original=headers_original)
    return result
ConnectionClosed
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/assets/graph/remote_asset_graph.py
{ "start": 8587, "end": 9149 }
class ____:
    """RemoteRepositoryAssetNode paired with additional information from that repository.

    This split allows the repository-scoped asset graph to be constructed without
    depending on schedules/sensors, since defining schedules/sensors itself requires
    an asset graph.
    """

    asset_node: RemoteRepositoryAssetNode
    targeting_sensor_names: Sequence[str]
    targeting_schedule_names: Sequence[str]

    @property
    def handle(self) -> RepositoryHandle:
        return self.asset_node.repository_handle


@whitelist_for_serdes
@record
RepositoryScopedAssetInfo
python
tensorflow__tensorflow
tensorflow/python/tpu/tpu_test.py
{ "start": 6591, "end": 8583 }
class ____(test.TestCase): def test_all_to_all_zero_split_count(self): with self.assertRaisesRegex( ValueError, "split_count 0 must at least be one"): tpu_ops.all_to_all( x=[0.0, 0.1652, 0.6543], group_assignment=[1, -1], concat_dimension=0, split_dimension=0, split_count=0) def test_all_to_all_group_assignment_wrong_shape(self): with self.assertRaisesRegex( ValueError, "group_assignment must have rank 2"): tpu_ops.all_to_all( x=[0.0, 0.1652, 0.6543], group_assignment=[1, -1], concat_dimension=0, split_dimension=0, split_count=2) def test_all_to_all_split_count_not_equal_to_group_assignment_shape(self): with self.assertRaisesRegex( ValueError, "split_count 1 must equal the size of the second dimension " "of group_assignment 2"): tpu_ops.all_to_all( x=[0.0, 0.1652, 0.6543], group_assignment=[[0, 1], [2, 3]], concat_dimension=0, split_dimension=0, split_count=1) def test_all_to_all_split_count_not_divide_input_shape(self): with self.assertRaisesRegex( ValueError, "input dimension 3 not divisible by split_count 2"): tpu_ops.all_to_all( x=[[0.0], [0.1652], [0.6543]], group_assignment=[[0, 1], [2, 3]], concat_dimension=1, split_dimension=0, split_count=2) def do_einsum(): a = array_ops.placeholder(dtype=dtypes.float32, name="a", shape=[2, 3, 4]) b = array_ops.placeholder(dtype=dtypes.float32, name="b", shape=[2, 4, 5]) return special_math_ops.einsum("abc,acd->abd", a, b) def find_einsum(g): graph_def = g.as_graph_def() for node in graph_def.node: if node.op == "Einsum": return True return False def find_xla_einsum(g): graph_def = g.as_graph_def() for node in graph_def.node: if node.op == "XlaEinsum": return True return False
TPUOpsTest
python
python-visualization__folium
folium/plugins/boat_marker.py
{ "start": 150, "end": 2051 }
class ____(JSCSSMixin, Marker):
    """Add a Marker in the shape of a boat.

    Parameters
    ----------
    location: tuple of length 2, default None
        The latitude and longitude of the marker.
        If None, then the middle of the map is used.
    heading: int, default 0
        Heading of the boat, an angle value between 0 and 360 degrees.
    wind_heading: int, default None
        Heading of the wind, an angle value between 0 and 360 degrees.
        If None, then no wind is represented.
    wind_speed: int, default 0
        Speed of the wind in knots.

    https://github.com/thomasbrueggemann/leaflet.boatmarker
    """

    _template = Template(
        """
        {% macro script(this, kwargs) %}
            var {{ this.get_name() }} = L.boatMarker(
                {{ this.location|tojson }},
                {{ this.options|tojavascript }}
            ).addTo({{ this._parent.get_name() }});
            {% if this.wind_heading is not none -%}
            {{ this.get_name() }}.setHeadingWind(
                {{ this.heading }},
                {{ this.wind_speed }},
                {{ this.wind_heading }}
            );
            {% else -%}
            {{this.get_name()}}.setHeading({{this.heading}});
            {% endif -%}
        {% endmacro %}
        """
    )

    default_js = [
        (
            "markerclusterjs",
            "https://unpkg.com/leaflet.boatmarker/leaflet.boatmarker.min.js",
        ),
    ]

    def __init__(
        self,
        location,
        popup=None,
        icon=None,
        heading=0,
        wind_heading=None,
        wind_speed=0,
        **kwargs
    ):
        super().__init__(location, popup=popup, icon=icon)
        self._name = "BoatMarker"
        self.heading = heading
        self.wind_heading = wind_heading
        self.wind_speed = wind_speed
        self.options = remove_empty(**kwargs)
BoatMarker
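Typical usage through the public folium plugin API; the coordinates and wind values are illustrative:

import folium
from folium.plugins import BoatMarker

m = folium.Map(location=[59.3, 18.1], zoom_start=8)
BoatMarker(
    location=[59.3, 18.1],
    heading=45,         # boat pointing north-east
    wind_heading=150,   # omit (None) to hide the wind indicator
    wind_speed=10,      # knots
).add_to(m)
m.save("boats.html")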
python
getsentry__sentry
src/sentry/quotas/base.py
{ "start": 8724, "end": 8835 }
class ____(RateLimit): def __init__(self, **kwargs): super().__init__(False, **kwargs)
NotRateLimited
python
altair-viz__altair
altair/jupyter/jupyter_chart.py
{ "start": 3026, "end": 15231 }
class ____(anywidget.AnyWidget): _esm = load_js_src() _css = r""" .vega-embed { /* Make sure action menu isn't cut off */ overflow: visible; } """ # Public traitlets chart = traitlets.Instance(TopLevelSpec, allow_none=True) spec = traitlets.Dict(allow_none=True).tag(sync=True) debounce_wait = traitlets.Float(default_value=10).tag(sync=True) max_wait = traitlets.Bool(default_value=True).tag(sync=True) local_tz = traitlets.Unicode(default_value=None, allow_none=True).tag(sync=True) debug = traitlets.Bool(default_value=False) embed_options = traitlets.Dict(default_value=None, allow_none=True).tag(sync=True) # Internal selection traitlets _selection_types = traitlets.Dict() _vl_selections = traitlets.Dict().tag(sync=True) # Internal param traitlets _params = traitlets.Dict().tag(sync=True) # Internal comm traitlets for VegaFusion support _chart_state = traitlets.Any(allow_none=True) _js_watch_plan = traitlets.Any(allow_none=True).tag(sync=True) _js_to_py_updates = traitlets.Any(allow_none=True).tag(sync=True) _py_to_js_updates = traitlets.Any(allow_none=True).tag(sync=True) # Track whether charts are configured for offline use _is_offline = False @classmethod def enable_offline(cls, offline: bool = True): """ Configure JupyterChart's offline behavior. Parameters ---------- offline: bool If True, configure JupyterChart to operate in offline mode where JavaScript dependencies are loaded from vl-convert. If False, configure it to operate in online mode where JavaScript dependencies are loaded from CDN dynamically. This is the default behavior. """ from altair.utils._importers import import_vl_convert, vl_version_for_vl_convert if offline: if cls._is_offline: # Already offline return vlc = import_vl_convert() src_lines = load_js_src().split("\n") # Remove leading lines with only whitespace, comments, or imports while src_lines and ( len(src_lines[0].strip()) == 0 or src_lines[0].startswith("import") or src_lines[0].startswith("//") ): src_lines.pop(0) src = "\n".join(src_lines) # vl-convert's javascript_bundle function creates a self-contained JavaScript bundle # for JavaScript snippets that import from a small set of dependencies that # vl-convert includes. To see the available imports and their imported names, run # import vl_convert as vlc # help(vlc.javascript_bundle) bundled_src = vlc.javascript_bundle( src, vl_version=vl_version_for_vl_convert() ) cls._esm = bundled_src cls._is_offline = True else: cls._esm = load_js_src() cls._is_offline = False def __init__( self, chart: TopLevelSpec, debounce_wait: int = 10, max_wait: bool = True, debug: bool = False, embed_options: dict | None = None, **kwargs: Any, ): """ Jupyter Widget for displaying and updating Altair Charts, and retrieving selection and parameter values. Parameters ---------- chart: Chart Altair Chart instance debounce_wait: int Debouncing wait time in milliseconds. Updates will be sent from the client to the kernel after debounce_wait milliseconds of no chart interactions. max_wait: bool If True (default), updates will be sent from the client to the kernel every debounce_wait milliseconds even if there are ongoing chart interactions. If False, updates will not be sent until chart interactions have completed. debug: bool If True, debug messages will be printed embed_options: dict Options to pass to vega-embed. 
See https://github.com/vega/vega-embed?tab=readme-ov-file#options """ self.params = Params({}) self.selections = Selections({}) super().__init__( chart=chart, debounce_wait=debounce_wait, max_wait=max_wait, debug=debug, embed_options=embed_options, **kwargs, ) @traitlets.observe("chart") def _on_change_chart(self, change): # noqa: C901 """Updates the JupyterChart's internal state when the wrapped Chart instance changes.""" new_chart = change.new selection_watches = [] selection_types = {} initial_params = {} initial_vl_selections = {} empty_selections = {} if new_chart is None: with self.hold_sync(): self.spec = None self._selection_types = selection_types self._vl_selections = initial_vl_selections self._params = initial_params return params = getattr(new_chart, "params", []) if params is not alt.Undefined: for param in new_chart.params: if isinstance(param.name, alt.ParameterName): clean_name = param.name.to_json().strip('"') else: clean_name = param.name select = getattr(param, "select", alt.Undefined) if select != alt.Undefined: if not isinstance(select, dict): select = select.to_dict() select_type = select["type"] if select_type == "point": if not ( select.get("fields", None) or select.get("encodings", None) ): # Point selection with no associated fields or encodings specified. # This is an index-based selection selection_types[clean_name] = "index" empty_selections[clean_name] = IndexSelection( name=clean_name, value=[], store=[] ) else: selection_types[clean_name] = "point" empty_selections[clean_name] = PointSelection( name=clean_name, value=[], store=[] ) elif select_type == "interval": selection_types[clean_name] = "interval" empty_selections[clean_name] = IntervalSelection( name=clean_name, value={}, store=[] ) else: msg = f"Unexpected selection type {select.type}" raise ValueError(msg) selection_watches.append(clean_name) initial_vl_selections[clean_name] = {"value": None, "store": []} else: clean_value = param.value if param.value != alt.Undefined else None initial_params[clean_name] = clean_value # Handle the params generated by transforms for param_name in collect_transform_params(new_chart): initial_params[param_name] = None # Setup params self.params = Params(initial_params) def on_param_traitlet_changed(param_change): new_params = dict(self._params) new_params[param_change["name"]] = param_change["new"] self._params = new_params self.params.observe(on_param_traitlet_changed) # Setup selections self.selections = Selections(empty_selections) # Update properties all together with self.hold_sync(): if using_vegafusion(): if self.local_tz is None: self.spec = None def on_local_tz_change(change): self._init_with_vegafusion(change["new"]) self.observe(on_local_tz_change, ["local_tz"]) else: self._init_with_vegafusion(self.local_tz) else: self.spec = new_chart.to_dict() self._selection_types = selection_types self._vl_selections = initial_vl_selections self._params = initial_params def _init_with_vegafusion(self, local_tz: str): if self.chart is not None: vegalite_spec = self.chart.to_dict(context={"pre_transform": False}) with self.hold_sync(): self._chart_state = compile_to_vegafusion_chart_state( vegalite_spec, local_tz ) self._js_watch_plan = self._chart_state.get_watch_plan()[ "client_to_server" ] self.spec = self._chart_state.get_transformed_spec() # Callback to update chart state and send updates back to client def on_js_to_py_updates(change): if self.debug: updates_str = json.dumps(change["new"], indent=2) print( f"JavaScript to Python VegaFusion updates:\n {updates_str}" 
) updates = self._chart_state.update(change["new"]) if self.debug: updates_str = json.dumps(updates, indent=2) print( f"Python to JavaScript VegaFusion updates:\n {updates_str}" ) self._py_to_js_updates = updates self.observe(on_js_to_py_updates, ["_js_to_py_updates"]) @traitlets.observe("_params") def _on_change_params(self, change): for param_name, value in change.new.items(): setattr(self.params, param_name, value) @traitlets.observe("_vl_selections") def _on_change_selections(self, change): """Updates the JupyterChart's public selections traitlet in response to changes that the JavaScript logic makes to the internal _selections traitlet.""" for selection_name, selection_dict in change.new.items(): value = selection_dict["value"] store = selection_dict["store"] selection_type = self._selection_types[selection_name] if selection_type == "index": self.selections._set_value( selection_name, IndexSelection.from_vega(selection_name, signal=value, store=store), ) elif selection_type == "point": self.selections._set_value( selection_name, PointSelection.from_vega(selection_name, signal=value, store=store), ) elif selection_type == "interval": self.selections._set_value( selection_name, IntervalSelection.from_vega( selection_name, signal=value, store=store ), ) def collect_transform_params(chart: TopLevelSpec) -> set[str]: """ Collect the names of params that are defined by transforms. Parameters ---------- chart: Chart from which to extract transform params Returns ------- set of param names """ transform_params = set() # Handle recursive case for prop in ("layer", "concat", "hconcat", "vconcat"): for child in getattr(chart, prop, []): transform_params.update(collect_transform_params(child)) # Handle chart's own transforms transforms = getattr(chart, "transform", []) transforms = transforms if transforms != alt.Undefined else [] for tx in transforms: if hasattr(tx, "param"): transform_params.add(tx.param) return transform_params
JupyterChart
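In practice the widget is created through the public alt.JupyterChart entry point rather than from this module directly. A short sketch; the brush-selection chart is illustrative:

import altair as alt
import pandas as pd

df = pd.DataFrame({"x": range(10), "y": [v * v for v in range(10)]})
brush = alt.selection_interval(name="brush")
chart = alt.Chart(df).mark_point().encode(x="x", y="y").add_params(brush)

widget = alt.JupyterChart(chart)  # display `widget` in a notebook cell
# After interacting with the chart, the interval selection can be read back:
# widget.selections.brush.value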
python
psf__black
tests/data/cases/class_methods_new_line.py
{ "start": 983, "end": 1060 }
class ____: a = 1 class Inner: pass
ClassWithSingleFieldWithInner
python
geekcomputers__Python
AutoComplete_App/backend.py
{ "start": 29, "end": 5339 }
class ____: """ It works by building a `WordMap` that stores words to word-follower-count ---------------------------- e.g. To train the following statement: It is not enough to just know how tools work and what they worth, we have got to learn how to use them and to use them well. And with all these new weapons in your arsenal, we would better get those profits fired up we create the following: { It: {is:1} is: {not:1} not: {enough:1} enough: {to:1} to: {just:1, learn:1, use:2} just: {know:1} . . profits: {fired:1} fired: {up:1} } so the word completion for "to" will be "use". For optimization, we use another store `WordPrediction` to save the predictions for each word """ def __init__(self): """ Returns - None Input - None ---------- - Initialize database. we use sqlite3 - Check if the tables exist, if not create them - maintain a class level access to the database connection object """ self.conn = sqlite3.connect("autocompleteDB.sqlite3", autocommit=True) cur = self.conn.cursor() res = cur.execute("SELECT name FROM sqlite_master WHERE name='WordMap'") tables_exist = res.fetchone() if not tables_exist: self.conn.execute("CREATE TABLE WordMap(name TEXT, value TEXT)") self.conn.execute("CREATE TABLE WordPrediction (name TEXT, value TEXT)") cur.execute( "INSERT INTO WordMap VALUES (?, ?)", ( "wordsmap", "{}", ), ) cur.execute( "INSERT INTO WordPrediction VALUES (?, ?)", ( "predictions", "{}", ), ) def train(self, sentence): """ Returns - string Input - str: a string of words called sentence ---------- Trains the sentence. It does this by creating a map of current words to next words and their counts for each time the next word appears after the current word - takes in the sentence and splits it into a list of words - retrieves the word map and predictions map - creates the word map and predictions map together - saves word map and predictions map to the database """ cur = self.conn.cursor() words_list = sentence.split(" ") words_map = cur.execute( "SELECT value FROM WordMap WHERE name='wordsmap'" ).fetchone()[0] words_map = json.loads(words_map) predictions = cur.execute( "SELECT value FROM WordPrediction WHERE name='predictions'" ).fetchone()[0] predictions = json.loads(predictions) for idx in range(len(words_list) - 1): curr_word, next_word = words_list[idx], words_list[idx + 1] if curr_word not in words_map: words_map[curr_word] = {} if next_word not in words_map[curr_word]: words_map[curr_word][next_word] = 1 else: words_map[curr_word][next_word] += 1 # checking the completion word against the next word if curr_word not in predictions: predictions[curr_word] = { "completion_word": next_word, "completion_count": 1, } else: if ( words_map[curr_word][next_word] > predictions[curr_word]["completion_count"] ): predictions[curr_word]["completion_word"] = next_word predictions[curr_word]["completion_count"] = words_map[curr_word][ next_word ] words_map = json.dumps(words_map) predictions = json.dumps(predictions) cur.execute( "UPDATE WordMap SET value = (?) WHERE name='wordsmap'", (words_map,) ) cur.execute( "UPDATE WordPrediction SET value = (?) 
WHERE name='predictions'", (predictions,), ) return "training complete" def predict(self, word): """ Returns - string Input - string ---------- Returns the completion word of the input word - takes in a word - retrieves the predictions map - returns the completion word of the input word """ cur = self.conn.cursor() predictions = cur.execute( "SELECT value FROM WordPrediction WHERE name='predictions'" ).fetchone()[0] predictions = json.loads(predictions) completion_word = predictions[word.lower()]["completion_word"] return completion_word if __name__ == "__main__": input_ = "It is not enough to just know how tools work and what they worth,\ we have got to learn how to use them and to use them well. And with\ all these new weapons in your arsenal, we would better get those profits fired up" ac = AutoComplete() ac.train(input_) print(ac.predict("to"))
AutoComplete
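Stripped of the sqlite persistence, the training logic reduces to maintaining two dictionaries. A minimal in-memory sketch of the same word-map idea:

from collections import defaultdict


def train(sentence: str) -> dict:
    counts = defaultdict(lambda: defaultdict(int))
    words = sentence.split()
    for curr, nxt in zip(words, words[1:]):
        counts[curr][nxt] += 1
    # For each word, predict its most frequent follower.
    return {word: max(followers, key=followers.get)
            for word, followers in counts.items()}


predictions = train("to just know how to use them and to use them well")
print(predictions["to"])  # 'use', which appears twice after 'to'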
python
astropy__astropy
astropy/io/votable/exceptions.py
{ "start": 15487, "end": 16206 }
class ____(VOTableSpecWarning):
    """
    The parser has encountered an element that does not exist in the
    specification, or appears in an invalid context. Check the file
    against the VOTable schema (with a tool such as `xmllint
    <http://xmlsoft.org/xmllint.html>`__). If the file validates
    against the schema, and you still receive this warning, this may
    indicate a bug in ``astropy.io.votable``.

    **References**: `1.1
    <http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
    `1.2
    <http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
    """

    message_template = "Unknown tag '{}'. Ignoring"
    default_args = ("x",)
W10
python
scrapy__scrapy
tests/test_downloader_handlers_http_base.py
{ "start": 1882, "end": 16388 }
class ____(ABC): is_secure = False @property @abstractmethod def download_handler_cls(self) -> type[DownloadHandlerProtocol]: raise NotImplementedError @async_yield_fixture async def download_handler(self) -> AsyncGenerator[DownloadHandlerProtocol]: dh = build_from_crawler(self.download_handler_cls, get_crawler()) yield dh await close_dh(dh) @deferred_f_from_coro_f async def test_download( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: request = Request(mockserver.url("/text", is_secure=self.is_secure)) response = await download_request(download_handler, request) assert response.body == b"Works" @deferred_f_from_coro_f async def test_download_head( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: request = Request( mockserver.url("/text", is_secure=self.is_secure), method="HEAD" ) response = await download_request(download_handler, request) assert response.body == b"" @pytest.mark.parametrize( "http_status", [ pytest.param(http_status, id=f"status={http_status.value}") for http_status in HTTPStatus if http_status.value == 200 or http_status.value // 100 in (4, 5) ], ) @deferred_f_from_coro_f async def test_download_has_correct_http_status_code( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol, http_status: HTTPStatus, ) -> None: request = Request( mockserver.url(f"/status?n={http_status.value}", is_secure=self.is_secure) ) response = await download_request(download_handler, request) assert response.status == http_status.value @deferred_f_from_coro_f async def test_server_receives_correct_request_headers( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol, ) -> None: request_headers = { # common request headers "Accept": "text/html", "Accept-Charset": "utf-8", "Accept-Datetime": "Thu, 31 May 2007 20:35:00 GMT", "Accept-Encoding": "gzip, deflate", # custom headers "X-Custom-Header": "Custom Value", } request = Request( mockserver.url("/echo", is_secure=self.is_secure), headers=request_headers, ) response = await download_request(download_handler, request) assert response.status == HTTPStatus.OK body = json.loads(response.body.decode("utf-8")) assert "headers" in body for header_name, header_value in request_headers.items(): assert header_name in body["headers"] assert body["headers"][header_name] == [header_value] @deferred_f_from_coro_f async def test_server_receives_correct_request_body( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol, ) -> None: request_body = { "message": "It works!", } request = Request( mockserver.url("/echo", is_secure=self.is_secure), body=json.dumps(request_body), ) response = await download_request(download_handler, request) assert response.status == HTTPStatus.OK body = json.loads(response.body.decode("utf-8")) assert json.loads(body["body"]) == request_body @deferred_f_from_coro_f async def test_download_has_correct_response_headers( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol, ) -> None: # these headers will be set on the response in the resource and returned response_headers = { # common response headers "Access-Control-Allow-Origin": "*", "Allow": "Get, Head", "Age": "12", "Cache-Control": "max-age=3600", "Content-Encoding": "gzip", "Content-MD5": "Q2hlY2sgSW50ZWdyaXR5IQ==", "Content-Type": "text/html; charset=utf-8", "Date": "Date: Tue, 15 Nov 1994 08:12:31 GMT", "Pragma": "no-cache", "Retry-After": "120", "Set-Cookie": "CookieName=CookieValue; Max-Age=3600; Version=1", "WWW-Authenticate": 
"Basic", # custom headers "X-Custom-Header": "Custom Header Value", } request = Request( mockserver.url("/response-headers", is_secure=self.is_secure), headers={"content-type": "application/json"}, body=json.dumps(response_headers), ) response = await download_request(download_handler, request) assert response.status == 200 for header_name, header_value in response_headers.items(): assert header_name in response.headers, ( f"Response was missing expected header {header_name}" ) assert response.headers[header_name] == bytes( header_value, encoding="utf-8" ) @deferred_f_from_coro_f async def test_redirect_status( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: request = Request(mockserver.url("/redirect", is_secure=self.is_secure)) response = await download_request(download_handler, request) assert response.status == 302 @deferred_f_from_coro_f async def test_redirect_status_head( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: request = Request( mockserver.url("/redirect", is_secure=self.is_secure), method="HEAD" ) response = await download_request(download_handler, request) assert response.status == 302 @deferred_f_from_coro_f async def test_timeout_download_from_spider_nodata_rcvd( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol, reactor_pytest: str, ) -> None: if reactor_pytest == "asyncio" and sys.platform == "win32": # https://twistedmatrix.com/trac/ticket/10279 pytest.skip( "This test produces DirtyReactorAggregateError on Windows with asyncio" ) # client connects but no data is received meta = {"download_timeout": 0.5} request = Request(mockserver.url("/wait", is_secure=self.is_secure), meta=meta) d = deferred_from_coro(download_request(download_handler, request)) with pytest.raises((defer.TimeoutError, error.TimeoutError)): await maybe_deferred_to_future(d) @deferred_f_from_coro_f async def test_timeout_download_from_spider_server_hangs( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol, reactor_pytest: str, ) -> None: if reactor_pytest == "asyncio" and sys.platform == "win32": # https://twistedmatrix.com/trac/ticket/10279 pytest.skip( "This test produces DirtyReactorAggregateError on Windows with asyncio" ) # client connects, server send headers and some body bytes but hangs meta = {"download_timeout": 0.5} request = Request( mockserver.url("/hang-after-headers", is_secure=self.is_secure), meta=meta ) d = deferred_from_coro(download_request(download_handler, request)) with pytest.raises((defer.TimeoutError, error.TimeoutError)): await maybe_deferred_to_future(d) @pytest.mark.parametrize("send_header", [True, False]) @deferred_f_from_coro_f async def test_host_header( self, send_header: bool, mockserver: MockServer, download_handler: DownloadHandlerProtocol, ) -> None: host_port = f"{mockserver.host}:{mockserver.port(is_secure=self.is_secure)}" request = Request( mockserver.url("/host", is_secure=self.is_secure), headers={"Host": host_port} if send_header else {}, ) response = await download_request(download_handler, request) assert response.body == host_port.encode() if send_header: assert request.headers.get("Host") == host_port.encode() else: assert not request.headers @deferred_f_from_coro_f async def test_content_length_zero_bodyless_post_request_headers( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: """Tests if "Content-Length: 0" is sent for bodyless POST requests. 
This is not strictly required by HTTP RFCs but can cause trouble for some web servers. See: https://github.com/scrapy/scrapy/issues/823 https://issues.apache.org/jira/browse/TS-2902 https://github.com/kennethreitz/requests/issues/405 https://bugs.python.org/issue14721 """ request = Request( mockserver.url("/contentlength", is_secure=self.is_secure), method="POST" ) response = await download_request(download_handler, request) assert response.body == b"0" @deferred_f_from_coro_f async def test_content_length_zero_bodyless_post_only_one( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: request = Request( mockserver.url("/echo", is_secure=self.is_secure), method="POST" ) response = await download_request(download_handler, request) headers = Headers(json.loads(response.text)["headers"]) contentlengths = headers.getlist("Content-Length") assert len(contentlengths) == 1 assert contentlengths == [b"0"] @deferred_f_from_coro_f async def test_payload( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: body = b"1" * 100 # PayloadResource requires body length to be 100 request = Request( mockserver.url("/payload", is_secure=self.is_secure), method="POST", body=body, ) response = await download_request(download_handler, request) assert response.body == body @deferred_f_from_coro_f async def test_response_header_content_length( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: request = Request( mockserver.url("/text", is_secure=self.is_secure), method="GET" ) response = await download_request(download_handler, request) assert response.headers[b"content-length"] == b"5" @pytest.mark.parametrize( ("filename", "body", "response_class"), [ ("foo.html", b"", HtmlResponse), ("foo", b"<!DOCTYPE html>\n<title>.</title>", HtmlResponse), ], ) @deferred_f_from_coro_f async def test_response_class( self, filename: str, body: bytes, response_class: type[Response], mockserver: MockServer, download_handler: DownloadHandlerProtocol, ) -> None: request = Request( mockserver.url(f"/{filename}", is_secure=self.is_secure), body=body ) response = await download_request(download_handler, request) assert type(response) is response_class # pylint: disable=unidiomatic-typecheck @deferred_f_from_coro_f async def test_get_duplicate_header( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: request = Request(mockserver.url("/duplicate-header", is_secure=self.is_secure)) response = await download_request(download_handler, request) assert response.headers.getlist(b"Set-Cookie") == [b"a=b", b"c=d"] @deferred_f_from_coro_f async def test_download_is_not_automatically_gzip_decoded( self, download_handler: DownloadHandlerProtocol, mockserver: MockServer ) -> None: """Test download handler does not automatically decode content using the scheme provided in Content-Encoding header""" data = "compress-me" # send a request to mock resource that gzip encodes the "data" url parameter request = Request( mockserver.url(f"/compress?data={data}", is_secure=self.is_secure), headers={ "accept-encoding": "gzip", }, ) response = await download_request(download_handler, request) assert response.status == 200 # check that the Content-Encoding header is gzip content_encoding = response.headers[b"Content-Encoding"] assert content_encoding == b"gzip" # check that the response is still encoded # by checking for the magic number that is always included at the start of a gzip encoding # see 
https://datatracker.ietf.org/doc/html/rfc1952#page-5 section 2.3.1 GZIP_MAGIC = b"\x1f\x8b" assert response.body[:2] == GZIP_MAGIC, "Response body was not in gzip format" # check that a gzip decoding matches the data sent in the request expected_decoding = bytes(data, encoding="utf-8") assert gzip.decompress(response.body) == expected_decoding @deferred_f_from_coro_f async def test_no_cookie_processing_or_persistence( self, mockserver: MockServer, download_handler: DownloadHandlerProtocol ) -> None: cookie_name = "foo" cookie_value = "bar" # check that cookies are not modified request = Request( mockserver.url( f"/set-cookie?{cookie_name}={cookie_value}", is_secure=self.is_secure ) ) response = await download_request(download_handler, request) assert response.status == 200 set_cookie = response.headers.get(b"Set-Cookie") assert set_cookie == f"{cookie_name}={cookie_value}".encode() # check that cookies are not sent in the next request request = Request(mockserver.url("/echo", is_secure=self.is_secure)) response = await download_request(download_handler, request) assert response.status == 200 headers = Headers(json.loads(response.text)["headers"]) assert "Cookie" not in headers assert "cookie" not in headers
TestHttpBase
python
facebook__pyre-check
api/query.py
{ "start": 1343, "end": 1961 }
class ____: def __init__(self, call: Dict[str, Any]) -> None: self.target: str = "" if "target" in call: self.target = call["target"] else: self.target = call["direct_target"] self.kind: str = call["kind"] self.locations: List[Location] = [ _parse_location(location) for location in call["locations"] ] def __eq__(self, other: "CallGraphTarget") -> bool: return ( self.target == other.target and self.kind == other.kind and self.locations == other.locations )
CallGraphTarget
python
python__mypy
mypy/typeanal.py
{ "start": 104136, "end": 104817 }
class ____(BoolTypeQuery): def __init__(self) -> None: super().__init__(ANY_STRATEGY) def visit_any(self, t: AnyType) -> bool: return t.type_of_any == TypeOfAny.explicit def visit_typeddict_type(self, t: TypedDictType) -> bool: # typeddict is checked during TypedDict declaration, so don't typecheck it here. return False def has_any_from_unimported_type(t: Type) -> bool: """Return true if this type is Any because an import was not followed. If type t is such Any type or has type arguments that contain such Any type this function will return true. """ return t.accept(HasAnyFromUnimportedType())
HasExplicitAny
python
pytorch__pytorch
benchmarks/operator_benchmark/pt/quantization_test.py
{ "start": 2075, "end": 4389 }
class ____(op_bench.TorchBenchmarkBase): r"""Benchmarks both quantization and dequantization.""" def init(self, C, M, N, dtype, axis, mode): assert mode in ("Q", "D") self.input = torch.rand(C, M, N) self.op = torch.quantize_per_channel channel_len = (C, M, N)[axis] self.kwargs = { "scales": torch.tensor([1.0] * channel_len), "zero_points": torch.tensor([0] * channel_len), "dtype": dtype, "axis": axis, } self.set_module_name("QuantizePerChannel") if mode == "D": self.input = self.op(self.input, **self.kwargs) def dequant(input, scales, zero_points, axis: int, dtype: int): return input.dequantize() self.op = dequant self.set_module_name("DequantizePerChannel") self.inputs = { "input": self.input, "scales": torch.tensor([1.0] * channel_len), "zero_points": torch.tensor([0] * channel_len), "axis": axis, "dtype": dtype, } def forward(self, input, scales, zero_points, axis: int, dtype: int): return self.op( input, scales=scales, zero_points=zero_points, axis=axis, dtype=dtype ) op_bench.generate_pt_test( quantize_per_channel_configs_short + quantize_per_channel_configs_long, QuantizePerChannelBenchmark, ) # === Fake Quantization === # Generated benchmarks names start with 'learnable_kernel' or 'original_kernel', # for ex. 'original_kernel_nbits8_cpu_N1_C1_H256_W256_zero_point_dtypetorch.int32_bwdall' fake_quantize_configs_short_dict = { "attr_names": ["N", "C", "H", "W", "zero_point_dtype"], "attrs": [ [1, 3, 512, 512, torch.int32], ], "tags": ["short"], } fake_quantize_configs_long_dict = { "N": [1], "C": [1, 3, 8, 32], "H": [256, 1024], "W": [256, 1024], "zero_point_dtype": [torch.int32], "tags": ["long"], } fake_quantize_configs_short = op_bench.config_list( cross_product_configs={ "device": ("cpu", "cuda"), }, **fake_quantize_configs_short_dict, ) fake_quantize_configs_long = op_bench.cross_product_configs( device=("cpu", "cuda"), **fake_quantize_configs_long_dict )
QuantizePerChannelBenchmark
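The op under benchmark is the public torch.quantize_per_channel, which takes one (scale, zero_point) pair per slice along the chosen axis. A standalone example of the same call pattern:

import torch

x = torch.rand(2, 3)
q = torch.quantize_per_channel(
    x,
    scales=torch.tensor([1.0, 1.0, 1.0]),  # one scale per channel on axis 1
    zero_points=torch.tensor([0, 0, 0]),
    axis=1,
    dtype=torch.quint8,
)
print(q.dequantize().shape)  # torch.Size([2, 3])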
python
doocs__leetcode
solution/2900-2999/2954.Count the Number of Infection Sequences/Solution.py
{ "start": 111, "end": 541 }
class ____: def numberOfSequence(self, n: int, sick: List[int]) -> int: nums = [b - a - 1 for a, b in pairwise([-1] + sick + [n])] ans = 1 s = sum(nums) ans = fac[s] for x in nums: if x: ans = ans * pow(fac[x], mod - 2, mod) % mod for x in nums[1:-1]: if x > 1: ans = ans * pow(2, x - 1, mod) % mod return ans
Solution
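The counting argument behind the solution: with s initially healthy children split into gaps by the sick positions, any infection order is an interleaving of the per-gap orders, giving the multinomial s! / prod(len_i!); each interior gap additionally contributes 2**(len - 1) because it can be consumed from either end at every step except the last. A worked check on LeetCode's first example, with the fac table precomputed as in the full solution:

from itertools import pairwise

mod = 10**9 + 7
fac = [1] * 100  # the full solution precomputes this table at module scope
for i in range(1, 100):
    fac[i] = fac[i - 1] * i % mod

n, sick = 5, [0, 4]
nums = [b - a - 1 for a, b in pairwise([-1] + sick + [n])]  # gap sizes [0, 3, 0]
s = sum(nums)
ans = fac[s]                                        # s! orderings overall
for x in nums:
    if x:
        ans = ans * pow(fac[x], mod - 2, mod) % mod  # divide by each gap's len!
for x in nums[1:-1]:                                 # interior gaps only
    if x > 1:
        ans = ans * pow(2, x - 1, mod) % mod         # two infection fronts per step
print(ans)  # 4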
python
getsentry__sentry
tests/sentry/analytics/test_event.py
{ "start": 466, "end": 3305 }
class ____(TestCase): @patch("sentry.analytics.event.uuid1") def test_simple(self, mock_uuid1: MagicMock) -> None: mock_uuid1.return_value = self.get_mock_uuid() result = EventEnvelope( event=ExampleEvent( id=1, map={"key": "value"}, optional=False, ), datetime=datetime(2001, 4, 18, tzinfo=timezone.utc), ) result.datetime = datetime(2001, 4, 18, tzinfo=timezone.utc) assert result.serialize() == { "data": { "id": 1, "map": {"key": "value"}, "optional": False, }, "type": "example", "timestamp": 987552000, "uuid": b"AAEC", } @patch("sentry.analytics.event.uuid1") def test_simple_from_instance(self, mock_uuid1: MagicMock) -> None: mock_uuid1.return_value = self.get_mock_uuid() result = EventEnvelope( ExampleEvent.from_instance( None, id=1, map={"key": "value"}, optional=False, ) ) result.datetime = datetime(2001, 4, 18, tzinfo=timezone.utc) assert result.serialize() == { "data": { "id": 1, "map": {"key": "value"}, "optional": False, }, "type": "example", "timestamp": 987552000, "uuid": b"AAEC", } def test_optional_is_optional(self) -> None: result = ExampleEvent(id=1, map={"key": "value"}) assert result.serialize() == {"id": 1, "map": {"key": "value"}, "optional": None} def test_required_cannot_be_none(self) -> None: with pytest.raises(TypeError): ExampleEvent(map={"key": None}) # type: ignore[call-arg] def test_map_with_instance(self) -> None: result = ExampleEvent(id=1, map=DummyType()) assert result.serialize()["map"] == {"key": "value"} def test_new_fields_without_eventclass(self) -> None: class ExampleEventWithoutEventclass(ExampleEvent): new_field: str = "test" with pytest.raises(TypeError): with self.assertLogs("sentry.analytics.event", logging.WARNING) as cm: ExampleEventWithoutEventclass(id="1", map={"key": "value"}, new_field="test") # type: ignore[arg-type,call-arg] assert "Event class with new fields must use @eventclass decorator" in cm.records[0].msg def test_no_new_fields_without_eventclass(self) -> None: class ExampleEventWithoutEventclass(ExampleEvent): pass with self.assertNoLogs("sentry.analytics.event"): ExampleEventWithoutEventclass(id="1", map={"key": "value"}) # type: ignore[arg-type]
EventTest
python
networkx__networkx
networkx/algorithms/link_analysis/tests/test_hits.py
{ "start": 371, "end": 2546 }
class ____: @classmethod def setup_class(cls): G = nx.DiGraph() edges = [(1, 3), (1, 5), (2, 1), (3, 5), (5, 4), (5, 3), (6, 5)] G.add_edges_from(edges, weight=1) cls.G = G cls.G.a = dict( zip(sorted(G), [0.000000, 0.000000, 0.366025, 0.133975, 0.500000, 0.000000]) ) cls.G.h = dict( zip(sorted(G), [0.366025, 0.000000, 0.211325, 0.000000, 0.211325, 0.211325]) ) def test_hits_numpy(self): G = self.G h, a = _hits_numpy(G) for n in G: assert h[n] == pytest.approx(G.h[n], abs=1e-4) for n in G: assert a[n] == pytest.approx(G.a[n], abs=1e-4) @pytest.mark.parametrize("hits_alg", (nx.hits, _hits_python, _hits_scipy)) def test_hits(self, hits_alg): G = self.G h, a = hits_alg(G, tol=1.0e-08) for n in G: assert h[n] == pytest.approx(G.h[n], abs=1e-4) for n in G: assert a[n] == pytest.approx(G.a[n], abs=1e-4) nstart = {i: 1.0 / 2 for i in G} h, a = hits_alg(G, nstart=nstart) for n in G: assert h[n] == pytest.approx(G.h[n], abs=1e-4) for n in G: assert a[n] == pytest.approx(G.a[n], abs=1e-4) def test_empty(self): G = nx.Graph() assert nx.hits(G) == ({}, {}) assert _hits_numpy(G) == ({}, {}) assert _hits_python(G) == ({}, {}) assert _hits_scipy(G) == ({}, {}) def test_hits_not_convergent(self): G = nx.path_graph(50) with pytest.raises(nx.PowerIterationFailedConvergence): _hits_scipy(G, max_iter=1) with pytest.raises(nx.PowerIterationFailedConvergence): _hits_python(G, max_iter=1) with pytest.raises(nx.PowerIterationFailedConvergence): _hits_scipy(G, max_iter=0) with pytest.raises(nx.PowerIterationFailedConvergence): _hits_python(G, max_iter=0) with pytest.raises(nx.PowerIterationFailedConvergence): nx.hits(G, max_iter=0) with pytest.raises(nx.PowerIterationFailedConvergence): nx.hits(G, max_iter=1)
TestHITS
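The public entry point exercised above, run on the same small digraph; node 5 collects the links (highest authority) and node 1 points at the best authorities (highest hub score):

import networkx as nx

G = nx.DiGraph([(1, 3), (1, 5), (2, 1), (3, 5), (5, 4), (5, 3), (6, 5)])
hubs, authorities = nx.hits(G, tol=1e-8)
print(max(authorities, key=authorities.get))  # 5
print(max(hubs, key=hubs.get))                # 1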
python
Unity-Technologies__ml-agents
ml-agents/mlagents/trainers/trainer/trainer_factory.py
{ "start": 534, "end": 5007 }
class ____:
    def __init__(
        self,
        trainer_config: Dict[str, TrainerSettings],
        output_path: str,
        train_model: bool,
        load_model: bool,
        seed: int,
        param_manager: EnvironmentParameterManager,
        init_path: str = None,
        multi_gpu: bool = False,
    ):
        """
        The TrainerFactory generates the Trainers based on the configuration passed as
        input.
        :param trainer_config: A dictionary from behavior name to TrainerSettings
        :param output_path: The path to the directory where the artifacts generated by
        the trainer will be saved.
        :param train_model: If True, the Trainers will train the model and if False,
        only perform inference.
        :param load_model: If True, the Trainer will load neural networks weights from
        the previous run.
        :param seed: The seed of the Trainers. Dictates how the neural networks will be
        initialized.
        :param param_manager: The EnvironmentParameterManager that will dictate when/if
        the EnvironmentParameters must change.
        :param init_path: Path from which to load model.
        :param multi_gpu: If True, multi-gpu will be used. (currently not available)
        """
        self.trainer_config = trainer_config
        self.output_path = output_path
        self.init_path = init_path
        self.train_model = train_model
        self.load_model = load_model
        self.seed = seed
        self.param_manager = param_manager
        self.multi_gpu = multi_gpu
        self.ghost_controller = GhostController()

    def generate(self, behavior_name: str) -> Trainer:
        trainer_settings = self.trainer_config[behavior_name]
        return TrainerFactory._initialize_trainer(
            trainer_settings,
            behavior_name,
            self.output_path,
            self.train_model,
            self.load_model,
            self.ghost_controller,
            self.seed,
            self.param_manager,
            self.multi_gpu,
        )

    @staticmethod
    def _initialize_trainer(
        trainer_settings: TrainerSettings,
        brain_name: str,
        output_path: str,
        train_model: bool,
        load_model: bool,
        ghost_controller: GhostController,
        seed: int,
        param_manager: EnvironmentParameterManager,
        multi_gpu: bool = False,
    ) -> Trainer:
        """
        Initializes a trainer given a provided trainer configuration and brain
        parameters, as well as some general training session options.

        :param trainer_settings: Original trainer configuration loaded from YAML
        :param brain_name: Name of the brain to be associated with trainer
        :param output_path: Path to save the model and summary statistics
        :param train_model: Whether to train the model (vs. run inference)
        :param load_model: Whether to load the model or randomly initialize
        :param ghost_controller: The object that coordinates ghost trainers
        :param seed: The random seed to use
        :param param_manager: EnvironmentParameterManager, used to determine a reward
        buffer length for PPOTrainer
        :return:
        """
        trainer_artifact_path = os.path.join(output_path, brain_name)

        min_lesson_length = param_manager.get_minimum_reward_buffer_size(brain_name)
        trainer: Trainer = None  # type: ignore  # will be set to one of these, or raise
        try:
            trainer_type = all_trainer_types[trainer_settings.trainer_type]
            trainer = trainer_type(
                brain_name,
                min_lesson_length,
                trainer_settings,
                train_model,
                load_model,
                seed,
                trainer_artifact_path,
            )
        except KeyError:
            raise TrainerConfigError(
                f"The trainer config contains an unknown trainer type "
                f"{trainer_settings.trainer_type} for brain {brain_name}"
            )

        if trainer_settings.self_play is not None:
            trainer = GhostTrainer(
                trainer,
                brain_name,
                ghost_controller,
                min_lesson_length,
                trainer_settings,
                train_model,
                trainer_artifact_path,
            )
        return trainer
TrainerFactory
python
miyuchina__mistletoe
test/test_block_token.py
{ "start": 17479, "end": 19406 }
class ____(unittest.TestCase): def test_match(self): with patch('mistletoe.block_token.TableCell') as mock: line = '| cell 1 | cell 2 |\n' token = block_token.TableRow(line, line_number=10) self.assertEqual(token.row_align, [None]) mock.assert_has_calls([call('cell 1', None, 10), call('cell 2', None, 10)]) def test_easy_table_row(self): with patch('mistletoe.block_token.TableCell') as mock: line = 'cell 1 | cell 2\n' token = block_token.TableRow(line, line_number=10) self.assertEqual(token.row_align, [None]) mock.assert_has_calls([call('cell 1', None, 10), call('cell 2', None, 10)]) def test_short_row(self): with patch('mistletoe.block_token.TableCell') as mock: line = '| cell 1 |\n' token = block_token.TableRow(line, [None, None], 10) self.assertEqual(token.row_align, [None, None]) mock.assert_has_calls([call('cell 1', None, 10), call('', None, 10)]) def test_escaped_pipe_in_cell(self): with patch('mistletoe.block_token.TableCell') as mock: line = '| pipe: `\\|` | cell 2\n' token = block_token.TableRow(line, line_number=10, row_align=[None, None]) self.assertEqual(token.row_align, [None, None]) mock.assert_has_calls([call('pipe: `|`', None, 10), call('cell 2', None, 10)]) @unittest.skip('Even GitHub fails in here, workaround: always put a space before `|`') def test_not_really_escaped_pipe_in_cell(self): with patch('mistletoe.block_token.TableCell') as mock: line = '|ending with a \\\\|cell 2\n' token = block_token.TableRow(line, [None, None], 10) self.assertEqual(token.row_align, [None, None]) mock.assert_has_calls([call('ending with a \\\\', None, 10), call('cell 2', None, 10)])
TestTableRow
python
chroma-core__chroma
chromadb/segment/impl/manager/cache/cache.py
{ "start": 200, "end": 562 }
class ____(ABC): @abstractmethod def get(self, key: uuid.UUID) -> Optional[Segment]: pass @abstractmethod def pop(self, key: uuid.UUID) -> Optional[Segment]: pass @abstractmethod def set(self, key: uuid.UUID, value: Segment) -> None: pass @abstractmethod def reset(self) -> None: pass
SegmentCache
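A concrete cache only needs to satisfy the four abstract methods. A minimal dict-backed sketch, assuming the SegmentCache ABC above is in scope under that name (this is not one of Chroma's real implementations):

import uuid
from typing import Dict, Optional

Segment = dict  # stand-in for chromadb's Segment type in this sketch


class DictSegmentCache(SegmentCache):
    def __init__(self) -> None:
        self._store: Dict[uuid.UUID, Segment] = {}

    def get(self, key: uuid.UUID) -> Optional[Segment]:
        return self._store.get(key)

    def pop(self, key: uuid.UUID) -> Optional[Segment]:
        return self._store.pop(key, None)

    def set(self, key: uuid.UUID, value: Segment) -> None:
        self._store[key] = value

    def reset(self) -> None:
        self._store.clear()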
python
Pylons__pyramid
src/pyramid/config/assets.py
{ "start": 7267, "end": 9151 }
class ____: """ An asset source relative to a package. If this asset source is a file, then we expect the ``prefix`` to point to the new name of the file, and the incoming ``resource_name`` will be the empty string, as returned by the ``FileOverride``. """ def __init__(self, package, prefix): self.package = package if hasattr(package, '__name__'): self.pkg_name = package.__name__ else: self.pkg_name = package self.prefix = prefix def get_path(self, resource_name): return f'{self.prefix}{resource_name}' def get_filename(self, resource_name): path = self.get_path(resource_name) if pkg_resources.resource_exists(self.pkg_name, path): return pkg_resources.resource_filename(self.pkg_name, path) def get_stream(self, resource_name): path = self.get_path(resource_name) if pkg_resources.resource_exists(self.pkg_name, path): return pkg_resources.resource_stream(self.pkg_name, path) def get_string(self, resource_name): path = self.get_path(resource_name) if pkg_resources.resource_exists(self.pkg_name, path): return pkg_resources.resource_string(self.pkg_name, path) def exists(self, resource_name): path = self.get_path(resource_name) if pkg_resources.resource_exists(self.pkg_name, path): return True def isdir(self, resource_name): path = self.get_path(resource_name) if pkg_resources.resource_exists(self.pkg_name, path): return pkg_resources.resource_isdir(self.pkg_name, path) def listdir(self, resource_name): path = self.get_path(resource_name) if pkg_resources.resource_exists(self.pkg_name, path): return pkg_resources.resource_listdir(self.pkg_name, path)
PackageAssetSource
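A small usage sketch with the class above in scope; the 'mypkg' package and template path are hypothetical. Note that get_path is pure string concatenation, while the other accessors delegate to pkg_resources and fall through to None when the resource does not exist:

# Hypothetical package layout: mypkg/templates/home.pt
source = PackageAssetSource("mypkg", "templates/")
print(source.get_path("home.pt"))  # 'templates/home.pt'

if source.exists("home.pt"):
    print(source.get_string("home.pt"))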
python
doocs__leetcode
solution/1400-1499/1451.Rearrange Words in a Sentence/Solution.py
{ "start": 0, "end": 222 }
class ____: def arrangeWords(self, text: str) -> str: words = text.split() words[0] = words[0].lower() words.sort(key=len) words[0] = words[0].title() return " ".join(words)
Solution
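Because Python's sort is stable, words of equal length keep their input order, which is exactly what the problem asks for. A quick check with the Solution class above in scope:

print(Solution().arrangeWords("Keep calm and code on"))
# 'On and keep calm code': 'keep', 'calm' and 'code' are all length 4
# and stay in their original relative order.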
python
tensorflow__tensorflow
tensorflow/python/ops/ragged/strings_reduce_join_op_test.py
{ "start": 1086, "end": 5378 }
class ____(test_util.TensorFlowTestCase, parameterized.TestCase): def test_rank_one(self): input_array = [b'this', b'is', b'a', b'test'] truth = b'thisisatest' truth_shape = [] with self.cached_session(): output = ragged_string_ops.reduce_join( inputs=input_array, axis=-1, keepdims=False, separator='') output_array = self.evaluate(output) self.assertAllEqual(truth, output_array) self.assertAllEqual(truth_shape, output.get_shape()) @parameterized.parameters([ { 'input_array': [[ b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors' ], [b'please', b'do', b'not', b'panic', b'!']], 'axis': 0, 'keepdims': False, 'truth': [ b'thisplease', b'isdo', b'anot', b'testpanic', b'for!', b'ragged', b'tensors' ], 'truth_shape': [7], }, { 'input_array': [[ b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors' ], [b'please', b'do', b'not', b'panic', b'!']], 'axis': 1, 'keepdims': False, 'truth': [b'thisisatestforraggedtensors', b'pleasedonotpanic!'], 'truth_shape': [2], }, { 'input_array': [[ b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors' ], [b'please', b'do', b'not', b'panic', b'!']], 'axis': 1, 'keepdims': False, 'truth': [ b'this|is|a|test|for|ragged|tensors', b'please|do|not|panic|!' ], 'truth_shape': [2], 'separator': '|', }, { 'input_array': [[[b'a', b'b'], [b'b', b'c']], [[b'dd', b'ee']]], 'axis': -1, 'keepdims': False, 'truth': [[b'a|b', b'b|c'], [b'dd|ee']], 'truth_shape': [2, None], 'separator': '|', }, { 'input_array': [[[[b'a', b'b', b'c'], [b'dd', b'ee']]], [[[b'f', b'g', b'h'], [b'ii', b'jj']]]], 'axis': -2, 'keepdims': False, 'truth': [[[b'a|dd', b'b|ee', b'c']], [[b'f|ii', b'g|jj', b'h']]], 'truth_shape': [2, None, None], 'separator': '|', }, { 'input_array': [[[b't', b'h', b'i', b's'], [b'i', b's'], [b'a'], [b't', b'e', b's', b't']], [[b'p', b'l', b'e', b'a', b's', b'e'], [b'p', b'a', b'n', b'i', b'c']]], 'axis': -1, 'keepdims': False, 'truth': [[b'this', b'is', b'a', b'test'], [b'please', b'panic']], 'truth_shape': [2, None], 'separator': '', }, { 'input_array': [[[[b't'], [b'h'], [b'i'], [b's']], [[b'i', b's']], [[b'a', b'n']], [[b'e'], [b'r'], [b'r']]], [[[b'p'], [b'l'], [b'e'], [b'a'], [b's'], [b'e']], [[b'p'], [b'a'], [b'n'], [b'i'], [b'c']]]], 'axis': -1, 'keepdims': False, 'truth': [[[b't', b'h', b'i', b's'], [b'is'], [b'an'], [b'e', b'r', b'r']], [[b'p', b'l', b'e', b'a', b's', b'e'], [b'p', b'a', b'n', b'i', b'c']]], 'truth_shape': [2, None, None], 'separator': '', }, ]) def test_different_ranks(self, input_array, axis, keepdims, truth, truth_shape, separator=''): with self.cached_session(): input_tensor = ragged_factory_ops.constant(input_array) output = ragged_string_ops.reduce_join( inputs=input_tensor, axis=axis, keepdims=keepdims, separator=separator) output_array = self.evaluate(output) self.assertAllEqual(truth, output_array) if all(isinstance(s, tensor_shape.Dimension) for s in output.shape): output_shape = [dim.value for dim in output.shape] else: output_shape = output.shape self.assertAllEqual(truth_shape, output_shape) if __name__ == '__main__': googletest.main()
StringsReduceJoinOpTest
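The same behaviour is reachable through the public TensorFlow API, which dispatches to the ragged implementation for ragged inputs:

import tensorflow as tf

rt = tf.ragged.constant([["this", "is", "a", "test"], ["please", "panic"]])
print(tf.strings.reduce_join(rt, axis=-1, separator="|"))
# tf.Tensor([b'this|is|a|test' b'please|panic'], shape=(2,), dtype=string)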
python
google__jax
jax/_src/pallas/mosaic_gpu/core.py
{ "start": 48891, "end": 49193 }
class ____: def reduce(self, axes: int | Sequence[int]) -> "SomeLayout": if isinstance(axes, int): axes = (axes,) return ReducedLayout(self, axes) def to_mgpu(self, *args, **kwargs) -> mgpu.FragmentedLayout: raise NotImplementedError @dataclasses.dataclass(frozen=True)
SomeLayout
python
Lightning-AI__lightning
tests/tests_pytorch/loops/test_fetchers.py
{ "start": 1496, "end": 3962 }
class ____(Dataset): def __len__(self): return 3 def __getitem__(self, idx): return idx + 1 @pytest.mark.parametrize("multiple_iterables", [False, True]) @pytest.mark.parametrize("dataset_cls", [IterDataset, SizedDataset]) @pytest.mark.parametrize("prefetch_batches", list(range(5))) def test_prefetch_iterator(multiple_iterables, dataset_cls, prefetch_batches): fetcher = _PrefetchDataFetcher(prefetch_batches=prefetch_batches) assert fetcher.prefetch_batches == prefetch_batches if multiple_iterables: loader = CombinedLoader([DataLoader(dataset_cls()), DataLoader(dataset_cls())]) else: loader = CombinedLoader(DataLoader(dataset_cls())) fetcher.setup(loader) def generate(): generated = [(fetcher.fetched, data, fetcher.done) for data, batch_idx, dataloader_idx in fetcher] assert fetcher.fetched == 3 assert fetcher.done return generated # we can only know the last batch with sized iterables or when we prefetch is_last_batch = [False, False, prefetch_batches > 0 or dataset_cls is SizedDataset] fetched = ( [1, 2, 3] if dataset_cls is SizedDataset else [1, 2, 3, 3, 3, 3, 3][prefetch_batches : prefetch_batches + 3] ) batches = [[1, 1], [2, 2], [3, 3]] if multiple_iterables else [1, 2, 3] expected = list(zip(fetched, batches, is_last_batch)) assert len(expected) == 3 assert generate() == expected # validate reset works properly. assert generate() == expected assert fetcher.fetched == 3 @pytest.mark.parametrize("multiple_iterables", [False, True]) def test_profiler_closing(multiple_iterables): """Tests if the profiler terminates upon raising a StopIteration on an iterable dataset.""" class TestDataset(IterableDataset): def __init__(self): self.list = list(range(1)) def __iter__(self): return iter(self.list) fetcher = _PrefetchDataFetcher() if multiple_iterables: loader = CombinedLoader([DataLoader(TestDataset()), DataLoader(TestDataset())]) else: loader = CombinedLoader(TestDataset()) fetcher.setup(loader) profiler = SimpleProfiler() fetcher._start_profiler = lambda: profiler.start("test") fetcher._stop_profiler = lambda: profiler.stop("test") iter(fetcher) # on epoch 0 start next(fetcher) # raises StopIteration exception assert not bool(profiler.current_actions)
SizedDataset
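For reference, the two dataset flavours iterate identically under a plain DataLoader; what the fetcher tests exercise is only whether len() is known up front:

from torch.utils.data import DataLoader

# SizedDataset supports len(); IterDataset-style datasets do not, so a
# prefetching fetcher can only detect the last batch by reading ahead.
loader = DataLoader(SizedDataset(), batch_size=None)
print(len(loader))   # 3
print(list(loader))  # [1, 2, 3]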
python
getsentry__sentry
tests/sentry/monitors/test_validators.py
{ "start": 34796, "end": 35876 }
class ____(MonitorTestCase): """Base class for monitor validator tests with common setup.""" def setUp(self): super().setUp() self.user = self.create_user() self.request = RequestFactory().get("/") self.request.user = self.user access = MagicMock() access.has_any_project_scope.return_value = True self.request.access = access self.context = { "request": self.request, "organization": self.organization, "project": self.project, "access": access, } def _get_base_config(self, schedule_type="crontab", **overrides): """Get base monitor config with optional overrides.""" config = { "schedule": "0 * * * *", "scheduleType": schedule_type, "checkinMargin": 5, "maxRuntime": 30, "timezone": "UTC", } if schedule_type == "interval": config["schedule"] = [1, "hour"] config.update(overrides) return config
BaseMonitorValidatorTestCase
python
matplotlib__matplotlib
lib/matplotlib/transforms.py
{ "start": 23215, "end": 37747 }
class ____(BboxBase): """ A mutable bounding box. Examples -------- **Create from known bounds** The default constructor takes the boundary "points" ``[[xmin, ymin], [xmax, ymax]]``. >>> Bbox([[1, 1], [3, 7]]) Bbox([[1.0, 1.0], [3.0, 7.0]]) Alternatively, a Bbox can be created from the flattened points array, the so-called "extents" ``(xmin, ymin, xmax, ymax)`` >>> Bbox.from_extents(1, 1, 3, 7) Bbox([[1.0, 1.0], [3.0, 7.0]]) or from the "bounds" ``(xmin, ymin, width, height)``. >>> Bbox.from_bounds(1, 1, 2, 6) Bbox([[1.0, 1.0], [3.0, 7.0]]) **Create from collections of points** The "empty" object for accumulating Bboxs is the null bbox, which is a stand-in for the empty set. >>> Bbox.null() Bbox([[inf, inf], [-inf, -inf]]) Adding points to the null bbox will give you the bbox of those points. >>> box = Bbox.null() >>> box.update_from_data_xy([[1, 1]]) >>> box Bbox([[1.0, 1.0], [1.0, 1.0]]) >>> box.update_from_data_xy([[2, 3], [3, 2]], ignore=False) >>> box Bbox([[1.0, 1.0], [3.0, 3.0]]) Setting ``ignore=True`` is equivalent to starting over from a null bbox. >>> box.update_from_data_xy([[1, 1]], ignore=True) >>> box Bbox([[1.0, 1.0], [1.0, 1.0]]) .. warning:: It is recommended to always specify ``ignore`` explicitly. If not, the default value of ``ignore`` can be changed at any time by code with access to your Bbox, for example using the method `~.Bbox.ignore`. **Properties of the ``null`` bbox** .. note:: The current behavior of `Bbox.null()` may be surprising as it does not have all of the properties of the "empty set", and as such does not behave like a "zero" object in the mathematical sense. We may change that in the future (with a deprecation period). The null bbox is the identity for intersections >>> Bbox.intersection(Bbox([[1, 1], [3, 7]]), Bbox.null()) Bbox([[1.0, 1.0], [3.0, 7.0]]) except with itself, where it returns the full space. >>> Bbox.intersection(Bbox.null(), Bbox.null()) Bbox([[-inf, -inf], [inf, inf]]) A union containing null will always return the full space (not the other set!) >>> Bbox.union([Bbox([[0, 0], [0, 0]]), Bbox.null()]) Bbox([[-inf, -inf], [inf, inf]]) """ def __init__(self, points, **kwargs): """ Parameters ---------- points : `~numpy.ndarray` A (2, 2) array of the form ``[[x0, y0], [x1, y1]]``. """ super().__init__(**kwargs) points = np.asarray(points, float) if points.shape != (2, 2): raise ValueError('Bbox points must be of the form ' '"[[x0, y0], [x1, y1]]".') self._points = points self._minpos = _default_minpos.copy() self._ignore = True # it is helpful in some contexts to know if the bbox is a # default or has been mutated; we store the orig points to # support the mutated methods self._points_orig = self._points.copy() if DEBUG: ___init__ = __init__ def __init__(self, points, **kwargs): self._check(points) self.___init__(points, **kwargs) def invalidate(self): self._check(self._points) super().invalidate() def frozen(self): # docstring inherited frozen_bbox = super().frozen() frozen_bbox._minpos = self.minpos.copy() return frozen_bbox @staticmethod def unit(): """Create a new unit `Bbox` from (0, 0) to (1, 1).""" return Bbox([[0, 0], [1, 1]]) @staticmethod def null(): """Create a new null `Bbox` from (inf, inf) to (-inf, -inf).""" return Bbox([[np.inf, np.inf], [-np.inf, -np.inf]]) @staticmethod def from_bounds(x0, y0, width, height): """ Create a new `Bbox` from *x0*, *y0*, *width* and *height*. *width* and *height* may be negative. 
""" return Bbox.from_extents(x0, y0, x0 + width, y0 + height) @staticmethod def from_extents(*args, minpos=None): """ Create a new Bbox from *left*, *bottom*, *right* and *top*. The *y*-axis increases upwards. Parameters ---------- left, bottom, right, top : float The four extents of the bounding box. minpos : float or None If this is supplied, the Bbox will have a minimum positive value set. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors. """ bbox = Bbox(np.reshape(args, (2, 2))) if minpos is not None: bbox._minpos[:] = minpos return bbox def __format__(self, fmt): return ( 'Bbox(x0={0.x0:{1}}, y0={0.y0:{1}}, x1={0.x1:{1}}, y1={0.y1:{1}})'. format(self, fmt)) def __str__(self): return format(self, '') def __repr__(self): return 'Bbox([[{0.x0}, {0.y0}], [{0.x1}, {0.y1}]])'.format(self) def ignore(self, value): """ Set whether the existing bounds of the box should be ignored by subsequent calls to :meth:`update_from_data_xy`. value : bool - When ``True``, subsequent calls to `update_from_data_xy` will ignore the existing bounds of the `Bbox`. - When ``False``, subsequent calls to `update_from_data_xy` will include the existing bounds of the `Bbox`. """ self._ignore = value def update_from_path(self, path, ignore=None, updatex=True, updatey=True): """ Update the bounds of the `Bbox` to contain the vertices of the provided path. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. Parameters ---------- path : `~matplotlib.path.Path` ignore : bool, optional - When ``True``, ignore the existing bounds of the `Bbox`. - When ``False``, include the existing bounds of the `Bbox`. - When ``None``, use the last value passed to :meth:`ignore`. updatex, updatey : bool, default: True When ``True``, update the x/y values. """ if ignore is None: ignore = self._ignore if path.vertices.size == 0 or not (updatex or updatey): return if ignore: points = np.array([[np.inf, np.inf], [-np.inf, -np.inf]]) minpos = np.array([np.inf, np.inf]) else: points = self._points.copy() minpos = self._minpos.copy() valid_points = (np.isfinite(path.vertices[..., 0]) & np.isfinite(path.vertices[..., 1])) if updatex: x = path.vertices[..., 0][valid_points] points[0, 0] = min(points[0, 0], np.min(x, initial=np.inf)) points[1, 0] = max(points[1, 0], np.max(x, initial=-np.inf)) minpos[0] = min(minpos[0], np.min(x[x > 0], initial=np.inf)) if updatey: y = path.vertices[..., 1][valid_points] points[0, 1] = min(points[0, 1], np.min(y, initial=np.inf)) points[1, 1] = max(points[1, 1], np.max(y, initial=-np.inf)) minpos[1] = min(minpos[1], np.min(y[y > 0], initial=np.inf)) if np.any(points != self._points) or np.any(minpos != self._minpos): self.invalidate() if updatex: self._points[:, 0] = points[:, 0] self._minpos[0] = minpos[0] if updatey: self._points[:, 1] = points[:, 1] self._minpos[1] = minpos[1] def update_from_data_x(self, x, ignore=None): """ Update the x-bounds of the `Bbox` based on the passed in data. After updating, the bounds will have positive *width*, and *x0* will be the minimal value. Parameters ---------- x : `~numpy.ndarray` Array of x-values. ignore : bool, optional - When ``True``, ignore the existing bounds of the `Bbox`. - When ``False``, include the existing bounds of the `Bbox`. - When ``None``, use the last value passed to :meth:`ignore`. """ x = np.ravel(x) # The y-component in np.array([x, *y*]).T is not used. 
We simply pass # x again to not spend extra time on creating an array of unused data self.update_from_data_xy(np.array([x, x]).T, ignore=ignore, updatey=False) def update_from_data_y(self, y, ignore=None): """ Update the y-bounds of the `Bbox` based on the passed in data. After updating, the bounds will have positive *height*, and *y0* will be the minimal value. Parameters ---------- y : `~numpy.ndarray` Array of y-values. ignore : bool, optional - When ``True``, ignore the existing bounds of the `Bbox`. - When ``False``, include the existing bounds of the `Bbox`. - When ``None``, use the last value passed to :meth:`ignore`. """ y = np.ravel(y) # The x-component in np.array([*x*, y]).T is not used. We simply pass # y again to not spend extra time on creating an array of unused data self.update_from_data_xy(np.array([y, y]).T, ignore=ignore, updatex=False) def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True): """ Update the `Bbox` bounds based on the passed in *xy* coordinates. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. Parameters ---------- xy : (N, 2) array-like The (x, y) coordinates. ignore : bool, optional - When ``True``, ignore the existing bounds of the `Bbox`. - When ``False``, include the existing bounds of the `Bbox`. - When ``None``, use the last value passed to :meth:`ignore`. updatex, updatey : bool, default: True When ``True``, update the x/y values. """ if len(xy) == 0: return path = Path(xy) self.update_from_path(path, ignore=ignore, updatex=updatex, updatey=updatey) @BboxBase.x0.setter def x0(self, val): self._points[0, 0] = val self.invalidate() @BboxBase.y0.setter def y0(self, val): self._points[0, 1] = val self.invalidate() @BboxBase.x1.setter def x1(self, val): self._points[1, 0] = val self.invalidate() @BboxBase.y1.setter def y1(self, val): self._points[1, 1] = val self.invalidate() @BboxBase.p0.setter def p0(self, val): self._points[0] = val self.invalidate() @BboxBase.p1.setter def p1(self, val): self._points[1] = val self.invalidate() @BboxBase.intervalx.setter def intervalx(self, interval): self._points[:, 0] = interval self.invalidate() @BboxBase.intervaly.setter def intervaly(self, interval): self._points[:, 1] = interval self.invalidate() @BboxBase.bounds.setter def bounds(self, bounds): l, b, w, h = bounds points = np.array([[l, b], [l + w, b + h]], float) if np.any(self._points != points): self._points = points self.invalidate() @property def minpos(self): """ The minimum positive value in both directions within the Bbox. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum extent instead of *p0*. """ return self._minpos @minpos.setter def minpos(self, val): self._minpos[:] = val @property def minposx(self): """ The minimum positive value in the *x*-direction within the Bbox. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum *x*-extent instead of *x0*. """ return self._minpos[0] @minposx.setter def minposx(self, val): self._minpos[0] = val @property def minposy(self): """ The minimum positive value in the *y*-direction within the Bbox. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum *y*-extent instead of *y0*. 
""" return self._minpos[1] @minposy.setter def minposy(self, val): self._minpos[1] = val def get_points(self): """ Get the points of the bounding box as an array of the form ``[[x0, y0], [x1, y1]]``. """ self._invalid = 0 return self._points def set_points(self, points): """ Set the points of the bounding box directly from an array of the form ``[[x0, y0], [x1, y1]]``. No error checking is performed, as this method is mainly for internal use. """ if np.any(self._points != points): self._points = points self.invalidate() def set(self, other): """ Set this bounding box from the "frozen" bounds of another `Bbox`. """ if np.any(self._points != other.get_points()): self._points = other.get_points() self.invalidate() def mutated(self): """Return whether the bbox has changed since init.""" return self.mutatedx() or self.mutatedy() def mutatedx(self): """Return whether the x-limits have changed since init.""" return (self._points[0, 0] != self._points_orig[0, 0] or self._points[1, 0] != self._points_orig[1, 0]) def mutatedy(self): """Return whether the y-limits have changed since init.""" return (self._points[0, 1] != self._points_orig[0, 1] or self._points[1, 1] != self._points_orig[1, 1])
Bbox
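The docstring above already demonstrates accumulating points into a null `Bbox`; for completeness, a short runnable sketch using the public matplotlib API (`bounds` and `extents` are standard `BboxBase` properties):

import numpy as np
from matplotlib.transforms import Bbox

# Accumulate the bounds of scattered points, starting from the null bbox.
box = Bbox.null()
box.update_from_data_xy(np.array([[1, 1], [2, 3], [3, 2]]), ignore=False)
print(box.extents)  # [1. 1. 3. 3.]  -> (x0, y0, x1, y1)
print(box.bounds)   # (1.0, 1.0, 2.0, 2.0) -> (x0, y0, width, height)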
python
tensorflow__tensorflow
tensorflow/python/keras/saving/utils_v1/export_output.py
{ "start": 5989, "end": 7136 }
class ____(ExportOutput):
  """Represents the output of a regression head."""

  def __init__(self, value):
    """Constructor for `RegressionOutput`.

    Args:
      value: a float `Tensor` giving the predicted values. Required.

    Raises:
      ValueError: if the value is not a `Tensor` with dtype tf.float32.
    """
    if not (isinstance(value, tensor.Tensor) and value.dtype.is_floating):
      raise ValueError('Regression output value must be a float32 Tensor; '
                       'got {}'.format(value))
    self._value = value

  @property
  def value(self):
    return self._value

  def as_signature_def(self, receiver_tensors):
    if len(receiver_tensors) != 1:
      raise ValueError('Regression input must be a single string Tensor; '
                       'got {}'.format(receiver_tensors))
    (_, examples), = receiver_tensors.items()
    if dtypes.as_dtype(examples.dtype) != dtypes.string:
      raise ValueError('Regression input must be a single string Tensor; '
                       'got {}'.format(receiver_tensors))
    return signature_def_utils.regression_signature_def(examples, self.value)
RegressionOutput
python
huggingface__transformers
src/transformers/models/plbart/modeling_plbart.py
{ "start": 51774, "end": 58162 }
class ____(PLBartPreTrainedModel): def __init__(self, config: PLBartConfig, **kwargs): super().__init__(config, **kwargs) self.model = PLBartModel(config) self.classification_head = PLBartClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, Seq2SeqSequenceClassifierOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (: obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, )
PLBartForSequenceClassification
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/methodOverride1.py
{ "start": 9334, "end": 9416 }
class ____(A):
    def test(self, t: Sequence[int]) -> list[str]: ...
NarrowerReturn
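This sample exercises the variance rules for method overrides: a subclass may narrow the return type (covariance) and widen parameter types (contravariance). Base class `A` is not shown in the excerpt, so here is a self-contained illustration with hypothetical classes that type-checks under the same rule:

class Animal: ...
class Dog(Animal): ...

class Base:
    def make(self) -> Animal:
        return Animal()

class Sub(Base):
    # OK: return types are covariant, so narrowing Animal -> Dog is safe
    # for every caller that expected an Animal.
    def make(self) -> Dog:
        return Dog()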
python
getsentry__sentry
src/sentry/backup/crypto.py
{ "start": 1678, "end": 1934 }
class ____(Encryptor):
    """
    Encrypt using a public key stored on the local machine.
    """

    def __init__(self, fp: IO[bytes]):
        self.__key = fp.read()

    def get_public_key_pem(self) -> bytes:
        return self.__key
LocalFileEncryptor
python
pytorch__pytorch
benchmarks/fastrnns/custom_lstms.py
{ "start": 6389, "end": 6928 }
class ____(jit.ScriptModule):
    def __init__(self, cell, *cell_args):
        super().__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
    def forward(
        self, input: Tensor, state: tuple[Tensor, Tensor]
    ) -> tuple[Tensor, tuple[Tensor, Tensor]]:
        inputs = input.unbind(0)
        outputs = torch.jit.annotate(list[Tensor], [])
        for i in range(len(inputs)):
            out, state = self.cell(inputs[i], state)
            outputs += [out]
        return torch.stack(outputs), state
LSTMLayer
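`LSTMLayer` takes a cell class plus its constructor args and unrolls the cell over the leading (time) dimension. The benchmark pairs it with a custom LSTM cell defined elsewhere in the file; `TinyCell` below is a hypothetical stand-in that only shows the expected `(input, state) -> (output, state)` calling convention:

import torch
from torch import Tensor, jit, nn

class TinyCell(jit.ScriptModule):
    """Hypothetical cell: one linear step with a tanh-updated hidden state."""

    def __init__(self, input_size: int, hidden_size: int):
        super().__init__()
        self.ih = nn.Linear(input_size, hidden_size)

    @jit.script_method
    def forward(
        self, input: Tensor, state: tuple[Tensor, Tensor]
    ) -> tuple[Tensor, tuple[Tensor, Tensor]]:
        h, c = state
        h = torch.tanh(self.ih(input) + h)
        return h, (h, c)

layer = LSTMLayer(TinyCell, 10, 20)  # LSTMLayer as defined above
x = torch.randn(5, 3, 10)            # (seq_len, batch, input_size)
state = (torch.zeros(3, 20), torch.zeros(3, 20))
out, state = layer(x, state)
print(out.shape)  # torch.Size([5, 3, 20])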
python
pallets__jinja
src/jinja2/exceptions.py
{ "start": 4126, "end": 4458 }
class ____(TemplateSyntaxError):
    """Like a template syntax error, but covers cases where something in
    the template caused an error at compile time that wasn't necessarily
    caused by a syntax error.  However it's a direct subclass of
    :exc:`TemplateSyntaxError` and has the same attributes.
    """
TemplateAssertionError
python
bokeh__bokeh
tests/unit/bokeh/util/test_version.py
{ "start": 2365, "end": 3097 }
class ____:
    def test_release_version_unchanged(self) -> None:
        assert buv._base_version_helper("0.2.3") == "0.2.3"
        assert buv._base_version_helper("1.2.3") == "1.2.3"

    def test_dev_version_stripped(self) -> None:
        assert buv._base_version_helper("0.2.3.dev2") == "0.2.3"
        assert buv._base_version_helper("1.2.3.dev10") == "1.2.3"

    def test_rc_version_stripped(self) -> None:
        assert buv._base_version_helper("0.2.3.rc2") == "0.2.3"
        assert buv._base_version_helper("1.2.3.rc10") == "1.2.3"

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test__base_version_helper
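The implementation under test is not included in this excerpt; one plausible helper that satisfies all three tests (an assumption, not necessarily bokeh's actual code) simply strips a trailing pre-release segment:

import re

def _base_version_helper(version: str) -> str:
    """Strip a trailing .devN or .rcN segment, keeping the X.Y.Z base."""
    return re.sub(r"\.(dev|rc)\d+$", "", version)

assert _base_version_helper("1.2.3") == "1.2.3"
assert _base_version_helper("1.2.3.dev10") == "1.2.3"
assert _base_version_helper("0.2.3.rc2") == "0.2.3"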
python
pypa__pip
tests/unit/test_req.py
{ "start": 22862, "end": 42207 }
class ____: def setup_method(self) -> None: self.tempdir = tempfile.mkdtemp() def teardown_method(self) -> None: shutil.rmtree(self.tempdir, ignore_errors=True) def test_url_with_query(self) -> None: """InstallRequirement should strip the fragment, but not the query.""" url = "http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz" fragment = "#egg=bar" req = install_req_from_line(url + fragment) assert req.link is not None assert req.link.url == url + fragment, req.link def test_pep440_wheel_link_requirement(self) -> None: url = "https://whatever.com/test-0.4-py2.py3-bogus-any.whl" line = "test @ https://whatever.com/test-0.4-py2.py3-bogus-any.whl" req = install_req_from_line(line) parts = str(req.req).split("@", 1) assert len(parts) == 2 assert parts[0].strip() == "test" assert parts[1].strip() == url def test_pep440_url_link_requirement(self) -> None: url = "git+http://foo.com@ref#egg=foo" line = "foo @ git+http://foo.com@ref#egg=foo" req = install_req_from_line(line) parts = str(req.req).split("@", 1) assert len(parts) == 2 assert parts[0].strip() == "foo" assert parts[1].strip() == url def test_url_with_authentication_link_requirement(self) -> None: url = "https://what@whatever.com/test-0.4-py2.py3-bogus-any.whl" line = "https://what@whatever.com/test-0.4-py2.py3-bogus-any.whl" req = install_req_from_line(line) assert req.link is not None assert req.link.is_wheel assert req.link.scheme == "https" assert req.link.url == url def test_str(self) -> None: req = install_req_from_line("simple==0.1") assert str(req) == "simple==0.1" def test_repr(self) -> None: req = install_req_from_line("simple==0.1") assert repr(req) == ("<InstallRequirement object: simple==0.1 editable=False>") def test_invalid_wheel_requirement_raises(self) -> None: with pytest.raises(InvalidWheelFilename): install_req_from_line("invalid.whl") def test_wheel_requirement_sets_req_attribute(self) -> None: req = install_req_from_line("simple-0.1-py2.py3-none-any.whl") assert isinstance(req.req, Requirement) assert str(req.req) == "simple==0.1" def test_url_preserved_line_req(self) -> None: """Confirm the url is preserved in a non-editable requirement""" url = "git+http://foo.com@ref#egg=foo" req = install_req_from_line(url) assert req.link is not None assert req.link.url == url def test_url_preserved_editable_req(self) -> None: """Confirm the url is preserved in a editable requirement""" url = "git+http://foo.com@ref#egg=foo" req = install_req_from_editable(url) assert req.link is not None assert req.link.url == url def test_markers(self) -> None: for line in ( # recommended syntax 'mock3; python_version >= "3"', # with more spaces 'mock3 ; python_version >= "3" ', # without spaces 'mock3;python_version >= "3"', ): req = install_req_from_line(line) assert req.req is not None assert req.req.name == "mock3" assert str(req.req.specifier) == "" assert str(req.markers) == 'python_version >= "3"' def test_markers_semicolon(self) -> None: # check that the markers can contain a semicolon req = install_req_from_line('semicolon; os_name == "a; b"') assert req.req is not None assert req.req.name == "semicolon" assert str(req.req.specifier) == "" assert str(req.markers) == 'os_name == "a; b"' def test_markers_url(self) -> None: # test "URL; markers" syntax url = "http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz" line = f'{url}; python_version >= "3"' req = install_req_from_line(line) assert req.link is not None assert req.link.url == url, req.link.url assert str(req.markers) == 'python_version >= "3"' # without space, markers are part of 
the URL url = "http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz" line = f'{url};python_version >= "3"' req = install_req_from_line(line) assert req.link is not None assert req.link.url == line, req.link.url assert req.markers is None def test_markers_match_from_line(self) -> None: # match for markers in ( 'python_version >= "1.0"', f"sys_platform == {sys.platform!r}", ): line = "name; " + markers req = install_req_from_line(line) assert str(req.markers) == str(Marker(markers)) assert req.match_markers() # don't match for markers in ( 'python_version >= "5.0"', f"sys_platform != {sys.platform!r}", ): line = "name; " + markers req = install_req_from_line(line) assert str(req.markers) == str(Marker(markers)) assert not req.match_markers() def test_markers_match(self) -> None: # match for markers in ( 'python_version >= "1.0"', f"sys_platform == {sys.platform!r}", ): line = "name; " + markers req = install_req_from_line(line, comes_from="") assert str(req.markers) == str(Marker(markers)) assert req.match_markers() # don't match for markers in ( 'python_version >= "5.0"', f"sys_platform != {sys.platform!r}", ): line = "name; " + markers req = install_req_from_line(line, comes_from="") assert str(req.markers) == str(Marker(markers)) assert not req.match_markers() def test_extras_for_line_path_requirement(self) -> None: line = "SomeProject[ex1,ex2]" filename = "filename" comes_from = f"-r {filename} (line 1)" req = install_req_from_line(line, comes_from=comes_from) assert len(req.extras) == 2 assert req.extras == {"ex1", "ex2"} def test_extras_for_line_url_requirement(self) -> None: line = "git+https://url#egg=SomeProject[ex1,ex2]" filename = "filename" comes_from = f"-r {filename} (line 1)" req = install_req_from_line(line, comes_from=comes_from) assert len(req.extras) == 2 assert req.extras == {"ex1", "ex2"} def test_extras_for_editable_path_requirement(self) -> None: url = ".[ex1,ex2]" filename = "filename" comes_from = f"-r {filename} (line 1)" req = install_req_from_editable(url, comes_from=comes_from) assert len(req.extras) == 2 assert req.extras == {"ex1", "ex2"} def test_extras_for_editable_url_requirement(self) -> None: url = "git+https://url#egg=SomeProject[ex1,ex2]" filename = "filename" comes_from = f"-r {filename} (line 1)" req = install_req_from_editable(url, comes_from=comes_from) assert len(req.extras) == 2 assert req.extras == {"ex1", "ex2"} def test_unexisting_path(self) -> None: with pytest.raises(InstallationError) as e: install_req_from_line(os.path.join("this", "path", "does", "not", "exist")) err_msg = e.value.args[0] assert "Invalid requirement" in err_msg assert "It looks like a path." in err_msg def test_single_equal_sign(self) -> None: with pytest.raises(InstallationError) as e: install_req_from_line("toto=42") err_msg = e.value.args[0] assert "Invalid requirement" in err_msg assert "= is not a valid operator. Did you mean == ?" in err_msg def test_unidentifiable_name(self) -> None: test_name = "-" with pytest.raises(InstallationError) as e: install_req_from_line(test_name) err_msg = e.value.args[0] assert err_msg.startswith(f"Invalid requirement: '{test_name}'") def test_requirement_file(self) -> None: req_file_path = os.path.join(self.tempdir, "test.txt") with open(req_file_path, "w") as req_file: req_file.write("pip\nsetuptools") with pytest.raises(InstallationError) as e: install_req_from_line(req_file_path) err_msg = e.value.args[0] assert "Invalid requirement" in err_msg assert "It looks like a path. The path does exist." 
in err_msg assert "appears to be a requirements file." in err_msg assert "If that is the case, use the '-r' flag to install" in err_msg @pytest.mark.parametrize( "inp, out", [ ("pkg", "pkg"), ("pkg==1.0", "pkg==1.0"), ("pkg ; python_version<='3.6'", "pkg"), ("pkg[ext]", "pkg"), ("pkg [ ext1, ext2 ]", "pkg"), ("pkg [ ext1, ext2 ] @ https://example.com/", "pkg@ https://example.com/"), ("pkg [ext] == 1.0; python_version<='3.6'", "pkg==1.0"), ("pkg-all.allowed_chars0 ~= 2.0", "pkg-all.allowed_chars0~=2.0"), ("pkg-all.allowed_chars0 [ext] ~= 2.0", "pkg-all.allowed_chars0~=2.0"), ("simple-0.1-py2.py3-none-any.whl [ext]", "simple==0.1"), ], ) def test_install_req_drop_extras(self, inp: str, out: str) -> None: """ Test behavior of install_req_drop_extras """ req = install_req_from_line(inp) without_extras = install_req_drop_extras(req) assert not without_extras.extras assert str(without_extras.req) == out # if there are no extras they should be the same object, # otherwise they may be a copy due to cache if req.extras: assert req is not without_extras assert req.req is not without_extras.req # comes_from should point to original assert without_extras.comes_from is req # all else should be the same assert without_extras.link == req.link assert without_extras.markers == req.markers assert without_extras.isolated == req.isolated assert without_extras.hash_options == req.hash_options assert without_extras.constraint == req.constraint assert without_extras.config_settings == req.config_settings assert without_extras.user_supplied == req.user_supplied assert without_extras.permit_editable_wheels == req.permit_editable_wheels @pytest.mark.parametrize( "inp, extras, out", [ ("pkg", set(), "pkg"), ("pkg==1.0", set(), "pkg==1.0"), ("pkg[ext]", set(), "pkg[ext]"), ("pkg", {"ext"}, "pkg[ext]"), ("pkg==1.0", {"ext"}, "pkg[ext]==1.0"), ("pkg==1.0", {"ext1", "ext2"}, "pkg[ext1,ext2]==1.0"), ("pkg; python_version<='3.6'", {"ext"}, "pkg[ext]"), ("pkg[ext1,ext2]==1.0", {"ext2", "ext3"}, "pkg[ext1,ext2,ext3]==1.0"), ( "pkg-all.allowed_chars0 [ ext1 ] @ https://example.com/", {"ext2"}, "pkg-all.allowed_chars0[ext1,ext2]@ https://example.com/", ), ], ) def test_install_req_extend_extras( self, inp: str, extras: set[str], out: str ) -> None: """ Test behavior of install_req_extend_extras """ req = install_req_from_line(inp) extended = install_req_extend_extras(req, extras) assert str(extended.req) == out assert extended.req is not None assert set(extended.extras) == set(extended.req.extras) # if extras is not a subset of req.extras then the extended # requirement object should not be the same, otherwise they # might be a copy due to cache if not extras.issubset(req.extras): assert req is not extended assert req.req is not extended.req # all else should be the same assert extended.link == req.link assert extended.markers == req.markers assert extended.isolated == req.isolated assert extended.hash_options == req.hash_options assert extended.constraint == req.constraint assert extended.config_settings == req.config_settings assert extended.user_supplied == req.user_supplied assert extended.permit_editable_wheels == req.permit_editable_wheels @pytest.mark.parametrize( "req_str, expected", [ ( 'foo[extra] @ svn+http://foo ; os_name == "nt"', ('foo ; os_name == "nt"', "svn+http://foo", {"extra"}), ), ( "foo @ svn+http://foo", ("foo", "svn+http://foo", set()), ), ], ) def test_parse_editable_pep508( req_str: str, expected: tuple[str, str, set[str]] ) -> None: assert parse_editable(req_str) == expected 
@mock.patch("pip._internal.req.req_install.os.path.abspath") @mock.patch("pip._internal.req.req_install.os.path.exists") @mock.patch("pip._internal.req.req_install.os.path.isdir") def test_parse_editable_local( isdir_mock: mock.Mock, exists_mock: mock.Mock, abspath_mock: mock.Mock ) -> None: exists_mock.return_value = isdir_mock.return_value = True # mocks needed to support path operations on windows tests abspath_mock.return_value = "/some/path" assert parse_editable(".") == (None, "file:///some/path", set()) abspath_mock.return_value = "/some/path/foo" assert parse_editable("foo") == ( None, "file:///some/path/foo", set(), ) def test_parse_editable_explicit_vcs() -> None: assert parse_editable("svn+https://foo#egg=foo") == ( "foo", "svn+https://foo#egg=foo", set(), ) def test_parse_editable_vcs_extras() -> None: assert parse_editable("svn+https://foo#egg=foo[extras]") == ( "foo[extras]", "svn+https://foo#egg=foo[extras]", set(), ) @mock.patch("pip._internal.req.req_install.os.path.abspath") @mock.patch("pip._internal.req.req_install.os.path.exists") @mock.patch("pip._internal.req.req_install.os.path.isdir") def test_parse_editable_local_extras( isdir_mock: mock.Mock, exists_mock: mock.Mock, abspath_mock: mock.Mock ) -> None: exists_mock.return_value = isdir_mock.return_value = True abspath_mock.return_value = "/some/path" assert parse_editable(".[extras]") == ( None, "file:///some/path", {"extras"}, ) abspath_mock.return_value = "/some/path/foo" assert parse_editable("foo[bar,baz]") == ( None, "file:///some/path/foo", {"bar", "baz"}, ) def test_mismatched_versions(caplog: pytest.LogCaptureFixture) -> None: req = InstallRequirement( req=Requirement("simplewheel==2.0"), comes_from=None, ) req.source_dir = "/tmp/somewhere" # make req believe it has been unpacked # Monkeypatch! 
metadata = email.message.Message() metadata["name"] = "simplewheel" metadata["version"] = "1.0" req._metadata = metadata req.assert_source_matches_version() assert caplog.records[-1].message == ( "Requested simplewheel==2.0, but installing version 1.0" ) @pytest.mark.parametrize( "args, expected", [ # Test UNIX-like paths (("/path/to/installable"), True), # Test relative paths (("./path/to/installable"), True), # Test current path (("."), True), # Test url paths (("https://whatever.com/test-0.4-py2.py3-bogus-any.whl"), True), # Test pep440 paths (("test @ https://whatever.com/test-0.4-py2.py3-bogus-any.whl"), True), # Test wheel (("simple-0.1-py2.py3-none-any.whl"), False), ], ) def test_looks_like_path(args: str, expected: bool) -> None: assert _looks_like_path(args) == expected @pytest.mark.skipif( not sys.platform.startswith("win"), reason="Test only available on Windows" ) @pytest.mark.parametrize( "args, expected", [ # Test relative paths ((".\\path\\to\\installable"), True), (("relative\\path"), True), # Test absolute paths (("C:\\absolute\\path"), True), ], ) def test_looks_like_path_win(args: str, expected: bool) -> None: assert _looks_like_path(args) == expected @pytest.mark.parametrize( "args, mock_returns, expected", [ # Test pep440 urls ( ( "/path/to/foo @ git+http://foo.com@ref#egg=foo", "foo @ git+http://foo.com@ref#egg=foo", ), (False, False), None, ), # Test pep440 urls without spaces ( ( "/path/to/foo@git+http://foo.com@ref#egg=foo", "foo @ git+http://foo.com@ref#egg=foo", ), (False, False), None, ), # Test pep440 wheel ( ( "/path/to/test @ https://whatever.com/test-0.4-py2.py3-bogus-any.whl", "test @ https://whatever.com/test-0.4-py2.py3-bogus-any.whl", ), (False, False), None, ), # Test name is not a file (("/path/to/simple==0.1", "simple==0.1"), (False, False), None), ], ) @mock.patch("pip._internal.req.req_install.os.path.isdir") @mock.patch("pip._internal.req.req_install.os.path.isfile") def test_get_url_from_path( isdir_mock: mock.Mock, isfile_mock: mock.Mock, args: tuple[str, str], mock_returns: tuple[bool, bool], expected: None, ) -> None: isdir_mock.return_value = mock_returns[0] isfile_mock.return_value = mock_returns[1] assert _get_url_from_path(*args) is expected @mock.patch("pip._internal.req.req_install.os.path.isdir") @mock.patch("pip._internal.req.req_install.os.path.isfile") def test_get_url_from_path__archive_file( isdir_mock: mock.Mock, isfile_mock: mock.Mock ) -> None: isdir_mock.return_value = False isfile_mock.return_value = True name = "simple-0.1-py2.py3-none-any.whl" url = Path(f"/path/to/{name}").resolve(strict=False).as_uri() assert _get_url_from_path(f"/path/to/{name}", name) == url @mock.patch("pip._internal.req.req_install.os.path.isdir") @mock.patch("pip._internal.req.req_install.os.path.isfile") def test_get_url_from_path__installable_dir( isdir_mock: mock.Mock, isfile_mock: mock.Mock ) -> None: isdir_mock.return_value = True isfile_mock.return_value = True name = "some/setuptools/project" url = Path(f"/path/to/{name}").resolve(strict=False).as_uri() assert _get_url_from_path(f"/path/to/{name}", name) == url @mock.patch("pip._internal.req.req_install.os.path.isdir") def test_get_url_from_path__installable_error(isdir_mock: mock.Mock) -> None: isdir_mock.return_value = True name = "some/setuptools/project" path = os.path.join("/path/to/" + name) with pytest.raises(InstallationError) as e: _get_url_from_path(path, name) err_msg = e.value.args[0] assert "Neither 'setup.py' nor 'pyproject.toml' found" in err_msg
TestInstallRequirement
python
django__django
tests/select_related/models.py
{ "start": 1233, "end": 1360 }
class ____(models.Model):
    name = models.CharField(max_length=50)
    family = models.ForeignKey(Family, models.CASCADE)
Genus
python
cython__cython
tests/run/pyclass_scope_T671.py
{ "start": 38, "end": 133 }
class ____(object):
    """
    >>> SimpleAssignment.A
    1234
    """
    A = A
SimpleAssignment
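The `A = A` line is the point of this test: in a class body, a name on the right-hand side that is not yet defined in that body is looked up in the enclosing module scope, and the doctest pins Cython to CPython's behaviour. The same semantics can be checked in plain Python:

A = 1234  # module-level name

class SimpleAssignment(object):
    # RHS `A` is not yet a class attribute, so the lookup falls through
    # to the module scope; the class then stores its own copy.
    A = A

assert SimpleAssignment.A == 1234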
python
coleifer__peewee
playhouse/sqlcipher_ext.py
{ "start": 2356, "end": 3484 }
class ____(object):
    server_version = __sqlcipher_version__

    def _connect(self):
        params = dict(self.connect_params)
        passphrase = params.pop('passphrase', '').replace("'", "''")

        conn = sqlcipher.connect(self.database, isolation_level=None, **params)
        try:
            if passphrase:
                conn.execute("PRAGMA key='%s'" % passphrase)
            self._add_conn_hooks(conn)
        except:
            conn.close()
            raise
        return conn

    def set_passphrase(self, passphrase):
        if not self.is_closed():
            raise ImproperlyConfigured('Cannot set passphrase when database '
                                       'is open. To change passphrase of an '
                                       'open database use the rekey() method.')

        self.connect_params['passphrase'] = passphrase

    def rekey(self, passphrase):
        if self.is_closed():
            self.connect()

        self.execute_sql("PRAGMA rekey='%s'" % passphrase.replace("'", "''"))
        self.connect_params['passphrase'] = passphrase
        return True
_SqlCipherDatabase
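Note the quoting discipline in `_connect()` and `rekey()`: single quotes in the passphrase are doubled (`'` -> `''`) before being interpolated into the `PRAGMA` statement, since pragmas cannot be parameterized. Typical usage goes through the public `SqlCipherDatabase` class that mixes this into `SqliteDatabase` (a sketch; exact constructor options may differ by version):

from playhouse.sqlcipher_ext import SqlCipherDatabase

db = SqlCipherDatabase('encrypted.db', passphrase='s3cret')
db.connect()
db.rekey('n3w-s3cret')  # re-encrypt the open database under a new key
db.close()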
python
faif__python-patterns
patterns/other/hsm/hsm.py
{ "start": 5026, "end": 5190 }
class ____(OutOfService):
    """No need to override any method."""

    def __init__(self, HierachicalStateMachine):
        self._hsm = HierachicalStateMachine
Failed
python
mlflow__mlflow
mlflow/utils/process.py
{ "start": 165, "end": 6522 }
class ____(Exception): @classmethod def from_completed_process(cls, process): lines = [ f"Non-zero exit code: {process.returncode}", f"Command: {process.args}", ] if process.stdout: lines += [ "", "STDOUT:", process.stdout, ] if process.stderr: lines += [ "", "STDERR:", process.stderr, ] return cls("\n".join(lines)) def _remove_inaccessible_python_path(env): """ Remove inaccessible path from PYTHONPATH environment variable. """ if python_path := env.get("PYTHONPATH"): paths = [p for p in python_path.split(":") if os.access(p, os.R_OK)] env["PYTHONPATH"] = ":".join(paths) return env def _exec_cmd( cmd, *, throw_on_error=True, extra_env=None, capture_output=True, synchronous=True, stream_output=False, **kwargs, ): """A convenience wrapper of `subprocess.Popen` for running a command from a Python script. Args: cmd: The command to run, as a string or a list of strings. throw_on_error: If True, raises an Exception if the exit code of the program is nonzero. extra_env: Extra environment variables to be defined when running the child process. If this argument is specified, `kwargs` cannot contain `env`. capture_output: If True, stdout and stderr will be captured and included in an exception message on failure; if False, these streams won't be captured. synchronous: If True, wait for the command to complete and return a CompletedProcess instance, If False, does not wait for the command to complete and return a Popen instance, and ignore the `throw_on_error` argument. stream_output: If True, stream the command's stdout and stderr to `sys.stdout` as a unified stream during execution. If False, do not stream the command's stdout and stderr to `sys.stdout`. kwargs: Keyword arguments (except `text`) passed to `subprocess.Popen`. Returns: If synchronous is True, return a `subprocess.CompletedProcess` instance, otherwise return a Popen instance. """ if illegal_kwargs := set(kwargs.keys()).intersection({"text"}): raise ValueError(f"`kwargs` cannot contain {list(illegal_kwargs)}") env = kwargs.pop("env", None) if extra_env is not None and env is not None: raise ValueError("`extra_env` and `env` cannot be used at the same time") if capture_output and stream_output: raise ValueError( "`capture_output=True` and `stream_output=True` cannot be specified at the same time" ) # Copy current `os.environ` or passed in `env` to avoid mutating it. env = env or os.environ.copy() if extra_env is not None: env.update(extra_env) if is_in_databricks_runtime(): # in databricks runtime, the PYTHONPATH might contain inaccessible path # which causes virtualenv python environment creation subprocess failure. # as a workaround, we remove inaccessible path out of python path. env = _remove_inaccessible_python_path(env) # In Python < 3.8, `subprocess.Popen` doesn't accept a command containing path-like # objects (e.g. `["ls", pathlib.Path("abc")]`) on Windows. To avoid this issue, # stringify all elements in `cmd`. Note `str(pathlib.Path("abc"))` returns 'abc'. 
if isinstance(cmd, list): cmd = list(map(str, cmd)) if capture_output or stream_output: if kwargs.get("stdout") is not None or kwargs.get("stderr") is not None: raise ValueError( "stdout and stderr arguments may not be used with capture_output or stream_output" ) kwargs["stdout"] = subprocess.PIPE if capture_output: kwargs["stderr"] = subprocess.PIPE elif stream_output: # Redirect stderr to stdout in order to combine the streams for unified printing to # `sys.stdout`, as documented in # https://docs.python.org/3/library/subprocess.html#subprocess.run kwargs["stderr"] = subprocess.STDOUT process = subprocess.Popen( cmd, env=env, text=True, **kwargs, ) if not synchronous: return process if stream_output: for output_char in iter(lambda: process.stdout.read(1), ""): sys.stdout.write(output_char) stdout, stderr = process.communicate() returncode = process.poll() comp_process = subprocess.CompletedProcess( process.args, returncode=returncode, stdout=stdout, stderr=stderr, ) if throw_on_error and returncode != 0: raise ShellCommandException.from_completed_process(comp_process) return comp_process def _join_commands(*commands): entry_point = ["bash", "-c"] if not is_windows() else ["cmd", "/c"] sep = " && " if not is_windows() else " & " return [*entry_point, sep.join(map(str, commands))] # A global map storing (function, args_tuple) --> (value, pid) _per_process_value_cache_map = {} def cache_return_value_per_process(fn): """ A decorator which globally caches the return value of the decorated function. But if current process forked out a new child process, in child process, old cache values are invalidated. Restrictions: The decorated function must be called with only positional arguments, and all the argument values must be hashable. """ @functools.wraps(fn) def wrapped_fn(*args, **kwargs): if len(kwargs) > 0: raise ValueError( "The function decorated by `cache_return_value_per_process` is not allowed to be " "called with key-word style arguments." ) if (fn, args) in _per_process_value_cache_map: prev_value, prev_pid = _per_process_value_cache_map.get((fn, args)) if os.getpid() == prev_pid: return prev_value new_value = fn(*args) new_pid = os.getpid() _per_process_value_cache_map[(fn, args)] = (new_value, new_pid) return new_value return wrapped_fn
ShellCommandException
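Grounded in the decorator at the end of this snippet: `cache_return_value_per_process` keys the cache on `(function, args)` and stores the creating pid, so a forked child recomputes instead of reusing the parent's value. A small usage sketch (`make_scratch_dir` is a hypothetical example function):

import tempfile

@cache_return_value_per_process
def make_scratch_dir(prefix):
    return tempfile.mkdtemp(prefix=prefix)

d1 = make_scratch_dir("demo-")
d2 = make_scratch_dir("demo-")
assert d1 == d2  # same process + same args -> cached value

# Keyword-style calls are rejected by the wrapper:
#   make_scratch_dir(prefix="demo-")  -> ValueError
# In a forked child process, os.getpid() no longer matches the cached
# pid, so the body runs again and the child gets its own directory.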
python
SmileyChris__easy-thumbnails
easy_thumbnails/tests/test_pixel_processors.py
{ "start": 6680, "end": 8566 }
class ____(TestCase):
    def test_standard(self):
        image = Image.new('RGB', (800, 600))
        processed = processors.colorspace(image)
        self.assertEqual(processed.mode, 'RGB')

        image = Image.new('L', (800, 600))
        processed = processors.colorspace(image)
        self.assertEqual(processed.mode, 'L')

    def test_transparent(self):
        image = Image.new('RGBA', (800, 600))
        processed = processors.colorspace(image)
        self.assertEqual(processed.mode, 'RGBA')

        image = Image.new('LA', (800, 600))
        processed = processors.colorspace(image)
        self.assertEqual(processed.mode, 'LA')

    def test_replace_alpha(self):
        image = Image.new('RGBA', (800, 600))
        self.assertEqual(image.load()[0, 0], (0, 0, 0, 0))
        processed = processors.colorspace(image, replace_alpha='#fefdfc')
        self.assertEqual(processed.mode, 'RGB')
        self.assertEqual(processed.load()[0, 0], (254, 253, 252))

        image = Image.new('LA', (800, 600))
        self.assertEqual(image.load()[0, 0], (0, 0))
        processed = processors.colorspace(image, replace_alpha='#fefdfc')
        self.assertEqual(processed.mode, 'L')
        self.assertEqual(processed.load()[0, 0], 253)

    def test_bw(self):
        image = Image.new('RGB', (800, 600))
        processed = processors.colorspace(image, bw=True)
        self.assertEqual(processed.mode, 'L')

        image = Image.new('RGBA', (800, 600))
        processed = processors.colorspace(image, bw=True)
        self.assertEqual(processed.mode, 'LA')

        image = Image.new('L', (800, 600))
        processed = processors.colorspace(image, bw=True)
        self.assertEqual(processed.mode, 'L')

        image = Image.new('LA', (800, 600))
        processed = processors.colorspace(image, bw=True)
        self.assertEqual(processed.mode, 'LA')
ColorspaceTest
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_font09.py
{ "start": 315, "end": 1458 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("chart_font09.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "bar"})

        chart.axis_ids = [108178432, 108321408]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
        chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
        chart.add_series({"values": "=Sheet1!$C$1:$C$5"})

        chart.set_title(
            {"name": "Title", "name_font": {"rotation": -45, "baseline": -1}}
        )

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
django__django
tests/proxy_models/models.py
{ "start": 541, "end": 665 }
class ____(models.Manager):
    def get_queryset(self):
        return super().get_queryset().exclude(name="wilma")
SubManager
python
pallets__werkzeug
src/werkzeug/formparser.py
{ "start": 4780, "end": 10705 }
class ____: """This class implements parsing of form data for Werkzeug. By itself it can parse multipart and url encoded form data. It can be subclassed and extended but for most mimetypes it is a better idea to use the untouched stream and expose it as separate attributes on a request object. :param stream_factory: An optional callable that returns a new read and writeable file descriptor. This callable works the same as :meth:`Response._get_file_stream`. :param max_form_memory_size: the maximum number of bytes to be accepted for in-memory stored form data. If the data exceeds the value specified an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param max_content_length: If this is provided and the transmitted data is longer than this value an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. :param silent: If set to False parsing errors will not be caught. :param max_form_parts: The maximum number of multipart parts to be parsed. If this is exceeded, a :exc:`~exceptions.RequestEntityTooLarge` exception is raised. .. versionchanged:: 3.0 The ``charset`` and ``errors`` parameters were removed. .. versionchanged:: 3.0 The ``parse_functions`` attribute and ``get_parse_func`` methods were removed. .. versionchanged:: 2.2.3 Added the ``max_form_parts`` parameter. .. versionadded:: 0.8 """ def __init__( self, stream_factory: TStreamFactory | None = None, max_form_memory_size: int | None = None, max_content_length: int | None = None, cls: type[MultiDict[str, t.Any]] | None = None, silent: bool = True, *, max_form_parts: int | None = None, ) -> None: if stream_factory is None: stream_factory = default_stream_factory self.stream_factory = stream_factory self.max_form_memory_size = max_form_memory_size self.max_content_length = max_content_length self.max_form_parts = max_form_parts if cls is None: cls = t.cast("type[MultiDict[str, t.Any]]", MultiDict) self.cls = cls self.silent = silent def parse_from_environ(self, environ: WSGIEnvironment) -> t_parse_result: """Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``. """ stream = get_input_stream(environ, max_content_length=self.max_content_length) content_length = get_content_length(environ) mimetype, options = parse_options_header(environ.get("CONTENT_TYPE")) return self.parse( stream, content_length=content_length, mimetype=mimetype, options=options, ) def parse( self, stream: t.IO[bytes], mimetype: str, content_length: int | None, options: dict[str, str] | None = None, ) -> t_parse_result: """Parses the information from the given stream, mimetype, content length and mimetype parameters. :param stream: an input stream :param mimetype: the mimetype of the data :param content_length: the content length of the incoming data :param options: optional mimetype parameters (used for the multipart boundary for instance) :return: A tuple in the form ``(stream, form, files)``. .. versionchanged:: 3.0 The invalid ``application/x-url-encoded`` content type is not treated as ``application/x-www-form-urlencoded``. 
""" if mimetype == "multipart/form-data": parse_func = self._parse_multipart elif mimetype == "application/x-www-form-urlencoded": parse_func = self._parse_urlencoded else: return stream, self.cls(), self.cls() if options is None: options = {} try: return parse_func(stream, mimetype, content_length, options) except ValueError: if not self.silent: raise return stream, self.cls(), self.cls() def _parse_multipart( self, stream: t.IO[bytes], mimetype: str, content_length: int | None, options: dict[str, str], ) -> t_parse_result: parser = MultiPartParser( stream_factory=self.stream_factory, max_form_memory_size=self.max_form_memory_size, max_form_parts=self.max_form_parts, cls=self.cls, ) boundary = options.get("boundary", "").encode("ascii") if not boundary: raise ValueError("Missing boundary") form, files = parser.parse(stream, boundary, content_length) return stream, form, files def _parse_urlencoded( self, stream: t.IO[bytes], mimetype: str, content_length: int | None, options: dict[str, str], ) -> t_parse_result: if ( self.max_form_memory_size is not None and content_length is not None and content_length > self.max_form_memory_size ): raise RequestEntityTooLarge() items = parse_qsl( stream.read().decode(), keep_blank_values=True, errors="werkzeug.url_quote", ) return stream, self.cls(items), self.cls()
FormDataParser
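A minimal end-to-end use of the parser above with a urlencoded body; `parse` returns the (possibly replaced) stream plus the `form` and `files` multidicts:

import io
from werkzeug.formparser import FormDataParser

parser = FormDataParser()
body = b"name=alice&tags=a&tags=b"
stream, form, files = parser.parse(
    io.BytesIO(body),
    mimetype="application/x-www-form-urlencoded",
    content_length=len(body),
)
print(form["name"])          # 'alice'
print(form.getlist("tags"))  # ['a', 'b']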
python
kamyu104__LeetCode-Solutions
Python/find-duplicate-subtrees.py
{ "start": 50, "end": 810 }
class ____(object):
    def findDuplicateSubtrees(self, root):
        """
        :type root: TreeNode
        :rtype: List[TreeNode]
        """
        def getid(root, lookup, trees):
            if not root:
                return -1
            node_id = lookup[root.val,
                             getid(root.left, lookup, trees),
                             getid(root.right, lookup, trees)]
            trees[node_id].append(root)
            return node_id

        trees = collections.defaultdict(list)
        lookup = collections.defaultdict()
        lookup.default_factory = lookup.__len__
        getid(root, lookup, trees)
        return [roots[0] for roots in trees.itervalues() if len(roots) > 1]

# Time:  O(n * h)
# Space: O(n * h)
Solution
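The snippet above is Python 2 (`itervalues`, implicit int ids via a `defaultdict` whose factory is its own `__len__`). The same subtree-interning technique in Python 3, with the id assignment spelled out:

import collections

class Solution:
    def findDuplicateSubtrees(self, root):
        trees = collections.defaultdict(list)
        lookup = {}  # (val, left_id, right_id) -> small int id

        def getid(node):
            if not node:
                return -1
            key = (node.val, getid(node.left), getid(node.right))
            if key not in lookup:
                lookup[key] = len(lookup)
            trees[lookup[key]].append(node)
            return lookup[key]

        getid(root)
        return [nodes[0] for nodes in trees.values() if len(nodes) > 1]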
python
astropy__astropy
astropy/modeling/functional_models.py
{ "start": 35760, "end": 38714 }
class ____(_InverseTrigonometric1D):
    """
    One dimensional ArcSine model returning values between -pi/2 and pi/2
    only.

    Parameters
    ----------
    amplitude : float
        Oscillation amplitude for corresponding Sine
    frequency : float
        Oscillation frequency for corresponding Sine
    phase : float
        Oscillation phase for corresponding Sine

    See Also
    --------
    Sine1D, ArcCosine1D, ArcTangent1D

    Notes
    -----
    Model formula:

        .. math:: f(x) = ((arcsin(x / A) / 2pi) - p) / f

    The arcsin function being used for this model will only accept inputs in
    [-A, A]; otherwise, a runtime warning will be thrown and the result will
    be NaN. To avoid this, the bounding_box has been properly set to
    accommodate this; therefore, it is recommended that this model always be
    evaluated with the ``with_bounding_box=True`` option.

    Examples
    --------
    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import ArcSine1D

        plt.figure()
        s1 = ArcSine1D(amplitude=1, frequency=.25)
        r = np.arange(-1, 1, .01)

        for amplitude in range(1, 4):
            s1.amplitude = amplitude
            plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)

        plt.axis([-1, 1, -np.pi/2, np.pi/2])
        plt.show()
    """

    @staticmethod
    def evaluate(x, amplitude, frequency, phase):
        """One dimensional ArcSine model function."""
        # Note: If frequency and x are quantities, they should normally have
        # inverse units, so that argument ends up being dimensionless. However,
        # np.sin of a dimensionless quantity will crash, so we remove the
        # quantity-ness from argument in this case (another option would be to
        # multiply by * u.rad but this would be slower overall).
        argument = x / amplitude
        if isinstance(argument, Quantity):
            argument = argument.value
        arc_sine = np.arcsin(argument) / TWOPI

        return (arc_sine - phase) / frequency

    @staticmethod
    def fit_deriv(x, amplitude, frequency, phase):
        """One dimensional ArcSine model derivative."""
        d_amplitude = -x / (
            TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
        )
        d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2
        d_phase = -1 / frequency * np.ones(x.shape)
        return [d_amplitude, d_frequency, d_phase]

    def bounding_box(self):
        """
        Tuple defining the default ``bounding_box`` limits,
        ``(x_low, x_high)``.
        """
        return -1 * self.amplitude, 1 * self.amplitude

    @property
    def inverse(self):
        """One dimensional inverse of ArcSine."""
        return Sine1D(
            amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
        )
ArcSine1D
python
jina-ai__jina
tests/integration/pods/test_pod.py
{ "start": 15558, "end": 17366 }
class ____(Executor):
    @requests
    def foo(self, docs, **kwargs):
        for doc in docs:
            if doc.text == 'slow':
                time.sleep(1.0)


async def _start_create_pod(pod, port_generator, type='worker', executor=None):
    port = port_generator()
    pod = _create_worker_pod(port, f'{pod}/{type}', executor)
    pod.start()
    return port, pod


def _create_worker_pod(port, name='', executor=None):
    args = _generate_pod_args()
    args.port = [port]
    args.name = name
    args.no_block_on_start = True
    if executor:
        args.uses = executor
    return Pod(args)


def _create_head_pod(
    port,
    connection_list_dict,
    name='',
    polling='ANY',
    uses_before=None,
    uses_after=None,
):
    args = _generate_pod_args()
    args.port = [port]
    args.name = name
    args.runtime_cls = 'HeadRuntime'
    args.pod_role = PodRoleType.HEAD
    args.no_block_on_start = True
    args.polling = PollingType.ANY if polling == 'ANY' else PollingType.ALL
    if uses_before:
        args.uses_before_address = uses_before
    if uses_after:
        args.uses_after_address = uses_after
    args.connection_list = json.dumps(connection_list_dict)
    return Pod(args)


def _create_gateway_pod(graph_description, pod_addresses, port, protocol='grpc'):
    return Pod(
        set_gateway_parser().parse_args(
            [
                '--graph-description',
                graph_description,
                '--deployments-addresses',
                pod_addresses,
                '--port',
                str(port),
                '--noblock-on-start',
                '--protocol',
                protocol,
            ]
        )
    )


async def async_inputs():
    for _ in range(20):
        yield Document(text='client0-Request')
FastSlowExecutor
python
django-compressor__django-compressor
compressor/finders.py
{ "start": 96, "end": 448 }
class ____(staticfiles.finders.BaseStorageFinder):
    """
    A staticfiles finder that looks in COMPRESS_ROOT for compressed files,
    to be used during development with staticfiles development file server
    or during deployment.
    """
    storage = CompressorFileStorage

    def list(self, ignore_patterns):
        return []
CompressorFinder
python
readthedocs__readthedocs.org
readthedocs/gold/views.py
{ "start": 822, "end": 1976 }
class ____(
    PrivateViewMixin,
    DetailView,
    FormView,
):
    """Gold subscription view."""

    model = GoldUser
    form_class = GoldSubscriptionForm
    template_name = "gold/subscription_detail.html"

    def get(self, *args, **kwargs):
        subscribed = self.request.GET.get("subscribed", None)
        if subscribed == "true":
            messages.success(
                self.request,
                "Thanks for supporting Read the Docs! It really means a lot to us.",
            )
        return super().get(*args, **kwargs)

    def get_object(self):
        try:
            return self.get_queryset().get(user=self.request.user)
        except self.model.DoesNotExist:
            return None

    def get_success_url(self, **__):
        return reverse_lazy("gold_detail")

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["form"] = self.get_form()
        context["golduser"] = self.get_object()
        context["stripe_publishable"] = (
            APIKey.objects.filter(type=APIKeyType.publishable).first().secret
        )
        return context
GoldSubscription
python
Lightning-AI__lightning
src/lightning/fabric/strategies/fsdp.py
{ "start": 3410, "end": 34791 }
class ____(ParallelStrategy, _Sharded):
    r"""Strategy for Fully Sharded Data Parallel provided by torch.distributed.

    Fully Sharded Training shards the entire model across all available GPUs, allowing you to scale model size,
    whilst using efficient communication to reduce overhead. In practice, this means we can remain at parity with
    PyTorch DDP, whilst scaling our model sizes dramatically. The technique is similar to ZeRO-Stage 3.

    For more information check out
    `this blogpost <https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api>`__.

    Defaults have been set and options have been exposed, but may require configuration
    based on your level of memory/speed efficiency. We suggest having a look at
    `this tutorial <https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html>`__ for more information.

    Arguments:
        cpu_offload: See ``cpu_offload`` parameter in :class:`torch.distributed.fsdp.FullyShardedDataParallel`.
        mixed_precision: See ``mixed_precision`` parameter in
            :class:`torch.distributed.fsdp.FullyShardedDataParallel`.
        auto_wrap_policy: Same as ``auto_wrap_policy`` parameter in
            :class:`torch.distributed.fsdp.FullyShardedDataParallel`. For convenience, this also accepts a set of
            the layer classes to wrap.
        activation_checkpointing: Deprecated. Use ``activation_checkpointing_policy``.
        activation_checkpointing_policy: Same as ``auto_wrap_policy`` parameter in
            :class:`torch.distributed.fsdp.FullyShardedDataParallel` but used when selecting the modules for which
            you want to enable activation checkpointing. Enabling this can free up a significant amount of memory
            at the cost of speed since activations in these layers need to be recomputed during backpropagation.
            For convenience, this also accepts a set of the layer classes to wrap.
        sharding_strategy: Select whether to shard model parameters, gradients, optimizer states, or a combination
            of them. Available values are:

            - ``"FULL_SHARD"``: Shards model parameters, gradients, and optimizer states (default).
            - ``"SHARD_GRAD_OP"``: Shards gradients and optimizer states only. Model parameters get replicated.
            - ``"NO_SHARD"``: No sharding (identical to regular DDP).
            - ``"HYBRID_SHARD"``: Shards model parameters, gradients, and optimizer states within a single machine,
              but replicates across machines. See also the `device_mesh` parameter below.

            Also accepts a :class:`torch.distributed.fsdp.ShardingStrategy` enum value.
        device_mesh: A tuple `(replication size, sharding size)` that defines over how many devices to shard and
            replicate the model. The product of the two numbers must equal the world size. Only valid in combination
            with the `HYBRID_SHARD` sharding strategy.
        state_dict_type: The format in which the state of the model and optimizers gets saved into the checkpoint.

            - ``"full"``: The full weights and optimizer states get assembled on rank 0 and saved to a single file.
            - ``"sharded"``: Each rank saves its shard of weights and optimizer states to a file. The checkpoint is
              a folder with as many files as the world size.

        \**kwargs: See available parameters in :class:`torch.distributed.fsdp.FullyShardedDataParallel`.

    """

    def __init__(
        self,
        accelerator: Optional[Accelerator] = None,
        parallel_devices: Optional[list[torch.device]] = None,
        cluster_environment: Optional[ClusterEnvironment] = None,
        precision: Optional[Precision] = None,
        process_group_backend: Optional[str] = None,
        timeout: Optional[timedelta] = default_pg_timeout,
        cpu_offload: Union[bool, "CPUOffload", None] = None,
        mixed_precision: Optional["MixedPrecision"] = None,
        auto_wrap_policy: Optional["_POLICY"] = None,
        activation_checkpointing: Optional[Union[type[Module], list[type[Module]]]] = None,
        activation_checkpointing_policy: Optional["_POLICY"] = None,
        sharding_strategy: "_SHARDING_STRATEGY" = "FULL_SHARD",
        state_dict_type: Literal["full", "sharded"] = "sharded",
        device_mesh: Optional[Union[tuple[int], "DeviceMesh"]] = None,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            accelerator=accelerator,
            parallel_devices=parallel_devices,
            cluster_environment=cluster_environment,
            precision=precision,
        )
        self._num_nodes = 1
        self._process_group_backend: Optional[str] = process_group_backend
        self._timeout: Optional[timedelta] = timeout
        self._backward_sync_control = _FSDPBackwardSyncControl()
        self._fsdp_kwargs = _auto_wrap_policy_kwargs(auto_wrap_policy, kwargs)

        # Enables joint setup of model and optimizer, multiple optimizer param groups, and `torch.compile()`
        self._fsdp_kwargs.setdefault("use_orig_params", True)

        if device_mesh is not None:
            if not _TORCH_GREATER_EQUAL_2_2:
                raise ValueError("The `device_mesh` argument is only supported in torch >= 2.2.")
            self._fsdp_kwargs["device_mesh"] = device_mesh

        self._activation_checkpointing_kwargs = _activation_checkpointing_kwargs(
            activation_checkpointing, activation_checkpointing_policy
        )
        self._state_dict_type = state_dict_type
        self.sharding_strategy = _init_sharding_strategy(sharding_strategy, self._fsdp_kwargs)
        self.cpu_offload = _init_cpu_offload(cpu_offload)
        self.mixed_precision = mixed_precision

    @property
    @override
    def checkpoint_io(self) -> CheckpointIO:
        raise NotImplementedError(f"The `{type(self).__name__}` does not use the `CheckpointIO` plugin interface.")

    @checkpoint_io.setter
    @override
    def checkpoint_io(self, io: CheckpointIO) -> None:
        raise NotImplementedError(f"The `{type(self).__name__}` does not support setting a `CheckpointIO` plugin.")

    @property
    @override
    def root_device(self) -> torch.device:
        assert self.parallel_devices is not None
        return self.parallel_devices[self.local_rank]

    @property
    def num_nodes(self) -> int:
        return self._num_nodes

    @num_nodes.setter
    def num_nodes(self, num_nodes: int) -> None:
        self._num_nodes = num_nodes

    @property
    def num_processes(self) -> int:
        return len(self.parallel_devices) if self.parallel_devices is not None else 0

    @property
    @override
    def distributed_sampler_kwargs(self) -> dict[str, Any]:
        return {"num_replicas": (self.num_nodes * self.num_processes), "rank": self.global_rank}

    @property
    def process_group_backend(self) -> Optional[str]:
        return self._process_group_backend

    @property
    def mixed_precision_config(self) -> Optional["MixedPrecision"]:
        if self.mixed_precision:
            return self.mixed_precision
        plugin = self.precision
        if isinstance(plugin, FSDPPrecision):
            return plugin.mixed_precision_config
        return None

    @property
    @override
    def precision(self) -> FSDPPrecision:
        plugin = self._precision
        if plugin is not None:
            assert isinstance(plugin, FSDPPrecision)
            return plugin
        return FSDPPrecision("32-true")

    @precision.setter
    @override
    def precision(self, precision: Optional[Precision]) -> None:
        if precision is not None and not isinstance(precision, FSDPPrecision):
            raise TypeError(f"The FSDP strategy can only work with the `FSDPPrecision` plugin, found {precision}")
        self._precision = precision

    @override
    def _configure_launcher(self) -> None:
        assert self.cluster_environment is not None
        if not self.cluster_environment.creates_processes_externally:
            self._launcher = _SubprocessScriptLauncher(self.cluster_environment, self.num_processes, self.num_nodes)

    @override
    def setup_environment(self) -> None:
        super().setup_environment()
        self._setup_distributed()

        # if 'device_mesh' in the `_fsdp_kwargs` is provided as a tuple, update it into the `DeviceMesh` object here
        if isinstance(self._fsdp_kwargs.get("device_mesh"), tuple):
            from torch.distributed.device_mesh import init_device_mesh

            self._fsdp_kwargs["device_mesh"] = init_device_mesh("cuda", self._fsdp_kwargs["device_mesh"])

    @override
    def setup_module_and_optimizers(
        self, module: Module, optimizers: list[Optimizer], scheduler: Optional["_LRScheduler"] = None
    ) -> tuple[Module, list[Optimizer], Optional["_LRScheduler"]]:
        """Wraps the model into a
        :class:`~torch.distributed.fsdp.fully_sharded_data_parallel.FullyShardedDataParallel` module and sets
        `use_orig_params=True` to keep the reference to the original parameters in the optimizer."""
        use_orig_params = self._fsdp_kwargs.get("use_orig_params")
        if use_orig_params is False:
            raise ValueError(
                f"You set `{type(self).__name__}(use_orig_params=False)` but this is not supported when"
                " setting the model and optimizer up jointly. Either set it to `True` or set the objects"
                " up in this order: Create the model, call `setup_module`, create the optimizer,"
                " call `setup_optimizer`."
            )
        module = self.setup_module(module)
        return module, optimizers, scheduler

    @override
    def setup_module(self, module: Module) -> Module:
        """Wraps the model into a
        :class:`~torch.distributed.fsdp.fully_sharded_data_parallel.FullyShardedDataParallel` module."""
        from torch.distributed.fsdp import FullyShardedDataParallel

        if any(isinstance(mod, FullyShardedDataParallel) for mod in module.modules()):
            # The user has wrapped their submodules manually, don't apply the auto wrap policy.
            if _has_meta_device_parameters_or_buffers(module):
                rank_zero_warn(
                    "The model is already wrapped in `FSDP` but there are still parameters on the meta device."
                )
            if "auto_wrap_policy" in self._fsdp_kwargs:
                rank_zero_warn(
                    "A FSDP `auto_wrap_policy` is set, but the model is already wrapped. The policy will be ignored."
                )
                del self._fsdp_kwargs["auto_wrap_policy"]
        else:
            module = FullyShardedDataParallel(
                module=module,
                cpu_offload=self.cpu_offload,
                mixed_precision=self.mixed_precision_config,
                sharding_strategy=self.sharding_strategy,
                device_id=self.root_device.index,
                **self._fsdp_kwargs,
            )

        _move_torchmetrics_to_device(module, self.root_device)

        # activation checkpointing needs to be set up after wrapping the model
        _setup_activation_checkpointing(module, self._activation_checkpointing_kwargs)

        return module

    @override
    def setup_optimizer(self, optimizer: Optimizer) -> Optimizer:
        """Set up an optimizer for a model wrapped with FSDP.

        This setup method doesn't modify the optimizer or wrap the optimizer. The only thing it currently does is
        verify that the optimizer was created after the model was wrapped with :meth:`setup_module` with a reference
        to the flattened parameters.

        """
        if self._fsdp_kwargs.get("use_orig_params"):
            return super().setup_optimizer(optimizer)
        if not _optimizer_has_flat_params(optimizer):
            # We avoid this limitation by setting `use_orig_params=True`
            raise ValueError(
                "The optimizer does not seem to reference any FSDP parameters. HINT: Make sure to create the"
                " optimizer after setting up the model."
            )
        return optimizer

    @override
    def module_to_device(self, module: Module) -> None:
        pass

    @override
    def module_init_context(self, empty_init: Optional[bool] = None) -> AbstractContextManager:
        precision_init_ctx = self.precision.module_init_context()
        module_sharded_ctx = self.module_sharded_context()
        stack = ExitStack()
        if empty_init:
            # Materialization happens in `setup`. When modules get wrapped by FSDP, the sequence of operations is:
            # 1) materialize module 2) call `reset_parameters()` 3) shard the module.
            # These operations are applied to each submodule 'bottom up' in the module hierarchy.
            stack.enter_context(torch.device("meta"))
        stack.enter_context(precision_init_ctx)
        stack.enter_context(module_sharded_ctx)
        return stack

    @override
    def module_sharded_context(self) -> AbstractContextManager:
        from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel
        from torch.distributed.fsdp.wrap import enable_wrap

        return enable_wrap(
            wrapper_cls=FullyShardedDataParallel,
            cpu_offload=self.cpu_offload,
            mixed_precision=self.mixed_precision_config,
            sharding_strategy=self.sharding_strategy,
            device_id=self.root_device.index,
            **self._fsdp_kwargs,
        )

    @override
    def all_reduce(
        self, tensor: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = "mean"
    ) -> Tensor:
        if isinstance(tensor, Tensor):
            return _sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
        return tensor

    @override
    def barrier(self, *args: Any, **kwargs: Any) -> None:
        if not _distributed_is_initialized():
            return
        if torch.distributed.get_backend() == "nccl":
            torch.distributed.barrier(device_ids=[self.root_device.index])
        else:
            torch.distributed.barrier()

    @override
    def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
        if not _distributed_is_initialized():
            return obj

        obj = [obj]
        torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)
        return obj[0]

    @override
    def clip_gradients_norm(
        self,
        module: Module,
        optimizer: Optimizer,
        max_norm: Union[float, int],
        norm_type: Union[float, int] = 2.0,
        error_if_nonfinite: bool = True,
    ) -> Tensor:
        """Clip gradients by norm."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel

        if not isinstance(module, FullyShardedDataParallel):
            # the root must be wrapped
            raise TypeError(
                "Gradient clipping with FSDP is only possible if the module passed to"
                f" `{type(self).__name__}.clip_gradients_norm` is wrapped in `FullyShardedDataParallel`."
                f" Got: {module.__class__.__name__}."
            )
        self.precision.unscale_gradients(optimizer)
        return module.clip_grad_norm_(max_norm=max_norm, norm_type=norm_type)

    @override
    def save_checkpoint(
        self,
        path: _PATH,
        state: dict[str, Union[Module, Optimizer, Any]],
        storage_options: Optional[Any] = None,
        filter: Optional[dict[str, Callable[[str, Any], bool]]] = None,
    ) -> None:
        """Save model, optimizer, and other state to a checkpoint on disk.

        If the state-dict-type is ``'full'``, the checkpoint will be written to a single file containing the
        weights, optimizer state and other metadata. If the state-dict-type is ``'sharded'``, the checkpoint gets
        saved as a directory containing one file per process, with model- and optimizer shards stored per file.
        Additionally, it creates a metadata file `meta.pt` with the rest of the user's state (only saved from
        rank 0).

        """
        if storage_options is not None:
            raise TypeError(
                "`FSDPStrategy.save_checkpoint(..., storage_options=...)` is not supported because"
                " `FSDPStrategy` does not use the `CheckpointIO`."
            )
        if filter is not None and self._state_dict_type == "sharded":
            # https://github.com/pytorch/pytorch/issues/105379
            raise NotImplementedError(
                "FSDP doesn't support loading sharded filtered checkpoints, so saving them is disabled."
            )

        # broadcast the path from rank 0 to ensure all the states are saved in a common path
        path = Path(self.broadcast(path))
        if path.is_dir() and self._state_dict_type == "full" and not _is_sharded_checkpoint(path):
            raise IsADirectoryError(f"The checkpoint path exists and is a directory: {path}")

        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

        modules = [module for module in state.values() if _has_fsdp_modules(module)]
        if len(modules) == 0:
            raise ValueError(
                "Could not find a FSDP model in the provided checkpoint state. Please provide the model as"
                " part of the state like so: `save_checkpoint(..., state={'model': model, ...})`. Make sure"
                " you set up the model (and optimizers if any) through the strategy before saving the checkpoint."
            )
        if len(modules) > 1:
            raise ValueError(
                "Found multiple FSDP models in the given state. Saving checkpoints with FSDP is"
                " currently limited to a single model per checkpoint. To save multiple models, call the"
                " save method for each model separately with a different path."
            )
        module = modules[0]

        if self._state_dict_type == "sharded":
            if path.is_file():
                path.unlink()
            path.mkdir(parents=True, exist_ok=True)

            state_dict_ctx = _get_sharded_state_dict_context(module)

            # replace the modules and optimizer objects in the state with their local state dict
            # and separate the user's metadata
            converted_state: dict[str, Any] = {}
            metadata: dict[str, Any] = {}
            with state_dict_ctx:
                for key, obj in state.items():
                    converted: Any
                    if isinstance(obj, Module):
                        converted = obj.state_dict()
                        target_dict = converted_state
                    elif isinstance(obj, Optimizer):
                        converted = FSDP.optim_state_dict(module, obj)
                        target_dict = converted_state
                    else:  # everything not a module or optimizer is considered metadata
                        converted = obj.state_dict() if isinstance(obj, _Stateful) else obj
                        target_dict = metadata
                    _apply_filter(key, filter or {}, converted, target_dict)

            _distributed_checkpoint_save(converted_state, path)

            if self.global_rank == 0:
                torch.save(metadata, path / _METADATA_FILENAME)

        elif self._state_dict_type == "full":
            if _is_sharded_checkpoint(path):
                shutil.rmtree(path)

            state_dict_ctx = _get_full_state_dict_context(module, world_size=self.world_size)
            full_state: dict[str, Any] = {}
            with state_dict_ctx:
                for key, obj in state.items():
                    if isinstance(obj, Module):
                        converted = obj.state_dict()
                    elif isinstance(obj, Optimizer):
                        converted = FSDP.optim_state_dict(module, obj)
                    else:  # everything not a module or optimizer is considered metadata
                        converted = obj.state_dict() if isinstance(obj, _Stateful) else obj
                    _apply_filter(key, filter or {}, converted, full_state)

            if self.global_rank == 0:
                torch.save(full_state, path)
        else:
            raise ValueError(f"Unknown state_dict_type: {self._state_dict_type}")

    @override
    def load_checkpoint(
        self,
        path: _PATH,
        state: Optional[Union[Module, Optimizer, dict[str, Union[Module, Optimizer, Any]]]] = None,
        strict: bool = True,
        weights_only: Optional[bool] = None,
    ) -> dict[str, Any]:
        """Load the contents from a checkpoint and restore the state of the given objects."""
        if not state:
            raise ValueError(
                f"Got FSDPStrategy.load_checkpoint(..., state={state!r}) but a state with at least "
                f" a model instance to reload is required. Pass it in like so:"
                " FSDPStrategy.load_checkpoint(..., state={'model': model, ...})"
            )
        # broadcast the path from rank 0 to ensure all the states are loaded from a common path
        path = Path(self.broadcast(path))

        if isinstance(state, Module):
            from lightning.fabric.strategies.model_parallel import _load_raw_module_state_from_path

            _load_raw_module_state_from_path(path, module=state, world_size=self.world_size, strict=strict)
            return {}

        if isinstance(state, Optimizer):
            raise NotImplementedError(
                "Loading a single optimizer object from a checkpoint is not supported yet with the FSDP strategy."
            )

        from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

        modules = {key: module for key, module in state.items() if _has_fsdp_modules(module)}
        if len(modules) == 0:
            raise ValueError(
                "Could not find a FSDP model in the provided checkpoint state. Please provide the model as"
                " part of the state like so: `load_checkpoint(..., state={'model': model, ...})`. Make sure"
                " you set up the model (and optimizers if any) through the strategy before loading the checkpoint."
            )
        optimizers = {key: optim for key, optim in state.items() if isinstance(optim, Optimizer)}
        if len(modules) > 1:
            raise ValueError(
                "Found multiple FSDP models in the given state. Loading checkpoints with FSDP is"
                " currently limited to a single model per checkpoint. To load multiple models, call the"
                " load method for each model separately with a different path."
            )
        module_key, module = list(modules.items())[0]

        if _is_sharded_checkpoint(path):
            state_dict_ctx = _get_sharded_state_dict_context(module)

            with state_dict_ctx:
                module_state = {module_key: module.state_dict()}
                _distributed_checkpoint_load(module_state, path)
                module.load_state_dict(module_state[module_key], strict=strict)

                if optimizers:
                    from torch.distributed.checkpoint import FileSystemReader

                    # TODO: replace with newer APIs
                    # https://github.com/pytorch/pytorch/issues/119800#issuecomment-1942156271
                    reader = FileSystemReader(path=path)
                    # the optimizer states must be loaded separately
                    for optim_key, optim in optimizers.items():
                        optim_state = load_sharded_optimizer_state_dict(
                            model_state_dict=module_state[module_key],
                            optimizer_key=optim_key,
                            storage_reader=reader,
                        )
                        flattened_osd = FSDP.optim_state_dict_to_load(
                            optim_state_dict=optim_state[optim_key],
                            model=module,
                            optim=optim,
                        )
                        optim.load_state_dict(flattened_osd)

            # Load metadata (anything not a module or optimizer)
            metadata = torch.load(path / _METADATA_FILENAME, weights_only=weights_only)
            requested_metadata_keys = state.keys() - modules.keys() - optimizers.keys()
            _validate_keys_for_strict_loading(requested_metadata_keys, metadata.keys(), strict=strict)
            for key in requested_metadata_keys:
                if key not in metadata:
                    continue
                state[key] = metadata.pop(key)

            # return the remaining metadata that wasn't requested as part of `state`
            return metadata

        if _is_full_checkpoint(path):
            checkpoint = _lazy_load(path)

            from lightning.fabric.strategies.model_parallel import (
                _load_raw_module_state,
                _rekey_optimizer_state_if_needed,
            )

            _load_raw_module_state(
                checkpoint.pop(module_key), module=module, world_size=self.world_size, strict=strict
            )

            if isinstance(state, Module):
                return {}

            # Materialize lazy tensors if there are any left in the checkpoint
            # The `torch.Optimizer.load_state_dict` method can't load lazy tensors because of deepcopy pickle issues
            checkpoint = _materialize_tensors(checkpoint)

            # Load optimizer states
            for optim_key, optim in optimizers.items():
                # rank0_only should be false because we need to load the optimizer state on all ranks
                with _get_full_state_dict_context(module, world_size=self.world_size, rank0_only=False):
                    temp_state_dict = _rekey_optimizer_state_if_needed(checkpoint.pop(optim_key), module)
                    optim_state_dict = FSDP.optim_state_dict_to_load(
                        optim_state_dict=temp_state_dict,
                        model=module,
                        optim=optim,
                    )
                    optim.load_state_dict(optim_state_dict)

            requested_metadata_keys = state.keys() - modules.keys() - optimizers.keys()
            _validate_keys_for_strict_loading(requested_metadata_keys, checkpoint.keys(), strict=strict)

            # Load metadata (anything not a module or optimizer)
            _move_state_into(source=checkpoint, destination=state, keys=requested_metadata_keys)

            # return the remaining metadata that wasn't requested as part of `state`
            return checkpoint

        raise ValueError(
            f"The path {str(path)!r} does not point to a valid checkpoint. Make sure the path points to either a"
            " directory with FSDP checkpoint shards, or a single file with a full checkpoint."
        )

    @classmethod
    @override
    def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None:
        if not torch.distributed.is_available():
            return
        strategy_registry.register(
            "fsdp",
            cls,
            description="Fully Sharded Data Parallel (FSDP) training",
        )
        strategy_registry.register(
            "fsdp_cpu_offload",
            cls,
            description="Fully Sharded Data Parallel (FSDP) training with Full Sharding and CPU Offloading",
            cpu_offload=True,
        )

    def _setup_distributed(self) -> None:
        reset_seed()
        self._set_world_ranks()
        self._process_group_backend = self._get_process_group_backend()
        assert self.cluster_environment is not None
        kwargs: dict[str, Any] = {"timeout": self._timeout}
        if _TORCH_GREATER_EQUAL_2_3:
            kwargs["device_id"] = self.root_device if self.root_device.type != "cpu" else None
        _init_dist_connection(self.cluster_environment, self._process_group_backend, **kwargs)

    def _get_process_group_backend(self) -> str:
        return self._process_group_backend or _get_default_process_group_backend_for_device(self.root_device)

    def _set_world_ranks(self) -> None:
        if self.cluster_environment is not None:
            self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)
            self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)
        # `LightningEnvironment.set_global_rank` will do this too, but we cannot rely on that implementation detail
        # additionally, for some implementations, the setter is a no-op, so it's safer to access the getter
        rank_zero_only.rank = utils_rank_zero_only.rank = self.global_rank


def _activation_checkpointing_kwargs(
    activation_checkpointing: Optional[Union[type[Module], list[type[Module]]]],
    activation_checkpointing_policy: Optional["_POLICY"],
) -> dict:
    if activation_checkpointing is None and activation_checkpointing_policy is None:
        return {}
    if activation_checkpointing is not None and activation_checkpointing_policy is not None:
        raise ValueError(
            "You cannot set both `activation_checkpointing` and `activation_checkpointing_policy`. Use the latter."
        )
    if activation_checkpointing is not None:
        if isinstance(activation_checkpointing, list):
            classes = tuple(activation_checkpointing)
        else:
            classes = (activation_checkpointing,)
        rank_zero_deprecation(
            f"`FSDPStrategy(activation_checkpointing={activation_checkpointing})` is deprecated, use "
            f"`FSDPStrategy(activation_checkpointing_policy={set(classes)})` instead."
        )
        return {"check_fn": lambda submodule: isinstance(submodule, classes)}
    if isinstance(activation_checkpointing_policy, set):
        return _auto_wrap_policy_kwargs(activation_checkpointing_policy, {})
    return {"auto_wrap_policy": activation_checkpointing_policy}


def _auto_wrap_policy_kwargs(policy: Optional["_POLICY"], kwargs: dict) -> dict:
    if policy is None:
        return kwargs
    if isinstance(policy, set):
        from torch.distributed.fsdp.wrap import ModuleWrapPolicy

        policy = ModuleWrapPolicy(policy)
    kwargs["auto_wrap_policy"] = policy
    return kwargs


def _setup_activation_checkpointing(module: Module, activation_checkpointing_kwargs: dict) -> None:
    if not activation_checkpointing_kwargs:
        return

    from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import CheckpointWrapper

    if any(isinstance(mod, CheckpointWrapper) for mod in module.modules()):
        rank_zero_warn(
            "FSDP checkpointing is configured, but the model already contains checkpointed layers."
            " Checkpointing will be ignored."
        )
        return

    from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
        CheckpointImpl,
        apply_activation_checkpointing,
        checkpoint_wrapper,
    )

    if not _TORCH_GREATER_EQUAL_2_2:
        checkpoint_wrapper = partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT)
    apply_activation_checkpointing(module, checkpoint_wrapper_fn=checkpoint_wrapper, **activation_checkpointing_kwargs)
FSDPStrategy
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_maryland_zip.py
{ "start": 1751, "end": 4094 }
class ____(ColumnMapExpectation):
    """Expect values in this column to be valid Maryland zipcodes.

    See https://pypi.org/project/zipcodes/ for more information.
    """

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                "valid_maryland_zip": ["21209", "20684", "21930", "21911"],
                "invalid_maryland_zip": ["-10000", "1234", "99999", "25487"],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "valid_maryland_zip"},
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "invalid_maryland_zip"},
                    "out": {"success": False},
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.valid_maryland_zip"

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",  # "experimental", "beta", or "production"
        "tags": [
            "hackathon",
            "typed-entities",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@luismdiaz01",
            "@derekma73",  # Don't forget to add your github handle here!
        ],
        "requirements": ["zipcodes"],
    }


if __name__ == "__main__":
    ExpectColumnValuesToBeValidMarylandZip().print_diagnostic_checklist()
ExpectColumnValuesToBeValidMarylandZip
python
huggingface__transformers
src/transformers/models/mistral3/modeling_mistral3.py
{ "start": 7069, "end": 7939 }
class ____(BaseModelOutputWithPast):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our
        [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    image_hidden_states: Optional[torch.FloatTensor] = None


@auto_docstring
Mistral3ModelOutputWithPast
python
rapidsai__cudf
python/cudf_polars/cudf_polars/dsl/ir.py
{ "start": 53408, "end": 54600 }
class ____(IR):
    """
    Produce a new dataframe selecting given expressions from an input.

    This is a special case of :class:`Select` where all outputs are a single row.
    """

    __slots__ = ("exprs",)
    _non_child = ("schema", "exprs")
    exprs: tuple[expr.NamedExpr, ...]
    """List of expressions to evaluate to form the new dataframe."""

    def __init__(
        self, schema: Schema, exprs: Sequence[expr.NamedExpr], df: IR
    ):  # pragma: no cover; polars doesn't emit this node yet
        self.schema = schema
        self.exprs = tuple(exprs)
        self.children = (df,)
        self._non_child_args = (self.exprs,)

    @classmethod
    @log_do_evaluate
    @nvtx_annotate_cudf_polars(message="Reduce")
    def do_evaluate(
        cls,
        exprs: tuple[expr.NamedExpr, ...],
        df: DataFrame,
        *,
        context: IRExecutionContext,
    ) -> DataFrame:  # pragma: no cover; not exposed by polars yet
        """Evaluate and return a dataframe."""
        columns = broadcast(*(e.evaluate(df) for e in exprs), stream=df.stream)
        assert all(column.size == 1 for column in columns)
        return DataFrame(columns, stream=df.stream)
Reduce
python
numba__numba
numba/typed/listobject.py
{ "start": 1452, "end": 1800 }
class ____(models.StructModel):
    def __init__(self, dmm, fe_type):
        members = [
            ('meminfo', _meminfo_listptr),
            ('data', types.voidptr),  # ptr to the C list
        ]
        super(ListModel, self).__init__(dmm, fe_type, members)


@register_model(ListTypeIterableType)
@register_model(ListTypeIteratorType)
ListModel
python
numpy__numpy
numpy/f2py/tests/test_modules.py
{ "start": 1853, "end": 2301 }
class ____(util.F2PyTest):
    module_name = "fmath"
    sources = [
        util.getpath("tests", "src", "modules", "use_modules.f90"),
    ]

    def test_gh25867(self):
        compiled_mods = [x for x in dir(self.module) if "__" not in x]
        assert "useops" in compiled_mods
        assert self.module.useops.sum_and_double(3, 7) == 20
        assert "mathops" in compiled_mods
        assert self.module.mathops.add(3, 7) == 10
TestUsedModule
python
doocs__leetcode
solution/1200-1299/1277.Count Square Submatrices with All Ones/Solution.py
{ "start": 0, "end": 534 }
class ____:
    def countSquares(self, matrix: List[List[int]]) -> int:
        m, n = len(matrix), len(matrix[0])
        f = [[0] * n for _ in range(m)]
        ans = 0
        for i, row in enumerate(matrix):
            for j, v in enumerate(row):
                if v == 0:
                    continue
                if i == 0 or j == 0:
                    f[i][j] = 1
                else:
                    f[i][j] = min(f[i - 1][j - 1], f[i - 1][j], f[i][j - 1]) + 1
                ans += f[i][j]
        return ans
Solution
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/single_string_slots.py
{ "start": 299, "end": 390 }
class ____:
    __slots__ = ("bar",)

    def __init__(self, bar):
        self.bar = bar
Foo
python
huggingface__transformers
src/transformers/models/qwen3_next/modeling_qwen3_next.py
{ "start": 56254, "end": 56361 }
class ____(GenericForTokenClassification, Qwen3NextPreTrainedModel):
    pass
Qwen3NextForTokenClassification
python
crytic__slither
slither/detectors/functions/cyclomatic_complexity.py
{ "start": 508, "end": 1856 }
class ____(AbstractDetector):
    """
    Detects functions with high (> 11) cyclomatic complexity.
    """

    ARGUMENT = "cyclomatic-complexity"
    HELP = "Detects functions with high (> 11) cyclomatic complexity"
    IMPACT = DetectorClassification.INFORMATIONAL
    CONFIDENCE = DetectorClassification.HIGH

    WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#cyclomatic-complexity"

    WIKI_TITLE = "Cyclomatic complexity"
    WIKI_DESCRIPTION = "Detects functions with high (> 11) cyclomatic complexity."
    WIKI_EXPLOIT_SCENARIO = ""
    WIKI_RECOMMENDATION = (
        "Reduce cyclomatic complexity by splitting the function into several smaller subroutines."
    )

    def _detect(self) -> List[Output]:
        results = []
        high_cc_functions: List[Tuple[Function, int]] = []
        f: Function
        for c in self.compilation_unit.contracts:
            for f in c.functions_declared:
                _check_for_high_cc(high_cc_functions, f)

        for f in self.compilation_unit.functions_top_level:
            _check_for_high_cc(high_cc_functions, f)

        for f, cc in high_cc_functions:
            info: DETECTOR_INFO = [f, f" has a high cyclomatic complexity ({cc}).\n"]
            res = self.generate_result(info)
            results.append(res)
        return results
CyclomaticComplexity
python
Pylons__pyramid
tests/test_testing.py
{ "start": 15030, "end": 16773 }
class ____(unittest.TestCase):
    def _callFUT(self, **kw):
        from pyramid.testing import tearDown

        return tearDown(**kw)

    def tearDown(self):
        from pyramid.threadlocal import manager

        manager.clear()
        getSiteManager.reset()

    def _assertSMHook(self, hook):
        result = getSiteManager.sethook(None)
        self.assertEqual(result, hook)

    def _setSMHook(self, hook):
        getSiteManager.sethook(hook)

    def test_defaults(self):
        from pyramid.threadlocal import manager

        registry = DummyRegistry()
        old = {'registry': registry}
        hook = lambda *arg: None
        try:
            self._setSMHook(hook)
            manager.push(old)
            self._callFUT()
            current = manager.get()
            self.assertNotEqual(current, old)
            self.assertEqual(registry.inited, 2)
        finally:
            result = getSiteManager.sethook(None)
            self.assertNotEqual(result, hook)

    def test_registry_cannot_be_inited(self):
        from pyramid.threadlocal import manager

        registry = DummyRegistry()

        def raiseit(name):
            raise TypeError

        registry.__init__ = raiseit
        old = {'registry': registry}
        try:
            manager.push(old)
            self._callFUT()  # doesn't blow up
            current = manager.get()
            self.assertNotEqual(current, old)
            self.assertEqual(registry.inited, 1)
        finally:
            manager.clear()

    def test_unhook_zc_false(self):
        hook = lambda *arg: None
        try:
            self._setSMHook(hook)
            self._callFUT(unhook_zca=False)
        finally:
            self._assertSMHook(hook)
Test_tearDown
python
getsentry__sentry
src/sentry/releases/endpoints/project_release_files.py
{ "start": 6907, "end": 8475 }
class ____:
    """Provides artifact data to ChainPaginator on-demand"""

    def __init__(
        self, dist: Distribution | None, files: dict, query: list[str], checksums: list[str]
    ):
        self._dist = dist
        self._files = files
        self._query = query
        self._checksums = checksums

    @cached_property
    def sorted_and_filtered_files(self) -> list[tuple[str, dict]]:
        query = self._query
        checksums = self._checksums
        files = [
            # Mimic "or" operation applied for real querysets:
            (url, info)
            for url, info in self._files.items()
            if (not query or any(search_string.lower() in url.lower() for search_string in query))
            and (not checksums or any(checksum in info["sha1"] for checksum in checksums))
        ]
        files.sort(key=lambda item: item[0])
        return files

    def __len__(self) -> int:
        return len(self.sorted_and_filtered_files)

    def __getitem__(self, range):
        return [
            pseudo_releasefile(url, info, self._dist)
            for url, info in self.sorted_and_filtered_files[range]
        ]


def pseudo_releasefile(url, info, dist):
    """Create a pseudo-ReleaseFile from an ArtifactIndex entry"""
    return ReleaseFile(
        name=url,
        file=File(
            headers=info.get("headers", {}),
            size=info["size"],
            timestamp=info["date_created"],
            checksum=info["sha1"],
        ),
        dist_id=dist.id if dist else dist,
    )


@region_silo_endpoint
ArtifactSource
python
realpython__materials
duck-typing-python/shapes.py
{ "start": 147, "end": 375 }
class ____:
    def __init__(self, radius: float) -> None:
        self.radius = radius

    def area(self) -> float:
        return pi * self.radius**2

    def perimeter(self) -> float:
        return 2 * pi * self.radius
Circle
python
getsentry__sentry
tests/sentry/workflow_engine/endpoints/test_validators.py
{ "start": 3150, "end": 3420 }
class ____(MetricIssueComparisonConditionValidator):
    supported_conditions = frozenset([Condition.GREATER_OR_EQUAL, Condition.LESS_OR_EQUAL])
    supported_condition_results = frozenset([DetectorPriorityLevel.HIGH, DetectorPriorityLevel.LOW])
MockDataConditionValidator
python
pandas-dev__pandas
pandas/tests/test_nanops.py
{ "start": 33808, "end": 36100 }
class ____:
    # xref GH 11974

    # Test data + skewness value (computed with scipy.stats.skew)
    @pytest.fixture
    def samples(self):
        return np.sin(np.linspace(0, 1, 200))

    @pytest.fixture
    def actual_skew(self):
        return -0.1875895205961754

    @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5])
    def test_constant_series(self, val):
        # xref GH 11974
        data = val * np.ones(300)
        skew = nanops.nanskew(data)
        assert skew == 0.0

    def test_all_finite(self):
        alpha, beta = 0.3, 0.1
        left_tailed = self.prng.beta(alpha, beta, size=100)
        assert nanops.nanskew(left_tailed) < 0

        alpha, beta = 0.1, 0.3
        right_tailed = self.prng.beta(alpha, beta, size=100)
        assert nanops.nanskew(right_tailed) > 0

    def test_ground_truth(self, samples, actual_skew):
        skew = nanops.nanskew(samples)
        tm.assert_almost_equal(skew, actual_skew)

    def test_axis(self, samples, actual_skew):
        samples = np.vstack([samples, np.nan * np.ones(len(samples))])
        skew = nanops.nanskew(samples, axis=1)
        tm.assert_almost_equal(skew, np.array([actual_skew, np.nan]))

    def test_nans(self, samples):
        samples = np.hstack([samples, np.nan])
        skew = nanops.nanskew(samples, skipna=False)
        assert np.isnan(skew)

    def test_nans_skipna(self, samples, actual_skew):
        samples = np.hstack([samples, np.nan])
        skew = nanops.nanskew(samples, skipna=True)
        tm.assert_almost_equal(skew, actual_skew)

    @pytest.mark.parametrize(
        "initial_data, nobs",
        [
            ([-2.05191341e-05, -4.10391103e-05], 27),
            ([-2.05191341e-10, -4.10391103e-10], 27),
            ([-2.05191341e-05, -4.10391103e-05], 10_000),
            ([-2.05191341e-10, -4.10391103e-10], 10_000),
        ],
    )
    def test_low_variance(self, initial_data, nobs):
        st = pytest.importorskip("scipy.stats")
        data = np.zeros((nobs,), dtype=np.float64)
        data[: len(initial_data)] = initial_data

        skew = nanops.nanskew(data)
        expected = st.skew(data, bias=False)
        tm.assert_almost_equal(skew, expected)

    @property
    def prng(self):
        return np.random.default_rng(2)
TestNanskewFixedValues
python
astropy__astropy
astropy/table/tests/test_masked.py
{ "start": 461, "end": 1129 }
class ____:
    def setup_method(self, method):
        self.a = MaskedColumn(name="a", data=[1, 2, 3], fill_value=1)
        self.b = MaskedColumn(name="b", data=[4, 5, 6], mask=True)
        self.c = MaskedColumn(name="c", data=[7, 8, 9], mask=False)
        self.d_mask = np.array([False, True, False])
        self.d = MaskedColumn(name="d", data=[7, 8, 7], mask=self.d_mask)
        self.t = Table([self.a, self.b], masked=True)
        self.ca = Column(name="ca", data=[1, 2, 3])
        self.sc = MaskedColumn(
            name="sc",
            data=[(1, 1.0), (2, 2.0), (3, 3.0)],
            dtype="i8,f8",
            fill_value=(0, -1.0),
        )
SetupData
python
tiangolo__fastapi
docs_src/body/tutorial002_py310.py
{ "start": 61, "end": 441 }
class ____(BaseModel):
    name: str
    description: str | None = None
    price: float
    tax: float | None = None


app = FastAPI()


@app.post("/items/")
async def create_item(item: Item):
    item_dict = item.dict()
    if item.tax is not None:
        price_with_tax = item.price + item.tax
        item_dict.update({"price_with_tax": price_with_tax})
    return item_dict
Item
python
google__jax
tests/pallas/gpu_attention_test.py
{ "start": 6373, "end": 6526 }
class ____(DecodeAttentionTest):
    INTERPRET = True


if __name__ == "__main__":
    absltest.main(testLoader=jtu.JaxTestLoader())
DecodeAttentionInterpretTest
python
dask__dask
dask/dataframe/dask_expr/_expr.py
{ "start": 105435, "end": 105917 }
class ____(Blockwise):
    _parameters = ["frame", "method", "skip_check"]
    operation = staticmethod(methods.fillna_check)
    _projection_passthrough = True

    @functools.cached_property
    def _meta(self):
        return self.frame._meta

    def _task(self, name: Key, index: int) -> Task:
        args = [self._blockwise_arg(op, index) for op in self._args]
        args[-1] = index != self.skip_check(self.frame)
        return Task(name, self.operation, *args)
FillnaCheck