Dataset columns:

  language    : stringclasses (1 value)
  repo        : stringclasses (346 values)
  path        : stringlengths (6 to 201)
  class_span  : dict
  source      : stringlengths (21 to 2.38M)
  target      : stringlengths (1 to 96)
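
Each row below pairs the source of a Python class whose name has been masked with ____ (the source column) against the original class name (the target column). As a minimal sketch of how the columns fit together, assuming the rows have been exported to a local JSON Lines file called rows.jsonl (the file name and the loading code are illustrative, not part of this dataset):

import json

# Hypothetical local export of the rows shown below, one JSON object per line.
ROWS_PATH = "rows.jsonl"

with open(ROWS_PATH, encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        masked_source = row["source"]   # class definition with its name replaced by "____"
        class_name = row["target"]      # the masked class name
        span = row["class_span"]        # start/end character offsets (apparently into the original file)
        # Substituting the target back in reconstructs the original class definition.
        restored = masked_source.replace("____", class_name, 1)
        assert class_name in restored
        assert span["end"] > span["start"]
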
language: python
repo: astropy__astropy
path: astropy/tests/runner.py
class_span: { "start": 11850, "end": 21884 }
source:
class ____(TestRunnerBase): """ A test runner for astropy tests. """ def packages_path(self, packages, base_path, error=None, warning=None): """ Generates the path for multiple packages. Parameters ---------- packages : str Comma separated string of packages. base_path : str Base path to the source code or documentation. error : str Error message to be raised as ``ValueError``. Individual package name and path can be accessed by ``{name}`` and ``{path}`` respectively. No error is raised if `None`. (Default: `None`) warning : str Warning message to be issued. Individual package name and path can be accessed by ``{name}`` and ``{path}`` respectively. No warning is issues if `None`. (Default: `None`) Returns ------- paths : list of str List of strings of existing package paths. """ packages = packages.split(",") paths = [] for package in packages: path = os.path.join(base_path, package.replace(".", os.path.sep)) if not os.path.isdir(path): info = {"name": package, "path": path} if error is not None: raise ValueError(error.format(**info)) if warning is not None: warnings.warn(warning.format(**info)) else: paths.append(path) return paths # Increase priority so this warning is displayed first. @keyword(priority=1000) def coverage(self, coverage, kwargs): if coverage: warnings.warn( "The coverage option is ignored on run_tests, since it " "can not be made to work in that context. Use " "'python setup.py test --coverage' instead.", AstropyWarning, ) return [] # test_path depends on self.package_path so make sure this runs before # test_path. @keyword(priority=1) def package(self, package, kwargs): """ package : str, optional The name of a specific package to test, e.g. 'io.fits' or 'utils'. Accepts comma separated string to specify multiple packages. If nothing is specified all default tests are run. """ if package is None: self.package_path = [self.base_path] else: error_message = "package to test is not found: {name} (at path {path})." self.package_path = self.packages_path( package, self.base_path, error=error_message ) if not kwargs["test_path"]: return self.package_path return [] @keyword() def test_path(self, test_path, kwargs): """ test_path : str, optional Specify location to test by path. May be a single file or directory. Must be specified absolutely or relative to the calling directory. """ all_args = [] # Ensure that the package kwarg has been run. self.package(kwargs["package"], kwargs) if test_path: base, ext = os.path.splitext(test_path) if ext in (".rst", ""): if kwargs["docs_path"] is None: # This shouldn't happen from "python setup.py test" raise ValueError( "Can not test .rst files without a docs_path specified." ) abs_docs_path = os.path.abspath(kwargs["docs_path"]) abs_test_path = os.path.abspath( os.path.join(abs_docs_path, os.pardir, test_path) ) common = os.path.commonprefix((abs_docs_path, abs_test_path)) if os.path.exists(abs_test_path) and common == abs_docs_path: # Turn on the doctest_rst plugin all_args.append("--doctest-rst") test_path = abs_test_path # Check that the extensions are in the path and not at the end to # support specifying the name of the test, i.e. # test_quantity.py::test_unit if not ( os.path.isdir(test_path) or (".py" in test_path or ".rst" in test_path) ): raise ValueError( "Test path must be a directory or a path to a .py or .rst file" ) return all_args + [test_path] return [] @keyword() def args(self, args, kwargs): """ args : str, optional Additional arguments to be passed to ``pytest.main`` in the ``args`` keyword argument. 
""" if args: return shlex.split(args, posix=not sys.platform.startswith("win")) return [] @keyword(default_value=[]) def plugins(self, plugins, kwargs): """ plugins : list, optional Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword argument. """ # Plugins are handled independently by `run_tests` so we define this # keyword just for the docstring return [] @keyword() def verbose(self, verbose, kwargs): """ verbose : bool, optional Convenience option to turn on verbose output from pytest. Passing True is the same as specifying ``-v`` in ``args``. """ if verbose: return ["-v"] return [] @keyword() def pastebin(self, pastebin, kwargs): """ pastebin : ('failed', 'all', None), optional Convenience option for turning on pytest pastebin output. Set to 'failed' to upload info for failed tests, or 'all' to upload info for all tests. """ if pastebin is not None: if pastebin in ["failed", "all"]: return [f"--pastebin={pastebin}"] else: raise ValueError("pastebin should be 'failed' or 'all'") return [] @keyword(default_value="none") def remote_data(self, remote_data, kwargs): """ remote_data : {'none', 'astropy', 'any'}, optional Controls whether to run tests marked with @pytest.mark.remote_data. This can be set to run no tests with remote data (``none``), only ones that use data from http://data.astropy.org (``astropy``), or all tests that use remote data (``any``). The default is ``none``. """ if remote_data is True: remote_data = "any" elif remote_data is False: remote_data = "none" elif remote_data not in ("none", "astropy", "any"): warnings.warn( "The remote_data option should be one of " f"none/astropy/any (found {remote_data}). For backward-compatibility, " "assuming 'any', but you should change the option to be " "one of the supported ones to avoid issues in " "future.", AstropyDeprecationWarning, ) remote_data = "any" return [f"--remote-data={remote_data}"] @keyword() def pdb(self, pdb, kwargs): """ pdb : bool, optional Turn on PDB post-mortem analysis for failing tests. Same as specifying ``--pdb`` in ``args``. """ if pdb: return ["--pdb"] return [] @keyword(0) def parallel(self, parallel, kwargs): """ parallel : int or 'auto', optional When provided, run the tests in parallel on the specified number of CPUs. If parallel is ``'auto'``, it will use the all the cores on the machine. Requires the ``pytest-xdist`` plugin. """ if parallel != 0: try: from xdist import plugin # noqa: F401 except ImportError: raise SystemError( "running tests in parallel requires the pytest-xdist package" ) return ["-n", str(parallel)] return [] @keyword() def docs_path(self, docs_path, kwargs): """ docs_path : str, optional The path to the documentation .rst files. """ paths = [] if docs_path is not None and not kwargs["skip_docs"]: if kwargs["package"] is not None: warning_message = ( "Can not test .rst docs for {name}, since " "docs path ({path}) does not exist." ) paths = self.packages_path( kwargs["package"], docs_path, warning=warning_message ) elif not kwargs["test_path"]: paths = [docs_path] if paths and not kwargs["test_path"]: paths.append("--doctest-rst") return paths @keyword() def skip_docs(self, skip_docs, kwargs): """ skip_docs : `bool`, optional When `True`, skips running the doctests in the .rst files. """ # Skip docs is a bool used by docs_path only. return [] @keyword() def repeat(self, repeat, kwargs): """ repeat : `int`, optional If set, specifies how many times each test should be run. This is useful for diagnosing sporadic failures. 
""" if repeat: return [f"--repeat={repeat}"] return [] # Override run_tests for astropy-specific fixes def run_tests(self, **kwargs): # This prevents cyclical import problems that make it # impossible to test packages that define Table types on their # own. from astropy.table import Table # noqa: F401 return super().run_tests(**kwargs)
target: TestRunner

language: python
repo: getsentry__sentry
path: tests/sentry/snuba/test_tasks.py
class_span: { "start": 4527, "end": 12605 }
source:
class ____(BaseSnubaTaskTest): expected_status = QuerySubscription.Status.CREATING task = create_subscription_in_snuba # type: ignore[assignment] def test_already_created(self) -> None: sub = self.create_subscription( QuerySubscription.Status.CREATING, subscription_id=uuid4().hex ) create_subscription_in_snuba(sub.id) self.metrics.incr.assert_any_call("snuba.subscriptions.create.already_created_in_snuba") def test(self) -> None: sub = self.create_subscription(QuerySubscription.Status.CREATING) create_subscription_in_snuba(sub.id) sub = QuerySubscription.objects.get(id=sub.id) assert sub.status == QuerySubscription.Status.ACTIVE.value assert sub.subscription_id is not None def test_status_join(self) -> None: sub = self.create_subscription(QuerySubscription.Status.CREATING, query="status:unresolved") create_subscription_in_snuba(sub.id) sub = QuerySubscription.objects.get(id=sub.id) assert sub.status == QuerySubscription.Status.ACTIVE.value assert sub.subscription_id is not None def test_group_id(self) -> None: group_id = 1234 sub = self.create_subscription( QuerySubscription.Status.CREATING, query=f"issue.id:{group_id}" ) with patch.object(_snuba_pool, "urlopen", side_effect=_snuba_pool.urlopen) as urlopen: create_subscription_in_snuba(sub.id) request_body = json.loads(urlopen.call_args[1]["body"]) assert f"group_id = {group_id}" in request_body["query"] sub = QuerySubscription.objects.get(id=sub.id) assert sub.status == QuerySubscription.Status.ACTIVE.value assert sub.subscription_id is not None def test_transaction(self) -> None: sub = self.create_subscription( QuerySubscription.Status.CREATING, dataset=Dataset.Transactions ) create_subscription_in_snuba(sub.id) sub = QuerySubscription.objects.get(id=sub.id) assert sub.status == QuerySubscription.Status.ACTIVE.value assert sub.subscription_id is not None def test_subscription_with_query_extra(self) -> None: sub = self.create_subscription(QuerySubscription.Status.CREATING, query_extra="foo:bar") create_subscription_in_snuba(sub.id) sub = QuerySubscription.objects.get(id=sub.id) assert sub.status == QuerySubscription.Status.ACTIVE.value assert sub.subscription_id is not None def test_subscription_with_query_extra_but_no_query(self) -> None: sub = self.create_subscription(QuerySubscription.Status.CREATING, query_extra="foo:bar") snuba_query = sub.snuba_query snuba_query.update(query="") create_subscription_in_snuba(sub.id) sub = QuerySubscription.objects.get(id=sub.id) assert sub.status == QuerySubscription.Status.ACTIVE.value assert sub.subscription_id is not None @responses.activate def test_adds_type(self) -> None: sub = self.create_subscription(QuerySubscription.Status.CREATING) with patch("sentry.snuba.tasks._snuba_pool") as pool: resp = Mock() resp.status = 202 resp.data = json.dumps({"subscription_id": "123"}) pool.urlopen.return_value = resp create_subscription_in_snuba(sub.id) request_body = json.loads(pool.urlopen.call_args[1]["body"]) assert "type = 'error'" in request_body["query"] @responses.activate def test_granularity_on_metrics_crash_rate_alerts(self) -> None: for tag in [ SessionMRI.RAW_SESSION.value, SessionMRI.RAW_USER.value, "session.status", ]: rh_indexer_record(self.organization.id, tag) for time_window, expected_granularity in [ (30, 10), (90, 60), (5 * 60, 3600), (25 * 60, 3600 * 24), ]: for idx, aggregate in enumerate(["sessions", "users"]): sub = self.create_subscription( dataset=Dataset.Metrics, aggregate=f"percentage({aggregate}_crashed, {aggregate}) AS " f"_crash_rate_alert_aggregate", query="", 
time_window=int(timedelta(minutes=time_window).total_seconds()), status=QuerySubscription.Status.CREATING, ) with patch("sentry.snuba.tasks._snuba_pool") as pool: resp = Mock() resp.status = 202 resp.data = json.dumps({"subscription_id": "123" + f"{time_window + idx}"}) pool.urlopen.return_value = resp create_subscription_in_snuba(sub.id) request_body = json.loads(pool.urlopen.call_args[1]["body"]) assert request_body["granularity"] == expected_granularity def test_insights_query_spm(self) -> None: time_window = 3600 sub = self.create_subscription( QuerySubscription.Status.CREATING, query="span.module:db", aggregate="spm()", dataset=Dataset.PerformanceMetrics, time_window=time_window, ) with patch("sentry.snuba.tasks._snuba_pool") as pool: resp = Mock() resp.status = 202 resp.data = json.dumps({"subscription_id": "123"}) pool.urlopen.return_value = resp create_subscription_in_snuba(sub.id) request_body = json.loads(pool.urlopen.call_args[1]["body"]) # Validate that the spm function uses the correct time window assert ( "divide(countIf(value, equals(metric_id, 9223372036854776213)), divide(3600, 60)) AS `spm`" in request_body["query"] ) assert request_body["granularity"] == 60 assert request_body["time_window"] == time_window sub = QuerySubscription.objects.get(id=sub.id) assert sub.status == QuerySubscription.Status.ACTIVE.value assert sub.subscription_id is not None def test_eap_rpc_query_count(self) -> None: time_window = 3600 sub = self.create_subscription( QuerySubscription.Status.CREATING, query="span.op:http.client", aggregate="count(span.duration)", dataset=Dataset.EventsAnalyticsPlatform, time_window=time_window, ) with patch.object(_snuba_pool, "urlopen", side_effect=_snuba_pool.urlopen) as urlopen: create_subscription_in_snuba(sub.id) rpc_request_body = urlopen.call_args[1]["body"] createSubscriptionRequest = CreateSubscriptionRequest.FromString(rpc_request_body) assert createSubscriptionRequest.time_window_secs == time_window assert ( createSubscriptionRequest.time_series_request.filter.comparison_filter.op == ComparisonFilter.Op.OP_EQUALS ) assert ( createSubscriptionRequest.time_series_request.filter.comparison_filter.key.name == "sentry.op" ) assert ( createSubscriptionRequest.time_series_request.filter.comparison_filter.value.val_str == "http.client" ) assert ( createSubscriptionRequest.time_series_request.expressions[0].aggregation.aggregate == FUNCTION_COUNT ) assert ( createSubscriptionRequest.time_series_request.expressions[0].aggregation.key.name == "sentry.project_id" ) # Validate that the spm function uses the correct time window sub = QuerySubscription.objects.get(id=sub.id) assert sub.status == QuerySubscription.Status.ACTIVE.value assert sub.subscription_id is not None
target: CreateSubscriptionInSnubaTest

language: python
repo: neetcode-gh__leetcode
path: python/0349-intersection-of-two-arrays.py
class_span: { "start": 0, "end": 275 }
source:
class ____:
    def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
        seen = set(nums1)
        res = []
        for n in nums2:
            if n in seen:
                res.append(n)
                seen.remove(n)
        return res
target: Solution

language: python
repo: numba__numba
path: numba/tests/test_random.py
class_span: { "start": 53194, "end": 55403 }
source:
class ____(BaseTest):
    """
    Test np.random.multinomial.
    """

    # A biased dice
    pvals = np.array([1, 1, 1, 2, 3, 1], dtype=np.float64)
    pvals /= pvals.sum()

    def _check_sample(self, n, pvals, sample):
        """
        Check distribution of some samples.
        """
        self.assertIsInstance(sample, np.ndarray)
        self.assertEqual(sample.shape, (len(pvals),))
        self.assertIn(sample.dtype, (np.dtype('int32'), np.dtype('int64')))
        # Statistical properties
        self.assertEqual(sample.sum(), n)
        for p, nexp in zip(pvals, sample):
            self.assertGreaterEqual(nexp, 0)
            self.assertLessEqual(nexp, n)
            pexp = float(nexp) / n
            self.assertGreaterEqual(pexp, p * 0.5)
            self.assertLessEqual(pexp, p * 2.0)

    def test_multinomial_2(self):
        """
        Test multinomial(n, pvals)
        """
        cfunc = jit(nopython=True)(numpy_multinomial2)
        n, pvals = 1000, self.pvals
        res = cfunc(n, pvals)
        self._check_sample(n, pvals, res)
        # pvals as list
        pvals = list(pvals)
        res = cfunc(n, pvals)
        self._check_sample(n, pvals, res)
        # A case with extreme probabilities
        n = 1000000
        pvals = np.array([1, 0, n // 100, 1], dtype=np.float64)
        pvals /= pvals.sum()
        res = cfunc(n, pvals)
        self._check_sample(n, pvals, res)

    def test_multinomial_3_int(self):
        """
        Test multinomial(n, pvals, size: int)
        """
        cfunc = jit(nopython=True)(numpy_multinomial3)
        n, pvals = 1000, self.pvals
        k = 10
        res = cfunc(n, pvals, k)
        self.assertEqual(res.shape[0], k)
        for sample in res:
            self._check_sample(n, pvals, sample)

    def test_multinomial_3_tuple(self):
        """
        Test multinomial(n, pvals, size: tuple)
        """
        cfunc = jit(nopython=True)(numpy_multinomial3)
        n, pvals = 1000, self.pvals
        k = (3, 4)
        res = cfunc(n, pvals, k)
        self.assertEqual(res.shape[:-1], k)
        for sample in res.reshape((-1, res.shape[-1])):
            self._check_sample(n, pvals, sample)
target: TestRandomMultinomial

language: python
repo: kamyu104__LeetCode-Solutions
path: Python/can-convert-string-in-k-moves.py
class_span: { "start": 48, "end": 520 }
source:
class ____(object):
    def canConvertString(self, s, t, k):
        """
        :type s: str
        :type t: str
        :type k: int
        :rtype: bool
        """
        if len(s) != len(t):
            return False
        cnt = [0]*26
        for a, b in itertools.izip(s, t):
            diff = (ord(b)-ord(a)) % len(cnt)
            if diff != 0 and cnt[diff]*len(cnt) + diff > k:
                return False
            cnt[diff] += 1
        return True
target: Solution

language: python
repo: microsoft__pyright
path: packages/pyright-internal/src/tests/samples/annotated1.py
class_span: { "start": 1369, "end": 2027 }
source:
class ____:
    x: Annotated[InitVar[int], "metadata"]


d1 = B(x=4)

# This should generate an error because x is not an actual member.
d1.x

Alias1 = Annotated[_T, ""]
Alias2 = str
Alias3 = Alias1[Alias2]
reveal_type(Alias3, expected_text="type[str]")

x2: Annotated[str, [*(1, 2)]]
x3: Annotated[str, (temp := 1)]


async def func3():
    x4: Annotated[str, await func3()]


x5: Annotated[str, f""]
x6: Annotated[str, "abc"]
x7: Annotated[str, "a\nb"]
x8: Annotated[str, *(1, 2, 3)]


def func4():
    return Annotated[int, 2 + 2]


reveal_type(func4(), expected_text="Annotated")

x9 = list[Annotated[int, ""]]()
reveal_type(x9, expected_text="list[int]")
target: B

language: python
repo: microsoft__pyright
path: packages/pyright-internal/src/tests/samples/protocolExplicit1.py
class_span: { "start": 1649, "end": 1697 }
source:
class ____(Mixin7, Protocol7):
    pass
target: Concrete7B

language: python
repo: rapidsai__cudf
path: python/cudf_polars/cudf_polars/experimental/base.py
class_span: { "start": 11691, "end": 12242 }
source:
class ____:
    """Column statistics collector."""

    __slots__ = ("column_stats", "join_info", "row_count")
    row_count: dict[IR, ColumnStat[int]]
    """Estimated row count for each IR node."""
    column_stats: dict[IR, dict[str, ColumnStats]]
    """Column statistics for each IR node."""
    join_info: JoinInfo
    """Join information."""

    def __init__(self) -> None:
        self.row_count: dict[IR, ColumnStat[int]] = {}
        self.column_stats: dict[IR, dict[str, ColumnStats]] = {}
        self.join_info = JoinInfo()
target: StatsCollector

language: python
repo: ray-project__ray
path: rllib/utils/replay_buffers/tests/test_multi_agent_prioritized_replay_buffer.py
class_span: { "start": 368, "end": 8439 }
source:
class ____(unittest.TestCase): batch_id = 0 alpha = 1.0 beta = 1.0 def _generate_data(self): self.batch_id += 1 return SampleBatch( { SampleBatch.T: [0, 1], SampleBatch.ACTIONS: 2 * [np.random.choice([0, 1])], SampleBatch.REWARDS: 2 * [np.random.rand()], SampleBatch.OBS: 2 * [np.random.random((4,))], SampleBatch.NEXT_OBS: 2 * [np.random.random((4,))], SampleBatch.TERMINATEDS: 2 * [False, False], SampleBatch.TRUNCATEDS: 2 * [False, True], SampleBatch.SEQ_LENS: [2], SampleBatch.EPS_ID: 2 * [self.batch_id], SampleBatch.AGENT_INDEX: 2 * [self.batch_id], "batch_id": 2 * [self.batch_id], } ) def _add_sample_batch_to_buffer(self, buffer, batch_size, num_batches=5, **kwargs): for i in range(num_batches): data = [self._generate_data() for _ in range(batch_size)] batch = concat_samples(data) buffer.add(batch, **kwargs) def _add_multi_agent_batch_to_buffer( self, buffer, num_policies, num_batches=5, **kwargs ): def _generate_data(policy_id): batch = SampleBatch( { SampleBatch.T: [0], SampleBatch.ACTIONS: [np.random.choice([0, 1])], SampleBatch.REWARDS: [np.random.rand()], SampleBatch.OBS: [np.random.random((4,))], SampleBatch.NEXT_OBS: [np.random.random((4,))], SampleBatch.TERMINATEDS: [np.random.choice([False, True])], SampleBatch.TRUNCATEDS: [np.random.choice([False, False])], SampleBatch.EPS_ID: [self.batch_id], SampleBatch.AGENT_INDEX: [self.batch_id], "batch_id": [self.batch_id], "policy_id": [policy_id], } ) return batch for i in range(num_batches): # genera a few policy batches policy_batches = {idx: _generate_data(idx) for idx in range(num_policies)} self.batch_id += 1 batch = MultiAgentBatch(policy_batches, 1) buffer.add(batch, **kwargs) def test_policy_id_of_multi_agent_batches_independent(self): """Test if indepent sampling yields a MultiAgentBatch with the correct policy id.""" self.batch_id = 0 # Test lockstep mode with different policy ids using MultiAgentBatches buffer = MultiAgentPrioritizedReplayBuffer( capacity=10, replay_mode="independent", num_shards=1, ) self._add_multi_agent_batch_to_buffer(buffer, num_policies=1, num_batches=1) mabatch = buffer.sample(1) assert list(mabatch.policy_batches.keys())[0] == 0 def test_lockstep_mode(self): """Test the lockstep mode by adding batches from multiple policies.""" self.batch_id = 0 num_policies = 4 num_batches = 13 buffer_size = 15 # Test lockstep mode with different policy ids using MultiAgentBatches buffer = MultiAgentPrioritizedReplayBuffer( capacity=buffer_size, replay_mode="lockstep", num_shards=1, ) self._add_multi_agent_batch_to_buffer( buffer, num_policies=num_policies, num_batches=num_batches ) _id, _buffer = next(buffer.replay_buffers.items().__iter__()) assert _id == _ALL_POLICIES assert len(buffer) == num_batches # Add batches until the buffer is full self._add_multi_agent_batch_to_buffer( buffer, num_policies=num_policies, num_batches=num_batches ) assert _id == _ALL_POLICIES assert len(buffer) == buffer_size def test_independent_mode(self): """Test the lockstep mode by adding batches from multiple policies.""" self.batch_id = 0 num_batches = 3 buffer_size = 15 num_policies = 2 # Test lockstep mode with different policy ids using MultiAgentBatches buffer = MultiAgentPrioritizedReplayBuffer( capacity=buffer_size, replay_mode="independent", num_shards=1, ) self._add_multi_agent_batch_to_buffer( buffer, num_policies=num_policies, num_batches=num_batches ) # Sample 4 SampleBatches from only one policy and put it into a # MultiAgentBatch for _id in range(num_policies): for __id in buffer.sample(4, 
policy_id=_id).policy_batches[_id][ "policy_id" ]: assert __id == _id # Sample without specifying the policy should yield approx. the same # number of batches from each policy num_sampled_dict = {_id: 0 for _id in range(num_policies)} num_samples = 200 for i in range(num_samples): num_items = np.random.randint(1, 5) for _id, batch in buffer.sample(num_items=num_items).policy_batches.items(): num_sampled_dict[_id] += 1 assert len(batch) == num_items assert np.allclose( np.array(list(num_sampled_dict.values())), len(num_sampled_dict) * [200], atol=0.1, ) def test_update_priorities(self): num_batches = 5 buffer_size = 15 # Buffer needs to be in independent mode, lockstep is not supported buffer = MultiAgentPrioritizedReplayBuffer( capacity=buffer_size, prioritized_replay_alpha=self.alpha, prioritized_replay_beta=self.beta, replay_mode="independent", replay_sequence_length=2, num_shards=1, ) # Insert n samples for i in range(num_batches): data = self._generate_data() buffer.add(data, weight=1.0) assert len(buffer) == i + 1 # Fetch records, their indices and weights. mabatch = buffer.sample(3) assert type(mabatch) is MultiAgentBatch samplebatch = mabatch.policy_batches[DEFAULT_POLICY_ID] weights = samplebatch["weights"] indices = samplebatch["batch_indexes"] check(weights, np.ones(shape=(6,))) assert 6 == len(indices) assert len(buffer) == num_batches policy_buffer = buffer.replay_buffers[DEFAULT_POLICY_ID] assert policy_buffer._next_idx == num_batches # Update weight of indices 0, 2, 3, 4, like in our # PrioritizedReplayBuffer tests priority_dict = { DEFAULT_POLICY_ID: ( np.array([0, 2, 3, 4]), np.array([0.01, 0.01, 0.01, 0.01]), ) } buffer.update_priorities(priority_dict) # Expect to sample almost only index 1 # (which still has a weight of 1.0). for _ in range(10): mabatch = buffer.sample(1000) assert type(mabatch) is MultiAgentBatch samplebatch = mabatch.policy_batches[DEFAULT_POLICY_ID] assert type(mabatch) is MultiAgentBatch indices = samplebatch["batch_indexes"] self.assertTrue(1900 < np.sum(indices) < 2200) # Test get_state/set_state. state = buffer.get_state() new_buffer = MultiAgentPrioritizedReplayBuffer( capacity=buffer_size, prioritized_replay_alpha=self.alpha, prioritized_replay_beta=self.beta, replay_mode="independent", num_shards=1, ) new_buffer.set_state(state) batch = new_buffer.sample(1000).policy_batches[DEFAULT_POLICY_ID] indices = batch["batch_indexes"] self.assertTrue(1900 < np.sum(indices) < 2200) if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
target: TestMultiAgentPrioritizedReplayBuffer

language: python
repo: apache__airflow
path: providers/amazon/tests/unit/amazon/aws/triggers/test_redshift_data.py
class_span: { "start": 1239, "end": 5309 }
source:
class ____: def test_redshift_data_trigger_serialization(self): """ Asserts that the RedshiftDataTrigger correctly serializes its arguments and classpath. """ trigger = RedshiftDataTrigger( statement_id=[], task_id=TEST_TASK_ID, aws_conn_id=TEST_CONN_ID, poll_interval=POLL_INTERVAL, ) classpath, kwargs = trigger.serialize() assert classpath == "airflow.providers.amazon.aws.triggers.redshift_data.RedshiftDataTrigger" assert kwargs == { "statement_id": [], "task_id": TEST_TASK_ID, "poll_interval": POLL_INTERVAL, "aws_conn_id": TEST_CONN_ID, "region_name": None, "botocore_config": None, "verify": None, } @pytest.mark.asyncio @pytest.mark.parametrize( ("return_value", "response"), [ ( True, TriggerEvent({"status": "success", "statement_id": "uuid"}), ), ( False, TriggerEvent( {"status": "error", "message": f"{TEST_TASK_ID} failed", "statement_id": "uuid"} ), ), ], ) @mock.patch( "airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.check_query_is_finished_async" ) @mock.patch( "airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.is_still_running", return_value=False, ) async def test_redshift_data_trigger_run( self, mocked_is_still_running, mock_check_query_is_finised_async, return_value, response ): """ Tests that RedshiftDataTrigger only fires once the query execution reaches a successful state. """ mock_check_query_is_finised_async.return_value = return_value trigger = RedshiftDataTrigger( statement_id="uuid", task_id=TEST_TASK_ID, poll_interval=POLL_INTERVAL, aws_conn_id=TEST_CONN_ID, ) generator = trigger.run() actual = await generator.asend(None) assert response == actual @pytest.mark.asyncio @pytest.mark.parametrize( ("raised_exception", "expected_response"), [ ( RedshiftDataQueryFailedError("Failed"), { "status": "error", "statement_id": "uuid", "message": "Failed", "type": FAILED_STATE, }, ), ( RedshiftDataQueryAbortedError("Aborted"), { "status": "error", "statement_id": "uuid", "message": "Aborted", "type": ABORTED_STATE, }, ), ( Exception(f"{TEST_TASK_ID} failed"), {"status": "error", "statement_id": "uuid", "message": f"{TEST_TASK_ID} failed"}, ), ], ) @mock.patch( "airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.check_query_is_finished_async" ) @mock.patch( "airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.is_still_running", return_value=False, ) async def test_redshift_data_trigger_exception( self, mocked_is_still_running, mock_check_query_is_finised_async, raised_exception, expected_response ): """ Test that RedshiftDataTrigger fires the correct event in case of an error. """ mock_check_query_is_finised_async.side_effect = raised_exception trigger = RedshiftDataTrigger( statement_id="uuid", task_id=TEST_TASK_ID, poll_interval=POLL_INTERVAL, aws_conn_id=TEST_CONN_ID, ) task = [i async for i in trigger.run()] assert len(task) == 1 assert TriggerEvent(expected_response) in task
target: TestRedshiftDataTrigger

language: python
repo: cython__cython
path: Cython/Compiler/ExprNodes.py
class_span: { "start": 375043, "end": 381633 }
source:
class ____(SequenceNode): # Tuple constructor. type = tuple_type is_partly_literal = False gil_message = "Constructing Python tuple" def infer_type(self, env): if self.mult_factor or not self.args: return tuple_type arg_types = [arg.infer_type(env) for arg in self.args] if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused for type in arg_types): return tuple_type return env.declare_tuple_type(self.pos, arg_types).type def analyse_types(self, env, skip_children=False): # reset before re-analysing if self.is_literal: self.is_literal = False if self.is_partly_literal: self.is_partly_literal = False if len(self.args) == 0: self.is_temp = False self.is_literal = True return self if not skip_children: for i, arg in enumerate(self.args): if arg.is_starred: arg.starred_expr_allowed_here = True self.args[i] = arg.analyse_types(env) if (not self.mult_factor and not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused) for arg in self.args)): self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type self.is_temp = 1 return self node = SequenceNode.analyse_types(self, env, skip_children=True) node = node._create_merge_node_if_necessary(env) if not node.is_sequence_constructor: return node if not all(child.is_literal for child in node.args): return node if not node.mult_factor or ( node.mult_factor.is_literal and isinstance(node.mult_factor.constant_result, int)): node.is_temp = False node.is_literal = True else: if not node.mult_factor.type.is_pyobject and not node.mult_factor.type.is_int: node.mult_factor = node.mult_factor.coerce_to_pyobject(env) node.is_temp = True node.is_partly_literal = True return node def analyse_as_type(self, env): # ctuple type if not self.args: return None item_types = [arg.analyse_as_type(env) for arg in self.args] if any(t is None for t in item_types): return None entry = env.declare_tuple_type(self.pos, item_types) return entry.type def coerce_to(self, dst_type, env): if self.type.is_ctuple: if dst_type.is_ctuple and self.type.size == dst_type.size: return self.coerce_to_ctuple(dst_type, env) elif dst_type is tuple_type or dst_type is py_object_type: coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args] return TupleNode( self.pos, args=coerced_args, type=tuple_type, mult_factor=self.mult_factor, is_temp=1, ).analyse_types(env, skip_children=True) else: return self.coerce_to_pyobject(env).coerce_to(dst_type, env) elif dst_type.is_ctuple and not self.mult_factor: return self.coerce_to_ctuple(dst_type, env) else: return SequenceNode.coerce_to(self, dst_type, env) def as_list(self): constant_result = self.constant_result if isinstance(constant_result, tuple): constant_result = list(constant_result) return ListNode.from_node( self, args=self.args, mult_factor=self.mult_factor, constant_result=constant_result) def is_simple(self): # either temp or constant => always simple return True def nonlocally_immutable(self): # either temp or constant => always safe return True def calculate_result_code(self): return self.result_code def calculate_constant_result(self): if self.mult_factor: raise ValueError() # may exceed the compile time memory self.constant_result = tuple([ arg.constant_result for arg in self.args]) def compile_time_value(self, denv): values = self.compile_time_value_list(denv) assert self.mult_factor is None, self.mult_factor # set only after parsing try: return tuple(values) except Exception as e: self.compile_time_value_error(e) def 
generate_operation_code(self, code): if len(self.args) == 0: self.result_code = code.name_in_module_state(Naming.empty_tuple) return if self.is_literal or self.is_partly_literal: # The "mult_factor" is part of the deduplication if it is also constant, i.e. when # we deduplicate the multiplied result. Otherwise, only deduplicate the constant part. dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args) tuple_target = code.get_py_const('tuple', dedup_key=dedup_key) const_code = code.get_cached_constants_writer(tuple_target) if const_code is not None: # constant is not yet initialised const_code.mark_pos(self.pos) self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal) const_code.put_giveref(tuple_target, py_object_type) if self.is_literal: self.result_code = tuple_target elif self.mult_factor.type.is_int: code.globalstate.use_utility_code( UtilityCode.load_cached("PySequenceMultiply", "ObjectHandling.c")) code.putln('%s = __Pyx_PySequence_Multiply(%s, %s); %s' % ( self.result(), tuple_target, self.mult_factor.result(), code.error_goto_if_null(self.result(), self.pos) )) self.generate_gotref(code) else: code.putln('%s = PyNumber_Multiply(%s, %s); %s' % ( self.result(), tuple_target, self.mult_factor.py_result(), code.error_goto_if_null(self.result(), self.pos) )) self.generate_gotref(code) else: self.type.entry.used = True self.generate_sequence_packing_code(code)
target: TupleNode

language: python
repo: coleifer__peewee
path: tests/base.py
class_span: { "start": 4578, "end": 4808 }
source:
class ____(logging.Handler):
    def __init__(self, *args, **kwargs):
        self.queries = []
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        self.queries.append(record)
target: QueryLogHandler

language: python
repo: kennethreitz__tablib
path: src/tablib/packages/dbfpy/fields.py
class_span: { "start": 1578, "end": 6732 }
source:
class ____: """Abstract field definition. Child classes must override ``type`` class attribute to provide datatype information of the field definition. For more info about types visit `http://www.clicketyclick.dk/databases/xbase/format/data_types.html` Also child classes must override ``defaultValue`` field to provide default value for the field value. If child class has fixed length ``length`` class attribute must be overridden and set to the valid value. None value means, that field isn't of fixed length. Note: ``name`` field must not be changed after instantiation. """ __slots__ = ("name", "decimalCount", "start", "end", "ignoreErrors") # length of the field, None in case of variable-length field, # or a number if this field is a fixed-length field length = None # field type. for more information about fields types visit # `http://www.clicketyclick.dk/databases/xbase/format/data_types.html` # must be overridden in child classes typeCode = None # default value for the field. this field must be # overridden in child classes defaultValue = None def __init__(self, name, length=None, decimalCount=None, start=None, stop=None, ignoreErrors=False): """Initialize instance.""" assert self.typeCode is not None, "Type code must be overridden" assert self.defaultValue is not None, "Default value must be overridden" # fix arguments if len(name) > 10: raise ValueError("Field name \"%s\" is too long" % name) name = str(name).upper() if self.__class__.length is None: if length is None: raise ValueError("[%s] Length isn't specified" % name) length = int(length) if length <= 0: raise ValueError("[%s] Length must be a positive integer" % name) else: length = self.length if decimalCount is None: decimalCount = 0 # set fields self.name = name # FIXME: validate length according to the specification at # http://www.clicketyclick.dk/databases/xbase/format/data_types.html self.length = length self.decimalCount = decimalCount self.ignoreErrors = ignoreErrors self.start = start self.end = stop def __eq__(self, other): return repr(self) == repr(other) def __ne__(self, other): return repr(self) != repr(other) def __lt__(self, other): return repr(self) < repr(other) def __hash__(self): return hash(self.name) def fromString(cls, string, start, ignoreErrors=False): """Decode dbf field definition from the string data. Arguments: string: a string, dbf definition is decoded from. length of the string must be 32 bytes. start: position in the database file. ignoreErrors: initial error processing mode for the new field (boolean) """ assert len(string) == 32 _length = string[16] return cls(utils.unzfill(string)[:11].decode('utf-8'), _length, string[17], start, start + _length, ignoreErrors=ignoreErrors) fromString = classmethod(fromString) def toString(self): """Return encoded field definition. Return: Return value is a string object containing encoded definition of this field. """ _name = self.name.ljust(11, '\0') return ( _name + self.typeCode + # data address chr(0) * 4 + chr(self.length) + chr(self.decimalCount) + chr(0) * 14 ) def __repr__(self): return "%-10s %1s %3d %3d" % self.fieldInfo() def fieldInfo(self): """Return field information. Return: Return value is a (name, type, length, decimals) tuple. 
""" return (self.name, self.typeCode, self.length, self.decimalCount) def rawFromRecord(self, record): """Return a "raw" field value from the record string.""" return record[self.start:self.end] def decodeFromRecord(self, record): """Return decoded field value from the record string.""" try: return self.decodeValue(self.rawFromRecord(record)) except Exception: if self.ignoreErrors: return utils.INVALID_VALUE else: raise def decodeValue(self, value): """Return decoded value from string value. This method shouldn't be used publicly. It's called from the `decodeFromRecord` method. This is an abstract method and it must be overridden in child classes. """ raise NotImplementedError def encodeValue(self, value): """Return str object containing encoded field value. This is an abstract method and it must be overridden in child classes. """ raise NotImplementedError # real classes
target: DbfFieldDef

language: python
repo: dagster-io__dagster
path: python_modules/dagster/dagster/components/lib/shim_components/sensor.py
class_span: { "start": 226, "end": 552 }
source:
class ____(ShimScaffolder):
    def get_text(self, request: ScaffoldRequest) -> str:
        return f"""import dagster as dg


@dg.sensor(target=None)
def {request.target_path.stem}(context: dg.SensorEvaluationContext) -> dg.SensorResult:
    return dg.SensorResult()
"""


scaffold_with(SensorScaffolder)(sensor)
target: SensorScaffolder

language: python
repo: tensorflow__tensorflow
path: tensorflow/python/autograph/pyct/qual_names.py
class_span: { "start": 1131, "end": 1227 }
source:
class ____(collections.namedtuple('Symbol', ['name'])):
    """Represents a Python symbol."""
target: Symbol

language: python
repo: numpy__numpy
path: numpy/f2py/tests/test_docs.py
class_span: { "start": 826, "end": 1930 }
source:
class ____(util.F2PyTest):
    # options = ['--debug-capi', '--build-dir', '/tmp/build-f2py']
    sources = [_path('asterisk1.f90'), _path('asterisk2.f90'), _path('ftype.f')]

    def test_asterisk1(self):
        foo = self.module.foo1
        assert_equal(foo(), b'123456789A12')

    def test_asterisk2(self):
        foo = self.module.foo2
        assert_equal(foo(2), b'12')
        assert_equal(foo(12), b'123456789A12')
        assert_equal(foo(20), b'123456789A123456789B')

    def test_ftype(self):
        ftype = self.module
        ftype.foo()
        assert_equal(ftype.data.a, 0)
        ftype.data.a = 3
        ftype.data.x = [1, 2, 3]
        assert_equal(ftype.data.a, 3)
        assert_array_equal(ftype.data.x, np.array([1, 2, 3], dtype=np.float32))
        ftype.data.x[1] = 45
        assert_array_equal(ftype.data.x, np.array([1, 45, 3], dtype=np.float32))
        # gh-26718 Cleanup for repeated test runs
        ftype.data.a = 0

    # TODO: implement test methods for other example Fortran codes
target: TestDocAdvanced

language: python
repo: huggingface__transformers
path: src/transformers/models/roc_bert/modeling_roc_bert.py
class_span: { "start": 15869, "end": 16582 }
source:
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->RoCBert,BERT->ROC_BERT
target: RoCBertSelfOutput

language: python
repo: scipy__scipy
path: scipy/stats/tests/test_distributions.py
class_span: { "start": 171179, "end": 174346 }
source:
class ____: def test_compare_t(self): # Verify that jf_skew_t with a=b recovers the t distribution with 2a # degrees of freedom a = b = 5 df = a * 2 x = [-1.0, 0.0, 1.0, 2.0] q = [0.0, 0.1, 0.25, 0.75, 0.90, 1.0] jf = stats.jf_skew_t(a, b) t = stats.t(df) assert_allclose(jf.pdf(x), t.pdf(x)) assert_allclose(jf.cdf(x), t.cdf(x)) assert_allclose(jf.ppf(q), t.ppf(q)) assert_allclose(jf.stats('mvsk'), t.stats('mvsk')) @pytest.fixture def gamlss_pdf_data(self): """Sample data points computed using the `ST5` distribution from the GAMLSS package in R. The pdf has been calculated for (a,b)=(2,3), (a,b)=(8,4), and (a,b)=(12,13) for x in `np.linspace(-10, 10, 41)`. N.B. the `ST5` distribution in R uses an alternative parameterization in terms of nu and tau, where: - nu = (a - b) / (a * b * (a + b)) ** 0.5 - tau = 2 / (a + b) """ data = np.load( Path(__file__).parent / "data/jf_skew_t_gamlss_pdf_data.npy" ) return np.rec.fromarrays(data, names="x,pdf,a,b") @pytest.mark.parametrize("a,b", [(2, 3), (8, 4), (12, 13)]) def test_compare_with_gamlss_r(self, gamlss_pdf_data, a, b): """Compare the pdf with a table of reference values. The table of reference values was produced using R, where the Jones and Faddy skew t distribution is available in the GAMLSS package as `ST5`. """ data = gamlss_pdf_data[ (gamlss_pdf_data["a"] == a) & (gamlss_pdf_data["b"] == b) ] x, pdf = data["x"], data["pdf"] assert_allclose(pdf, stats.jf_skew_t(a, b).pdf(x), rtol=1e-12) # Test data for TestSkewNorm.test_noncentral_moments() # The expected noncentral moments were computed by Wolfram Alpha. # In Wolfram Alpha, enter # SkewNormalDistribution[0, 1, a] moment # with `a` replaced by the desired shape parameter. In the results, there # should be a table of the first four moments. Click on "More" to get more # moments. The expected moments start with the first moment (order = 1). _skewnorm_noncentral_moments = [ (2, [2*np.sqrt(2/(5*np.pi)), 1, 22/5*np.sqrt(2/(5*np.pi)), 3, 446/25*np.sqrt(2/(5*np.pi)), 15, 2682/25*np.sqrt(2/(5*np.pi)), 105, 107322/125*np.sqrt(2/(5*np.pi))]), (0.1, [np.sqrt(2/(101*np.pi)), 1, 302/101*np.sqrt(2/(101*np.pi)), 3, (152008*np.sqrt(2/(101*np.pi)))/10201, 15, (107116848*np.sqrt(2/(101*np.pi)))/1030301, 105, (97050413184*np.sqrt(2/(101*np.pi)))/104060401]), (-3, [-3/np.sqrt(5*np.pi), 1, -63/(10*np.sqrt(5*np.pi)), 3, -2529/(100*np.sqrt(5*np.pi)), 15, -30357/(200*np.sqrt(5*np.pi)), 105, -2428623/(2000*np.sqrt(5*np.pi)), 945, -242862867/(20000*np.sqrt(5*np.pi)), 10395, -29143550277/(200000*np.sqrt(5*np.pi)), 135135]), ]
target: TestJFSkewT

language: python
repo: docker__docker-py
path: docker/context/api.py
class_span: { "start": 208, "end": 6326 }
source:
class ____: """Context API. Contains methods for context management: create, list, remove, get, inspect. """ DEFAULT_CONTEXT = Context("default", "swarm") @classmethod def create_context( cls, name, orchestrator=None, host=None, tls_cfg=None, default_namespace=None, skip_tls_verify=False): """Creates a new context. Returns: (Context): a Context object. Raises: :py:class:`docker.errors.MissingContextParameter` If a context name is not provided. :py:class:`docker.errors.ContextAlreadyExists` If a context with the name already exists. :py:class:`docker.errors.ContextException` If name is default. Example: >>> from docker.context import ContextAPI >>> ctx = ContextAPI.create_context(name='test') >>> print(ctx.Metadata) { "Name": "test", "Metadata": {}, "Endpoints": { "docker": { "Host": "unix:///var/run/docker.sock", "SkipTLSVerify": false } } } """ if not name: raise errors.MissingContextParameter("name") if name == "default": raise errors.ContextException( '"default" is a reserved context name') ctx = Context.load_context(name) if ctx: raise errors.ContextAlreadyExists(name) endpoint = "docker" if orchestrator and orchestrator != "swarm": endpoint = orchestrator ctx = Context(name, orchestrator) ctx.set_endpoint( endpoint, host, tls_cfg, skip_tls_verify=skip_tls_verify, def_namespace=default_namespace) ctx.save() return ctx @classmethod def get_context(cls, name=None): """Retrieves a context object. Args: name (str): The name of the context Example: >>> from docker.context import ContextAPI >>> ctx = ContextAPI.get_context(name='test') >>> print(ctx.Metadata) { "Name": "test", "Metadata": {}, "Endpoints": { "docker": { "Host": "unix:///var/run/docker.sock", "SkipTLSVerify": false } } } """ if not name: name = get_current_context_name() if name == "default": return cls.DEFAULT_CONTEXT return Context.load_context(name) @classmethod def contexts(cls): """Context list. Returns: (Context): List of context objects. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ names = [] for dirname, dirnames, fnames in os.walk(get_meta_dir()): for filename in fnames + dirnames: if filename == METAFILE: try: data = json.load( open(os.path.join(dirname, filename))) names.append(data["Name"]) except Exception as e: raise errors.ContextException( f"Failed to load metafile {filename}: {e}", ) from e contexts = [cls.DEFAULT_CONTEXT] for name in names: contexts.append(Context.load_context(name)) return contexts @classmethod def get_current_context(cls): """Get current context. Returns: (Context): current context object. """ return cls.get_context() @classmethod def set_current_context(cls, name="default"): ctx = cls.get_context(name) if not ctx: raise errors.ContextNotFound(name) err = write_context_name_to_docker_config(name) if err: raise errors.ContextException( f'Failed to set current context: {err}') @classmethod def remove_context(cls, name): """Remove a context. Similar to the ``docker context rm`` command. Args: name (str): The name of the context Raises: :py:class:`docker.errors.MissingContextParameter` If a context name is not provided. :py:class:`docker.errors.ContextNotFound` If a context with the name does not exist. :py:class:`docker.errors.ContextException` If name is default. 
Example: >>> from docker.context import ContextAPI >>> ContextAPI.remove_context(name='test') >>> """ if not name: raise errors.MissingContextParameter("name") if name == "default": raise errors.ContextException( 'context "default" cannot be removed') ctx = Context.load_context(name) if not ctx: raise errors.ContextNotFound(name) if name == get_current_context_name(): write_context_name_to_docker_config(None) ctx.remove() @classmethod def inspect_context(cls, name="default"): """Remove a context. Similar to the ``docker context inspect`` command. Args: name (str): The name of the context Raises: :py:class:`docker.errors.MissingContextParameter` If a context name is not provided. :py:class:`docker.errors.ContextNotFound` If a context with the name does not exist. Example: >>> from docker.context import ContextAPI >>> ContextAPI.remove_context(name='test') >>> """ if not name: raise errors.MissingContextParameter("name") if name == "default": return cls.DEFAULT_CONTEXT() ctx = Context.load_context(name) if not ctx: raise errors.ContextNotFound(name) return ctx()
target: ContextAPI

language: python
repo: scipy__scipy
path: scipy/special/tests/test_basic.py
class_span: { "start": 179521, "end": 192762 }
source:
class ____: def _series(self, v, z, n=100): """Compute Struve function & error estimate from its power series.""" k = arange(0, n) r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5) err = abs(r).max() * finfo(double).eps * n return r.sum(), err def test_vs_series(self): """Check Struve function versus its power series""" for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]: for z in [1, 10, 19, 21, 30]: value, err = self._series(v, z) assert_allclose(special.struve(v, z), value, rtol=0, atol=err), (v, z) def test_some_values(self): assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7) assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8) assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12) assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11) assert_equal(special.struve(-12, -41), -special.struve(-12, 41)) assert_equal(special.struve(+12, -41), -special.struve(+12, 41)) assert_equal(special.struve(-11, -41), +special.struve(-11, 41)) assert_equal(special.struve(+11, -41), +special.struve(+11, 41)) assert_(isnan(special.struve(-7.1, -1))) assert_(isnan(special.struve(-10.1, -1))) def test_regression_679(self): """Regression test for #679""" assert_allclose(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8)) assert_allclose(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8)) assert_allclose(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8)) def test_chi2_smalldf(): assert_allclose(special.chdtr(0.6, 3), 0.957890536704110, atol=1.5e-7, rtol=0) def test_ch2_inf(): assert_equal(special.chdtr(0.7,np.inf), 1.0) @pytest.mark.parametrize("x", [-np.inf, -1.0, -0.0, 0.0, np.inf, np.nan]) def test_chi2_v_nan(x): assert np.isnan(special.chdtr(np.nan, x)) @pytest.mark.parametrize("v", [-np.inf, -1.0, -0.0, 0.0, np.inf, np.nan]) def test_chi2_x_nan(v): assert np.isnan(special.chdtr(v, np.nan)) @pytest.mark.parametrize("x", [-np.inf, -1.0, -0.0, 0.0, np.inf, np.nan]) def test_chi2c_v_nan(x): assert np.isnan(special.chdtrc(np.nan, x)) @pytest.mark.parametrize("v", [-np.inf, -1.0, -0.0, 0.0, np.inf, np.nan]) def test_chi2c_x_nan(v): assert np.isnan(special.chdtrc(v, np.nan)) def test_chi2_edgecases_gh20972(): # Tests that a variety of edgecases for chi square distribution functions # correctly return NaN when and only when they are supposed to, when # computed through different related ufuncs. See gh-20972. 
v = np.asarray([-0.01, 0, 0.01, 1, np.inf])[:, np.newaxis] x = np.asarray([-np.inf, -0.01, 0, 0.01, np.inf]) # Check that `gammainc` is NaN when it should be and finite otherwise ref = special.gammainc(v / 2, x / 2) mask = (x < 0) | (v < 0) | (x == 0) & (v == 0) | np.isinf(v) & np.isinf(x) assert np.all(np.isnan(ref[mask])) assert np.all(np.isfinite(ref[~mask])) # Use `gammainc` as a reference for the rest assert_allclose(special.chdtr(v, x), ref) assert_allclose(special.gdtr(1, v / 2, x / 2), ref) assert_allclose(1 - special.gammaincc(v / 2, x / 2), ref) assert_allclose(1 - special.chdtrc(v, x), ref) assert_allclose(1 - special.gdtrc(1, v / 2, x / 2), ref) def test_chi2c_smalldf(): assert_allclose(special.chdtrc(0.6, 3), 1 - 0.957890536704110, atol=1.5e-7, rtol=0) def test_chi2_inv_smalldf(): assert_allclose(special.chdtri(0.6, 1 - 0.957890536704110), 3, atol=1.5e-7, rtol=0) def test_agm_simple(): rtol = 1e-13 # Gauss's constant assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186, rtol=rtol) # These values were computed using Wolfram Alpha, with the # function ArithmeticGeometricMean[a, b]. agm13 = 1.863616783244897 agm15 = 2.604008190530940 agm35 = 3.936235503649555 assert_allclose(special.agm([[1], [3]], [1, 3, 5]), [[1, agm13, agm15], [agm13, 3, agm35]], rtol=rtol) # Computed by the iteration formula using mpmath, # with mpmath.mp.prec = 1000: agm12 = 1.4567910310469068 assert_allclose(special.agm(1, 2), agm12, rtol=rtol) assert_allclose(special.agm(2, 1), agm12, rtol=rtol) assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol) assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol) assert_allclose(special.agm(13, 123456789.5), 11111458.498599306, rtol=rtol) assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol) assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol) assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178, rtol=rtol) assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177, rtol=rtol) assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152, rtol=rtol) fi = np.finfo(1.0) assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305, rtol=rtol) assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308, rtol=rtol) assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308, rtol=rtol) # zero, nan and inf cases. 
assert_equal(special.agm(0, 0), 0) assert_equal(special.agm(99, 0), 0) assert_equal(special.agm(-1, 10), np.nan) assert_equal(special.agm(0, np.inf), np.nan) assert_equal(special.agm(np.inf, 0), np.nan) assert_equal(special.agm(0, -np.inf), np.nan) assert_equal(special.agm(-np.inf, 0), np.nan) assert_equal(special.agm(np.inf, -np.inf), np.nan) assert_equal(special.agm(-np.inf, np.inf), np.nan) assert_equal(special.agm(1, np.nan), np.nan) assert_equal(special.agm(np.nan, -1), np.nan) assert_equal(special.agm(1, np.inf), np.inf) assert_equal(special.agm(np.inf, 1), np.inf) assert_equal(special.agm(-1, -np.inf), -np.inf) assert_equal(special.agm(-np.inf, -1), -np.inf) def test_legacy(): # Legacy behavior: truncating arguments to integers with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "floating point number truncated to an integer", RuntimeWarning) assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3)) assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3)) assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3)) assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3)) assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3)) assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3)) assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3)) assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3)) assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3)) # This lock can be removed once errstate is made thread-safe (see gh-21956) @pytest.fixture def errstate_lock(): import threading return threading.Lock() @with_special_errors def test_error_raising(errstate_lock): with errstate_lock: with special.errstate(all='raise'): assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j) def test_xlogy(): def xfunc(x, y): with np.errstate(invalid='ignore'): if x == 0 and not np.isnan(y): return x else: return x*np.log(y) z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float) z2 = np.r_[z1, [(0, 1j), (1, 1j)]] w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1]) assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13) w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1]) assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13) def test_xlog1py(): def xfunc(x, y): with np.errstate(invalid='ignore'): if x == 0 and not np.isnan(y): return x else: return x * np.log1p(y) z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0), (1, 1e-30)], dtype=float) w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1]) assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13) def test_entr(): def xfunc(x): if x < 0: return -np.inf else: return -special.xlogy(x, x) values = (0, 0.5, 1.0, np.inf) signs = [-1, 1] arr = [] for sgn, v in itertools.product(signs, values): arr.append(sgn * v) z = np.array(arr, dtype=float) w = np.vectorize(xfunc, otypes=[np.float64])(z) assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13) def test_kl_div(): def xfunc(x, y): if x < 0 or y < 0 or (y == 0 and x != 0): # extension of natural domain to preserve convexity return np.inf elif np.isposinf(x) or np.isposinf(y): # limits within the natural domain return np.inf elif x == 0: return y else: return special.xlogy(x, x/y) - x + y values = (0, 0.5, 1.0) signs = [-1, 1] arr = [] for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values): arr.append((sgna*va, sgnb*vb)) z = np.array(arr, dtype=float) w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13) def 
test_rel_entr(): def xfunc(x, y): if x > 0 and y > 0: return special.xlogy(x, x/y) elif x == 0 and y >= 0: return 0 else: return np.inf values = (0, 0.5, 1.0) signs = [-1, 1] arr = [] for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values): arr.append((sgna*va, sgnb*vb)) z = np.array(arr, dtype=float) w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13) def test_rel_entr_gh_20710_near_zero(): # Check accuracy of inputs which are very close inputs = np.array([ # x, y (0.9456657713430001, 0.9456657713430094), (0.48066098564791515, 0.48066098564794774), (0.786048657854401, 0.7860486578542367), ]) # Known values produced using `x * mpmath.log(x / y)` with dps=30 expected = [ -9.325873406851269e-15, -3.258504577274724e-14, 1.6431300764454033e-13, ] x = inputs[:, 0] y = inputs[:, 1] assert_allclose(special.rel_entr(x, y), expected, rtol=1e-13, atol=0) def test_rel_entr_gh_20710_overflow(): special.seterr(all='ignore') inputs = np.array([ # x, y # Overflow (4, 2.22e-308), # Underflow (1e-200, 1e+200), # Subnormal (2.22e-308, 1e15), ]) # Known values produced using `x * mpmath.log(x / y)` with dps=30 expected = [ 2839.139983229607, -9.210340371976183e-198, -1.6493212008074475e-305, ] x = inputs[:, 0] y = inputs[:, 1] assert_allclose(special.rel_entr(x, y), expected, rtol=1e-13, atol=0) def test_huber(): assert_equal(special.huber(-1, 1.5), np.inf) assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5)) assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2)) def xfunc(delta, r): if delta < 0: return np.inf elif np.abs(r) < delta: return 0.5 * np.square(r) else: return delta * (np.abs(r) - 0.5 * delta) z = np.random.randn(10, 2) w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13) def test_pseudo_huber(): def xfunc(delta, r): if delta < 0: return np.inf elif (not delta) or (not r): return 0 else: return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1) z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]]) w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13) def test_pseudo_huber_small_r(): delta = 1.0 r = 1e-18 y = special.pseudo_huber(delta, r) # expected computed with mpmath: # import mpmath # mpmath.mp.dps = 200 # r = mpmath.mpf(1e-18) # expected = float(mpmath.sqrt(1 + r**2) - 1) expected = 5.0000000000000005e-37 assert_allclose(y, expected, rtol=1e-13) def test_runtime_warning(): with pytest.warns(RuntimeWarning, match=r'Too many predicted coefficients'): mathieu_odd_coef(1000, 1000) with pytest.warns(RuntimeWarning, match=r'Too many predicted coefficients'): mathieu_even_coef(1000, 1000)
TestStruve
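The tests above pin down the special-value conventions of `xlogy`, `kl_div`, and `rel_entr`. A minimal standalone sketch of those conventions, assuming only NumPy and `scipy.special` are available (the printed values follow the reference `xfunc` definitions used in the tests):

import numpy as np
from scipy import special

# xlogy(x, y) is defined to be 0 when x == 0 (and y is not NaN), avoiding 0 * log(0).
print(special.xlogy(0.0, 0.0))    # 0.0
print(special.xlogy(2.0, 3.0))    # 2 * log(3) ~= 2.1972

# kl_div extends x*log(x/y) - x + y by convexity: +inf outside the natural domain.
print(special.kl_div(1.0, 0.0))   # inf
print(special.kl_div(0.0, 1.0))   # 1.0 (the x -> 0 limit of x*log(x/y) - x + y)

# rel_entr is the bare relative-entropy term x*log(x/y); 0 when x == 0 and y >= 0.
print(special.rel_entr(0.0, 5.0))             # 0.0
print(np.isinf(special.rel_entr(1.0, 0.0)))   # True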
python
ray-project__ray
python/ray/autoscaler/_private/gcp/node.py
{ "start": 4221, "end": 5176 }
class ____(UserDict, metaclass=abc.ABCMeta): """Abstraction around compute and tpu nodes""" NON_TERMINATED_STATUSES = None RUNNING_STATUSES = None STATUS_FIELD = None def __init__(self, base_dict: dict, resource: "GCPResource", **kwargs) -> None: super().__init__(base_dict, **kwargs) self.resource = resource assert isinstance(self.resource, GCPResource) def is_running(self) -> bool: return self.get(self.STATUS_FIELD) in self.RUNNING_STATUSES def is_terminated(self) -> bool: return self.get(self.STATUS_FIELD) not in self.NON_TERMINATED_STATUSES @abc.abstractmethod def get_labels(self) -> dict: return @abc.abstractmethod def get_external_ip(self) -> str: return @abc.abstractmethod def get_internal_ip(self) -> str: return def __repr__(self) -> str: return f"<{self.__class__.__name__}: {self.get('name')}>"
GCPNode
python
pola-rs__polars
py-polars/src/polars/expr/struct.py
{ "start": 379, "end": 11015 }
class ____: """Namespace for struct related expressions.""" _accessor = "struct" def __init__(self, expr: Expr) -> None: self._pyexpr = expr._pyexpr def __getitem__(self, item: str | int) -> Expr: if isinstance(item, str): return self.field(item) elif isinstance(item, int): return wrap_expr(self._pyexpr.struct_field_by_index(item)) else: msg = f"expected type 'int | str', got {qualified_type_name(item)!r} ({item!r})" raise TypeError(msg) def field(self, name: str | list[str], *more_names: str) -> Expr: """ Retrieve one or multiple `Struct` field(s) as a new Series. Parameters ---------- name Name of the struct field to retrieve. *more_names Additional struct field names. Examples -------- >>> df = pl.DataFrame( ... { ... "aaa": [1, 2], ... "bbb": ["ab", "cd"], ... "ccc": [True, None], ... "ddd": [[1, 2], [3]], ... } ... ).select(pl.struct("aaa", "bbb", "ccc", "ddd").alias("struct_col")) >>> df shape: (2, 1) ┌──────────────────────┐ │ struct_col │ │ --- │ │ struct[4] │ ╞══════════════════════╡ │ {1,"ab",true,[1, 2]} │ │ {2,"cd",null,[3]} │ └──────────────────────┘ Retrieve struct field(s) as Series: >>> df.select(pl.col("struct_col").struct.field("bbb")) shape: (2, 1) ┌─────┐ │ bbb │ │ --- │ │ str │ ╞═════╡ │ ab │ │ cd │ └─────┘ >>> df.select( ... pl.col("struct_col").struct.field("bbb"), ... pl.col("struct_col").struct.field("ddd"), ... ) shape: (2, 2) ┌─────┬───────────┐ │ bbb ┆ ddd │ │ --- ┆ --- │ │ str ┆ list[i64] │ ╞═════╪═══════════╡ │ ab ┆ [1, 2] │ │ cd ┆ [3] │ └─────┴───────────┘ Use wildcard expansion: >>> df.select(pl.col("struct_col").struct.field("*")) shape: (2, 4) ┌─────┬─────┬──────┬───────────┐ │ aaa ┆ bbb ┆ ccc ┆ ddd │ │ --- ┆ --- ┆ --- ┆ --- │ │ i64 ┆ str ┆ bool ┆ list[i64] │ ╞═════╪═════╪══════╪═══════════╡ │ 1 ┆ ab ┆ true ┆ [1, 2] │ │ 2 ┆ cd ┆ null ┆ [3] │ └─────┴─────┴──────┴───────────┘ Retrieve multiple fields by name: >>> df.select(pl.col("struct_col").struct.field("aaa", "bbb")) shape: (2, 2) ┌─────┬─────┐ │ aaa ┆ bbb │ │ --- ┆ --- │ │ i64 ┆ str │ ╞═════╪═════╡ │ 1 ┆ ab │ │ 2 ┆ cd │ └─────┴─────┘ Retrieve multiple fields by regex expansion: >>> df.select(pl.col("struct_col").struct.field("^a.*|b.*$")) shape: (2, 2) ┌─────┬─────┐ │ aaa ┆ bbb │ │ --- ┆ --- │ │ i64 ┆ str │ ╞═════╪═════╡ │ 1 ┆ ab │ │ 2 ┆ cd │ └─────┴─────┘ Notes ----- The `struct` namespace has implemented `__getitem__` so you can also access fields by index: >>> df.select(pl.col("struct_col").struct[1]) shape: (2, 1) ┌─────┐ │ bbb │ │ --- │ │ str │ ╞═════╡ │ ab │ │ cd │ └─────┘ """ if more_names: name = [*([name] if isinstance(name, str) else name), *more_names] if isinstance(name, list): return wrap_expr(self._pyexpr.struct_multiple_fields(name)) return wrap_expr(self._pyexpr.struct_field_by_name(name)) def unnest(self) -> Expr: """ Expand the struct into its individual fields. Alias for `Expr.struct.field("*")`. >>> df = pl.DataFrame( ... { ... "aaa": [1, 2], ... "bbb": ["ab", "cd"], ... "ccc": [True, None], ... "ddd": [[1, 2], [3]], ... } ... 
).select(pl.struct("aaa", "bbb", "ccc", "ddd").alias("struct_col")) >>> df shape: (2, 1) ┌──────────────────────┐ │ struct_col │ │ --- │ │ struct[4] │ ╞══════════════════════╡ │ {1,"ab",true,[1, 2]} │ │ {2,"cd",null,[3]} │ └──────────────────────┘ >>> df.select(pl.col("struct_col").struct.unnest()) shape: (2, 4) ┌─────┬─────┬──────┬───────────┐ │ aaa ┆ bbb ┆ ccc ┆ ddd │ │ --- ┆ --- ┆ --- ┆ --- │ │ i64 ┆ str ┆ bool ┆ list[i64] │ ╞═════╪═════╪══════╪═══════════╡ │ 1 ┆ ab ┆ true ┆ [1, 2] │ │ 2 ┆ cd ┆ null ┆ [3] │ └─────┴─────┴──────┴───────────┘ """ return self.field("*") def rename_fields(self, names: Sequence[str]) -> Expr: """ Rename the fields of the struct. Parameters ---------- names New names, given in the same order as the struct's fields. Examples -------- >>> df = pl.DataFrame( ... { ... "aaa": [1, 2], ... "bbb": ["ab", "cd"], ... "ccc": [True, None], ... "ddd": [[1, 2], [3]], ... } ... ).select(pl.struct("aaa", "bbb", "ccc", "ddd").alias("struct_col")) >>> df shape: (2, 1) ┌──────────────────────┐ │ struct_col │ │ --- │ │ struct[4] │ ╞══════════════════════╡ │ {1,"ab",true,[1, 2]} │ │ {2,"cd",null,[3]} │ └──────────────────────┘ >>> df.unnest("struct_col") shape: (2, 4) ┌─────┬─────┬──────┬───────────┐ │ aaa ┆ bbb ┆ ccc ┆ ddd │ │ --- ┆ --- ┆ --- ┆ --- │ │ i64 ┆ str ┆ bool ┆ list[i64] │ ╞═════╪═════╪══════╪═══════════╡ │ 1 ┆ ab ┆ true ┆ [1, 2] │ │ 2 ┆ cd ┆ null ┆ [3] │ └─────┴─────┴──────┴───────────┘ Rename fields: >>> df = df.select( ... pl.col("struct_col").struct.rename_fields(["www", "xxx", "yyy", "zzz"]) ... ) >>> df.unnest("struct_col") shape: (2, 4) ┌─────┬─────┬──────┬───────────┐ │ www ┆ xxx ┆ yyy ┆ zzz │ │ --- ┆ --- ┆ --- ┆ --- │ │ i64 ┆ str ┆ bool ┆ list[i64] │ ╞═════╪═════╪══════╪═══════════╡ │ 1 ┆ ab ┆ true ┆ [1, 2] │ │ 2 ┆ cd ┆ null ┆ [3] │ └─────┴─────┴──────┴───────────┘ Following a rename, the previous field names (obviously) cannot be referenced: >>> df.select(pl.col("struct_col").struct.field("aaa")) # doctest: +SKIP StructFieldNotFoundError: aaa """ return wrap_expr(self._pyexpr.struct_rename_fields(names)) def json_encode(self) -> Expr: """ Convert this struct to a string column with json values. Examples -------- >>> pl.DataFrame( ... {"a": [{"a": [1, 2], "b": [45]}, {"a": [9, 1, 3], "b": None}]} ... ).with_columns(pl.col("a").struct.json_encode().alias("encoded")) shape: (2, 2) ┌──────────────────┬────────────────────────┐ │ a ┆ encoded │ │ --- ┆ --- │ │ struct[2] ┆ str │ ╞══════════════════╪════════════════════════╡ │ {[1, 2],[45]} ┆ {"a":[1,2],"b":[45]} │ │ {[9, 1, 3],null} ┆ {"a":[9,1,3],"b":null} │ └──────────────────┴────────────────────────┘ """ return wrap_expr(self._pyexpr.struct_json_encode()) def with_fields( self, *exprs: IntoExpr | Iterable[IntoExpr], **named_exprs: IntoExpr, ) -> Expr: """ Add or overwrite fields of this struct. This is similar to `with_columns` on `DataFrame`. .. versionadded:: 0.20.27 Examples -------- >>> df = pl.DataFrame( ... { ... "coords": [{"x": 1, "y": 4}, {"x": 4, "y": 9}, {"x": 9, "y": 16}], ... "multiply": [10, 2, 3], ... } ... ) >>> df shape: (3, 2) ┌───────────┬──────────┐ │ coords ┆ multiply │ │ --- ┆ --- │ │ struct[2] ┆ i64 │ ╞═══════════╪══════════╡ │ {1,4} ┆ 10 │ │ {4,9} ┆ 2 │ │ {9,16} ┆ 3 │ └───────────┴──────────┘ >>> df = df.with_columns( ... pl.col("coords").struct.with_fields( ... pl.field("x").sqrt(), ... y_mul=pl.field("y") * pl.col("multiply"), ... ) ... 
) >>> df shape: (3, 2) ┌─────────────┬──────────┐ │ coords ┆ multiply │ │ --- ┆ --- │ │ struct[3] ┆ i64 │ ╞═════════════╪══════════╡ │ {1.0,4,40} ┆ 10 │ │ {2.0,9,18} ┆ 2 │ │ {3.0,16,48} ┆ 3 │ └─────────────┴──────────┘ >>> df.unnest("coords") shape: (3, 4) ┌─────┬─────┬───────┬──────────┐ │ x ┆ y ┆ y_mul ┆ multiply │ │ --- ┆ --- ┆ --- ┆ --- │ │ f64 ┆ i64 ┆ i64 ┆ i64 │ ╞═════╪═════╪═══════╪══════════╡ │ 1.0 ┆ 4 ┆ 40 ┆ 10 │ │ 2.0 ┆ 9 ┆ 18 ┆ 2 │ │ 3.0 ┆ 16 ┆ 48 ┆ 3 │ └─────┴─────┴───────┴──────────┘ Parameters ---------- *exprs Field(s) to add, specified as positional arguments. Accepts expression input. Strings are parsed as column names, other non-expression inputs are parsed as literals. **named_exprs Additional fields to add, specified as keyword arguments. The columns will be renamed to the keyword used. See Also -------- field """ structify = bool(int(os.environ.get("POLARS_AUTO_STRUCTIFY", 0))) pyexprs = parse_into_list_of_expressions( *exprs, **named_exprs, __structify=structify ) return wrap_expr(self._pyexpr.struct_with_fields(pyexprs))
ExprStructNameSpace
python
rapidsai__cudf
python/cudf_polars/cudf_polars/dsl/expressions/selection.py
{ "start": 530, "end": 1941 }
class ____(Expr): __slots__ = () _non_child = ("dtype",) def __init__(self, dtype: DataType, values: Expr, indices: Expr) -> None: self.dtype = dtype self.children = (values, indices) self.is_pointwise = False def do_evaluate( self, df: DataFrame, *, context: ExecutionContext = ExecutionContext.FRAME ) -> Column: """Evaluate this expression given a dataframe for context.""" values, indices = ( child.evaluate(df, context=context) for child in self.children ) n = values.size lo, hi = plc.reduce.minmax(indices.obj, stream=df.stream) if hi.to_py(stream=df.stream) >= n or lo.to_py(stream=df.stream) < -n: # type: ignore[operator] raise ValueError("gather indices are out of bounds") if indices.null_count: bounds_policy = plc.copying.OutOfBoundsPolicy.NULLIFY obj = plc.replace.replace_nulls( indices.obj, plc.Scalar.from_py(n, dtype=indices.obj.type(), stream=df.stream), stream=df.stream, ) else: bounds_policy = plc.copying.OutOfBoundsPolicy.DONT_CHECK obj = indices.obj table = plc.copying.gather( plc.Table([values.obj]), obj, bounds_policy, stream=df.stream ) return Column(table.columns()[0], dtype=self.dtype)
Gather
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramSpec4.py
{ "start": 284, "end": 2511 }
class ____: ... def with_request(f: Callable[Concatenate[Request, P], R]) -> Callable[P, R]: def inner(*args: P.args, **kwargs: P.kwargs) -> R: return f(Request(), *args, **kwargs) return inner @with_request def takes_int_str(request: Request, x: int, y: str) -> int: # use request return x + 7 takes_int_str(1, "A") # This should generate an error because the first arg # is the incorrect type. takes_int_str("B", "A") # This should generate an error because there are too # many parameters. takes_int_str(1, "A", 2) # This should generate an error because a ParamSpec can appear # only within the last type arg for Concatenate def decorator1(f: Callable[Concatenate[P, P], int]) -> Callable[P, int]: ... # This should generate an error because the last type arg # for Concatenate should be a ParamSpec. def decorator2(f: Callable[Concatenate[int, int], int]) -> Callable[P, int]: ... # This should generate an error because Concatenate is missing # its type arguments. def decorator3(f: Callable[Concatenate, int]) -> Callable[P, int]: ... def decorator4(func: Callable[P, None]) -> Callable[Concatenate[int, P], None]: def wrapper(x: int, /, *args: P.args, **kwargs: P.kwargs) -> None: ... return wrapper def func1(func: Callable[Concatenate[int, P], None]) -> Callable[P, None]: ... def func2(a: int, b: str, c: str) -> None: ... def func3(a: int, /, b: str, c: str) -> None: ... def func4(a: int, b: str, /, c: str) -> None: ... v1 = func1(func2) reveal_type(v1, expected_text="(b: str, c: str) -> None") v2 = func1(func3) reveal_type(v2, expected_text="(b: str, c: str) -> None") v3 = func1(func4) reveal_type(v3, expected_text="(b: str, /, c: str) -> None") def func5(__fn: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> R: ... def func6(name: str, *args: str): ... v5 = func5(func6, "a", "b", "c") # This should generate an error because 1 isn't assignable to str. v6 = func5(func6, "a", "b", "c", 1) def func7(name: str, **kwargs: str): ... v7 = func5(func7, "a", b="b", c="c") # This should generate an error because 1 isn't assignable to str. v8 = func5(func7, "a", b="b", c=1) T = TypeVar("T", covariant=True) X = TypeVar("X")
Request
python
pydata__xarray
xarray/core/indexing.py
{ "start": 13464, "end": 14716 }
class ____: """Base class for explicit indexer objects. ExplicitIndexer objects wrap a tuple of values given by their ``tuple`` property. These tuples should always have length equal to the number of dimensions on the indexed array. Do not instantiate BaseIndexer objects directly: instead, use one of the sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer. """ __slots__ = ("_key",) def __init__(self, key: tuple[Any, ...]): if type(self) is ExplicitIndexer: raise TypeError("cannot instantiate base ExplicitIndexer objects") self._key = tuple(key) @property def tuple(self) -> tuple[Any, ...]: return self._key def __repr__(self) -> str: return f"{type(self).__name__}({self.tuple})" @overload def as_integer_or_none(value: int) -> int: ... @overload def as_integer_or_none(value: None) -> None: ... def as_integer_or_none(value: int | None) -> int | None: return None if value is None else operator.index(value) def as_integer_slice(value: slice) -> slice: start = as_integer_or_none(value.start) stop = as_integer_or_none(value.stop) step = as_integer_or_none(value.step) return slice(start, stop, step)
ExplicitIndexer
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/test/identity_output_test.py
{ "start": 1279, "end": 2010 }
class ____(trt_test.TfTrtIntegrationTestBase): """Testing engine with the same tensor repeated as output via identity.""" def GraphFn(self, x): x1 = math_ops.exp(x) x1 = x1 + x out1 = array_ops.identity(x1, name='output_0') out2 = array_ops.identity(x1, name='output_1') iden1 = array_ops.identity(x1) out3 = array_ops.identity(iden1, name='output_2') return [out1, out2, out3] def GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 32]], [[100, 32]] * 3) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return ['TRTEngineOp_000'] if __name__ == '__main__': test.main()
IdentityTest
python
pytorch__pytorch
test/functorch/test_control_flow.py
{ "start": 331425, "end": 335354 }
class ____(torch.nn.Module): def forward(self, L_t_: "f32[2, 3]"): l_t_ = L_t_ sum_1: "f32[]" = l_t_.sum() to: "i64[]" = sum_1.to(torch.int64); sum_1 = None item: "Sym(u0)" = to.item(); to = None sin: "f32[2, 3]" = l_t_.sin() cond_fn_0 = self.cond_fn_0 body_fn_0 = self.body_fn_0 while_loop = torch.ops.higher_order.while_loop(cond_fn_0, body_fn_0, (2, 3, 1, 1, 1, 3, item, sin), ()); cond_fn_0 = body_fn_0 = item = sin = None getitem_8: "Sym(u15)" = while_loop[0] getitem_9: "Sym(u16)" = while_loop[1] getitem_10: "Sym(u17)" = while_loop[2] getitem_11: "Sym(u18)" = while_loop[3] getitem_12: "Sym(u19)" = while_loop[4] getitem_13: "Sym(u20)" = while_loop[5] getitem_14: "Sym(u21)" = while_loop[6] getitem_7: "f32[2, 3]" = while_loop[7]; while_loop = None add: "Sym(u15 + 1)" = getitem_8 + 1 add_1: "Sym(u16 + 1)" = getitem_9 + 1 add_2: "Sym(u17 + 1)" = getitem_10 + 1 add_3: "Sym(u18 + 1)" = getitem_11 + 1 add_4: "Sym(u19 + 1)" = getitem_12 + 1 add_5: "Sym(u20 + 1)" = getitem_13 + 1 add_6: "Sym(u21 + 1)" = getitem_14 + 1 add_7: "f32[2, 3]" = getitem_7 + 1 add_8: "f32[2, 3]" = getitem_8 + l_t_; getitem_8 = None add_9: "f32[2, 3]" = getitem_9 + l_t_; getitem_9 = None add_10: "f32[2, 3]" = getitem_10 + l_t_; getitem_10 = None add_11: "f32[2, 3]" = getitem_11 + l_t_; getitem_11 = None add_12: "f32[2, 3]" = getitem_12 + l_t_; getitem_12 = None add_13: "f32[2, 3]" = getitem_13 + l_t_; getitem_13 = None add_14: "f32[2, 3]" = getitem_14 + l_t_; getitem_14 = None add_15: "f32[2, 3]" = getitem_7 + l_t_; getitem_7 = l_t_ = None return (add, add_1, add_2, add_3, add_4, add_5, add_6, add_7, add_8, add_9, add_10, add_11, add_12, add_13, add_14, add_15) class cond_fn_0(torch.nn.Module): def forward(self, unbacked_symint: "Sym(u1)", unbacked_symint_0: "Sym(u2)", unbacked_symint_1: "Sym(u3)", unbacked_symint_2: "Sym(u4)", unbacked_symint_3: "Sym(u5)", unbacked_symint_4: "Sym(u6)", unbacked_symint_5: "Sym(u7)", child: "f32[2, 3]"): mul: "Sym(u3*u4)" = unbacked_symint_1 * unbacked_symint_2; unbacked_symint_1 = unbacked_symint_2 = None mul_1: "Sym(u3*u4*u5)" = mul * unbacked_symint_3; mul = unbacked_symint_3 = None mul_2: "Sym(u1*u2)" = unbacked_symint * unbacked_symint_0; unbacked_symint = unbacked_symint_0 = None lt: "Sym(u3*u4*u5 < u1*u2)" = mul_1 < mul_2; mul_1 = mul_2 = None return lt class body_fn_0(torch.nn.Module): def forward(self, unbacked_symint_6: "Sym(u8)", unbacked_symint_7: "Sym(u9)", unbacked_symint_8: "Sym(u10)", unbacked_symint_9: "Sym(u11)", unbacked_symint_10: "Sym(u12)", unbacked_symint_11: "Sym(u13)", unbacked_symint_12: "Sym(u14)", child_1: "f32[2, 3]"): add: "Sym(u14 + 1)" = unbacked_symint_12 + 1; unbacked_symint_12 = None child: "f32[2, 3]" = child_1 + 1; child_1 = None return (unbacked_symint_7, unbacked_symint_8, unbacked_symint_9, unbacked_symint_10, unbacked_symint_6, 0, add, child) """, # noqa: B950 ) @skipIfTorchDynamo("Skip because we're testing export") @parametrize("strict", [True, False]) @parametrize("dynamic", [True, False]) def test_while_loop_op_pytree_int_carry_export(self, strict, dynamic): m, args = WHILE_LOOP_TESTS["pytree_int_carry"] dynamic_shapes = {"x": {0: torch.export.Dim("dim_x")}} if dynamic else None ep = self._check_export(m, args, strict=strict, dynamic_shapes=dynamic_shapes) if strict and dynamic and not TEST_WITH_CROSSREF: self.assertExpectedInline( normalize_gm(ep.module().print_readable(print_output=False)), """\
GraphModule
python
getsentry__sentry
src/sentry/sentry_apps/api/bases/sentryapps.py
{ "start": 10763, "end": 11538 }
class ____(IntegrationPlatformEndpoint): def convert_args( self, request: Request, sentry_app_id_or_slug: int | str, *args: Any, **kwargs: Any ): if str(sentry_app_id_or_slug).isdecimal(): sentry_app = app_service.get_sentry_app_by_id(id=int(sentry_app_id_or_slug)) else: sentry_app = app_service.get_sentry_app_by_slug(slug=sentry_app_id_or_slug) if sentry_app is None: raise SentryAppError(message="Could not find the requested sentry app", status_code=404) self.check_object_permissions(request, sentry_app) sentry_sdk.get_isolation_scope().set_tag("sentry_app", sentry_app.slug) kwargs["sentry_app"] = sentry_app return (args, kwargs)
RegionSentryAppBaseEndpoint
python
networkx__networkx
networkx/algorithms/isomorphism/tests/test_vf2userfunc.py
{ "start": 6140, "end": 6350 }
class ____(TestNodeMatch_Graph): def setup_method(self): TestNodeMatch_Graph.setup_method(self) self.g1 = nx.DiGraph() self.g2 = nx.DiGraph() self.build()
TestEdgeMatch_DiGraph
python
scipy__scipy
benchmarks/benchmarks/stats_sampling.py
{ "start": 5192, "end": 6072 }
class ____(Benchmark): param_names = ['dist'] params = [allcontdists] def setup(self, dist): self.urng = np.random.default_rng(0xb235b58c1f616c59c18d8568f77d44d1) with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) try: self.rng = sampling.NumericalInversePolynomial( dist, random_state=self.urng ) except sampling.UNURANError: raise NotImplementedError(f"setup failed for {dist}") def time_pinv_setup(self, dist): with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) sampling.NumericalInversePolynomial( dist, random_state=self.urng ) def time_pinv_rvs(self, dist): self.rng.rvs(100000)
NumericalInversePolynomial
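The benchmark above only times setup and sampling; a minimal usage sketch of `NumericalInversePolynomial`, assuming a custom distribution object that exposes a `pdf` method (the `StandardNormal` class and seed below are illustrative):

import numpy as np
from scipy.stats import sampling

class StandardNormal:
    # Only a (possibly unnormalized) pdf is required; the generator handles normalization.
    def pdf(self, x):
        return np.exp(-0.5 * x * x)

urng = np.random.default_rng(12345)
rng = sampling.NumericalInversePolynomial(StandardNormal(), random_state=urng)
samples = rng.rvs(1000)
print(samples.mean(), samples.std())  # roughly 0 and 1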
python
doocs__leetcode
solution/0300-0399/0335.Self Crossing/Solution.py
{ "start": 0, "end": 611 }
class ____: def isSelfCrossing(self, distance: List[int]) -> bool: d = distance for i in range(3, len(d)): if d[i] >= d[i - 2] and d[i - 1] <= d[i - 3]: return True if i >= 4 and d[i - 1] == d[i - 3] and d[i] + d[i - 4] >= d[i - 2]: return True if ( i >= 5 and d[i - 2] >= d[i - 4] and d[i - 1] <= d[i - 3] and d[i] >= d[i - 2] - d[i - 4] and d[i - 1] + d[i - 5] >= d[i - 3] ): return True return False
Solution
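A quick sanity check of the three crossing cases handled above, calling the masked class under its target name `Solution` with hand-verifiable inputs (the example values are illustrative):

sol = Solution()

# Fourth segment crosses the first (d[3] >= d[1] and d[2] <= d[0]).
print(sol.isSelfCrossing([2, 1, 1, 2]))      # True

# Strictly growing spiral never crosses itself.
print(sol.isSelfCrossing([1, 2, 3, 4]))      # False

# Fifth segment meets the first exactly (the i >= 4 "touch" case).
print(sol.isSelfCrossing([1, 1, 2, 1, 1]))   # True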
python
openai__gym
gym/core.py
{ "start": 712, "end": 10073 }
class ____(Generic[ObsType, ActType]): r"""The main OpenAI Gym class. It encapsulates an environment with arbitrary behind-the-scenes dynamics. An environment can be partially or fully observed. The main API methods that users of this class need to know are: - :meth:`step` - Takes a step in the environment using an action returning the next observation, reward, if the environment terminated and observation information. - :meth:`reset` - Resets the environment to an initial state, returning the initial observation and observation information. - :meth:`render` - Renders the environment observation with modes depending on the output - :meth:`close` - Closes the environment, important for rendering where pygame is imported And set the following attributes: - :attr:`action_space` - The Space object corresponding to valid actions - :attr:`observation_space` - The Space object corresponding to valid observations - :attr:`reward_range` - A tuple corresponding to the minimum and maximum possible rewards - :attr:`spec` - An environment spec that contains the information used to initialise the environment from `gym.make` - :attr:`metadata` - The metadata of the environment, i.e. render modes - :attr:`np_random` - The random number generator for the environment Note: a default reward range set to :math:`(-\infty,+\infty)` already exists. Set it if you want a narrower range. """ # Set this in SOME subclasses metadata: Dict[str, Any] = {"render_modes": []} # define render_mode if your environment supports rendering render_mode: Optional[str] = None reward_range = (-float("inf"), float("inf")) spec: "EnvSpec" = None # Set these in ALL subclasses action_space: spaces.Space[ActType] observation_space: spaces.Space[ObsType] # Created _np_random: Optional[np.random.Generator] = None @property def np_random(self) -> np.random.Generator: """Returns the environment's internal :attr:`_np_random` that if not set will initialise with a random seed.""" if self._np_random is None: self._np_random, seed = seeding.np_random() return self._np_random @np_random.setter def np_random(self, value: np.random.Generator): self._np_random = value def step(self, action: ActType) -> Tuple[ObsType, float, bool, bool, dict]: """Run one timestep of the environment's dynamics. When end of episode is reached, you are responsible for calling :meth:`reset` to reset this environment's state. Accepts an action and returns either a tuple `(observation, reward, terminated, truncated, info)`. Args: action (ActType): an action provided by the agent Returns: observation (object): this will be an element of the environment's :attr:`observation_space`. This may, for instance, be a numpy array containing the positions and velocities of certain objects. reward (float): The amount of reward returned as a result of taking the action. terminated (bool): whether a `terminal state` (as defined under the MDP of the task) is reached. In this case further step() calls could return undefined results. truncated (bool): whether a truncation condition outside the scope of the MDP is satisfied. Typically a timelimit, but could also be used to indicate agent physically going out of bounds. Can be used to end the episode prematurely before a `terminal state` is reached. info (dictionary): `info` contains auxiliary diagnostic information (helpful for debugging, learning, and logging). 
This might, for instance, contain: metrics that describe the agent's performance state, variables that are hidden from observations, or individual reward terms that are combined to produce the total reward. It also can contain information that distinguishes truncation and termination, however this is deprecated in favour of returning two booleans, and will be removed in a future version. (deprecated) done (bool): A boolean value for if the episode has ended, in which case further :meth:`step` calls will return undefined results. A done signal may be emitted for different reasons: Maybe the task underlying the environment was solved successfully, a certain timelimit was exceeded, or the physics simulation has entered an invalid state. """ raise NotImplementedError def reset( self, *, seed: Optional[int] = None, options: Optional[dict] = None, ) -> Tuple[ObsType, dict]: """Resets the environment to an initial state and returns the initial observation. This method can reset the environment's random number generator(s) if ``seed`` is an integer or if the environment has not yet initialized a random number generator. If the environment already has a random number generator and :meth:`reset` is called with ``seed=None``, the RNG should not be reset. Moreover, :meth:`reset` should (in the typical use case) be called with an integer seed right after initialization and then never again. Args: seed (optional int): The seed that is used to initialize the environment's PRNG. If the environment does not already have a PRNG and ``seed=None`` (the default option) is passed, a seed will be chosen from some source of entropy (e.g. timestamp or /dev/urandom). However, if the environment already has a PRNG and ``seed=None`` is passed, the PRNG will *not* be reset. If you pass an integer, the PRNG will be reset even if it already exists. Usually, you want to pass an integer *right after the environment has been initialized and then never again*. Please refer to the minimal example above to see this paradigm in action. options (optional dict): Additional information to specify how the environment is reset (optional, depending on the specific environment) Returns: observation (object): Observation of the initial state. This will be an element of :attr:`observation_space` (typically a numpy array) and is analogous to the observation returned by :meth:`step`. info (dictionary): This dictionary contains auxiliary information complementing ``observation``. It should be analogous to the ``info`` returned by :meth:`step`. """ # Initialize the RNG if the seed is manually passed if seed is not None: self._np_random, seed = seeding.np_random(seed) def render(self) -> Optional[Union[RenderFrame, List[RenderFrame]]]: """Compute the render frames as specified by render_mode attribute during initialization of the environment. The set of supported modes varies per environment. (And some third-party environments may not support rendering at all.) By convention, if render_mode is: - None (default): no render is computed. - human: render return None. The environment is continuously rendered in the current display or terminal. Usually for human consumption. - rgb_array: return a single frame representing the current state of the environment. A frame is a numpy.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image. - rgb_array_list: return a list of frames representing the states of the environment since the last reset. Each frame is a numpy.ndarray with shape (x, y, 3), as with `rgb_array`. 
- ansi: Return a strings (str) or StringIO.StringIO containing a terminal-style text representation for each time step. The text can include newlines and ANSI escape sequences (e.g. for colors). Note: Make sure that your class's metadata 'render_modes' key includes the list of supported modes. It's recommended to call super() in implementations to use the functionality of this method. """ raise NotImplementedError def close(self): """Override close in your subclass to perform any necessary cleanup. Environments will automatically :meth:`close()` themselves when garbage collected or when the program exits. """ pass @property def unwrapped(self) -> "Env": """Returns the base non-wrapped environment. Returns: Env: The base non-wrapped gym.Env instance """ return self def __str__(self): """Returns a string of the environment with the spec id if specified.""" if self.spec is None: return f"<{type(self).__name__} instance>" else: return f"<{type(self).__name__}<{self.spec.id}>>" def __enter__(self): """Support with-statement for the environment.""" return self def __exit__(self, *args): """Support with-statement for the environment.""" self.close() # propagate exception return False
Env
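A minimal sketch of implementing this interface, following the five-tuple `step` and `(observation, info)` `reset` contract described in the docstrings; the `ConstantEnv` name and its trivial dynamics are illustrative, not part of the library:

import numpy as np
import gym
from gym import spaces

class ConstantEnv(gym.Env):
    """Toy environment: the observation is always zero and an episode lasts 10 steps."""

    def __init__(self):
        self.observation_space = spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
        self.action_space = spaces.Discrete(2)
        self._steps = 0

    def reset(self, *, seed=None, options=None):
        super().reset(seed=seed)  # seeds self.np_random when a seed is given
        self._steps = 0
        return np.zeros(1, dtype=np.float32), {}

    def step(self, action):
        self._steps += 1
        obs = np.zeros(1, dtype=np.float32)
        reward = 1.0 if action == 1 else 0.0
        terminated = self._steps >= 10
        return obs, reward, terminated, False, {}

env = ConstantEnv()
obs, info = env.reset(seed=0)
obs, reward, terminated, truncated, info = env.step(env.action_space.sample())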
python
doocs__leetcode
solution/1700-1799/1701.Average Waiting Time/Solution.py
{ "start": 0, "end": 229 }
class ____: def averageWaitingTime(self, customers: List[List[int]]) -> float: tot = t = 0 for a, b in customers: t = max(t, a) + b tot += t - a return tot / len(customers)
Solution
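A short worked example of the recurrence above, where `t` tracks when the chef finishes the current order and `t - a` is that customer's wait; the class name `Solution` is taken from the target line and the input is illustrative:

sol = Solution()

# Orders (arrival, cook time): the chef finishes at t = 3, 8, 11,
# so the waits are 2, 6 and 7, averaging 5.0.
print(sol.averageWaitingTime([[1, 2], [2, 5], [4, 3]]))  # 5.0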
python
matplotlib__matplotlib
lib/matplotlib/tests/test_ticker.py
{ "start": 23339, "end": 23736 }
class ____:
    def test_set_params(self):
        """
        Create an index locator with base 3 and offset 4, then change it to
        something else and check that the change was applied. Should not
        raise an exception.
        """
        index = mticker.IndexLocator(base=3, offset=4)
        index.set_params(base=7, offset=7)
        assert index._base == 7
        assert index.offset == 7
TestIndexLocator
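The test above only exercises `set_params`; a small usage sketch of `IndexLocator` on an index-style plot, where one major tick is placed every `base` data points, shifted by `offset` (the plotted data and output filename are illustrative):

import matplotlib.pyplot as plt
from matplotlib import ticker

fig, ax = plt.subplots()
ax.plot(range(20), [v * v for v in range(20)])
# One major tick every 3 data points, starting 4 points in from the first one.
ax.xaxis.set_major_locator(ticker.IndexLocator(base=3, offset=4))
fig.savefig("index_locator_demo.png")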
python
jazzband__django-oauth-toolkit
oauth2_provider/exceptions.py
{ "start": 1301, "end": 1418 }
class ____(InvalidRequestFatalError): description = "Invalid post logout redirect URI."
InvalidOIDCRedirectURIError
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/memberAccess2.py
{ "start": 898, "end": 995 }
class ____(TypedDict): a: Callable[[int], int] foo3 = Foo3(a=lambda a: a) g = foo3["a"](3)
Foo3
python
scipy__scipy
scipy/optimize/_zeros_py.py
{ "start": 43169, "end": 56659 }
class ____: """Solve f(x, *args) == 0 using Algorithm748 of Alefeld, Potro & Shi. """ _MU = 0.5 _K_MIN = 1 _K_MAX = 100 # A very high value for real usage. Expect 1, 2, maybe 3. def __init__(self): self.f = None self.args = None self.function_calls = 0 self.iterations = 0 self.k = 2 # ab=[a,b] is a global interval containing a root self.ab = [np.nan, np.nan] # fab is function values at a, b self.fab = [np.nan, np.nan] self.d = None self.fd = None self.e = None self.fe = None self.disp = False self.xtol = _xtol self.rtol = _rtol self.maxiter = _iter def configure(self, xtol, rtol, maxiter, disp, k): self.disp = disp self.xtol = xtol self.rtol = rtol self.maxiter = maxiter # Silently replace a low value of k with 1 self.k = max(k, self._K_MIN) # Noisily replace a high value of k with self._K_MAX if self.k > self._K_MAX: msg = f"toms748: Overriding k: ->{self._K_MAX}" warnings.warn(msg, RuntimeWarning, stacklevel=3) self.k = self._K_MAX def _callf(self, x, error=True): """Call the user-supplied function, update book-keeping""" fx = self.f(x, *self.args) self.function_calls += 1 if not np.isfinite(fx) and error: raise ValueError(f"Invalid function value: f({x:f}) -> {fx} ") return fx def get_result(self, x, flag=_ECONVERGED): r"""Package the result and statistics into a tuple.""" return (x, self.function_calls, self.iterations, flag) def _update_bracket(self, c, fc): return _update_bracket(self.ab, self.fab, c, fc) def start(self, f, a, b, args=()): r"""Prepare for the iterations.""" self.function_calls = 0 self.iterations = 0 self.f = f self.args = args self.ab[:] = [a, b] if not np.isfinite(a) or np.imag(a) != 0: raise ValueError(f"Invalid x value: {a} ") if not np.isfinite(b) or np.imag(b) != 0: raise ValueError(f"Invalid x value: {b} ") fa = self._callf(a) if not np.isfinite(fa) or np.imag(fa) != 0: raise ValueError(f"Invalid function value: f({a:f}) -> {fa} ") if fa == 0: return _ECONVERGED, a fb = self._callf(b) if not np.isfinite(fb) or np.imag(fb) != 0: raise ValueError(f"Invalid function value: f({b:f}) -> {fb} ") if fb == 0: return _ECONVERGED, b if np.sign(fb) * np.sign(fa) > 0: raise ValueError("f(a) and f(b) must have different signs, but " f"f({a:e})={fa:e}, f({b:e})={fb:e} ") self.fab[:] = [fa, fb] return _EINPROGRESS, sum(self.ab) / 2.0 def get_status(self): """Determine the current status.""" a, b = self.ab[:2] if np.isclose(a, b, rtol=self.rtol, atol=self.xtol): return _ECONVERGED, sum(self.ab) / 2.0 if self.iterations >= self.maxiter: return _ECONVERR, sum(self.ab) / 2.0 return _EINPROGRESS, sum(self.ab) / 2.0 def iterate(self): """Perform one step in the algorithm. Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995] """ self.iterations += 1 eps = np.finfo(float).eps d, fd, e, fe = self.d, self.fd, self.e, self.fe ab_width = self.ab[1] - self.ab[0] # Need the start width below c = None for nsteps in range(2, self.k+2): # If the f-values are sufficiently separated, perform an inverse # polynomial interpolation step. Otherwise, nsteps repeats of # an approximate Newton-Raphson step. 
if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps): c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e, self.fab[0], self.fab[1], fd, fe) if self.ab[0] < c0 < self.ab[1]: c = c0 if c is None: c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps) fc = self._callf(c) if fc == 0: return _ECONVERGED, c # re-bracket e, fe = d, fd d, fd = self._update_bracket(c, fc) # u is the endpoint with the smallest f-value uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1) u, fu = self.ab[uix], self.fab[uix] _, A = _compute_divided_differences(self.ab, self.fab, forward=(uix == 0), full=False) c = u - 2 * fu / A if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]): c = sum(self.ab) / 2.0 else: if np.isclose(c, u, rtol=eps, atol=0): # c didn't change (much). # Either because the f-values at the endpoints have vastly # differing magnitudes, or because the root is very close to # that endpoint frs = np.frexp(self.fab)[1] if frs[uix] < frs[1 - uix] - 50: # Differ by more than 2**50 c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32 else: # Make a bigger adjustment, about the # size of the requested tolerance. mm = (1 if uix == 0 else -1) adj = mm * np.abs(c) * self.rtol + mm * self.xtol c = u + adj if not self.ab[0] < c < self.ab[1]: c = sum(self.ab) / 2.0 fc = self._callf(c) if fc == 0: return _ECONVERGED, c e, fe = d, fd d, fd = self._update_bracket(c, fc) # If the width of the new interval did not decrease enough, bisect if self.ab[1] - self.ab[0] > self._MU * ab_width: e, fe = d, fd z = sum(self.ab) / 2.0 fz = self._callf(z) if fz == 0: return _ECONVERGED, z d, fd = self._update_bracket(z, fz) # Record d and e for next iteration self.d, self.fd = d, fd self.e, self.fe = e, fe status, xn = self.get_status() return status, xn def solve(self, f, a, b, args=(), xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True): r"""Solve f(x) = 0 given an interval containing a root.""" self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k) status, xn = self.start(f, a, b, args) if status == _ECONVERGED: return self.get_result(xn) # The first step only has two x-values. c = _secant(self.ab, self.fab) if not self.ab[0] < c < self.ab[1]: c = sum(self.ab) / 2.0 fc = self._callf(c) if fc == 0: return self.get_result(c) self.d, self.fd = self._update_bracket(c, fc) self.e, self.fe = None, None self.iterations += 1 while True: status, xn = self.iterate() if status == _ECONVERGED: return self.get_result(xn) if status == _ECONVERR: fmt = "Failed to converge after %d iterations, bracket is %s" if disp: msg = fmt % (self.iterations + 1, self.ab) raise RuntimeError(msg) return self.get_result(xn, _ECONVERR) def toms748(f, a, b, args=(), k=1, xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """ Find a root using TOMS Algorithm 748 method. Implements the Algorithm 748 method of Alefeld, Potro and Shi to find a root of the function `f` on the interval ``[a , b]``, where `f(a)` and `f(b)` must have opposite signs. It uses a mixture of inverse cubic interpolation and "Newton-quadratic" steps. [APS1995]. Parameters ---------- f : function Python function returning a scalar. The function :math:`f` must be continuous, and :math:`f(a)` and :math:`f(b)` have opposite signs. a : scalar, lower boundary of the search interval b : scalar, upper boundary of the search interval args : tuple, optional containing extra arguments for the function `f`. `f` is called by ``f(x, *args)``. k : int, optional The number of Newton quadratic steps to perform each iteration. ``k>=1``. 
xtol : scalar, optional The computed root ``x0`` will satisfy ``np.isclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter must be positive. rtol : scalar, optional The computed root ``x0`` will satisfy ``np.isclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. maxiter : int, optional If convergence is not achieved in `maxiter` iterations, an error is raised. Must be >= 0. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a `RootResults` object. disp : bool, optional If True, raise RuntimeError if the algorithm didn't converge. Otherwise, the convergence status is recorded in the `RootResults` return object. Returns ------- root : float Approximate root of `f` r : `RootResults` (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. See Also -------- brentq, brenth, ridder, bisect, newton fsolve : find roots in N dimensions. elementwise.find_root : efficient elementwise 1-D root-finder Notes ----- `f` must be continuous. Algorithm 748 with ``k=2`` is asymptotically the most efficient algorithm known for finding roots of a four times continuously differentiable function. In contrast with Brent's algorithm, which may only decrease the length of the enclosing bracket on the last step, Algorithm 748 decreases it each iteration with the same asymptotic efficiency as it finds the root. For easy statement of efficiency indices, assume that `f` has 4 continuous deriviatives. For ``k=1``, the convergence order is at least 2.7, and with about asymptotically 2 function evaluations per iteration, the efficiency index is approximately 1.65. For ``k=2``, the order is about 4.6 with asymptotically 3 function evaluations per iteration, and the efficiency index 1.66. For higher values of `k`, the efficiency index approaches the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are usually appropriate. As mentioned in the parameter documentation, the computed root ``x0`` will satisfy ``np.isclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. In equation form, this terminating condition is ``abs(x - x0) <= xtol + rtol * abs(x0)``. The default value ``xtol=2e-12`` may lead to surprising behavior if one expects `toms748` to always compute roots with relative error near machine precision. Care should be taken to select `xtol` for the use case at hand. Setting ``xtol=5e-324``, the smallest subnormal number, will ensure the highest level of accuracy. Larger values of `xtol` may be useful for saving function evaluations when a root is at or near zero in applications where the tiny absolute differences available between floating point numbers near zero are not meaningful. References ---------- .. [APS1995] Alefeld, G. E. and Potra, F. A. and Shi, Yixun, *Algorithm 748: Enclosing Zeros of Continuous Functions*, ACM Trans. Math. Softw. Volume 221(1995) doi = {10.1145/210089.210111} Examples -------- >>> def f(x): ... 
return (x**3 - 1) # only one real root at x = 1 >>> from scipy import optimize >>> root, results = optimize.toms748(f, 0, 2, full_output=True) >>> root 1.0 >>> results converged: True flag: converged function_calls: 11 iterations: 5 root: 1.0 method: toms748 """ if xtol <= 0: raise ValueError(f"xtol too small ({xtol:g} <= 0)") if rtol < _rtol / 4: raise ValueError(f"rtol too small ({rtol:g} < {_rtol/4:g})") maxiter = operator.index(maxiter) if maxiter < 1: raise ValueError("maxiter must be greater than 0") if not np.isfinite(a): raise ValueError(f"a is not finite {a}") if not np.isfinite(b): raise ValueError(f"b is not finite {b}") if a >= b: raise ValueError(f"a and b are not an interval [{a}, {b}]") if not k >= 1: raise ValueError(f"k too small ({k} < 1)") if not isinstance(args, tuple): args = (args,) f = _wrap_nan_raise(f) solver = TOMS748Solver() result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp) x, function_calls, iterations, flag = result return _results_select(full_output, (x, function_calls, iterations, flag), "toms748")
TOMS748Solver
python
prabhupant__python-ds
data_structures/circular_linked_list/check_circular_linked_list.py
{ "start": 58, "end": 487 }
class ____(): def __init__(self, val): self.val = val self.next = None def check(head): if not head: return True curr = head while curr: if curr.next == head: return True elif curr.next == None: return False curr = curr.next first = Node(1) second = Node(2) third = Node(3) first.next = second second.next = third print(check(first))
Node
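The driver above builds an open chain, so `check(first)` prints False; closing the chain demonstrates the positive case (the class name `Node` is taken from the target line):

# Reuse the three nodes built above and close the loop back to the head.
third.next = first
print(check(first))   # True

# A single self-referencing node is also circular.
solo = Node(42)
solo.next = solo
print(check(solo))    # True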
python
kubernetes-client__python
kubernetes/client/api/scheduling_api.py
{ "start": 543, "end": 5193 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def get_api_group(self, **kwargs): # noqa: E501 """get_api_group # noqa: E501 get information of a group # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_group(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1APIGroup If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.get_api_group_with_http_info(**kwargs) # noqa: E501 def get_api_group_with_http_info(self, **kwargs): # noqa: E501 """get_api_group # noqa: E501 get information of a group # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_group_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_api_group" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/scheduling.k8s.io/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIGroup', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
SchedulingApi
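A minimal usage sketch, assuming a reachable cluster and a local kubeconfig; error handling is omitted:

from kubernetes import client, config

config.load_kube_config()      # or config.load_incluster_config() inside a pod
api = client.SchedulingApi()
group = api.get_api_group()    # returns a V1APIGroup for scheduling.k8s.io
print(group.name, group.preferred_version.group_version)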
python
getsentry__sentry
tests/sentry/issues/test_issue_occurrence.py
{ "start": 291, "end": 2741 }
class ____(OccurrenceTestMixin, TestCase): def test(self) -> None: occurrence = self.build_occurrence() self.assert_occurrences_identical( occurrence, IssueOccurrence.from_dict(occurrence.to_dict()) ) def test_level_default(self) -> None: occurrence_data = self.build_occurrence_data() occurrence_data["level"] = None occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.level == DEFAULT_LEVEL def test_assignee(self) -> None: occurrence_data = self.build_occurrence_data() occurrence_data["assignee"] = f"user:{self.user.id}" occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.assignee == Actor(id=self.user.id, actor_type=ActorType.USER) occurrence_data["assignee"] = f"{self.user.id}" occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.assignee == Actor(id=self.user.id, actor_type=ActorType.USER) occurrence_data["assignee"] = f"{self.user.email}" occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.assignee == Actor(id=self.user.id, actor_type=ActorType.USER) occurrence_data["assignee"] = f"{self.user.username}" occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.assignee == Actor(id=self.user.id, actor_type=ActorType.USER) occurrence_data["assignee"] = f"team:{self.team.id}" occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.assignee == Actor(id=self.team.id, actor_type=ActorType.TEAM) def test_assignee_none(self) -> None: occurrence_data = self.build_occurrence_data() occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.assignee is None occurrence_data["assignee"] = None occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.assignee is None occurrence_data["assignee"] = "" occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.assignee is None def test_priority(self) -> None: occurrence_data = self.build_occurrence_data() occurrence_data["priority"] = PriorityLevel.HIGH.value occurrence = IssueOccurrence.from_dict(occurrence_data) assert occurrence.priority == PriorityLevel.HIGH
IssueOccurrenceSerializeTest
python
kamyu104__LeetCode-Solutions
Python/find-if-path-exists-in-graph.py
{ "start": 1241, "end": 2239 }
class ____(object): def validPath(self, n, edges, start, end): """ :type n: int :type edges: List[List[int]] :type start: int :type end: int :rtype: bool """ def bfs(adj, start, target): q = [start] lookup = set(q) steps = 0 while q: new_q = [] for pos in q: if pos == target: return steps for nei in adj[pos]: if nei in lookup: continue lookup.add(nei) new_q.append(nei) q = new_q steps += 1 return -1 adj = collections.defaultdict(list) for u, v in edges: adj[u].append(v) adj[v].append(u) return bfs(adj, start, end) >= 0 # Time: O(|V| + |E|) # Space: O(|V| + |E|) # dfs solution
Solution2
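A quick check of the BFS reachability above, with the masked class filled in as `Solution2` from the target line and two small illustrative graphs (the method relies on the `collections` module being imported at module level):

import collections  # the adjacency list above relies on collections.defaultdict

sol = Solution2()

# 0 - 1 - 2 form a triangle, so every vertex reaches every other vertex.
print(sol.validPath(3, [[0, 1], [1, 2], [2, 0]], 0, 2))                  # True

# Two separate components {0, 1, 2} and {3, 4, 5}: no path from 0 to 5.
print(sol.validPath(6, [[0, 1], [0, 2], [3, 5], [5, 4], [4, 3]], 0, 5))  # False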
python
gevent__gevent
src/greentest/3.10/test_socket.py
{ "start": 178182, "end": 179123 }
class ____(SocketPairTest): def __init__(self, methodName='runTest'): SocketPairTest.__init__(self, methodName=methodName) def _check_defaults(self, sock): self.assertIsInstance(sock, socket.socket) if hasattr(socket, 'AF_UNIX'): self.assertEqual(sock.family, socket.AF_UNIX) else: self.assertEqual(sock.family, socket.AF_INET) self.assertEqual(sock.type, socket.SOCK_STREAM) self.assertEqual(sock.proto, 0) def _testDefaults(self): self._check_defaults(self.cli) def testDefaults(self): self._check_defaults(self.serv) def testRecv(self): msg = self.serv.recv(1024) self.assertEqual(msg, MSG) def _testRecv(self): self.cli.send(MSG) def testSend(self): self.serv.send(MSG) def _testSend(self): msg = self.cli.recv(1024) self.assertEqual(msg, MSG)
BasicSocketPairTest
python
keras-team__keras
keras/src/ops/math.py
{ "start": 7727, "end": 9104 }
class ____(Operation): def __init__(self, axis=None, keepdims=False, *, name=None): super().__init__(name=name) self.axis = axis self.keepdims = keepdims def compute_output_spec(self, x): output_shape = reduce_shape(x.shape, self.axis, self.keepdims) return KerasTensor(shape=output_shape) def call(self, x): return backend.math.logsumexp(x, axis=self.axis, keepdims=self.keepdims) @keras_export("keras.ops.logsumexp") def logsumexp(x, axis=None, keepdims=False): """Computes the logarithm of sum of exponentials of elements in a tensor. Args: x: Input tensor. axis: An integer or a tuple of integers specifying the axis/axes along which to compute the sum. If `None`, the sum is computed over all elements. Defaults to `None`. keepdims: A boolean indicating whether to keep the dimensions of the input tensor when computing the sum. Defaults to `False`. Returns: A tensor containing the logarithm of the sum of exponentials of elements in `x`. Example: >>> x = keras.ops.convert_to_tensor([1., 2., 3.]) >>> logsumexp(x) 3.407606 """ if any_symbolic_tensors((x,)): return Logsumexp(axis, keepdims).symbolic_call(x) return backend.math.logsumexp(x, axis=axis, keepdims=keepdims)
Logsumexp
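Backends typically evaluate this reduction with the max-shift identity logsumexp(x) = max(x) + log(sum(exp(x - max(x)))), which avoids overflow for large inputs; a NumPy sketch of that identity (illustrative, not the actual backend implementation):

import numpy as np

def logsumexp_stable(x):
    # Shift by the max so the largest exponent is exp(0) == 1, then add the shift back.
    m = np.max(x)
    return m + np.log(np.sum(np.exp(x - m)))

x = np.array([1.0, 2.0, 3.0])
print(logsumexp_stable(x))                            # ~3.407606, matching the docstring example
print(np.log(np.sum(np.exp(x))))                      # same here, but overflows for large inputs
print(logsumexp_stable(np.array([1000.0, 1000.0])))   # ~1000.693147, no overflow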
python
sqlalchemy__sqlalchemy
test/orm/test_of_type.py
{ "start": 1337, "end": 9196 }
class ____: __dialect__ = "default" def test_any_one(self): sess = fixture_session() any_ = Company.employees.of_type(Engineer).any( Engineer.primary_language == "cobol" ) eq_(sess.query(Company).filter(any_).one(), self.c2) def test_any_two(self): sess = fixture_session() calias = aliased(Company) any_ = calias.employees.of_type(Engineer).any( Engineer.primary_language == "cobol" ) eq_(sess.query(calias).filter(any_).one(), self.c2) def test_any_three(self): sess = fixture_session() any_ = Company.employees.of_type(Boss).any(Boss.golf_swing == "fore") eq_(sess.query(Company).filter(any_).one(), self.c1) def test_any_four(self): sess = fixture_session() any_ = Company.employees.of_type(Manager).any( Manager.manager_name == "pointy" ) eq_(sess.query(Company).filter(any_).one(), self.c1) def test_any_five(self): sess = fixture_session() any_ = Company.employees.of_type(Engineer).any( and_(Engineer.primary_language == "cobol") ) eq_(sess.query(Company).filter(any_).one(), self.c2) def test_join_to_subclass_one(self): sess = fixture_session() eq_( sess.query(Company) .join(Company.employees.of_type(Engineer)) .filter(Engineer.primary_language == "java") .all(), [self.c1], ) def test_join_to_subclass_two(self): sess = fixture_session() eq_( sess.query(Company) .join(Company.employees.of_type(Engineer)) .join(Engineer.machines) .filter(Machine.name.ilike("%thinkpad%")) .all(), [self.c1], ) def test_join_to_subclass_three(self): sess = fixture_session() eq_( sess.query(Company, Engineer) .join(Company.employees.of_type(Engineer)) .filter(Engineer.primary_language == "java") .count(), 1, ) def test_join_to_subclass_four(self): sess = fixture_session() # test [ticket:2093] eq_( sess.query(Company.company_id, Engineer) .join(Company.employees.of_type(Engineer)) .filter(Engineer.primary_language == "java") .count(), 1, ) def test_join_to_subclass_five(self): sess = fixture_session() eq_( sess.query(Company) .join(Company.employees.of_type(Engineer)) .filter(Engineer.primary_language == "java") .count(), 1, ) def test_with_polymorphic_join_compile_one(self): sess = fixture_session() self.assert_compile( sess.query(Company).join( Company.employees.of_type( with_polymorphic( Person, [Engineer, Manager], aliased=True, flat=True ) ) ), "SELECT companies.company_id AS companies_company_id, " "companies.name AS companies_name FROM companies " "JOIN %s" % (self._polymorphic_join_target([Engineer, Manager])), ) def test_with_polymorphic_join_exec_contains_eager_one(self): sess = fixture_session() def go(): wp = with_polymorphic( Person, [Engineer, Manager], aliased=True, flat=True ) eq_( sess.query(Company) .join(Company.employees.of_type(wp)) .order_by(Company.company_id, wp.person_id) .options(contains_eager(Company.employees.of_type(wp))) .all(), [self.c1, self.c2], ) self.assert_sql_count(testing.db, go, 1) @testing.combinations( # this form is not expected to work in all cases, ultimately # the "alias" parameter should be deprecated entirely # lambda Company, wp: contains_eager(Company.employees, alias=wp), lambda Company, wp: contains_eager(Company.employees.of_type(wp)), lambda Company, wp: contains_eager( Company.employees.of_type(wp), alias=wp ), ) def test_with_polymorphic_join_exec_contains_eager_two( self, contains_eager_option ): sess = fixture_session() wp = with_polymorphic(Person, [Engineer, Manager], aliased=True) contains_eager_option = testing.resolve_lambda( contains_eager_option, Company=Company, wp=wp ) q = ( sess.query(Company) .join(Company.employees.of_type(wp)) 
.order_by(Company.company_id, wp.person_id) .options(contains_eager_option) ) def go(): eq_(q.all(), [self.c1, self.c2]) self.assert_sql_count(testing.db, go, 1) self.assert_compile( q, self._test_with_polymorphic_join_exec_contains_eager_two_result(), ) def test_with_polymorphic_any(self): sess = fixture_session() wp = with_polymorphic(Person, [Engineer], aliased=True) eq_( sess.query(Company.company_id) .filter( Company.employees.of_type(wp).any( wp.Engineer.primary_language == "java" ) ) .all(), [(1,)], ) def test_subqueryload_implicit_withpoly(self): sess = fixture_session() def go(): eq_( sess.query(Company) .filter_by(company_id=1) .options(subqueryload(Company.employees.of_type(Engineer))) .all(), [self._company_with_emps_fixture()[0]], ) self.assert_sql_count(testing.db, go, 4) def test_joinedload_implicit_withpoly(self): sess = fixture_session() def go(): eq_( sess.query(Company) .filter_by(company_id=1) .options(joinedload(Company.employees.of_type(Engineer))) .all(), [self._company_with_emps_fixture()[0]], ) self.assert_sql_count(testing.db, go, 3) def test_subqueryload_explicit_withpoly(self): sess = fixture_session() def go(): target = with_polymorphic(Person, Engineer) eq_( sess.query(Company) .filter_by(company_id=1) .options(subqueryload(Company.employees.of_type(target))) .all(), [self._company_with_emps_fixture()[0]], ) self.assert_sql_count(testing.db, go, 4) def test_joinedload_explicit_withpoly(self): sess = fixture_session() def go(): target = with_polymorphic(Person, Engineer, flat=True) eq_( sess.query(Company) .filter_by(company_id=1) .options(joinedload(Company.employees.of_type(target))) .all(), [self._company_with_emps_fixture()[0]], ) self.assert_sql_count(testing.db, go, 3) def test_joinedload_stacked_of_type(self): sess = fixture_session() def go(): eq_( sess.query(Company) .filter_by(company_id=1) .options( joinedload(Company.employees.of_type(Manager)), joinedload(Company.employees.of_type(Engineer)), ) .all(), [self._company_with_emps_fixture()[0]], ) self.assert_sql_count(testing.db, go, 2)
_PolymorphicTestBase
python
sphinx-doc__sphinx
sphinx/builders/latex/transforms.py
{ "start": 803, "end": 1217 }
class ____(SphinxTransform): """Add docname to footnote and footnote_reference nodes.""" default_priority = 700 TARGET_NODES = (nodes.footnote, nodes.footnote_reference) def apply(self, **kwargs: Any) -> None: matcher = NodeMatcher(*self.TARGET_NODES) for node in matcher.findall(self.document): node['docname'] = self.env.current_document.docname
FootnoteDocnameUpdater
python
kamyu104__LeetCode-Solutions
Python/combination-sum-ii.py
{ "start": 39, "end": 898 }
class ____(object): # @param candidates, a list of integers # @param target, integer # @return a list of lists of integers def combinationSum2(self, candidates, target): result = [] self.combinationSumRecu(sorted(candidates), result, 0, [], target) return result def combinationSumRecu(self, candidates, result, start, intermediate, target): if target == 0: result.append(list(intermediate)) prev = 0 while start < len(candidates) and candidates[start] <= target: if prev != candidates[start]: intermediate.append(candidates[start]) self.combinationSumRecu(candidates, result, start + 1, intermediate, target - candidates[start]) intermediate.pop() prev = candidates[start] start += 1
Solution
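A quick check of the duplicate-skipping logic above (`prev` suppresses repeated candidate values at the same recursion depth), with the masked class filled in as `Solution` and an illustrative input:

sol = Solution()

# Each candidate may be used at most once; `prev` skips duplicate values so
# equivalent combinations are emitted only once.
print(sol.combinationSum2([10, 1, 2, 7, 6, 1, 5], 8))
# [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]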
python
kamyu104__LeetCode-Solutions
Python/minimum-interval-to-include-each-query.py
{ "start": 68, "end": 826 }
class ____(object): def minInterval(self, intervals, queries): """ :type intervals: List[List[int]] :type queries: List[int] :rtype: List[int] """ intervals.sort() queries = [(q, i) for i, q in enumerate(queries)] queries.sort() min_heap = [] i = 0 result =[-1]*len(queries) for q, idx in queries: while i != len(intervals) and intervals[i][0] <= q: heapq.heappush(min_heap, [intervals[i][1]-intervals[i][0]+1, i]) i += 1 while min_heap and intervals[min_heap[0][1]][1] < q: heapq.heappop(min_heap) result[idx] = min_heap[0][0] if min_heap else -1 return result
Solution
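A quick check of the offline sweep above (queries sorted, a min-heap keyed on interval length), with the masked class filled in as `Solution` and an illustrative input:

import heapq  # the method above relies on heapq being imported at module level

sol = Solution()

# Size of the smallest interval containing each query (or -1 if none does).
print(sol.minInterval([[1, 4], [2, 4], [3, 6], [4, 4]], [2, 3, 4, 5]))
# [3, 3, 1, 4]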
python
google__jax
tests/monitoring_test.py
{ "start": 845, "end": 5722 }
class ____(absltest.TestCase): def tearDown(self): monitoring.clear_event_listeners() super().tearDown() def test_record_event(self): events = [] counters = {} # Map event names to frequency. def increment_event_counter(event): if event not in counters: counters[event] = 0 counters[event] += 1 # Test that we can register multiple callbacks. monitoring.register_event_listener(events.append) monitoring.register_event_listener(increment_event_counter) monitoring.record_event("test_unique_event") monitoring.record_event("test_common_event") monitoring.record_event("test_common_event") self.assertListEqual(events, ["test_unique_event", "test_common_event", "test_common_event"]) self.assertDictEqual(counters, {"test_unique_event": 1, "test_common_event": 2}) def test_record_event_durations(self): durations = {} # Map event names to frequency. def increment_event_duration(event, duration, **kwargs): del kwargs if event not in durations: durations[event] = 0. durations[event] += duration monitoring.register_event_duration_secs_listener(increment_event_duration) monitoring.record_event_duration_secs("test_short_event", 1) monitoring.record_event_duration_secs("test_short_event", 2) monitoring.record_event_duration_secs("test_long_event", 10) self.assertDictEqual(durations, {"test_short_event": 3, "test_long_event": 10}) def test_record_scalar(self): observed_keys = [] observed_values = [] monitoring.register_scalar_listener( lambda key, _, **kwargs: observed_keys.append(key), ) monitoring.register_scalar_listener( lambda _, value, **kwargs: observed_values.append(value), ) monitoring.record_scalar("test_unique_event", 1) monitoring.record_scalar("test_common_event", 2.5) monitoring.record_scalar("test_common_event", 5e5) self.assertListEqual( observed_keys, ["test_unique_event", "test_common_event", "test_common_event"], ) self.assertListEqual( observed_values, [1, 2.5, 5e5], ) def test_unregister_exist_callback_success(self): original_duration_listeners = jax_src_monitoring.get_event_duration_listeners() callback = lambda event, durations, **kwargs: None self.assertNotIn(callback, original_duration_listeners) monitoring.register_event_duration_secs_listener(callback) self.assertIn(callback, jax_src_monitoring.get_event_duration_listeners()) # Verify that original listeners list is not modified by register function. 
self.assertNotEqual(original_duration_listeners, jax_src_monitoring.get_event_duration_listeners()) jax_src_monitoring.unregister_event_duration_listener(callback) self.assertEqual(original_duration_listeners, jax_src_monitoring.get_event_duration_listeners()) def test_unregister_not_exist_callback_fail(self): callback = lambda event, durations, **kwargs: None self.assertNotIn(callback, jax_src_monitoring.get_event_duration_listeners()) with self.assertRaises(AssertionError): jax_src_monitoring.unregister_event_duration_listener(callback) def test_get_event_duration_listeners_returns_a_copy(self): original_duration_listeners = jax_src_monitoring.get_event_duration_listeners() callback = lambda event, durations, **kwargs: None original_duration_listeners.append(callback) self.assertNotIn(callback, jax_src_monitoring.get_event_duration_listeners()) self.assertNotEqual(original_duration_listeners, jax_src_monitoring.get_event_duration_listeners()) def test_unregister_exist_event_callback_success(self): original_event_listeners = jax_src_monitoring.get_event_listeners() callback = lambda event: None self.assertNotIn(callback, original_event_listeners) monitoring.register_event_listener(callback) self.assertIn(callback, jax_src_monitoring.get_event_listeners()) # Verify that original listeners list is not modified by register function. self.assertNotEqual(original_event_listeners, jax_src_monitoring.get_event_listeners()) jax_src_monitoring.unregister_event_listener(callback) self.assertEqual(original_event_listeners, jax_src_monitoring.get_event_listeners()) def test_unregister_not_exist_event_callback_fail(self): callback = lambda event: None self.assertNotIn(callback, jax_src_monitoring.get_event_listeners()) with self.assertRaises(AssertionError): jax_src_monitoring.unregister_event_listener(callback) if __name__ == "__main__": absltest.main()
MonitoringTest
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/checkpoint_test.py
{ "start": 2049, "end": 24542 }
class ____(test_base.DatasetTestBase, parameterized.TestCase): def tearDown(self): prefix = self._iterator_checkpoint_prefix() pattern = prefix + "*" files = gfile.Glob(pattern) map(gfile.Remove, files) super(CheckpointTest, self).tearDown() def _iterator_checkpoint_prefix(self): return os.path.join(self.get_temp_dir(), "iterator") def _save_op(self, iterator_resource): iterator_state_variant = gen_dataset_ops.serialize_iterator( iterator_resource) save_op = io_ops.write_file( self._iterator_checkpoint_prefix(), parsing_ops.serialize_tensor(iterator_state_variant)) return save_op def _restore_op(self, iterator_resource): iterator_state_variant = parsing_ops.parse_tensor( io_ops.read_file(self._iterator_checkpoint_prefix()), dtypes.variant) restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource, iterator_state_variant) return restore_op @combinations.generate(test_base.graph_only_combinations()) def testSaveRestore(self): def _build_graph(start, stop): iterator = dataset_ops.make_initializable_iterator( dataset_ops.Dataset.range(start, stop)) init_op = iterator.initializer get_next = iterator.get_next() save_op = self._save_op(iterator._iterator_resource) restore_op = self._restore_op(iterator._iterator_resource) return init_op, get_next, save_op, restore_op # Saving and restoring in different sessions. start = 2 stop = 10 break_point = 5 with ops.Graph().as_default() as g: init_op, get_next, save_op, _ = _build_graph(start, stop) with self.session(graph=g) as sess: sess.run(variables.global_variables_initializer()) sess.run(init_op) for i in range(start, break_point): self.assertEqual(i, sess.run(get_next)) sess.run(save_op) with ops.Graph().as_default() as g: init_op, get_next, _, restore_op = _build_graph(start, stop) with self.session(graph=g) as sess: sess.run(init_op) sess.run(restore_op) for i in range(break_point, stop): self.assertEqual(i, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Saving and restoring in same session. with ops.Graph().as_default() as g: init_op, get_next, save_op, restore_op = _build_graph(start, stop) with self.session(graph=g) as sess: sess.run(variables.global_variables_initializer()) sess.run(init_op) for i in range(start, break_point): self.assertEqual(i, sess.run(get_next)) sess.run(save_op) sess.run(init_op) sess.run(restore_op) for i in range(break_point, stop): self.assertEqual(i, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @combinations.generate(test_base.graph_only_combinations()) def testInitThenRestore(self): # Note: Calling init_op before restore_op is redundant. This test just makes # sure we do not fail if restore is called on an already initialized # iterator resource. def _build_graph(start, stop): dataset = dataset_ops.Dataset.range(start, stop) iterator = dataset_ops.make_initializable_iterator(dataset) init_op = iterator.initializer get_next = iterator.get_next() save_op = self._save_op(iterator._iterator_resource) restore_op = self._restore_op(iterator._iterator_resource) return init_op, get_next, save_op, restore_op # Saving and restoring in different sessions. 
start = 2 stop = 10 break_point = 5 with ops.Graph().as_default() as g: init_op, get_next, save_op, _ = _build_graph(start, stop) with self.session(graph=g) as sess: sess.run(variables.global_variables_initializer()) sess.run(init_op) for i in range(start, break_point): self.assertEqual(i, sess.run(get_next)) sess.run(save_op) with ops.Graph().as_default() as g: init_op, get_next, _, restore_op = _build_graph(start, stop) with self.session(graph=g) as sess: sess.run(init_op) sess.run(restore_op) for i in range(break_point, stop): self.assertEqual(i, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @combinations.generate(test_base.graph_only_combinations()) def testMultipleSaves(self): def _build_graph(start, stop): iterator = dataset_ops.make_initializable_iterator( dataset_ops.Dataset.range(start, stop)) init_op = iterator.initializer get_next = iterator.get_next() save_op = self._save_op(iterator._iterator_resource) restore_op = self._restore_op(iterator._iterator_resource) return init_op, get_next, save_op, restore_op start = 2 stop = 10 break_point1 = 5 break_point2 = 7 with ops.Graph().as_default() as g: init_op, get_next, save_op, _ = _build_graph(start, stop) with self.session(graph=g) as sess: sess.run(variables.global_variables_initializer()) sess.run(init_op) for i in range(start, break_point1): self.assertEqual(i, sess.run(get_next)) sess.run(save_op) with ops.Graph().as_default() as g: init_op, get_next, save_op, restore_op = _build_graph(start, stop) with self.session(graph=g) as sess: sess.run(init_op) sess.run(restore_op) for i in range(break_point1, break_point2): self.assertEqual(i, sess.run(get_next)) sess.run(save_op) break_point2 = 7 with ops.Graph().as_default() as g: init_op, get_next, save_op, restore_op = _build_graph(start, stop) with self.session(graph=g) as sess: sess.run(init_op) sess.run(restore_op) for i in range(break_point2, stop): self.assertEqual(i, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @combinations.generate(test_base.graph_only_combinations()) def testSaveRestoreWithRepeat(self): def _build_graph(start, stop, num_epochs): iterator = dataset_ops.make_initializable_iterator( dataset_ops.Dataset.range(start, stop).repeat(num_epochs)) init_op = iterator.initializer get_next = iterator.get_next() save_op = self._save_op(iterator._iterator_resource) restore_op = self._restore_op(iterator._iterator_resource) return init_op, get_next, save_op, restore_op start = 2 stop = 10 num_epochs = 5 break_range = 5 break_epoch = 3 with ops.Graph().as_default() as g: init_op, get_next, save_op, restore_op = _build_graph( start, stop, num_epochs) with self.session(graph=g) as sess: sess.run(variables.global_variables_initializer()) sess.run(init_op) # Note: There is no checkpoint saved currently so a NotFoundError is # raised. 
with self.assertRaises(errors.NotFoundError): sess.run(init_op) sess.run(restore_op) for _ in range(break_epoch - 1): for i in range(start, stop): self.assertEqual(i, sess.run(get_next)) for i in range(start, break_range): self.assertEqual(i, sess.run(get_next)) sess.run(save_op) with ops.Graph().as_default() as g: init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs) with self.session(graph=g) as sess: sess.run(init_op) sess.run(restore_op) for i in range(break_range, stop): self.assertEqual(i, sess.run(get_next)) for _ in range(break_epoch, num_epochs): for i in range(start, stop): self.assertEqual(i, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @combinations.generate(test_base.graph_only_combinations()) def testSaveRestoreExhaustedIterator(self): def _build_graph(start, stop, num_epochs): iterator = dataset_ops.make_initializable_iterator( dataset_ops.Dataset.range(start, stop).repeat(num_epochs)) init_op = iterator.initializer get_next = iterator.get_next() save_op = self._save_op(iterator._iterator_resource) restore_op = self._restore_op(iterator._iterator_resource) return init_op, get_next, save_op, restore_op start = 2 stop = 10 num_epochs = 5 with ops.Graph().as_default() as g: init_op, get_next, save_op, restore_op = _build_graph( start, stop, num_epochs) with self.session(graph=g) as sess: sess.run(variables.global_variables_initializer()) sess.run(init_op) # Note: There is no checkpoint saved currently so a NotFoundError is # raised. with self.assertRaises(errors.NotFoundError): sess.run(init_op) sess.run(restore_op) for _ in range(num_epochs): for i in range(start, stop): self.assertEqual(i, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) sess.run(save_op) with ops.Graph().as_default() as g: init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs) with self.session(graph=g) as sess: sess.run(init_op) sess.run(restore_op) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @combinations.generate(combinations.times( test_base.eager_only_combinations(), combinations.combine(enable_async_ckpt=[True, False]) )) def testSaveRestoreOneShotIterator(self, enable_async_ckpt): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map( math_ops.square).batch(2) iterator = iter(dataset) get_next = iterator.get_next ckpt_options = checkpoint_options.CheckpointOptions( experimental_enable_async_checkpoint=enable_async_ckpt) checkpoint = trackable_utils.Checkpoint(iterator=iterator) self.assertAllEqual([1, 4], get_next()) save_path = checkpoint.save(checkpoint_prefix, options=ckpt_options) self.assertAllEqual([9, 16], get_next()) self.assertAllEqual([25, 36], get_next()) checkpoint.restore(save_path).run_restore_ops() self.assertAllEqual([9, 16], get_next()) self.assertAllEqual([25, 36], get_next()) with self.assertRaises(errors.OutOfRangeError): get_next() @combinations.generate(combinations.times( test_base.eager_only_combinations(), combinations.combine(enable_async_ckpt=[True, False]) )) def testSaveRestoreMultipleIterator(self, enable_async_ckpt): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") dataset = dataset_ops.Dataset.from_tensor_slices( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) dataset = dataset.map(math_ops.square).batch(2) iterator_1 = iter(dataset) get_next_1 = 
iterator_1.get_next iterator_2 = iter(dataset) get_next_2 = iterator_2.get_next dataset_2 = dataset_ops.Dataset.range(10) iterator_3 = iter(dataset_2) get_next_3 = iterator_3.get_next ckpt_options = checkpoint_options.CheckpointOptions( experimental_enable_async_checkpoint=enable_async_ckpt) checkpoint = trackable_utils.Checkpoint( iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3) self.assertAllEqual([1, 4], get_next_1()) self.assertAllEqual(0, get_next_3()) self.assertAllEqual(1, get_next_3()) self.assertAllEqual(2, get_next_3()) save_path = checkpoint.save(checkpoint_prefix, options=ckpt_options) self.assertAllEqual([1, 4], get_next_2()) self.assertAllEqual([9, 16], get_next_2()) self.assertAllEqual(3, get_next_3()) checkpoint.restore(save_path).run_restore_ops() self.assertAllEqual([9, 16], get_next_1()) self.assertAllEqual([1, 4], get_next_2()) self.assertAllEqual(3, get_next_3()) @combinations.generate(combinations.times( test_base.eager_only_combinations(), combinations.combine(enable_async_ckpt=[True, False]) )) def testRestoreExhaustedIterator(self, enable_async_ckpt): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") dataset = dataset_ops.Dataset.range(3) iterator = iter(dataset) get_next = iterator.get_next ckpt_options = checkpoint_options.CheckpointOptions( experimental_enable_async_checkpoint=enable_async_ckpt) checkpoint = trackable_utils.Checkpoint(iterator=iterator) self.assertAllEqual(0, get_next()) self.assertAllEqual(1, get_next()) save_path = checkpoint.save(checkpoint_prefix, options=ckpt_options) self.assertAllEqual(2, get_next()) checkpoint.restore(save_path).run_restore_ops() self.assertAllEqual(2, get_next()) save_path = checkpoint.save(checkpoint_prefix, options=ckpt_options) checkpoint.restore(save_path).run_restore_ops() with self.assertRaises(errors.OutOfRangeError): get_next() @combinations.generate(test_base.eager_only_combinations()) def testRestoreInReconstructedIteratorInitializable(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") dataset = dataset_ops.Dataset.range(10) iterator = iter(dataset) get_next = iterator.get_next checkpoint = trackable_utils.Checkpoint(iterator=iterator) for i in range(5): checkpoint.restore( checkpoint_management.latest_checkpoint( checkpoint_directory)).initialize_or_restore() for j in range(2): self.assertEqual(i * 2 + j, self.evaluate(get_next())) checkpoint.save(file_prefix=checkpoint_prefix) @combinations.generate(test_base.eager_only_combinations()) def testSaveRestoreReshuffleDataset(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.shuffle(10, reshuffle_each_iteration=True) iterator = iter(dataset) ckpt = trackable_utils.Checkpoint( step=variables.Variable(0), iterator=iterator) manager = checkpoint_management.CheckpointManager( ckpt, self.get_temp_dir(), max_to_keep=3) iter1 = [next(iterator).numpy() for _ in range(5)] manager.save() iter2 = [next(iterator).numpy() for _ in range(5)] ckpt.restore(manager.latest_checkpoint) iter3 = [next(iterator).numpy() for _ in range(5)] self.assertNotEqual(iter1, iter2) self.assertCountEqual(iter2, iter3) @combinations.generate(test_base.eager_only_combinations()) def testSaveRestoreModifiedDataset(self): ckpt_dir = self.get_temp_dir() dataset = dataset_ops.Dataset.range(10) iterator = iter(dataset) ckpt = trackable_utils.Checkpoint(iterator=iterator) manager = checkpoint_management.CheckpointManager( ckpt, ckpt_dir, max_to_keep=3) 
for _ in range(5): next(iterator) manager.save() # Define a different dataset and try to restore into its iterator. dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3]) iterator = iter(dataset) ckpt = trackable_utils.Checkpoint(iterator=iterator) manager = checkpoint_management.CheckpointManager( ckpt, ckpt_dir, max_to_keep=3) with self.assertRaisesRegex( errors.NotFoundError, "Make sure the dataset definition has not changed"): ckpt.restore(manager.latest_checkpoint) def _assertNotCheckpointable(self, dataset): iterator = iter(dataset) ckpt = trackable_utils.Checkpoint( step=variables.Variable(0), iterator=iterator) manager = checkpoint_management.CheckpointManager( ckpt, self.get_temp_dir(), max_to_keep=3) with self.assertRaises(errors.FailedPreconditionError): manager.save() @staticmethod def _statefulInt64Func(_): return random_ops.random_uniform((), 0, 1, dtypes.int64) @staticmethod def _statefulBoolFunc(_): return random_ops.random_uniform((), 0, 1, dtypes.int64) < 1 @staticmethod def _statefulDatasetFunc(_): x = random_ops.random_uniform((), 0, 1, dtypes.int64) return dataset_ops.Dataset.range(x) @combinations.generate(test_base.eager_only_combinations()) def testStatefulFilterNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.filter(self._statefulBoolFunc) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulFlatMapNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.flat_map(self._statefulDatasetFunc) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulInterleaveNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.interleave(self._statefulDatasetFunc) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulMapNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.map(self._statefulBoolFunc) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulParallelInterleaveNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.interleave( self._statefulDatasetFunc, num_parallel_calls=2) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulParallelMapNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.map(self._statefulBoolFunc, num_parallel_calls=2) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulGroupByReducerNotCheckpointable(self): stateful_key_func = self._statefulInt64Func key_func = lambda _: math_ops.cast(0, dtypes.int64) stateful_init_func = self._statefulBoolFunc init_func = lambda x: True stateful_reduce_func = lambda _, x: self._statefulBoolFunc(x) reduce_func = lambda _, x: True stateful_finalize_func = self._statefulBoolFunc finalize_func = lambda x: True test_cases = [ (stateful_key_func, init_func, reduce_func, finalize_func), (key_func, stateful_init_func, reduce_func, finalize_func), (key_func, init_func, stateful_reduce_func, finalize_func), (key_func, init_func, reduce_func, stateful_finalize_func), ] for key_func, init_func, reduce_func, finalize_func in test_cases: dataset = dataset_ops.Dataset.range(10) reducer = grouping.Reducer(init_func, reduce_func, finalize_func) dataset = 
dataset.apply(grouping.group_by_reducer(key_func, reducer)) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulGroupByWindowNotCheckpointable(self): stateful_key_func = self._statefulInt64Func key_func = lambda _: math_ops.cast(0, dtypes.int64) stateful_reduce_func = lambda _, x: self._statefulDatasetFunc(x) reduce_func = lambda _, x: x stateful_window_func = self._statefulInt64Func window_func = lambda x: math_ops.cast(0, dtypes.int64) test_cases = [ (stateful_key_func, reduce_func, window_func), (key_func, stateful_reduce_func, window_func), (key_func, reduce_func, stateful_window_func), ] for key_func_fn, reduce_func_fn, window_func in test_cases: dataset = dataset_ops.Dataset.range(10) dataset = dataset.apply( grouping.group_by_window( key_func_fn, reduce_func_fn, window_size_func=window_func)) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulMapAndBatchNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.map(self._statefulBoolFunc) dataset = dataset.batch(2) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulExperimentalParallelInterleaveNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.apply( interleave_ops.parallel_interleave(self._statefulDatasetFunc, 2)) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulScanNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) def stateful_scan(state, element): return state, self._statefulBoolFunc(element) dataset = dataset.apply(scan_ops.scan(0, stateful_scan)) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulTakeWhileNotCheckpointable(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.apply(take_while_ops.take_while(self._statefulBoolFunc)) self._assertNotCheckpointable(dataset) @combinations.generate(test_base.eager_only_combinations()) def testStatefulExternalPolicy(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") dataset = dataset_ops.Dataset.range(4) def fn(x): return x * x dataset = dataset.map( lambda x: script_ops.eager_py_func(fn, [x], dtypes.int64)) options = options_lib.Options() options.experimental_external_state_policy = ( options_lib.ExternalStatePolicy.WARN) dataset = dataset.with_options(options) iterator = iter(dataset) get_next = iterator.get_next checkpoint = trackable_utils.Checkpoint(iterator=iterator) self.assertEqual(0, get_next().numpy()) self.assertEqual(1, get_next().numpy()) save_path = checkpoint.save(checkpoint_prefix) self.assertEqual(4, get_next().numpy()) self.assertEqual(9, get_next().numpy()) checkpoint.restore(save_path).run_restore_ops() self.assertEqual(4, get_next().numpy()) self.assertEqual(9, get_next().numpy()) with self.assertRaises(errors.OutOfRangeError): get_next() if __name__ == "__main__": test.main()
CheckpointTest
python
walkccc__LeetCode
solutions/1324. Print Words Vertically/1324.py
{ "start": 0, "end": 332 }
class ____:
  def printVertically(self, s: str) -> list[str]:
    ans = []
    words = s.split()
    maxLength = max(len(word) for word in words)

    for i in range(maxLength):
      row = []
      for word in words:
        row.append(word[i] if i < len(word) else ' ')
      ans.append(''.join(row).rstrip())

    return ans
Solution
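A short usage sketch for the record above, assuming the class takes the target name Solution; the input is LeetCode 1324's first example.

# Assumes the class above (target name: Solution) is in scope.
sol = Solution()
# Characters are read column by column; trailing spaces are stripped per row.
print(sol.printVertically("HOW ARE YOU"))
# -> ['HAY', 'ORO', 'WEU']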
python
huggingface__transformers
src/transformers/models/glm46v/modeling_glm46v.py
{ "start": 24726, "end": 26056 }
class ____(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None
Glm46VCausalLMOutputWithPast
python
pandas-dev__pandas
setup.py
{ "start": 7780, "end": 8577 }
class ____(build_ext): """ Subclass build_ext to get clearer report if Cython is necessary. """ def check_cython_extensions(self, extensions) -> None: for ext in extensions: for src in ext.sources: if not os.path.exists(src): print(f"{ext.name}: -> [{ext.sources}]") raise Exception( f"""Cython-generated file '{src}' not found. Cython is required to compile pandas from a development branch. Please install Cython or download a release package of pandas. """ ) def build_extensions(self) -> None: self.check_cython_extensions(self.extensions) build_ext.build_extensions(self)
CheckingBuildExt
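A hedged sketch of how a build_ext subclass like the one above is typically wired into a setup script via cmdclass; the package name and extension list are placeholders, not pandas' real configuration.

# Hypothetical setup.py wiring; "example_pkg" and its sources are stand-ins.
from setuptools import Extension, setup

setup(
    name="example_pkg",
    ext_modules=[Extension("example_pkg._lib", sources=["example_pkg/_lib.c"])],
    cmdclass={"build_ext": CheckingBuildExt},  # class from the record above
)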
python
tornadoweb__tornado
tornado/ioloop.py
{ "start": 31102, "end": 32169 }
class ____: """An IOLoop timeout, a UNIX timestamp and a callback""" # Reduce memory overhead when there are lots of pending callbacks __slots__ = ["deadline", "callback", "tdeadline"] def __init__( self, deadline: float, callback: Callable[[], None], io_loop: IOLoop ) -> None: if not isinstance(deadline, numbers.Real): raise TypeError("Unsupported deadline %r" % deadline) self.deadline = deadline self.callback = callback self.tdeadline = ( deadline, next(io_loop._timeout_counter), ) # type: Tuple[float, int] # Comparison methods to sort by deadline, with object id as a tiebreaker # to guarantee a consistent ordering. The heapq module uses __le__ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons # use __lt__). def __lt__(self, other: "_Timeout") -> bool: return self.tdeadline < other.tdeadline def __le__(self, other: "_Timeout") -> bool: return self.tdeadline <= other.tdeadline
_Timeout
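A self-contained sketch, not using tornado itself, of the (deadline, counter) tiebreak that tdeadline implements: on a heap, equal deadlines pop in insertion order because the counter is strictly increasing.

import heapq
import itertools

counter = itertools.count()  # stand-in for IOLoop._timeout_counter
heap = []
for name, deadline in [("a", 5.0), ("b", 5.0), ("c", 1.0)]:
    # Same shape as _Timeout.tdeadline: (deadline, monotonically increasing tiebreaker)
    heapq.heappush(heap, ((deadline, next(counter)), name))

print([heapq.heappop(heap)[1] for _ in range(len(heap))])
# -> ['c', 'a', 'b']  (equal deadlines keep FIFO order)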
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/operators/test_rds.py
{ "start": 28529, "end": 29905 }
class ____:
    @classmethod
    def setup_class(cls):
        cls.dag = DAG(
            dag_id="test_dag",
            schedule=None,
            default_args={"owner": "airflow", "start_date": DEFAULT_DATE},
        )
        cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
        _patch_hook_get_connection(cls.hook)

    @classmethod
    def teardown_class(cls):
        del cls.dag
        del cls.hook

    @mock_aws
    def test_delete_event_subscription(self):
        _create_event_subscription(self.hook)

        delete_subscription_operator = RdsDeleteEventSubscriptionOperator(
            task_id="test_delete",
            subscription_name=SUBSCRIPTION_NAME,
            aws_conn_id=AWS_CONN,
            dag=self.dag,
        )
        _patch_hook_get_connection(delete_subscription_operator.hook)
        delete_subscription_operator.execute(None)

        with pytest.raises(self.hook.conn.exceptions.ClientError):
            self.hook.conn.describe_event_subscriptions(SubscriptionName=EXPORT_TASK_NAME)

    def test_template_fields(self):
        operator = RdsDeleteEventSubscriptionOperator(
            task_id="test_delete",
            subscription_name=SUBSCRIPTION_NAME,
            aws_conn_id=AWS_CONN,
            region_name=REGION,
        )
        validate_template_fields(operator)
TestRdsDeleteEventSubscriptionOperator
python
numba__numba
numba/cuda/cudadrv/devices.py
{ "start": 522, "end": 1705 }
class ____(object):
    def __getattr__(self, attr):
        # First time looking at "lst" attribute.
        if attr == "lst":
            # Device list is not initialized.
            # Query all CUDA devices.
            numdev = driver.get_device_count()
            gpus = [_DeviceContextManager(driver.get_device(devid))
                    for devid in range(numdev)]
            # Define "lst" to avoid re-initialization
            self.lst = gpus
            return gpus

        # Other attributes
        return super(_DeviceList, self).__getattr__(attr)

    def __getitem__(self, devnum):
        '''
        Returns the context manager for device *devnum*.
        '''
        return self.lst[devnum]

    def __str__(self):
        return ', '.join([str(d) for d in self.lst])

    def __iter__(self):
        return iter(self.lst)

    def __len__(self):
        return len(self.lst)

    @property
    def current(self):
        """Returns the active device or None if there's no active device
        """
        with driver.get_active_context() as ac:
            devnum = ac.devnum
            if devnum is not None:
                return self[devnum]
_DeviceList
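Running the class above needs a CUDA driver, so here is a toy illustration of the same lazy __getattr__ caching pattern; the class name and the fake probe are placeholders for driver.get_device_count().

# Toy illustration of the lazy-init pattern used by the device list above.
class LazyList(object):
    def __getattr__(self, attr):
        if attr == "lst":
            print("probing devices...")   # would call driver.get_device_count()
            self.lst = ["gpu0", "gpu1"]   # cached, so __getattr__ is not hit again
            return self.lst
        raise AttributeError(attr)

devices = LazyList()
print(len(devices.lst))   # prints "probing devices..." then 2
print(devices.lst[0])     # no second probe; the attribute is now cached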
python
gevent__gevent
src/greentest/3.14/test_httpservers.py
{ "start": 1494, "end": 1825 }
class ____(unittest.TestCase):
    def test_https_server_raises_runtime_error(self):
        with import_helper.isolated_modules():
            sys.modules['ssl'] = None
            certfile = certdata_file("keycert.pem")
            with self.assertRaises(RuntimeError):
                create_https_server(certfile)
TestSSLDisabled
python
openai__openai-python
src/openai/types/eval_create_response.py
{ "start": 2256, "end": 2635 }
class ____(ScoreModelGrader):
    __test__ = False
    pass_threshold: Optional[float] = None
    """The threshold for the score."""


TestingCriterion: TypeAlias = Union[
    LabelModelGrader,
    StringCheckGrader,
    TestingCriterionEvalGraderTextSimilarity,
    TestingCriterionEvalGraderPython,
    TestingCriterionEvalGraderScoreModel,
]
TestingCriterionEvalGraderScoreModel
python
numpy__numpy
numpy/f2py/tests/util.py
{ "start": 9761, "end": 12112 }
class ____:
    code = None
    sources = None
    options = []
    skip = []
    only = []
    suffix = ".f"
    module = None
    _has_c_compiler = None
    _has_f77_compiler = None
    _has_f90_compiler = None

    @property
    def module_name(self):
        cls = type(self)
        return f'_{cls.__module__.rsplit(".", 1)[-1]}_{cls.__name__}_ext_module'

    @classmethod
    def setup_class(cls):
        if sys.platform == "win32":
            pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)")
        F2PyTest._has_c_compiler = has_c_compiler()
        F2PyTest._has_f77_compiler = has_f77_compiler()
        F2PyTest._has_f90_compiler = has_f90_compiler()
        F2PyTest._has_fortran_compiler = has_fortran_compiler()

    def setup_method(self):
        if self.module is not None:
            return

        codes = self.sources or []
        if self.code:
            codes.append(self.suffix)

        needs_f77 = any(str(fn).endswith(".f") for fn in codes)
        needs_f90 = any(str(fn).endswith(".f90") for fn in codes)
        needs_pyf = any(str(fn).endswith(".pyf") for fn in codes)

        if needs_f77 and not self._has_f77_compiler:
            pytest.skip("No Fortran 77 compiler available")
        if needs_f90 and not self._has_f90_compiler:
            pytest.skip("No Fortran 90 compiler available")
        if needs_pyf and not self._has_fortran_compiler:
            pytest.skip("No Fortran compiler available")

        # Build the module
        if self.code is not None:
            self.module = build_code(
                self.code,
                options=self.options,
                skip=self.skip,
                only=self.only,
                suffix=self.suffix,
                module_name=self.module_name,
            )

        if self.sources is not None:
            self.module = build_module(
                self.sources,
                options=self.options,
                skip=self.skip,
                only=self.only,
                module_name=self.module_name,
            )


#
# Helper functions
#
def getpath(*a):
    # Package root
    d = Path(numpy.f2py.__file__).parent.resolve()
    return d.joinpath(*a)


@contextlib.contextmanager
def switchdir(path):
    curpath = Path.cwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(curpath)
F2PyTest
python
lepture__authlib
authlib/oidc/core/errors.py
{ "start": 2424, "end": 2572 }
class ____(OAuth2Error):
    """The OP does not support use of the request parameter."""

    error = "request_not_supported"
RequestNotSupportedError
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-weaviate/llama_index/vector_stores/weaviate/_exceptions.py
{ "start": 0, "end": 551 }
class ____(Exception):
    """Exception raised when no synchronous weaviate client was provided via the `weaviate_client` parameter."""

    def __init__(
        self,
        message="Sync method called without a synchronous WeaviateClient provided. Either switch to using async methods together with a provided WeaviateAsyncClient or provide a synchronous WeaviateClient via `weaviate_client` to the constructor of WeaviateVectorStore.",
    ) -> None:
        self.message = message
        super().__init__(self.message)
SyncClientNotProvidedError
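A hedged sketch of the kind of guard this exception is meant for; the function name is illustrative only and is not part of the llama-index API.

# Hypothetical guard; `require_sync_client` is an illustrative helper name.
def require_sync_client(weaviate_client):
    if weaviate_client is None:
        raise SyncClientNotProvidedError()  # exception from the record above
    return weaviate_client

try:
    require_sync_client(None)
except SyncClientNotProvidedError as err:
    print(err.message.split(".")[0])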
python
keras-team__keras
keras/src/layers/convolutional/conv_test.py
{ "start": 9082, "end": 26432 }
class ____(testing.TestCase): @parameterized.parameters( { "filters": 5, "kernel_size": 2, "strides": 1, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 1, "input_shape": (3, 5, 4), "output_shape": (3, 4, 5), }, { "filters": 6, "kernel_size": 2, "strides": 1, "padding": "same", "data_format": "channels_last", "dilation_rate": (2,), "groups": 2, "input_shape": (3, 4, 4), "output_shape": (3, 4, 6), }, { "filters": 6, "kernel_size": 2, "strides": 1, "padding": "causal", "data_format": "channels_last", "dilation_rate": (2,), "groups": 2, "input_shape": (3, 4, 4), "output_shape": (3, 4, 6), }, { "filters": 6, "kernel_size": 2, "strides": (2,), "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 2, "input_shape": (3, 5, 4), "output_shape": (3, 2, 6), }, ) @pytest.mark.requires_trainable_backend def test_conv1d_basic( self, filters, kernel_size, strides, padding, data_format, dilation_rate, groups, input_shape, output_shape, ): self.run_layer_test( layers.Conv1D, init_kwargs={ "filters": filters, "kernel_size": kernel_size, "strides": strides, "padding": padding, "data_format": data_format, "dilation_rate": dilation_rate, "groups": groups, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, ) @parameterized.parameters( { "filters": 5, "kernel_size": 2, "strides": 1, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 1, "input_shape": (3, 5, 5, 4), "output_shape": (3, 4, 4, 5), }, { "filters": 6, "kernel_size": 2, "strides": 1, "padding": "same", "data_format": "channels_last", "dilation_rate": (2, 2), "groups": 2, "input_shape": (3, 4, 4, 4), "output_shape": (3, 4, 4, 6), }, { "filters": 6, "kernel_size": (2, 2), "strides": (2, 1), "padding": "valid", "data_format": "channels_last", "dilation_rate": (1, 1), "groups": 2, "input_shape": (3, 5, 5, 4), "output_shape": (3, 2, 4, 6), }, ) @pytest.mark.requires_trainable_backend def test_conv2d_basic( self, filters, kernel_size, strides, padding, data_format, dilation_rate, groups, input_shape, output_shape, ): self.run_layer_test( layers.Conv2D, init_kwargs={ "filters": filters, "kernel_size": kernel_size, "strides": strides, "padding": padding, "data_format": data_format, "dilation_rate": dilation_rate, "groups": groups, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, ) @parameterized.parameters( { "filters": 5, "kernel_size": 2, "strides": 1, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 1, "input_shape": (3, 5, 5, 5, 4), "output_shape": (3, 4, 4, 4, 5), }, { "filters": 6, "kernel_size": 2, "strides": 1, "padding": "same", "data_format": "channels_last", "dilation_rate": (2, 2, 2), "groups": 2, "input_shape": (3, 4, 4, 4, 4), "output_shape": (3, 4, 4, 4, 6), }, { "filters": 6, "kernel_size": (2, 2, 3), "strides": (2, 1, 2), "padding": "valid", "data_format": "channels_last", "dilation_rate": (1, 1, 1), "groups": 2, "input_shape": (3, 5, 5, 5, 4), "output_shape": (3, 2, 4, 2, 6), }, ) @pytest.mark.requires_trainable_backend def test_conv3d_basic( self, filters, kernel_size, strides, padding, data_format, dilation_rate, groups, input_shape, output_shape, ): self.run_layer_test( layers.Conv3D, init_kwargs={ "filters": filters, "kernel_size": kernel_size, 
"strides": strides, "padding": padding, "data_format": data_format, "dilation_rate": dilation_rate, "groups": groups, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, ) def test_bad_init_args(self): # `filters` is not positive. with self.assertRaisesRegex( ValueError, "Invalid value for argument `filters`. Expected a " "strictly positive value. Received filters=0.", ): layers.Conv1D(filters=0, kernel_size=1) # `kernel_size` has 0. with self.assertRaisesRegex( ValueError, r"The `kernel_size` argument must be a tuple of \d+ " r"integers. Received kernel_size=\(1, 0\), including values \{0\} " r"that do not satisfy `value > 0`", ): layers.Conv2D(filters=2, kernel_size=(1, 0)) # `strides` has 0. with self.assertRaisesRegex( ValueError, r"The `strides` argument must be a tuple of \d+ " r"integers. Received strides=\(1, 0\), including values \{0\} that " r"do not satisfy `value > 0`", ): layers.Conv2D(filters=2, kernel_size=(2, 2), strides=(1, 0)) # `dilation_rate > 1` while `strides > 1`. with self.assertRaisesRegex( ValueError, r"`strides > 1` not supported in conjunction with " r"`dilation_rate > 1`. Received: strides=\(2, 2\) and " r"dilation_rate=\(2, 1\)", ): layers.Conv2D( filters=2, kernel_size=(2, 2), strides=2, dilation_rate=(2, 1) ) # `groups` is not strictly positive. with self.assertRaisesRegex( ValueError, "The number of groups must be a positive integer. " "Received: groups=0.", ): layers.Conv2D(filters=5, kernel_size=(2, 2), groups=0) # `filters` cannot be divided by `groups`. with self.assertRaisesRegex( ValueError, "The number of filters must be evenly divisible by the" " number of groups. Received: groups=2, filters=5.", ): layers.Conv2D(filters=5, kernel_size=(2, 2), groups=2) @parameterized.named_parameters( { "testcase_name": "conv1d_kernel_size3_strides1", "conv_cls": layers.Conv1D, "filters": 6, "kernel_size": 3, "strides": 1, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 1, "input_shape": (None, 5, 4), "output_shape": (None, 3, 6), }, { "testcase_name": "conv1d_kernel_size2_strides2", "conv_cls": layers.Conv1D, "filters": 6, "kernel_size": 2, "strides": 2, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 2, "input_shape": (None, 5, 4), "output_shape": (None, 2, 6), }, { "testcase_name": "conv2d_kernel_size3_strides1", "conv_cls": layers.Conv2D, "filters": 6, "kernel_size": 3, "strides": 1, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 1, "input_shape": (None, 5, 5, 4), "output_shape": (None, 3, 3, 6), }, { "testcase_name": "conv2d_kernel_size2_strides2", "conv_cls": layers.Conv2D, "filters": 6, "kernel_size": 2, "strides": 2, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 2, "input_shape": (None, 5, 5, 4), "output_shape": (None, 2, 2, 6), }, { "testcase_name": "conv3d_kernel_size3_strides1", "conv_cls": layers.Conv3D, "filters": 6, "kernel_size": 3, "strides": 1, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 1, "input_shape": (None, 5, 5, 5, 4), "output_shape": (None, 3, 3, 3, 6), }, { "testcase_name": "conv3d_kernel_size2_strides2", "conv_cls": layers.Conv3D, "filters": 6, "kernel_size": 2, "strides": 2, "padding": "valid", "data_format": "channels_last", "dilation_rate": 1, "groups": 2, "input_shape": (None, 5, 5, 5, 4), "output_shape": (None, 2, 2, 2, 6), }, ) 
@pytest.mark.requires_trainable_backend def test_enable_lora( self, conv_cls, filters, kernel_size, strides, padding, data_format, dilation_rate, groups, input_shape, output_shape, ): if conv_cls not in (layers.Conv1D, layers.Conv2D, layers.Conv3D): raise TypeError layer = conv_cls( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, groups=groups, ) layer.build(input_shape) layer.enable_lora(2) self.assertLen(layer.trainable_weights, 3) self.assertLen(layer.non_trainable_weights, 1) if backend.backend() == "torch": self.assertLen(layer.torch_params, 4) # Try eager call x = np.random.random((64,) + input_shape[1:]) y = np.random.random((64,) + output_shape[1:]) _ = layer(x[:2]) init_lora_a_kernel_value = layer.lora_kernel_a.numpy() init_lora_b_kernel_value = layer.lora_kernel_b.numpy() # Try calling fit() model = models.Sequential([layer]) model.compile(optimizer="sgd", loss="mse") model.fit(x, y) final_lora_a_kernel_value = layer.lora_kernel_a.numpy() final_lora_b_kernel_value = layer.lora_kernel_b.numpy() diff_a = np.max( np.abs(init_lora_a_kernel_value - final_lora_a_kernel_value) ) diff_b = np.max( np.abs(init_lora_b_kernel_value - final_lora_b_kernel_value) ) self.assertGreater(diff_a, 0.0) self.assertGreater(diff_b, 0.0) # Try saving and reloading the model temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras") model.save(temp_filepath) new_model = saving.load_model(temp_filepath) self.assertTrue(new_model.layers[0].lora_enabled) self.assertAllClose(model.predict(x), new_model.predict(x)) # Try saving and reloading the model's weights only temp_filepath = os.path.join( self.get_temp_dir(), "lora_model.weights.h5" ) model.save_weights(temp_filepath) # Load the file into a fresh, non-lora model new_model = models.Sequential( [ conv_cls( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, groups=groups, ) ] ) new_model.build(input_shape) new_model.load_weights(temp_filepath) self.assertAllClose(model.predict(x), new_model.predict(x)) # Try loading a normal checkpoint into a lora model new_model.save_weights(temp_filepath) model.load_weights(temp_filepath) self.assertAllClose(model.predict(x), new_model.predict(x)) @pytest.mark.requires_trainable_backend def test_lora_weight_name(self): class MyModel(models.Model): def __init__(self): super().__init__(name="mymodel") self.conv2d = layers.Conv2D(4, 3, name="conv2d") def build(self, input_shape): self.conv2d.build(input_shape) def call(self, x): return self.conv2d(x) model = MyModel() model.build((None, 5, 5, 4)) model.conv2d.enable_lora(2) self.assertEqual( model.conv2d.lora_kernel_a.path, "mymodel/conv2d/lora_kernel_a" ) @pytest.mark.requires_trainable_backend def test_enable_lora_with_alpha(self): # Create a `Conv2D` layer with a small kernel for simplicity. layer = layers.Conv2D(filters=3, kernel_size=(2, 2), padding="valid") # Use a fixed input shape: batch size 1, height=4, width=4, channels=3. input_shape = (1, 4, 4, 3) layer.build(input_shape) # Set the base kernel to known, deterministic values. base_kernel = np.linspace( 0, 1, num=np.prod(layer.kernel.shape), dtype=np.float32 ) base_kernel = base_kernel.reshape(layer.kernel.shape) layer.kernel.assign(base_kernel) # Enable LoRA with `rank`=2 and a custom `lora_alpha` value (e.g. 3.0). 
layer.enable_lora(rank=2, lora_alpha=3.0) self.assertEqual(layer.lora_rank, 2) self.assertEqual(layer.lora_alpha, 3.0) # For `Conv2D`, assume the LoRA weights have shapes: # `lora_kernel_a`: (kernel_height, kernel_width, in_channels, rank) # `lora_kernel_b`: (rank, out_channels) lora_a_shape = layer.lora_kernel_a.shape lora_b_shape = layer.lora_kernel_b.shape # Assign known constant values to LoRA weights. lora_a = np.full(lora_a_shape, 0.1, dtype=np.float32) lora_b = np.full(lora_b_shape, 0.2, dtype=np.float32) layer.lora_kernel_a.assign(lora_a) layer.lora_kernel_b.assign(lora_b) # Compute the expected delta. # Flatten `lora_kernel_a` to shape (-1, `rank`), # multiply with `lora_kernel_b`, # then reshape to the kernel's shape. scaling = 3.0 / 2 # `lora_alpha / lora_rank` delta = np.matmul(lora_a.reshape(-1, 2), lora_b) delta = delta.reshape(base_kernel.shape) expected_effective_kernel = base_kernel + scaling * delta # Compare the effective kernel computed via the property. actual_effective_kernel = ops.convert_to_numpy(layer.kernel) self.assertAllClose(actual_effective_kernel, expected_effective_kernel) @pytest.mark.requires_trainable_backend def test_lora_rank_argument(self): self.run_layer_test( layers.Conv2D, init_kwargs={ "filters": 5, "kernel_size": 3, "activation": "sigmoid", "data_format": "channels_last", "kernel_regularizer": "l2", "lora_rank": 2, }, input_shape=(2, 5, 5, 4), expected_output_shape=(2, 3, 3, 5), expected_num_trainable_weights=3, expected_num_non_trainable_weights=1, expected_num_seed_generators=0, expected_num_losses=2, # we have 2 regularizers. supports_masking=False, )
ConvBasicTest
python
kamyu104__LeetCode-Solutions
Python/smallest-number-in-infinite-set.py
{ "start": 124, "end": 797 }
class ____(object):
    def __init__(self):
        self.__n = 1
        self.__lookup = set()
        self.__min_heap = []

    def popSmallest(self):
        """
        :rtype: int
        """
        if self.__min_heap:
            result = heapq.heappop(self.__min_heap)
            self.__lookup.remove(result)
            return result
        result = self.__n
        self.__n += 1
        return result

    def addBack(self, num):
        """
        :type num: int
        :rtype: None
        """
        if num >= self.__n or num in self.__lookup:
            return
        self.__lookup.add(num)
        heapq.heappush(self.__min_heap, num)
SmallestInfiniteSet
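A usage sketch following LeetCode 2336's example, assuming the class above takes the target name SmallestInfiniteSet and that heapq is imported at module level (the class span omits the file's imports).

import heapq  # the class methods rely on a module-level heapq import

s = SmallestInfiniteSet()
s.addBack(2)                                  # 2 is still in the set, so this is a no-op
print([s.popSmallest() for _ in range(3)])    # -> [1, 2, 3]
s.addBack(1)                                  # 1 re-enters via the heap/lookup pair
print([s.popSmallest() for _ in range(3)])    # -> [1, 4, 5]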
python
dask__distributed
distributed/tests/test_worker_memory.py
{ "start": 25043, "end": 37193 }
class ____(UserDict): """A MutableMapping which implements distributed.spill.ManualEvictProto""" def __init__(self): super().__init__() self.evicted = set() @property def fast(self): # Any Sized of bool will do return self.keys() - self.evicted def evict(self): # Evict a random key k = next(iter(self.fast)) self.evicted.add(k) return 1 @gen_cluster( client=True, nthreads=[("", 1)], worker_kwargs={"memory_limit": "1 GB", "data": ManualEvictDict}, config={ "distributed.worker.memory.pause": False, "distributed.worker.memory.monitor-interval": "10ms", }, ) async def test_manual_evict_proto(c, s, a): """data is a third-party dict-like which respects the ManualEvictProto duck-type API. spill threshold is respected. """ a.monitor.get_process_memory = lambda: 701_000_000 if a.data else 0 assert memory_monitor_running(a) assert isinstance(a.data, ManualEvictDict) futures = await c.scatter({"x": None, "y": None, "z": None}) while a.data.evicted != {"x", "y", "z"}: await asyncio.sleep(0.01) async def leak_until_restart(c: Client, s: Scheduler) -> None: s.allowed_failures = 0 def leak(): L = [] while True: L.append(b"0" * 5_000_000) sleep(0.01) (addr,) = s.workers pid = (await c.run(os.getpid))[addr] future = c.submit(leak, key="leak") # Wait until the worker is restarted while len(s.workers) != 1 or set(s.workers) == {addr}: await asyncio.sleep(0.01) # Test that the process has been properly waited for and not just left there with pytest.raises(psutil.NoSuchProcess): psutil.Process(pid) with pytest.raises(KilledWorker): await future assert s.tasks["leak"].suspicious == 1 assert not any( (await c.run(lambda dask_worker: "leak" in dask_worker.state.tasks)).values() ) future.release() while "leak" in s.tasks: await asyncio.sleep(0.01) @pytest.mark.slow @gen_cluster( nthreads=[("", 1)], client=True, Worker=Nanny, worker_kwargs={"memory_limit": "400 MiB"}, config={"distributed.worker.memory.monitor-interval": "10ms"}, ) async def test_nanny_terminate(c, s, a): await leak_until_restart(c, s) @pytest.mark.slow @pytest.mark.parametrize( "ignore_sigterm", [ False, pytest.param(True, marks=pytest.mark.skipif(WINDOWS, reason="Needs SIGKILL")), ], ) @gen_cluster( nthreads=[("", 1)], client=True, Worker=Nanny, worker_kwargs={"memory_limit": "400 MiB"}, config={"distributed.worker.memory.monitor-interval": "10ms"}, ) async def test_disk_cleanup_on_terminate(c, s, a, ignore_sigterm): """Test that the spilled data on disk is cleaned up when the nanny kills the worker. Unlike in a regular worker shutdown, where the worker deletes its own spill directory, the cleanup in case of termination from the monitor is performed by the nanny. The worker may be slow to accept SIGTERM, for whatever reason. At the next iteration of the memory manager, if the process is still alive, the nanny sends SIGKILL. 
""" def do_ignore_sigterm(): # ignore the return value of signal.signal: it may not be serializable signal.signal(signal.SIGTERM, signal.SIG_IGN) if ignore_sigterm: await c.run(do_ignore_sigterm) fut = c.submit(inc, 1, key="myspill") await wait(fut) await c.run(lambda dask_worker: dask_worker.data.evict()) glob_out = await c.run( # zict <= 2.2.0: myspill # zict >= 2.3.0: myspill#0 lambda dask_worker: glob.glob(dask_worker.local_directory + "/**/myspill*") ) spill_fname = glob_out[a.worker_address][0] assert os.path.exists(spill_fname) await leak_until_restart(c, s) assert not os.path.exists(spill_fname) @gen_cluster( nthreads=[("", 1)], client=True, worker_kwargs={"memory_limit": "2 GiB"}, # ^ must be smaller than system memory limit, otherwise that will take precedence config={ "distributed.worker.memory.target": False, "distributed.worker.memory.spill": 0.5, "distributed.worker.memory.pause": 0.8, "distributed.worker.memory.monitor-interval": "10ms", }, ) async def test_pause_while_spilling(c, s, a): N_PAUSE = 3 N_TOTAL = 5 if a.memory_manager.memory_limit < parse_bytes("2 GiB"): pytest.fail( f"Set 2 GiB memory limit, got {format_bytes(a.memory_manager.memory_limit)}." ) def get_process_memory(): if len(a.data) < N_PAUSE: # Don't trigger spilling until after some tasks have completed return 0 elif a.data.fast and not a.data.slow: # Trigger spilling return parse_bytes("1.6 GiB") else: # Trigger pause, but only after we started spilling return parse_bytes("1.9 GiB") a.monitor.get_process_memory = get_process_memory class SlowSpill: def __init__(self): # We need to record the worker while we are inside a task; can't do it in # __reduce__ or it will pick up an arbitrary one among all running workers self.worker = distributed.get_worker() while len(self.worker.data.fast) >= N_PAUSE: sleep(0.01) def __reduce__(self): paused = self.worker.status == Status.paused if not paused: sleep(0.1) return bool, (paused,) futs = [c.submit(SlowSpill, pure=False) for _ in range(N_TOTAL)] await async_poll_for(lambda: len(a.data.slow) >= N_PAUSE, timeout=5, period=0) assert a.status == Status.paused # Worker should have become paused after the first `SlowSpill` was evicted, because # the spill to disk took longer than the memory monitor interval. assert len(a.data.fast) == 0 # With queuing enabled, after the 3rd `SlowSpill` has been created, there's a race # between the scheduler sending the worker a new task, and the memory monitor # running and pausing the worker. If the worker gets paused before the 4th task # lands, only 3 will be in memory. If after, the 4th will block on the semaphore # until one of the others is spilled. 
assert len(a.data.slow) in (N_PAUSE, N_PAUSE + 1) n_spilled_while_not_paused = sum(not paused for paused in a.data.slow.values()) assert n_spilled_while_not_paused == 1 @pytest.mark.slow @pytest.mark.skipif( condition=MACOS, reason="https://github.com/dask/distributed/issues/6233" ) @gen_cluster( nthreads=[("", 1)], client=True, worker_kwargs={"memory_limit": "10 GiB"}, config={ "distributed.worker.memory.target": False, "distributed.worker.memory.spill": 0.6, "distributed.worker.memory.pause": False, "distributed.worker.memory.monitor-interval": "10ms", }, ) async def test_release_evloop_while_spilling(c, s, a): N = 100 def get_process_memory(): if len(a.data) < N: # Don't trigger spilling until after all tasks have completed return 0 return 10 * 2**30 a.monitor.get_process_memory = get_process_memory class SlowSpill: def __reduce__(self): sleep(0.01) return SlowSpill, () futs = [c.submit(SlowSpill, pure=False) for _ in range(N)] while len(a.data) < N: await asyncio.sleep(0) ts = [monotonic()] while a.data.fast: await asyncio.sleep(0) ts.append(monotonic()) # 100 tasks taking 0.01s to pickle each = 2s to spill everything # (this is because everything is pickled twice: # https://github.com/dask/distributed/issues/1371). # We should regain control of the event loop every 0.5s. c = Counter(round(t1 - t0, 1) for t0, t1 in itertools.pairwise(ts)) # Depending on the implementation of WorkerMemoryMonitor._maybe_spill: # if it calls sleep(0) every 0.5s: # {0.0: 315, 0.5: 4} # if it calls sleep(0) after spilling each key: # {0.0: 233} # if it never yields: # {0.0: 359, 2.0: 1} # Make sure we remain in the first use case. assert 1 < sum(v for k, v in c.items() if 0.5 <= k <= 1.9), dict(c) assert not any(v for k, v in c.items() if k >= 2.0), dict(c) @pytest.mark.parametrize( "cls,name,value", [ (Worker, "memory_limit", 123e9), (Worker, "memory_target_fraction", 0.789), (Worker, "memory_spill_fraction", 0.789), (Worker, "memory_pause_fraction", 0.789), (Nanny, "memory_limit", 123e9), (Nanny, "memory_terminate_fraction", 0.789), ], ) @gen_cluster(nthreads=[]) async def test_deprecated_attributes(s, cls, name, value): async with cls(s.address) as a: with pytest.warns(FutureWarning, match=name): setattr(a, name, value) with pytest.warns(FutureWarning, match=name): assert getattr(a, name) == value assert getattr(a.memory_manager, name) == value @gen_cluster(nthreads=[("", 1)]) async def test_deprecated_memory_monitor_method_worker(s, a): with pytest.warns(FutureWarning, match="memory_monitor"): await a.memory_monitor() @gen_cluster(nthreads=[("", 1)], Worker=Nanny) async def test_deprecated_memory_monitor_method_nanny(s, a): with pytest.warns(FutureWarning, match="memory_monitor"): a.memory_monitor() @pytest.mark.parametrize( "name", ["memory_target_fraction", "memory_spill_fraction", "memory_pause_fraction"], ) @gen_cluster(nthreads=[]) async def test_deprecated_params(s, name): with pytest.warns(FutureWarning, match=name): async with Worker(s.address, **{name: 0.789}) as a: assert getattr(a.memory_manager, name) == 0.789 @gen_cluster(config={"distributed.worker.memory.monitor-interval": "10ms"}) async def test_pause_while_idle(s, a, b): sa = s.workers[a.address] assert a.address in s.idle assert sa in s.running a.monitor.get_process_memory = lambda: 2**40 await async_poll_for(lambda: sa.status == Status.paused, timeout=5) assert a.address not in s.idle assert sa not in s.running a.monitor.get_process_memory = lambda: 0 await async_poll_for(lambda: sa.status == Status.running, timeout=5) assert 
a.address in s.idle assert sa in s.running @gen_cluster(client=True, config={"distributed.worker.memory.monitor-interval": "10ms"}) async def test_pause_while_saturated(c, s, a, b): sa = s.workers[a.address] ev = Event() futs = c.map(lambda i, ev: ev.wait(), range(3), ev=ev, workers=[a.address]) await async_poll_for(lambda: len(a.state.tasks) == 3, timeout=5) assert sa in s.saturated assert sa in s.running a.monitor.get_process_memory = lambda: 2**40 await async_poll_for(lambda: sa.status == Status.paused, timeout=5) assert sa not in s.saturated assert sa not in s.running a.monitor.get_process_memory = lambda: 0 await async_poll_for(lambda: sa.status == Status.running, timeout=5) assert sa in s.saturated assert sa in s.running await ev.set() @gen_cluster(nthreads=[]) async def test_worker_log_memory_limit_too_high(s): async with Worker(s.address, memory_limit="1 PB") as worker: assert any( "Ignoring provided memory limit" in record.msg for record in worker.logs ) @gen_cluster( nthreads=[], config={ "distributed.worker.memory.target": False, "distributed.worker.memory.spill": 0.0001, "distributed.worker.memory.pause": False, "distributed.worker.memory.monitor-interval": "10ms", }, ) async def test_high_unmanaged_memory_warning(s): RateLimiterFilter.reset_timer("distributed.worker.memory") async with Worker(s.address) as worker: await asyncio.sleep(0.1) # Enough for 10 runs of the memory monitors assert ( sum("Unmanaged memory use is high" in record.msg for record in worker.logs) == 1 ) # Message is rate limited
ManualEvictDict
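A small demo of the duck-typed eviction mapping defined at the top of the record above; it assumes ManualEvictDict is available in scope and needs only collections.UserDict from the standard library.

# Assumes ManualEvictDict (from the record above) is defined in this module.
d = ManualEvictDict()
d["x"] = 1
d["y"] = 2
print(sorted(d.fast))        # -> ['x', 'y']  (nothing evicted yet)
d.evict()                    # marks one arbitrary key as spilled
print(len(d.fast), len(d))   # -> 1 2  (data is kept, only `fast` shrinks)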
python
getsentry__sentry-python
sentry_sdk/_queue.py
{ "start": 3634, "end": 11250 }
class ____: """Create a queue object with a given maximum size. If maxsize is <= 0, the queue size is infinite. """ def __init__(self, maxsize=0): self.maxsize = maxsize self._init(maxsize) # mutex must be held whenever the queue is mutating. All methods # that acquire mutex must release it before returning. mutex # is shared between the three conditions, so acquiring and # releasing the conditions also acquires and releases mutex. self.mutex = threading.RLock() # Notify not_empty whenever an item is added to the queue; a # thread waiting to get is notified then. self.not_empty = threading.Condition(self.mutex) # Notify not_full whenever an item is removed from the queue; # a thread waiting to put is notified then. self.not_full = threading.Condition(self.mutex) # Notify all_tasks_done whenever the number of unfinished tasks # drops to zero; thread waiting to join() is notified to resume self.all_tasks_done = threading.Condition(self.mutex) self.unfinished_tasks = 0 def task_done(self): """Indicate that a formerly enqueued task is complete. Used by Queue consumer threads. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises a ValueError if called more times than there were items placed in the queue. """ with self.all_tasks_done: unfinished = self.unfinished_tasks - 1 if unfinished <= 0: if unfinished < 0: raise ValueError("task_done() called too many times") self.all_tasks_done.notify_all() self.unfinished_tasks = unfinished def join(self): """Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. """ with self.all_tasks_done: while self.unfinished_tasks: self.all_tasks_done.wait() def qsize(self): """Return the approximate size of the queue (not reliable!).""" with self.mutex: return self._qsize() def empty(self): """Return True if the queue is empty, False otherwise (not reliable!). This method is likely to be removed at some point. Use qsize() == 0 as a direct substitute, but be aware that either approach risks a race condition where a queue can grow before the result of empty() or qsize() can be used. To create code that needs to wait for all queued tasks to be completed, the preferred technique is to use the join() method. """ with self.mutex: return not self._qsize() def full(self): """Return True if the queue is full, False otherwise (not reliable!). This method is likely to be removed at some point. Use qsize() >= n as a direct substitute, but be aware that either approach risks a race condition where a queue can shrink before the result of full() or qsize() can be used. """ with self.mutex: return 0 < self.maxsize <= self._qsize() def put(self, item, block=True, timeout=None): """Put an item into the queue. If optional args 'block' is true and 'timeout' is None (the default), block if necessary until a free slot is available. If 'timeout' is a non-negative number, it blocks at most 'timeout' seconds and raises the FullError exception if no free slot was available within that time. 
Otherwise ('block' is false), put an item on the queue if a free slot is immediately available, else raise the FullError exception ('timeout' is ignored in that case). """ with self.not_full: if self.maxsize > 0: if not block: if self._qsize() >= self.maxsize: raise FullError() elif timeout is None: while self._qsize() >= self.maxsize: self.not_full.wait() elif timeout < 0: raise ValueError("'timeout' must be a non-negative number") else: endtime = time() + timeout while self._qsize() >= self.maxsize: remaining = endtime - time() if remaining <= 0.0: raise FullError() self.not_full.wait(remaining) self._put(item) self.unfinished_tasks += 1 self.not_empty.notify() def get(self, block=True, timeout=None): """Remove and return an item from the queue. If optional args 'block' is true and 'timeout' is None (the default), block if necessary until an item is available. If 'timeout' is a non-negative number, it blocks at most 'timeout' seconds and raises the EmptyError exception if no item was available within that time. Otherwise ('block' is false), return an item if one is immediately available, else raise the EmptyError exception ('timeout' is ignored in that case). """ with self.not_empty: if not block: if not self._qsize(): raise EmptyError() elif timeout is None: while not self._qsize(): self.not_empty.wait() elif timeout < 0: raise ValueError("'timeout' must be a non-negative number") else: endtime = time() + timeout while not self._qsize(): remaining = endtime - time() if remaining <= 0.0: raise EmptyError() self.not_empty.wait(remaining) item = self._get() self.not_full.notify() return item def put_nowait(self, item): """Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the FullError exception. """ return self.put(item, block=False) def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the EmptyError exception. """ return self.get(block=False) # Override these methods to implement other queue organizations # (e.g. stack or priority queue). # These will only be called with appropriate locks held # Initialize the queue representation def _init(self, maxsize): self.queue = deque() # type: Any def _qsize(self): return len(self.queue) # Put a new item in the queue def _put(self, item): self.queue.append(item) # Get an item from the queue def _get(self): return self.queue.popleft()
Queue
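A minimal usage sketch (not part of the source file) for the queue above; it assumes the masked class is the Queue named as the target and that it is importable from the same module:

import threading

q = Queue(maxsize=2)

def worker():
    while True:
        item = q.get()           # blocks until an item is available
        try:
            if item is None:     # sentinel to stop the worker
                break
            print("processing", item)
        finally:
            q.task_done()        # one task_done() per get()

threading.Thread(target=worker, daemon=True).start()
for item in ("a", "b", None):
    q.put(item)                  # blocks while both slots are taken
q.join()                         # returns once every item was marked done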
python
pytorch__pytorch
torch/_dynamo/exc.py
{ "start": 2535, "end": 2597 }
class ____(RestartAnalysis): pass
SpeculationRestartAnalysis
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 585614, "end": 586044 }
class ____(sgqlc.types.Type): """An edge in a connection.""" __schema__ = github_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") """A cursor for use in pagination.""" node = sgqlc.types.Field("EnterpriseAdministratorInvitation", graphql_name="node") """The item at the end of the edge."""
EnterpriseAdministratorInvitationEdge
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_qt.py
{ "start": 45635, "end": 45982 }
class ____(backend_tools.ToolCopyToClipboardBase): def trigger(self, *args, **kwargs): pixmap = self.canvas.grab() QtWidgets.QApplication.instance().clipboard().setPixmap(pixmap) FigureManagerQT._toolbar2_class = NavigationToolbar2QT FigureManagerQT._toolmanager_toolbar_class = ToolbarQt @_Backend.export
ToolCopyToClipboardQT
python
bokeh__bokeh
src/bokeh/models/widgets/groups.py
{ "start": 2444, "end": 3039 }
class ____(AbstractGroup):
    ''' Abstract base class for groups with items rendered as check/radio boxes.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    inline = Bool(False, help="""
    Should items be arranged vertically (``False``) or horizontally in-line
    (``True``).
    """)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
ToggleInputGroup
python
encode__django-rest-framework
tests/test_validators.py
{ "start": 22427, "end": 22874 }
class ____(models.Model): state = models.CharField(max_length=100, default="new") position = models.IntegerField() something = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( name="unique_constraint_%(class)s", fields=("position", "something"), condition=models.Q(state="new"), ), ]
UniqueConstraintReadOnlyFieldModel
python
airbytehq__airbyte
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
{ "start": 10629, "end": 12646 }
class ____(IncrementalRkiCovidStream): """Docs: https://api.corona-zahlen.org/germany/germany/history/recovered/:days""" primary_key = None def __init__(self, config, **kwargs): super().__init__(**kwargs) self.start_date = config.get("start_date") @property def source_defined_cursor(self) -> bool: return False @property def cursor_field(self) -> str: return "date" def date_to_int(self, start_date) -> int: diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d") if diff.days <= 0: return 1 return diff.days def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]: if not current_stream_state: current_stream_state = {self.cursor_field: self.start_date} return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))} def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]: records = super().read_records(stream_state=stream_state, **kwargs) if stream_state: for record in records: if record[self.cursor_field] > stream_state.get(self.cursor_field): yield record else: yield from records def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: if response.json().get("data"): return response.json().get("data") return [{}] def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: if self.start_date: return "germany/history/recovered/" + str(self.date_to_int(self.start_date)) return "germany/history/recovered/" # source: germany/history/frozen-incidence/:days | Incremental
GermanHistoryRecovered
python
django__django
tests/decorators/test_clickjacking.py
{ "start": 1470, "end": 2654 }
class ____(SimpleTestCase): def test_wrapped_sync_function_is_not_coroutine_function(self): def sync_view(request): return HttpResponse() wrapped_view = xframe_options_sameorigin(sync_view) self.assertIs(iscoroutinefunction(wrapped_view), False) def test_wrapped_async_function_is_coroutine_function(self): async def async_view(request): return HttpResponse() wrapped_view = xframe_options_sameorigin(async_view) self.assertIs(iscoroutinefunction(wrapped_view), True) def test_decorator_sets_x_frame_options_to_sameorigin(self): @xframe_options_sameorigin def a_view(request): return HttpResponse() response = a_view(HttpRequest()) self.assertEqual(response.headers["X-Frame-Options"], "SAMEORIGIN") async def test_decorator_sets_x_frame_options_to_sameorigin_async_view(self): @xframe_options_sameorigin async def an_async_view(request): return HttpResponse() response = await an_async_view(HttpRequest()) self.assertEqual(response.headers["X-Frame-Options"], "SAMEORIGIN")
XFrameOptionsSameoriginTests
python
Netflix__metaflow
metaflow/lint.py
{ "start": 167, "end": 15335 }
class ____(object): def __init__(self): self.require_static_graph = True self.require_fundamentals = True self.require_acyclicity = True self.require_non_nested_foreach = False self._checks = [] def _decorate(self, setting, f): f.attrs.append(setting) return f def ensure_static_graph(self, f): return self._decorate("require_static_graph", f) def ensure_fundamentals(self, f): return self._decorate("require_fundamentals", f) def ensure_acyclicity(self, f): return self._decorate("require_acyclicity", f) def ensure_non_nested_foreach(self, f): return self._decorate("require_non_nested_foreach", f) def check(self, f): self._checks.append(f) f.attrs = [] return f def run_checks(self, graph, **kwargs): for check in self._checks: if any(getattr(self, attr) or kwargs.get(attr) for attr in check.attrs): check(graph) linter = FlowLinter() @linter.ensure_fundamentals @linter.check def check_reserved_words(graph): RESERVED = {"name", "next", "input", "index", "cmd"} msg = "Step name *%s* is a reserved word. Choose another name for the " "step." for node in graph: if node.name in RESERVED: raise LintWarn(msg % node.name, node.func_lineno, node.source_file) @linter.ensure_fundamentals @linter.check def check_basic_steps(graph): msg = "Add %s *%s* step in your flow." for prefix, node in (("a", "start"), ("an", "end")): if node not in graph: raise LintWarn(msg % (prefix, node)) @linter.ensure_static_graph @linter.check def check_that_end_is_end(graph): msg0 = "The *end* step should not have a step.next() transition. " "Just remove it." msg1 = ( "The *end* step should not be a join step (it gets an extra " "argument). Add a join step before it." ) node = graph["end"] if node.has_tail_next or node.invalid_tail_next: raise LintWarn(msg0, node.tail_next_lineno, node.source_file) if node.num_args > 1: raise LintWarn(msg1, node.tail_next_lineno, node.source_file) @linter.ensure_fundamentals @linter.check def check_step_names(graph): msg = ( "Step *{0.name}* has an invalid name. Only lowercase ascii " "characters, underscores, and digits are allowed." ) for node in graph: if re.search("[^a-z0-9_]", node.name) or node.name[0] == "_": raise LintWarn(msg.format(node), node.func_lineno, node.source_file) @linter.ensure_fundamentals @linter.check def check_num_args(graph): msg0 = ( "Step {0.name} has too many arguments. Normal steps take only " "'self' as an argument. Join steps take 'self' and 'inputs'." ) msg1 = ( "Step *{0.name}* is both a join step (it takes an extra argument) " "and a split step (it transitions to multiple steps). This is not " "allowed. Add a new step so that split and join become separate steps." ) msg2 = "Step *{0.name}* is missing the 'self' argument." for node in graph: if node.num_args > 2: raise LintWarn(msg0.format(node), node.func_lineno, node.source_file) elif node.num_args == 2 and node.type != "join": raise LintWarn(msg1.format(node), node.func_lineno, node.source_file) elif node.num_args == 0: raise LintWarn(msg2.format(node), node.func_lineno, node.source_file) @linter.ensure_static_graph @linter.check def check_static_transitions(graph): msg = ( "Step *{0.name}* is missing a self.next() transition to " "the next step. Add a self.next() as the last line in the " "function." ) for node in graph: if node.type != "end" and not node.has_tail_next: raise LintWarn(msg.format(node), node.func_lineno, node.source_file) @linter.ensure_static_graph @linter.check def check_valid_transitions(graph): msg = ( "Step *{0.name}* specifies an invalid self.next() transition. 
" "Make sure the self.next() expression matches with one of the " "supported transition types:\n" " • Linear: self.next(self.step_name)\n" " • Fan-out: self.next(self.step1, self.step2, ...)\n" " • Foreach: self.next(self.step, foreach='variable')\n" " • Switch: self.next({{\"key\": self.step, ...}}, condition='variable')\n\n" "For switch statements, keys must be string literals, numbers or config expressions " "(self.config.key_name), not variables." ) for node in graph: if node.type != "end" and node.has_tail_next and node.invalid_tail_next: raise LintWarn(msg.format(node), node.tail_next_lineno, node.source_file) @linter.ensure_static_graph @linter.check def check_unknown_transitions(graph): msg = ( "Step *{0.name}* specifies a self.next() transition to " "an unknown step, *{step}*." ) for node in graph: unknown = [n for n in node.out_funcs if n not in graph] if unknown: raise LintWarn( msg.format(node, step=unknown[0]), node.tail_next_lineno, node.source_file, ) @linter.ensure_acyclicity @linter.ensure_static_graph @linter.check def check_for_acyclicity(graph): msg = ( "There is a loop in your flow: *{0}*. Break the loop " "by fixing self.next() transitions." ) def check_path(node, seen): for n in node.out_funcs: if node.type == "split-switch" and n == node.name: continue if n in seen: path = "->".join(seen + [n]) raise LintWarn( msg.format(path), node.tail_next_lineno, node.source_file ) else: check_path(graph[n], seen + [n]) for start in graph: check_path(start, []) @linter.ensure_static_graph @linter.check def check_for_orphans(graph): msg = ( "Step *{0.name}* is unreachable from the start step. Add " "self.next({0.name}) in another step or remove *{0.name}*." ) seen = set(["start"]) def traverse(node): for n in node.out_funcs: if n not in seen: seen.add(n) traverse(graph[n]) traverse(graph["start"]) nodeset = frozenset(n.name for n in graph) orphans = nodeset - seen if orphans: orphan = graph[list(orphans)[0]] raise LintWarn(msg.format(orphan), orphan.func_lineno, orphan.source_file) @linter.ensure_static_graph @linter.check def check_split_join_balance(graph): msg0 = ( "Step *end* reached before a split started at step(s) *{roots}* " "were joined. Add a join step before *end*." ) msg1 = ( "Step *{0.name}* seems like a join step (it takes an extra input " "argument) but an incorrect number of steps (*{paths}*) lead to " "it. This join was expecting {num_roots} incoming paths, starting " "from split step(s) *{roots}*." ) msg2 = ( "Step *{0.name}* seems like a join step (it takes an extra input " "argument) but it is not preceded by a split. Ensure that there is " "a matching split for every join." ) msg3 = ( "Step *{0.name}* joins steps from unrelated splits. Ensure that " "there is a matching join for every split." 
) def traverse(node, split_stack): if node.type in ("start", "linear"): new_stack = split_stack elif node.type in ("split", "foreach"): new_stack = split_stack + [("split", node.out_funcs)] elif node.type == "split-switch": # For a switch, continue traversal down each path with the same stack for n in node.out_funcs: if node.type == "split-switch" and n == node.name: continue traverse(graph[n], split_stack) return elif node.type == "end": new_stack = split_stack if split_stack: _, split_roots = split_stack.pop() roots = ", ".join(split_roots) raise LintWarn( msg0.format(roots=roots), node.func_lineno, node.source_file ) elif node.type == "join": new_stack = split_stack if split_stack: _, split_roots = split_stack[-1] new_stack = split_stack[:-1] # Resolve each incoming function to its root branch from the split. resolved_branches = set( graph[n].split_branches[-1] for n in node.in_funcs ) # compares the set of resolved branches against the expected branches # from the split. if len(resolved_branches) != len( split_roots ) or resolved_branches ^ set(split_roots): paths = ", ".join(resolved_branches) roots = ", ".join(split_roots) raise LintWarn( msg1.format( node, paths=paths, num_roots=len(split_roots), roots=roots ), node.func_lineno, node.source_file, ) else: raise LintWarn(msg2.format(node), node.func_lineno, node.source_file) # check that incoming steps come from the same lineage # (no cross joins) def parents(n): if graph[n].type == "join": return tuple(graph[n].split_parents[:-1]) else: return tuple(graph[n].split_parents) if not all_equal(map(parents, node.in_funcs)): raise LintWarn(msg3.format(node), node.func_lineno, node.source_file) else: new_stack = split_stack for n in node.out_funcs: if node.type == "split-switch" and n == node.name: continue traverse(graph[n], new_stack) traverse(graph["start"], []) @linter.ensure_static_graph @linter.check def check_switch_splits(graph): """Check conditional split constraints""" msg0 = ( "Step *{0.name}* is a switch split but defines {num} transitions. " "Switch splits must define at least 2 transitions." ) msg1 = "Step *{0.name}* is a switch split but has no condition variable." msg2 = "Step *{0.name}* is a switch split but has no switch cases defined." for node in graph: if node.type == "split-switch": # Check at least 2 outputs if len(node.out_funcs) < 2: raise LintWarn( msg0.format(node, num=len(node.out_funcs)), node.func_lineno, node.source_file, ) # Check condition exists if not node.condition: raise LintWarn( msg1.format(node), node.func_lineno, node.source_file, ) # Check switch cases exist if not node.switch_cases: raise LintWarn( msg2.format(node), node.func_lineno, node.source_file, ) @linter.ensure_static_graph @linter.check def check_empty_foreaches(graph): msg = ( "Step *{0.name}* is a foreach split that has no children: " "it is followed immediately by a join step, *{join}*. Add " "at least one step between the split and the join." ) for node in graph: if node.type == "foreach": joins = [n for n in node.out_funcs if graph[n].type == "join"] if joins: raise LintWarn( msg.format(node, join=joins[0]), node.func_lineno, node.source_file ) @linter.ensure_static_graph @linter.check def check_parallel_step_after_next(graph): msg = ( "Step *{0.name}* is called as a parallel step with self.next(num_parallel=..) " "but does not have a @parallel decorator." 
) for node in graph: if node.parallel_foreach and not all( graph[out_node].parallel_step for out_node in node.out_funcs ): raise LintWarn(msg.format(node), node.func_lineno, node.source_file) @linter.ensure_static_graph @linter.check def check_join_followed_by_parallel_step(graph): msg = ( "An @parallel step should be followed by a join step. Step *{0}* is called " "after an @parallel step but is not a join step. Please add an extra `inputs` " "argument to the step." ) for node in graph: if node.parallel_step and not graph[node.out_funcs[0]].type == "join": raise LintWarn( msg.format(node.out_funcs[0]), node.func_lineno, node.source_file ) @linter.ensure_static_graph @linter.check def check_parallel_foreach_calls_parallel_step(graph): msg = ( "Step *{0.name}* has a @parallel decorator, but is not called " "with self.next(num_parallel=...) from step *{1.name}*." ) for node in graph: if node.parallel_step: for node2 in graph: if node2.out_funcs and node.name in node2.out_funcs: if not node2.parallel_foreach: raise LintWarn( msg.format(node, node2), node.func_lineno, node.source_file ) @linter.ensure_non_nested_foreach @linter.check def check_nested_foreach(graph): msg = ( "Nested foreaches are not allowed: Step *{0.name}* is a foreach " "split that is nested under another foreach split." ) for node in graph: if node.type == "foreach": if any(graph[p].type == "foreach" for p in node.split_parents): raise LintWarn(msg.format(node), node.func_lineno, node.source_file) @linter.ensure_static_graph @linter.check def check_ambiguous_joins(graph): for node in graph: if node.type == "join": problematic_parents = [ p_name for p_name in node.in_funcs if graph[p_name].type == "split-switch" ] if problematic_parents: msg = ( "A conditional path cannot lead directly to a join step.\n" "In your conditional step(s) {parents}, one or more of the possible paths transition directly to the join step {join_name}.\n" "As a workaround, please introduce an intermediate, unconditional step on that specific path before joining." ).format( parents=", ".join("*%s*" % p for p in problematic_parents), join_name="*%s*" % node.name, ) raise LintWarn(msg, node.func_lineno, node.source_file)
FlowLinter
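A hypothetical sketch of registering an extra check with the module-level linter above. The node attribute `doc` is invented for illustration; the decorator order matters because `check` must be applied first so it can initialize `attrs`:

@linter.ensure_static_graph
@linter.check
def check_step_docstrings(graph):
    msg = "Step *{0.name}* has no docstring. Add one describing the step."
    for node in graph:
        if not getattr(node, "doc", None):   # 'doc' is a hypothetical attribute
            raise LintWarn(msg.format(node), node.func_lineno, node.source_file)

# linter.run_checks(graph) then includes this check whenever
# require_static_graph (or the matching keyword argument) is enabled.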
python
getsentry__sentry
tests/sentry/workflow_engine/handlers/condition/test_reappeared_event_handler.py
{ "start": 381, "end": 1786 }
class ____(ConditionTestCase): condition = Condition.REAPPEARED_EVENT payload = {"id": ReappearedEventCondition.id} def test_dual_write(self) -> None: dcg = self.create_data_condition_group() dc = self.translate_to_data_condition(self.payload, dcg) assert dc.type == self.condition assert dc.comparison is True assert dc.condition_result is True assert dc.condition_group == dcg def test_json_schema(self) -> None: dc = self.create_data_condition( type=self.condition, comparison=True, condition_result=True, ) dc.comparison = False dc.save() dc.comparison = {"time": "asdf"} with pytest.raises(ValidationError): dc.save() dc.comparison = "hello" with pytest.raises(ValidationError): dc.save() def test(self) -> None: job = WorkflowEventData( event=self.group_event, group=self.group_event.group, has_reappeared=False, has_escalated=True, ) dc = self.create_data_condition( type=self.condition, comparison=True, condition_result=True, ) self.assert_passes(dc, job) job = replace(job, has_escalated=False) self.assert_does_not_pass(dc, job)
TestReappearedEventCondition
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/utils/field_validator.py
{ "start": 8888, "end": 22404 }
class ____(LoggingMixin): """ Validates correctness of request body according to specification. The specification can describe various type of fields including custom validation, and union of fields. This validator is to be reusable by various operators. See the EXAMPLE_VALIDATION_SPECIFICATION for some examples and explanations of how to create specification. :param validation_specs: dictionary describing validation specification :param api_version: Version of the api used (for example v1) """ def __init__(self, validation_specs: Sequence[dict], api_version: str) -> None: super().__init__() self._validation_specs = validation_specs self._api_version = api_version @staticmethod def _get_field_name_with_parent(field_name, parent): if parent: return parent + "." + field_name return field_name @staticmethod def _sanity_checks( children_validation_specs: dict, field_type: str, full_field_path: str, regexp: str, allow_empty: bool, custom_validation: Callable | None, value, ) -> None: if value is None and field_type != "union": raise GcpFieldValidationException( f"The required body field '{full_field_path}' is missing. Please add it." ) if regexp and field_type: raise GcpValidationSpecificationException( f"The validation specification entry '{full_field_path}' has both type and regexp. " "The regexp is only allowed without type (i.e. assume type is 'str' that can be " "validated with regexp)" ) if allow_empty is not None and field_type: raise GcpValidationSpecificationException( f"The validation specification entry '{full_field_path}' has both type and allow_empty. " "The allow_empty is only allowed without type (i.e. assume type is 'str' that can " "be validated with allow_empty)" ) if children_validation_specs and field_type not in COMPOSITE_FIELD_TYPES: raise GcpValidationSpecificationException( f"Nested fields are specified in field '{full_field_path}' of type '{field_type}'. " f"Nested fields are only allowed for fields of those types: ('{COMPOSITE_FIELD_TYPES}')." ) if custom_validation and field_type: raise GcpValidationSpecificationException( f"The validation specification field '{full_field_path}' has both type and " f"custom_validation. Custom validation is only allowed without type." ) @staticmethod def _validate_regexp(full_field_path: str, regexp: str, value: str) -> None: if not re.match(regexp, value): # Note matching of only the beginning as we assume the regexps all-or-nothing raise GcpFieldValidationException( f"The body field '{full_field_path}' of value '{value}' does not match the field " f"specification regexp: '{regexp}'." ) @staticmethod def _validate_is_empty(full_field_path: str, value: str) -> None: if not value: raise GcpFieldValidationException( f"The body field '{full_field_path}' can't be empty. Please provide a value." ) def _validate_dict(self, children_validation_specs: dict, full_field_path: str, value: dict) -> None: for child_validation_spec in children_validation_specs: self._validate_field( validation_spec=child_validation_spec, dictionary_to_validate=value, parent=full_field_path ) all_dict_keys = {spec["name"] for spec in children_validation_specs} for field_name in value: if field_name not in all_dict_keys: self.log.warning( "The field '%s' is in the body, but is not specified in the " "validation specification '%s'. " "This might be because you are using newer API version and " "new field names defined for that version. 
Then the warning " "can be safely ignored, or you might want to upgrade the operator" "to the version that supports the new API version.", self._get_field_name_with_parent(field_name, full_field_path), children_validation_specs, ) def _validate_union( self, children_validation_specs: dict, full_field_path: str, dictionary_to_validate: dict ) -> None: field_found = False found_field_name = None for child_validation_spec in children_validation_specs: # Forcing optional so that we do not have to type optional = True # in specification for all union fields new_field_found = self._validate_field( validation_spec=child_validation_spec, dictionary_to_validate=dictionary_to_validate, parent=full_field_path, force_optional=True, ) field_name = child_validation_spec["name"] if new_field_found and field_found: raise GcpFieldValidationException( f"The mutually exclusive fields '{field_name}' and '{found_field_name}' belonging to " f"the union '{full_field_path}' are both present. Please remove one" ) if new_field_found: field_found = True found_field_name = field_name if not field_found: self.log.warning( "There is no '%s' union defined in the body %s. " "Validation expected one of '%s' but could not find any. It's possible " "that you are using newer API version and there is another union variant " "defined for that version. Then the warning can be safely ignored, " "or you might want to upgrade the operator to the version that " "supports the new API version.", full_field_path, dictionary_to_validate, [field["name"] for field in children_validation_specs], ) def _validate_field(self, validation_spec, dictionary_to_validate, parent=None, force_optional=False): """ Validate if field is OK. :param validation_spec: specification of the field :param dictionary_to_validate: dictionary where the field should be present :param parent: full path of parent field :param force_optional: forces the field to be optional (all union fields have force_optional set to True) :return: True if the field is present """ field_name = validation_spec["name"] field_type = validation_spec.get("type") optional = validation_spec.get("optional") regexp = validation_spec.get("regexp") allow_empty = validation_spec.get("allow_empty") children_validation_specs = validation_spec.get("fields") required_api_version = validation_spec.get("api_version") custom_validation = validation_spec.get("custom_validation") full_field_path = self._get_field_name_with_parent(field_name=field_name, parent=parent) if required_api_version and required_api_version != self._api_version: self.log.debug( "Skipping validation of the field '%s' for API version '%s' " "as it is only valid for API version '%s'", field_name, self._api_version, required_api_version, ) return False value = dictionary_to_validate.get(field_name) if (optional or force_optional) and value is None: self.log.debug("The optional field '%s' is missing. 
That's perfectly OK.", full_field_path) return False # Certainly down from here the field is present (value is not None) # so we should only return True from now on self._sanity_checks( children_validation_specs=children_validation_specs, field_type=field_type, full_field_path=full_field_path, regexp=regexp, allow_empty=allow_empty, custom_validation=custom_validation, value=value, ) if allow_empty is False: self._validate_is_empty(full_field_path, value) if regexp: self._validate_regexp(full_field_path, regexp, value) elif field_type == "dict": if not isinstance(value, dict): raise GcpFieldValidationException( f"The field '{full_field_path}' should be of dictionary type according to " f"the specification '{validation_spec}' but it is '{value}'" ) if children_validation_specs is None: self.log.debug( "The dict field '%s' has no nested fields defined in the " "specification '%s'. That's perfectly ok - it's content will " "not be validated.", full_field_path, validation_spec, ) else: self._validate_dict(children_validation_specs, full_field_path, value) elif field_type == "union": if not children_validation_specs: raise GcpValidationSpecificationException( f"The union field '{full_field_path}' has no nested fields defined in " f"specification '{validation_spec}'. " "Unions should have at least one nested field defined." ) self._validate_union(children_validation_specs, full_field_path, dictionary_to_validate) elif field_type == "list": if not isinstance(value, list): raise GcpFieldValidationException( f"The field '{full_field_path}' should be of list type according to " f"the specification '{validation_spec}' but it is '{value}'" ) elif custom_validation: try: custom_validation(value) except Exception as e: raise GcpFieldValidationException( f"Error while validating custom field '{full_field_path}' " f"specified by '{validation_spec}': '{e}'" ) elif field_type is None: self.log.debug( "The type of field '%s' is not specified in '%s'. Not validating its content.", full_field_path, validation_spec, ) else: raise GcpValidationSpecificationException( f"The field '{full_field_path}' is of type '{field_type}' in " f"specification '{validation_spec}'.This type is unknown to validation!" ) return True def validate(self, body_to_validate: dict) -> None: """ Validate if the body (dictionary) follows specification that the validator was instantiated with. Raises ValidationSpecificationException or ValidationFieldException in case of problems with specification or the body not conforming to the specification respectively. :param body_to_validate: body that must follow the specification :return: None """ if body_to_validate is None: raise RuntimeError("The body to validate is `None`. 
Please provide a dictionary to validate.") try: for validation_spec in self._validation_specs: self._validate_field(validation_spec=validation_spec, dictionary_to_validate=body_to_validate) except GcpFieldValidationException as e: raise GcpFieldValidationException( f"There was an error when validating: body '{body_to_validate}': '{e}'" ) all_field_names = { spec["name"] for spec in self._validation_specs if spec.get("type") != "union" and spec.get("api_version") != self._api_version } all_union_fields = [spec for spec in self._validation_specs if spec.get("type") == "union"] for union_field in all_union_fields: all_field_names.update( nested_union_spec["name"] for nested_union_spec in union_field["fields"] if nested_union_spec.get("type") != "union" and nested_union_spec.get("api_version") != self._api_version ) for field_name in body_to_validate: if field_name not in all_field_names: self.log.warning( "The field '%s' is in the body, but is not specified in the " "validation specification '%s'. " "This might be because you are using newer API version and " "new field names defined for that version. Then the warning " "can be safely ignored, or you might want to upgrade the operator" "to the version that supports the new API version.", field_name, self._validation_specs, )
GcpBodyFieldValidator
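A minimal, hypothetical sketch of the validator above in use. The spec keys (name, type, regexp, optional) follow what `_validate_field` reads; the field names and regexp themselves are invented:

specs = [
    dict(name="name", regexp=r"^[a-z][a-z0-9-]*$"),
    dict(name="labels", type="dict", optional=True),
]
validator = GcpBodyFieldValidator(specs, api_version="v1")

validator.validate({"name": "my-resource", "labels": {"env": "dev"}})  # passes
validator.validate({"name": "Bad Name!"})  # raises GcpFieldValidationException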
python
google__jax
jax/_src/hijax.py
{ "start": 1333, "end": 2239 }
class ____(core.Primitive): def __init__(self, name): self.name = name ad.primitive_jvps[self] = self.jvp ad.primitive_transposes[self] = self.transpose def is_high(self, *avals, **params) -> bool: return True def is_effectful(self, params) -> bool: # type: ignore return False # default immutable # type checking and forward type propagation def abstract_eval(self, *arg_avals, **params): assert False, "must override" # lowering implements the primitive in terms of lojax inputs/outputs/ops def to_lojax(self, *lotypes_wrapped_in_hitypes, **params): assert False, f"must override for {self}" # autodiff interface def jvp(self, primals, tangents, **params): assert False, "must override" # transposition is only required if the primitive is linear in some inputs def transpose(self, *args, **params): assert False, "must override"
HiPrimitive
python
getsentry__sentry
tests/sentry/web/frontend/test_auth_close.py
{ "start": 232, "end": 1217 }
class ____(TestCase): @cached_property def path(self) -> str: return reverse("sentry-auth-close") def test_renders_auth_close_view(self) -> None: self.login_as(self.user) resp = self.client.get(self.path) assert resp.status_code == 200 self.assertTemplateUsed("sentry/auth_close.html") def test_renders_auth_close_view_again(self) -> None: resp = self.client.get(reverse("sentry-login") + "?next=" + urlquote("/auth/close/")) self.login_as(self.user) assert resp.status_code == 200 self.assertTemplateUsed("sentry/auth_close.html") def test_context_anonymous_user(self) -> None: """page should redirect for unauthenticated user""" resp = self.client.get(self.path) assert resp.status_code == 302 def test_context_logged_in(self) -> None: self.login_as(self.user) resp = self.client.get(self.path) assert resp.context["logged_in"]
AuthClose
python
getsentry__sentry
tests/sentry/incidents/test_logic.py
{ "start": 10801, "end": 12696 }
class ____(TestCase, BaseIncidentsTest): def test_projects(self) -> None: incident = self.create_incident( date_started=self.now - timedelta(minutes=5), query="", projects=[self.project] ) self.create_event(self.now - timedelta(minutes=1)) self.create_event(self.now - timedelta(minutes=2), user={"id": 123}) self.create_event(self.now - timedelta(minutes=2), user={"id": 123}) self.create_event(self.now - timedelta(minutes=2), user={"id": 124}) snuba_query = incident.alert_rule.snuba_query params = GetMetricIssueAggregatesParams( snuba_query=snuba_query, date_started=incident.date_started, current_end_date=incident.current_end_date, organization=incident.organization, project_ids=[self.project.id], ) assert get_metric_issue_aggregates(params) == {"count": 4} def test_is_unresolved_query(self) -> None: incident = self.create_incident( date_started=self.now - timedelta(minutes=5), query="is:unresolved", projects=[self.project], ) event = self.create_event(self.now - timedelta(minutes=1)) self.create_event(self.now - timedelta(minutes=2)) self.create_event(self.now - timedelta(minutes=3)) self.create_event(self.now - timedelta(minutes=4)) event.group.update(status=GroupStatus.UNRESOLVED) snuba_query = incident.alert_rule.snuba_query params = GetMetricIssueAggregatesParams( snuba_query=snuba_query, date_started=incident.date_started, current_end_date=incident.current_end_date, organization=incident.organization, project_ids=[self.project.id], ) assert get_metric_issue_aggregates(params) == {"count": 4}
GetMetricIssueAggregatesTest
python
apache__airflow
providers/fab/tests/unit/fab/auth_manager/schemas/test_role_and_permission_schema.py
{ "start": 1176, "end": 2477 }
class ____: @pytest.fixture(scope="class") def role(self, minimal_app_for_auth_api): with minimal_app_for_auth_api.app_context(): yield create_role( minimal_app_for_auth_api, name="Test", permissions=[ (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION), ], ) delete_role(minimal_app_for_auth_api, "Test") @pytest.fixture(autouse=True) def _set_attrs(self, minimal_app_for_auth_api, role): self.app = minimal_app_for_auth_api self.role = role def test_serialize(self): deserialized_role = role_schema.dump(self.role) assert deserialized_role == { "name": "Test", "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}], } def test_deserialize(self): role = { "name": "Test", "actions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}], } role_obj = role_schema.load(role) assert role_obj == { "name": "Test", "permissions": [{"resource": {"name": "Connections"}, "action": {"name": "can_create"}}], }
TestRoleCollectionItemSchema
python
psf__black
tests/data/cases/preview_long_strings__regression.py
{ "start": 14207, "end": 16202 }
class ____(xxxx.xxxxxxxxxxxxx): def xxxxxxx_xxxxxx(xxxx): assert xxxxxxx_xxxx in [ x.xxxxx.xxxxxx.xxxxx.xxxxxx, x.xxxxx.xxxxxx.xxxxx.xxxx, ], ("xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx" % xxxxxxx_xxxx) value.__dict__[ key ] = "test" # set some Thrift field to non-None in the struct aa bb cc dd ee RE_ONE_BACKSLASH = { "asdf_hjkl_jkl": re.compile( r"(?<!([0-9]\ ))(?<=(^|\ ))([A-Z]+(\ )?|[0-9](\ )|[a-z](\ )){4,7}([A-Z]|[0-9]|[a-z])($|\b)(?!(\ ?([0-9]\ )|(\.)))" ), } RE_TWO_BACKSLASHES = { "asdf_hjkl_jkl": re.compile( r"(?<!([0-9]\ ))(?<=(^|\ ))([A-Z]+(\ )?|[0-9](\ )|[a-z](\\ )){4,7}([A-Z]|[0-9]|[a-z])($|\b)(?!(\ ?([0-9]\ )|(\.)))" ), } RE_THREE_BACKSLASHES = { "asdf_hjkl_jkl": re.compile( r"(?<!([0-9]\ ))(?<=(^|\ ))([A-Z]+(\ )?|[0-9](\ )|[a-z](\\\ )){4,7}([A-Z]|[0-9]|[a-z])($|\b)(?!(\ ?([0-9]\ )|(\.)))" ), } # We do NOT split on f-string expressions. print(f"Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam. {[f'{i}' for i in range(10)]}") x = f"This is a long string which contains an f-expr that should not split {{{[i for i in range(5)]}}}." # The parens should NOT be removed in this case. ( "my very long string that should get formatted if I'm careful to make sure it goes" " over 88 characters which it has now" ) # The parens should NOT be removed in this case. ( "my very long string that should get formatted if I'm careful to make sure it goes over 88 characters which" " it has now" ) # The parens should NOT be removed in this case. ( "my very long string" " that should get formatted" " if I'm careful to make sure" " it goes over 88 characters which" " it has now" ) def _legacy_listen_examples(): text += ( " \"listen for the '%(event_name)s' event\"\n" "\n # ... (event logic logic logic) ...\n" % { "since": since, } )
xxxxxxxxxxxxxxxxxxxxx
python
pytorch__pytorch
test/inductor/test_provenance_tracing.py
{ "start": 36609, "end": 36921 }
class ____(TestCase): device = "cpu" copy_tests( ProvenanceTracingKernelContextTemplate, TestProvenanceTracingKernelContextCpu, "cpu", ) @unittest.skipIf(sys.platform == "darwin", "No CUDA on MacOS") @unittest.skipIf(not torch.cuda.is_available(), "No CUDA")
TestProvenanceTracingKernelContextCpu
python
ray-project__ray
rllib/core/models/torch/heads.py
{ "start": 3104, "end": 6137 }
class ____(TorchModel): """An MLPHead that implements floating log stds for Gaussian distributions.""" def __init__(self, config: FreeLogStdMLPHeadConfig) -> None: super().__init__(config) assert config.output_dims[0] % 2 == 0, "output_dims must be even for free std!" self._half_output_dim = config.output_dims[0] // 2 self.net = TorchMLP( input_dim=config.input_dims[0], hidden_layer_dims=config.hidden_layer_dims, hidden_layer_activation=config.hidden_layer_activation, hidden_layer_use_layernorm=config.hidden_layer_use_layernorm, hidden_layer_use_bias=config.hidden_layer_use_bias, hidden_layer_weights_initializer=config.hidden_layer_weights_initializer, hidden_layer_weights_initializer_config=( config.hidden_layer_weights_initializer_config ), hidden_layer_bias_initializer=config.hidden_layer_bias_initializer, hidden_layer_bias_initializer_config=( config.hidden_layer_bias_initializer_config ), output_dim=self._half_output_dim, output_activation=config.output_layer_activation, output_use_bias=config.output_layer_use_bias, output_weights_initializer=config.output_layer_weights_initializer, output_weights_initializer_config=( config.output_layer_weights_initializer_config ), output_bias_initializer=config.output_layer_bias_initializer, output_bias_initializer_config=config.output_layer_bias_initializer_config, ) self.log_std = torch.nn.Parameter( torch.as_tensor([0.0] * self._half_output_dim) ) # If log standard deviations should be clipped. This should be only true for # policy heads. Value heads should never be clipped. self.clip_log_std = config.clip_log_std # The clipping parameter for the log standard deviation. self.log_std_clip_param = torch.Tensor( [config.log_std_clip_param], device=self.log_std.device ) # Register a buffer to handle device mapping. self.register_buffer("log_std_clip_param_const", self.log_std_clip_param) @override(Model) def _forward(self, inputs: torch.Tensor, **kwargs) -> torch.Tensor: # Compute the mean first, then append the log_std. mean = self.net(inputs) # If log standard deviation should be clipped. if self.clip_log_std: # Clip the log standard deviation to avoid running into too small # deviations that factually collapses the policy. log_std = torch.clamp( self.log_std, -self.log_std_clip_param_const, self.log_std_clip_param_const, ) else: log_std = self.log_std return torch.cat([mean, log_std.unsqueeze(0).repeat([len(mean), 1])], axis=1)
TorchFreeLogStdMLPHead
python
weaviate__weaviate-python-client
weaviate/collections/classes/config_vectors.py
{ "start": 76417, "end": 77372 }
class ____: @staticmethod def update( *, name: Optional[str] = None, vector_index_config: Union[ _VectorIndexConfigHNSWUpdate, _VectorIndexConfigFlatUpdate, _VectorIndexConfigDynamicUpdate, ], ) -> _VectorConfigUpdate: """Update the vector index configuration of a vector. This is the only update operation allowed currently. If you wish to change the vectorization configuration itself, you will have to recreate the collection with the new configuration. Args: name: The name of the vector. vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Reconfigure.VectorIndex` to create a vector index configuration. `None` by default """ return _VectorConfigUpdate( name=name or "default", vector_index_config=vector_index_config, )
_VectorsUpdate
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/io_ops/reader_ops_test.py
{ "start": 24857, "end": 26500 }
class ____(test.TestCase): @test_util.run_deprecated_v1 def testNoDeadlockFromQueue(self): """Tests that reading does not block main execution threads.""" config = config_pb2.ConfigProto( inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) with self.session(config=config) as sess: thread_data_t = collections.namedtuple("thread_data_t", ["thread", "queue", "output"]) thread_data = [] # Create different readers, each with its own queue. for i in range(3): queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=()) reader = io_ops.TextLineReader() _, line = reader.read(queue) output = [] t = threading.Thread( target=AsyncReaderTest._RunSessionAndSave, args=(sess, [line], output)) thread_data.append(thread_data_t(t, queue, output)) # Start all readers. They are all blocked waiting for queue entries. self.evaluate(variables.global_variables_initializer()) for d in thread_data: d.thread.start() # Unblock the readers. for i, d in enumerate(reversed(thread_data)): fname = os.path.join(self.get_temp_dir(), "deadlock.%s.txt" % i) with open(fname, "wb") as f: f.write(("file-%s" % i).encode()) self.evaluate(d.queue.enqueue_many([[fname]])) d.thread.join() self.assertEqual([[("file-%s" % i).encode()]], d.output) @staticmethod def _RunSessionAndSave(sess, args, output): output.append(sess.run(args)) if __name__ == "__main__": test.main()
AsyncReaderTest
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/triggers/test_glue_crawler.py
{ "start": 1189, "end": 2603 }
class ____: def test_serialization(self): crawler_name = "test_crawler" poll_interval = 10 aws_conn_id = "aws_default" trigger = GlueCrawlerCompleteTrigger( crawler_name=crawler_name, waiter_delay=poll_interval, aws_conn_id=aws_conn_id, ) classpath, kwargs = trigger.serialize() assert classpath == "airflow.providers.amazon.aws.triggers.glue_crawler.GlueCrawlerCompleteTrigger" assert kwargs == { "crawler_name": "test_crawler", "waiter_delay": 10, "waiter_max_attempts": 1500, "aws_conn_id": "aws_default", } @pytest.mark.asyncio @mock.patch.object(GlueCrawlerHook, "get_waiter") @mock.patch.object(GlueCrawlerHook, "get_async_conn") async def test_run_success(self, mock_async_conn, mock_get_waiter): mock_async_conn.__aenter__.return_value = mock.MagicMock() mock_get_waiter().wait = AsyncMock() crawler_name = "test_crawler" trigger = GlueCrawlerCompleteTrigger(crawler_name=crawler_name) generator = trigger.run() response = await generator.asend(None) assert response == TriggerEvent({"status": "success", "value": None}) assert_expected_waiter_type(mock_get_waiter, "crawler_ready") mock_get_waiter().wait.assert_called_once()
TestGlueCrawlerCompleteTrigger
python
getlogbook__logbook
tests/test_queues.py
{ "start": 5827, "end": 11347 }
class ____: def __init__(self, message, queue): self.message = message self.queue = queue def __call__(self): from logbook.queues import MultiProcessingHandler with MultiProcessingHandler(self.queue): logbook.warning(self.message) @require_module("multiprocessing") def test_subscriber_group(): if os.getenv("APPVEYOR") == "True": pytest.skip("Test hangs on AppVeyor CI") from multiprocessing import Process, Queue from logbook.queues import MultiProcessingSubscriber, SubscriberGroup a_queue = Queue(-1) b_queue = Queue(-1) subscriber = SubscriberGroup( [MultiProcessingSubscriber(a_queue), MultiProcessingSubscriber(b_queue)] ) for _ in range(10): p1 = Process(target=SubscriberGroupSendBack("foo", a_queue)) p2 = Process(target=SubscriberGroupSendBack("bar", b_queue)) p1.start() p2.start() p1.join() p2.join() messages = [subscriber.recv().message for i in (1, 2)] assert sorted(messages) == ["bar", "foo"] @require_module("redis") def test_redis_handler(): import redis from logbook.queues import RedisHandler KEY = f"redis-{os.getpid()}" FIELDS = ["message", "host"] r = redis.Redis(REDIS_HOST, REDIS_PORT, decode_responses=True) redis_handler = RedisHandler(key=KEY, level=logbook.INFO, bubble=True) # We don't want output for the tests, so we can wrap everything in a # NullHandler null_handler = logbook.NullHandler() # Check default values with null_handler.applicationbound(): with redis_handler: logbook.info(LETTERS) key, message = r.blpop(KEY) # Are all the fields in the record? for field in FIELDS: assert message.find(field) assert key == KEY assert message.find(LETTERS) # Change the key of the handler and check on redis KEY = f"test_another_key-{os.getpid()}" redis_handler.key = KEY with null_handler.applicationbound(): with redis_handler: logbook.info(LETTERS) key, message = r.blpop(KEY) assert key == KEY # Check that extra fields are added if specified when creating the handler FIELDS.append("type") extra_fields = {"type": "test"} del redis_handler redis_handler = RedisHandler( key=KEY, level=logbook.INFO, extra_fields=extra_fields, bubble=True ) with null_handler.applicationbound(): with redis_handler: logbook.info(LETTERS) key, message = r.blpop(KEY) for field in FIELDS: assert message.find(field) assert message.find("test") # And finally, check that fields are correctly added if appended to the # log message FIELDS.append("more_info") with null_handler.applicationbound(): with redis_handler: logbook.info(LETTERS, more_info="This works") key, message = r.blpop(KEY) for field in FIELDS: assert message.find(field) assert message.find("This works") @require_module("redis") def test_redis_handler_lpush(): """ Test if lpush stores messages in the right order new items should be first on list """ import redis from logbook.queues import RedisHandler null_handler = logbook.NullHandler() KEY = "lpushed-" redis_handler = RedisHandler( key=KEY, push_method="lpush", level=logbook.INFO, bubble=True ) with null_handler.applicationbound(): with redis_handler: logbook.info("old item") logbook.info("new item") time.sleep(1.5) r = redis.Redis(REDIS_HOST, REDIS_PORT, decode_responses=True) logs = r.lrange(KEY, 0, -1) assert logs assert "new item" in logs[0] r.delete(KEY) @require_module("redis") def test_redis_handler_rpush(): """ Test if rpush stores messages in the right order old items should be first on list """ import redis from logbook.queues import RedisHandler null_handler = logbook.NullHandler() KEY = "rpushed-" + str(os.getpid()) redis_handler = RedisHandler( key=KEY, push_method="rpush", 
level=logbook.INFO, bubble=True ) with null_handler.applicationbound(): with redis_handler: logbook.info("old item") logbook.info("new item") time.sleep(1.5) r = redis.Redis(REDIS_HOST, REDIS_PORT, decode_responses=True) logs = r.lrange(KEY, 0, -1) assert logs assert "old item" in logs[0] r.delete(KEY) @pytest.fixture def handlers(handlers_subscriber): return handlers_subscriber[0] @pytest.fixture def subscriber(handlers_subscriber): return handlers_subscriber[1] @pytest.fixture def handlers_subscriber(multi): from logbook.queues import ZeroMQHandler, ZeroMQSubscriber # Get an unused port tempsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) tempsock.bind(("127.0.0.1", 0)) host, unused_port = tempsock.getsockname() tempsock.close() # Retrieve the ZeroMQ handler and subscriber uri = "tcp://%s:%d" % (host, unused_port) # noqa: UP031 if multi: handlers = [ZeroMQHandler(uri, multi=True) for _ in range(3)] else: handlers = [ZeroMQHandler(uri)] subscriber = ZeroMQSubscriber(uri, multi=multi) # Enough time to start time.sleep(0.1) return handlers, subscriber @pytest.fixture(params=[True, False], ids=["multi", "nomulti"]) def multi(request): return request.param
SubscriberGroupSendBack
python
walkccc__LeetCode
solutions/1516. Move Sub-Tree of N-Ary Tree/1516.py
{ "start": 0, "end": 883 }
class ____: def moveSubTree(self, root: 'Node', p: 'Node', q: 'Node') -> 'Node': if p in q.children: return root # Create a dummy Node for the case when root == p dummy = Node(None, [root]) # Get each parent of p and q pParent = self._getParent(dummy, p) qParent = self._getParent(p, q) # Get p's original index in p's parent pIndex = pParent.children.index(p) pParent.children.pop(pIndex) q.children.append(p) # If q is in the p's subtree, qParent != None if qParent: qParent.children.remove(q) pParent.children.insert(pIndex, q) return dummy.children[0] def _getParent(self, root: 'Node', target: 'Node') -> Optional['Node']: for child in root.children: if child == target: return root res = self._getParent(child, target) if res: return res return None
Solution
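A hypothetical driver for the solution above. The Node class and the typing import mirror what the snippet's annotations and the `Node(None, [root])` call assume, but they are not part of the original file:

from typing import Optional   # needed if the snippet's annotations are evaluated

class Node:
    def __init__(self, val=None, children=None):
        self.val = val
        self.children = children if children is not None else []

root = Node(1, [Node(2), Node(3)])
p, q = root.children            # p has value 2, q has value 3
new_root = Solution().moveSubTree(root, p, q)
# p is now the only child of q, and new_root is still the node with value 1.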
python
gevent__gevent
src/gevent/_fileobjectposix.py
{ "start": 737, "end": 7303 }
class ____(RawIOBase): # Internal, undocumented, class. All that's documented is that this # is a IOBase object. Constructor is private. # Note that RawIOBase has a __del__ method that calls # self.close(). (In C implementations like CPython, this is # the type's tp_dealloc slot; prior to Python 3, the object doesn't # appear to have a __del__ method, even though it functionally does) _read_watcher = None _write_watcher = None _closed = False _seekable = None _keep_alive = None # An object that needs to live as long as we do. def __init__(self, fileno, open_descriptor, closefd=True): RawIOBase.__init__(self) self._closefd = closefd self._fileno = fileno self.name = fileno self.mode = open_descriptor.fileio_mode make_nonblocking(fileno) readable = open_descriptor.can_read writable = open_descriptor.can_write self.hub = get_hub() io_watcher = self.hub.loop.io try: if readable: self._read_watcher = io_watcher(fileno, 1) if writable: self._write_watcher = io_watcher(fileno, 2) except: # If anything goes wrong, it's important to go ahead and # close these watchers *now*, especially under libuv, so # that they don't get eventually reclaimed by the garbage # collector at some random time, thanks to the C level # slot (even though we don't seem to have any actual references # at the Python level). Previously, if we didn't close now, # that random close in the future would cause issues if we had duplicated # the fileno (if a wrapping with statement had closed an open fileobject, # for example) # test__fileobject can show a failure if this doesn't happen # TRAVIS=true GEVENT_LOOP=libuv python -m gevent.tests.test__fileobject \ # TestFileObjectPosix.test_seek TestFileObjectThread.test_bufsize_0 self.close() raise def isatty(self): # TODO: Couldn't we just subclass FileIO? f = FileIO(self._fileno, 'r', False) try: return f.isatty() finally: f.close() def readable(self): return self._read_watcher is not None def writable(self): return self._write_watcher is not None def seekable(self): if self._seekable is None: try: os.lseek(self._fileno, 0, os.SEEK_CUR) except OSError: self._seekable = False else: self._seekable = True return self._seekable def fileno(self): return self._fileno @property def closed(self): return self._closed def __destroy_events(self): read_event = self._read_watcher write_event = self._write_watcher hub = self.hub self.hub = self._read_watcher = self._write_watcher = None hub.cancel_waits_close_and_then( (read_event, write_event), cancel_wait_ex, self.__finish_close, self._closefd, self._fileno, self._keep_alive ) def close(self): if self._closed: return self.flush() # TODO: Can we use 'read_event is not None and write_event is # not None' to mean _closed? self._closed = True try: self.__destroy_events() finally: self._fileno = self._keep_alive = None @staticmethod def __finish_close(closefd, fileno, keep_alive): try: if closefd: _close(fileno) finally: if hasattr(keep_alive, 'close'): keep_alive.close() # RawIOBase provides a 'read' method that will call readall() if # the `size` was missing or -1 and otherwise call readinto(). We # want to take advantage of this to avoid single byte reads when # possible. This is highlighted by a bug in BufferedIOReader that # calls read() in a loop when its readall() method is invoked; # this was fixed in Python 3.3, but we still need our workaround for 2.7. 
See # https://github.com/gevent/gevent/issues/675) def __read(self, n): if self._read_watcher is None: raise UnsupportedOperation('read') while 1: try: return _read(self._fileno, n) except OSError as ex: if ex.args[0] not in ignored_errors: raise wait_on_watcher(self._read_watcher, None, None, self.hub) def readall(self): ret = BytesIO() while True: try: data = self.__read(DEFAULT_BUFFER_SIZE) except cancel_wait_ex: # We were closed while reading. A buffered reader # just returns what it has handy at that point, # so we do to. data = None if not data: break ret.write(data) return ret.getvalue() def readinto(self, b): data = self.__read(len(b)) n = len(data) try: b[:n] = data except TypeError as err: import array if not isinstance(b, array.array): raise err b[:n] = array.array(b'b', data) return n def write(self, b): if self._write_watcher is None: raise UnsupportedOperation('write') while True: try: return _write(self._fileno, b) except OSError as ex: if ex.args[0] not in ignored_errors: raise wait_on_watcher(self._write_watcher, None, None, self.hub) def seek(self, offset, whence=0): try: return os.lseek(self._fileno, offset, whence) except IOError: # pylint:disable=try-except-raise raise except OSError as ex: # pylint:disable=duplicate-except # Python 2.x # make sure on Python 2.x we raise an IOError # as documented for RawIOBase. # See https://github.com/gevent/gevent/issues/1323 reraise(IOError, IOError(*ex.args), sys.exc_info()[2]) def __repr__(self): return "<%s at 0x%x fileno=%s mode=%r>" % ( type(self).__name__, id(self), self._fileno, self.mode )
GreenFileDescriptorIO
python
encode__starlette
starlette/responses.py
{ "start": 5868, "end": 5935 }
class ____(Response): media_type = "text/plain"
PlainTextResponse
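A small, hypothetical endpoint showing the response class above in use:

from starlette.applications import Starlette
from starlette.routing import Route

async def health(request):
    return PlainTextResponse("ok")          # served with media type text/plain

app = Starlette(routes=[Route("/health", health)])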
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 167193, "end": 168157 }
class ____(sgqlc.types.Input): """Autogenerated input type of ConvertProjectCardNoteToIssue""" __schema__ = github_schema __field_names__ = ("project_card_id", "repository_id", "title", "body", "client_mutation_id") project_card_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectCardId") """The ProjectCard ID to convert.""" repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId") """The ID of the repository to create the issue in.""" title = sgqlc.types.Field(String, graphql_name="title") """The title of the newly created issue. Defaults to the card's note text. """ body = sgqlc.types.Field(String, graphql_name="body") """The body of the newly created issue.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
ConvertProjectCardNoteToIssueInput
python
encode__django-rest-framework
tests/test_validation.py
{ "start": 2786, "end": 3071 }
class ____(serializers.Serializer): foo = serializers.CharField() def validate_foo(self, attrs, source): raise serializers.ValidationError("foo invalid") def validate(self, attrs): raise serializers.ValidationError("serializer invalid")
ValidationSerializer
python
davidhalter__jedi
jedi/inference/imports.py
{ "start": 1321, "end": 5225 }
class ____: def __init__(self): self._name_cache = {} def add(self, string_names, value_set): if string_names is not None: self._name_cache[string_names] = value_set def get(self, string_names): return self._name_cache.get(string_names) # This memoization is needed, because otherwise we will infinitely loop on # certain imports. @inference_state_method_cache(default=NO_VALUES) def infer_import(context, tree_name): module_context = context.get_root_context() from_import_name, import_path, level, values = \ _prepare_infer_import(module_context, tree_name) if values: if from_import_name is not None: values = values.py__getattribute__( from_import_name, name_context=context, analysis_errors=False ) if not values: path = import_path + (from_import_name,) importer = Importer(context.inference_state, path, module_context, level) values = importer.follow() debug.dbg('after import: %s', values) return values @inference_state_method_cache(default=[]) def goto_import(context, tree_name): module_context = context.get_root_context() from_import_name, import_path, level, values = \ _prepare_infer_import(module_context, tree_name) if not values: return [] if from_import_name is not None: names = unite([ c.goto( from_import_name, name_context=context, analysis_errors=False ) for c in values ]) # Avoid recursion on the same names. if names and not any(n.tree_name is tree_name for n in names): return names path = import_path + (from_import_name,) importer = Importer(context.inference_state, path, module_context, level) values = importer.follow() return set(s.name for s in values) def _prepare_infer_import(module_context, tree_name): import_node = search_ancestor(tree_name, 'import_name', 'import_from') import_path = import_node.get_path_for_name(tree_name) from_import_name = None try: from_names = import_node.get_from_names() except AttributeError: # Is an import_name pass else: if len(from_names) + 1 == len(import_path): # We have to fetch the from_names part first and then check # if from_names exists in the modules. from_import_name = import_path[-1] import_path = from_names importer = Importer(module_context.inference_state, tuple(import_path), module_context, import_node.level) return from_import_name, tuple(import_path), import_node.level, importer.follow() def _add_error(value, name, message): if hasattr(name, 'parent') and value is not None: analysis.add(value, 'import-error', name, message) else: debug.warning('ImportError without origin: ' + message) def _level_to_base_import_path(project_path, directory, level): """ In case the level is outside of the currently known package (something like import .....foo), we can still try our best to help the user for completions. """ for i in range(level - 1): old = directory directory = os.path.dirname(directory) if old == directory: return None, None d = directory level_import_paths = [] # Now that we are on the level that the user wants to be, calculate the # import path for it. while True: if d == project_path: return level_import_paths, d dir_name = os.path.basename(d) if dir_name: level_import_paths.insert(0, dir_name) d = os.path.dirname(d) else: return None, directory
ModuleCache
python
GoogleCloudPlatform__python-docs-samples
pubsub/streaming-analytics/PubSubToGCS.py
{ "start": 2477, "end": 5156 }
class ____(DoFn):
    def __init__(self, output_path):
        self.output_path = output_path

    def process(self, key_value, window=DoFn.WindowParam):
        """Write messages in a batch to Google Cloud Storage."""

        ts_format = "%H:%M"
        window_start = window.start.to_utc_datetime().strftime(ts_format)
        window_end = window.end.to_utc_datetime().strftime(ts_format)
        shard_id, batch = key_value
        filename = "-".join([self.output_path, window_start, window_end, str(shard_id)])

        with io.gcsio.GcsIO().open(filename=filename, mode="w") as f:
            for message_body, publish_time in batch:
                f.write(f"{message_body},{publish_time}\n".encode())


def run(input_topic, output_path, window_size=1.0, num_shards=5, pipeline_args=None):
    # Set `save_main_session` to True so DoFns can access globally imported modules.
    pipeline_options = PipelineOptions(
        pipeline_args, streaming=True, save_main_session=True
    )

    with Pipeline(options=pipeline_options) as pipeline:
        (
            pipeline
            # Because `timestamp_attribute` is unspecified in `ReadFromPubSub`, Beam
            # binds the publish time returned by the Pub/Sub server for each message
            # to the element's timestamp parameter, accessible via `DoFn.TimestampParam`.
            # https://beam.apache.org/releases/pydoc/current/apache_beam.io.gcp.pubsub.html#apache_beam.io.gcp.pubsub.ReadFromPubSub
            | "Read from Pub/Sub" >> io.ReadFromPubSub(topic=input_topic)
            | "Window into" >> GroupMessagesByFixedWindows(window_size, num_shards)
            | "Write to GCS" >> ParDo(WriteToGCS(output_path))
        )


if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_topic",
        help="The Cloud Pub/Sub topic to read from."
        '"projects/<PROJECT_ID>/topics/<TOPIC_ID>".',
    )
    parser.add_argument(
        "--window_size",
        type=float,
        default=1.0,
        help="Output file's window size in minutes.",
    )
    parser.add_argument(
        "--output_path",
        help="Path of the output GCS file including the prefix.",
    )
    parser.add_argument(
        "--num_shards",
        type=int,
        default=5,
        help="Number of shards to use when writing windowed elements to GCS.",
    )
    known_args, pipeline_args = parser.parse_known_args()

    run(
        known_args.input_topic,
        known_args.output_path,
        known_args.window_size,
        known_args.num_shards,
        pipeline_args,
    )
# [END pubsub_to_gcs]
WriteToGCS
python
ansible__ansible
lib/ansible/utils/collection_loader/_collection_finder.py
{ "start": 28309, "end": 29019 }
class ____(_AnsibleCollectionPkgLoaderBase):
    def _validate_args(self):
        super(_AnsibleCollectionNSPkgLoader, self)._validate_args()
        if len(self._split_name) != 2:
            raise ImportError('this loader can only load collections namespace packages, not {0}'.format(self._fullname))

    def _validate_final(self):
        # special-case the `ansible` namespace, since `ansible.builtin` is magical
        if not self._subpackage_search_paths and self._package_to_load != 'ansible':
            raise ImportError('no {0} found in {1}'.format(self._package_to_load, self._candidate_paths))


# handles locating the actual collection package and associated metadata
_AnsibleCollectionNSPkgLoader
python
jupyterlab__jupyterlab
jupyterlab/handlers/announcements.py
{ "start": 4545, "end": 5948 }
class ____(APIHandler):
    """Check for Updates API handler.

    Args:
        update_check: The class checking for a new version
    """

    def initialize(
        self,
        update_checker: Optional[CheckForUpdate] = None,
    ) -> None:
        super().initialize()
        self.update_checker = (
            NeverCheckForUpdate(__version__) if update_checker is None else update_checker
        )
        self.update_checker.logger = self.log

    @web.authenticated
    async def get(self):
        """Check for updates.

        Response:
            {
                "notification": Optional[Notification]
            }
        """
        notification = None
        out = await self.update_checker()
        if out:
            message, link = (out, ()) if isinstance(out, str) else out
            now = datetime.now(tz=timezone.utc).timestamp() * 1000.0
            hash_ = hashlib.sha1(message.encode()).hexdigest()  # noqa: S324
            notification = Notification(
                message=message,
                createdAt=now,
                modifiedAt=now,
                type="info",
                link=link,
                options={"data": {"id": hash_, "tags": ["update"]}},
            )
        self.set_status(200)
        self.finish(
            json.dumps({"notification": None if notification is None else asdict(notification)})
        )
CheckForUpdateHandler
python
pypa__pip
src/pip/_vendor/truststore/_windows.py
{ "start": 937, "end": 1135 }
class ____(Structure):
    _fields_ = (
        ("cUsageIdentifier", DWORD),
        ("rgpszUsageIdentifier", POINTER(LPSTR)),
    )


PCERT_ENHKEY_USAGE = POINTER(CERT_ENHKEY_USAGE)
CERT_ENHKEY_USAGE
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_bic.py
{ "start": 1433, "end": 3257 }
class ____(ColumnMapExpectation):
    """Expect column values to be valid BIC (Business Identifier Code)."""

    map_metric = "column_values.valid_bic"
    success_keys = ("mostly",)
    default_kwarg_values = {}
    library_metadata = {
        "maturity": "experimental",
        "tags": [
            "hackathon-22",
            "experimental",
            "typed-entities",
        ],
        "contributors": ["@szecsip", "@mkopec87"],
        "requirements": ["schwifty"],
    }
    examples = [
        {
            "data": {
                "all_valid": [
                    "GENODEM1GLS",
                    "BOHIUS77",
                    "OTPVHUHB",
                    "CAXBMNUB",
                    "SVBMMNUB",
                ],
                "some_other": [
                    "GENODEM1GLS",
                    "BOHIUS77",
                    "OTPVHUHB",
                    "CAXBMNUB",
                    "SVBXXXXX",
                ],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "all_valid"},
                    "out": {
                        "success": True,
                    },
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "some_other", "mostly": 1},
                    "out": {
                        "success": False,
                    },
                },
            ],
        }
    ]


if __name__ == "__main__":
    ExpectColumnValuesToBeValidBic().print_diagnostic_checklist()
ExpectColumnValuesToBeValidBic
python
tensorflow__tensorflow
tensorflow/python/ops/nn_test.py
{ "start": 42310, "end": 44178 }
class ____(test_lib.TestCase):

  def testValues(self):
    np_values = np.array(
        [np.linspace(-7.0, 0.0, 100),
         np.linspace(0.0, 7.0, 100)],
        dtype=np.float32)
    tf_values = constant_op.constant(np_values)
    actual_tf_outputs = nn_impl.swish(tf_values)
    expected_tf_outputs = tf_values * math_ops.sigmoid(tf_values)
    actual_outputs, expected_outputs = self.evaluate(
        [actual_tf_outputs, expected_tf_outputs])
    self.assertAllClose(actual_outputs, expected_outputs)

  def testValuesWithBeta(self):
    np_values = np.array(
        [np.linspace(-7.0, 0.0, 100),
         np.linspace(0.0, 7.0, 100)],
        dtype=np.float32)
    tf_values = constant_op.constant(np_values)
    actual_tf_outputs = nn_impl.swish(tf_values, beta=0.5)
    expected_tf_outputs = tf_values * math_ops.sigmoid(0.5 * tf_values)
    actual_outputs, expected_outputs = self.evaluate(
        [actual_tf_outputs, expected_tf_outputs])
    self.assertAllClose(actual_outputs, expected_outputs)

  def testGradients(self):
    shape = [5, 3, 4]
    sigma = 5
    input_values = np.random.randn(*shape) * sigma
    x_tf = constant_op.constant(input_values)
    with self.cached_session():

      def f(x):  # pylint: disable=invalid-name
        return nn_impl.swish(x)

      theoretical, numerical = gradient_checker_v2.compute_gradient(
          f, [x_tf])
      self.assertAllClose(theoretical, numerical)

  def testGradientsWithBeta(self):
    shape = [5, 3, 4]
    sigma = 5
    input_values = np.random.randn(*shape) * sigma
    x_tf = constant_op.constant(input_values)
    with self.cached_session():

      def f(x):  # pylint: disable=invalid-name
        return nn_impl.swish(x, beta=0.5)

      theoretical, numerical = gradient_checker_v2.compute_gradient(
          f, [x_tf])
      self.assertAllClose(theoretical, numerical)
SwishTest
python
pyqtgraph__pyqtgraph
pyqtgraph/parametertree/parameterTypes/basetypes.py
{ "start": 14497, "end": 15594 }
class ____(Parameter):
    """
    Group parameters are used mainly as a generic parent item that holds (and
    groups!) a set of child parameters. It also provides a simple mechanism for
    displaying a button or combo that can be used to add new parameters to the
    group. To enable this, the group must be initialized with the 'addText' option
    (the text will be displayed on a button which, when clicked, will cause
    addNew() to be called). If the 'addList' option is specified as well, then a
    dropdown-list of addable items will be displayed instead of a button.
    """
    itemClass = GroupParameterItem

    sigAddNew = QtCore.Signal(object, object)  # self, type

    def addNew(self, typ=None):
        """
        This method is called when the user has requested to add a new item to the group.
        By default, it emits ``sigAddNew(self, typ)``.
        """
        self.sigAddNew.emit(self, typ)

    def setAddList(self, vals):
        """Change the list of options available for the user to add to the group."""
        self.setOpts(addList=vals)
GroupParameter
python
Netflix__metaflow
metaflow/client/core.py
{ "start": 83425, "end": 86935 }
class ____(object):
    """
    Entry point to all objects in the Metaflow universe.

    This object can be used to list all the flows present either through the
    explicit property or by iterating over this object.

    Attributes
    ----------
    flows : List[Flow]
        Returns the list of all `Flow` objects known to this metadata provider.
        Note that only flows present in the current namespace will be returned.
        A `Flow` is present in a namespace if it has at least one run in the
        namespace.
    """

    def __init__(self, _current_metadata: Optional[str] = None):
        if _current_metadata:
            provider, info = _metadata(_current_metadata)
            self.metadata = provider
            if info:
                self.metadata.INFO = info
        else:
            if current_metadata is False:
                default_metadata()
            self.metadata = current_metadata

    @property
    def flows(self) -> List[Flow]:
        """
        Returns a list of all the flows present.

        Only flows present in the set namespace are returned. A flow is present
        in a namespace if it has at least one run that is in the namespace.

        Returns
        -------
        List[Flow]
            List of all flows present.
        """
        return list(self)

    def __iter__(self) -> Iterator[Flow]:
        """
        Iterator over all flows present.

        Only flows present in the set namespace are returned. A flow is present
        in a namespace if it has at least one run that is in the namespace.

        Yields
        -------
        Flow
            A Flow present in the Metaflow universe.
        """
        # We do not filter on namespace in the request because
        # filtering on namespace on flows means finding at least one
        # run in this namespace. This is_in_namespace() function
        # does this properly in this case
        all_flows = self.metadata.get_object("root", "flow", None, None)
        all_flows = all_flows if all_flows else []
        for flow in all_flows:
            try:
                v = Flow(_object=flow, _metaflow=self)
                yield v
            except MetaflowNamespaceMismatch:
                continue

    def __str__(self) -> str:
        return "Metaflow()"

    def __getitem__(self, name: str) -> Flow:
        """
        Returns a specific flow by name.

        The flow will only be returned if it is present in the current namespace.

        Parameters
        ----------
        name : str
            Name of the Flow

        Returns
        -------
        Flow
            Flow with the given name.
        """
        return Flow(name, _metaflow=self)


def _metadata(ms: str) -> Tuple[Optional["MetadataProvider"], Optional[str]]:
    infos = ms.split("@", 1)
    types = [m.TYPE for m in METADATA_PROVIDERS]
    if infos[0] in types:
        provider = [m for m in METADATA_PROVIDERS if m.TYPE == infos[0]][0]
        if len(infos) > 1:
            return provider, infos[1]
        return provider, None
    # Deduce from ms; if starts with http, use service or else use local
    if ms.startswith("http"):
        metadata_type = "service"
    else:
        metadata_type = "local"
    res = [m for m in METADATA_PROVIDERS if m.TYPE == metadata_type]
    if not res:
        return None, None
    return res[0], ms


_CLASSES["flow"] = Flow
_CLASSES["run"] = Run
_CLASSES["step"] = Step
_CLASSES["task"] = Task
_CLASSES["artifact"] = DataArtifact
Metaflow
python
automl__auto-sklearn
autosklearn/pipeline/components/feature_preprocessing/truncatedSVD.py
{ "start": 377, "end": 2229 }
class ____(AutoSklearnPreprocessingAlgorithm):
    def __init__(self, target_dim, random_state=None):
        self.target_dim = target_dim
        self.random_state = random_state
        self.preprocessor = None

    def fit(self, X, Y):
        import sklearn.decomposition

        self.target_dim = int(self.target_dim)
        target_dim = min(self.target_dim, X.shape[1] - 1)
        self.preprocessor = sklearn.decomposition.TruncatedSVD(
            target_dim, algorithm="randomized", random_state=self.random_state
        )
        # TODO: remove when migrating to sklearn 0.16
        # Circumvents a bug in sklearn
        # https://github.com/scikit-learn/scikit-learn/commit/f08b8c8e52663167819f242f605db39f3b5a6d0c
        # X = X.astype(np.float64)
        self.preprocessor.fit(X, Y)

        return self

    def transform(self, X):
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)

    @staticmethod
    def get_properties(dataset_properties=None):
        return {
            "shortname": "TSVD",
            "name": "Truncated Singular Value Decomposition",
            "handles_regression": True,
            "handles_classification": True,
            "handles_multiclass": True,
            "handles_multilabel": True,
            "handles_multioutput": True,
            "is_deterministic": True,
            "input": (SPARSE, UNSIGNED_DATA),
            "output": (DENSE, INPUT),
        }

    @staticmethod
    def get_hyperparameter_search_space(
        feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
    ):
        target_dim = UniformIntegerHyperparameter(
            "target_dim", 10, 256, default_value=128
        )
        cs = ConfigurationSpace()
        cs.add_hyperparameter(target_dim)
        return cs
TruncatedSVD