Column schema

  column       type            values / lengths
  -----------  --------------  ------------------------------
  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 - 201
  class_span   dict            { "start": ..., "end": ... }
  source       stringlengths   21 - 2.38M
  target       stringlengths   1 - 96
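Each row below follows this schema: `source` holds a Python class definition whose name has been replaced with `____`, `target` holds the original class name, and `class_span` records where that class sits in the file at `path` within `repo` (start and end offsets). As a minimal sketch of how such records could be read and inspected, assuming they are stored as one JSON object per line in a local file (the filename and helper below are illustrative assumptions, not part of the dataset itself):

import json

# Minimal sketch: iterate over records shaped like the schema above.
# Assumption: rows live in a local JSON Lines file, one object per line.
def iter_records(path="class_name_records.jsonl"):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

for record in iter_records():
    span = record["class_span"]        # {"start": ..., "end": ...}
    masked_source = record["source"]   # class body with the name masked as "____"
    class_name = record["target"]      # the original class name
    print(f"{record['repo']}:{record['path']} "
          f"[{span['start']}:{span['end']}] -> {class_name}")
    break  # inspect only the first record

The example rows that follow are reproduced verbatim from the dataset, in the column order given by the schema.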
python
etianen__django-reversion
tests/test_app/tests/test_views.py
{ "start": 109, "end": 487 }
class ____(TestModelMixin, TestBase): def testCreateRevision(self): response = self.client.post("/test-app/create-revision/") obj = TestModel.objects.get(pk=response.content) self.assertSingleRevision((obj,)) def testCreateRevisionGet(self): self.client.get("/test-app/create-revision/") self.assertNoRevision()
CreateRevisionTest
python
ApeWorX__ape
tests/functional/test_plugins.py
{ "start": 2949, "end": 3552 }
class ____: def test_from_package_names(self, plugin_metadata): actual = plugin_metadata assert actual.core.plugin_names == list(CORE_PLUGINS) assert actual.third_party.plugin_names == list(THIRD_PARTY) assert actual.installed.plugin_names == [INSTALLED_PLUGINS[0]] # Not 3rd party assert actual.available.plugin_names == [AVAILABLE_PLUGINS[0]] # Not installed def test_all_plugins(self, plugin_metadata, package_names): actual = {f"ape-{x.name}" for x in plugin_metadata.all_plugins} assert actual == package_names
TestPluginMetadataList
python
jmcnamara__XlsxWriter
xlsxwriter/test/styles/test_write_dxfs.py
{ "start": 295, "end": 740 }
class ____(unittest.TestCase): """ Test the Styles _write_dxfs() method. """ def setUp(self): self.fh = StringIO() self.styles = Styles() self.styles._set_filehandle(self.fh) def test_write_dxfs(self): """Test the _write_dxfs() method""" self.styles._write_dxfs() exp = """<dxfs count="0"/>""" got = self.fh.getvalue() self.assertEqual(exp, got)
TestWriteDxfs
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/cloud_sql.py
{ "start": 20095, "end": 22175 }
class ____(CloudSQLBaseOperator): """ Delete a Cloud SQL instance. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:CloudSQLDeleteInstanceOperator` :param instance: Cloud SQL instance ID. This does not include the project ID. :param project_id: Optional, Google Cloud Project ID. If set to None or missing, the default project_id from the Google Cloud connection is used. :param gcp_conn_id: The connection ID used to connect to Google Cloud. :param api_version: API version used (e.g. v1beta4). :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). """ # [START gcp_sql_delete_template_fields] template_fields: Sequence[str] = ( "project_id", "instance", "gcp_conn_id", "api_version", "impersonation_chain", ) # [END gcp_sql_delete_template_fields] ui_color = "#FEECD2" def execute(self, context: Context) -> bool | None: hook = CloudSQLHook( gcp_conn_id=self.gcp_conn_id, api_version=self.api_version, impersonation_chain=self.impersonation_chain, ) if not self._check_if_instance_exists(self.instance, hook): print(f"Cloud SQL instance with ID {self.instance} does not exist. Aborting delete.") return True return hook.delete_instance(project_id=self.project_id, instance=self.instance)
CloudSQLDeleteInstanceOperator
python
Lightning-AI__lightning
examples/fabric/dcgan/train_fabric.py
{ "start": 6948, "end": 8011 }
class ____(nn.Module): def __init__(self): super().__init__() self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 8), nn.ReLU(True), # state size. (ngf*8) x 4 x 4 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True), # state size. (ngf*4) x 8 x 8 nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True), # state size. (ngf*2) x 16 x 16 nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), # state size. (ngf) x 32 x 32 nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh(), # state size. (nc) x 64 x 64 ) def forward(self, input): return self.main(input)
Generator
python
coleifer__peewee
tests/dataset.py
{ "start": 451, "end": 518 }
class ____(TestModel): username = TextField(primary_key=True)
User
python
doocs__leetcode
solution/3600-3699/3672.Sum of Weighted Modes in Subarrays/Solution.py
{ "start": 0, "end": 690 }
class ____: def modeWeight(self, nums: List[int], k: int) -> int: pq = [] cnt = defaultdict(int) for x in nums[:k]: cnt[x] += 1 heappush(pq, (-cnt[x], x)) def get_mode() -> int: while -pq[0][0] != cnt[pq[0][1]]: heappop(pq) freq, val = -pq[0][0], pq[0][1] return freq * val ans = 0 ans += get_mode() for i in range(k, len(nums)): x, y = nums[i], nums[i - k] cnt[x] += 1 cnt[y] -= 1 heappush(pq, (-cnt[x], x)) heappush(pq, (-cnt[y], y)) ans += get_mode() return ans
Solution
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/interfaces.py
{ "start": 4388, "end": 4517 }
class ____(ORMColumnsClauseRole[_T]): __slots__ = () _role_name = "ORM mapped or aliased entity"
ORMEntityColumnsClauseRole
python
pypa__pip
src/pip/_internal/index/collector.py
{ "start": 2411, "end": 6005 }
class ____(Exception): pass def _ensure_api_response(url: str, session: PipSession) -> None: """ Send a HEAD request to the URL, and ensure the response contains a simple API Response. Raises `_NotHTTP` if the URL is not available for a HEAD request, or `_NotAPIContent` if the content type is not a valid content type. """ scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) if scheme not in {"http", "https"}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) raise_for_status(resp) _ensure_api_header(resp) def _get_simple_response(url: str, session: PipSession) -> Response: """Access an Simple API response with GET, and return the response. This consists of three parts: 1. If the URL looks suspiciously like an archive, send a HEAD first to check the Content-Type is HTML or Simple API, to avoid downloading a large file. Raise `_NotHTTP` if the content type cannot be determined, or `_NotAPIContent` if it is not HTML or a Simple API. 2. Actually perform the request. Raise HTTP exceptions on network failures. 3. Check the Content-Type header to make sure we got a Simple API response, and raise `_NotAPIContent` otherwise. """ if is_archive_file(Link(url).filename): _ensure_api_response(url, session=session) logger.debug("Getting page %s", redact_auth_from_url(url)) resp = session.get( url, headers={ "Accept": ", ".join( [ "application/vnd.pypi.simple.v1+json", "application/vnd.pypi.simple.v1+html; q=0.1", "text/html; q=0.01", ] ), # We don't want to blindly returned cached data for # /simple/, because authors generally expecting that # twine upload && pip install will function, but if # they've done a pip install in the last ~10 minutes # it won't. Thus by setting this to zero we will not # blindly use any cached data, however the benefit of # using max-age=0 instead of no-cache, is that we will # still support conditional requests, so we will still # minimize traffic sent in cases where the page hasn't # changed at all, we will just always incur the round # trip for the conditional GET now instead of only # once per 10 minutes. # For more information, please see pypa/pip#5670. "Cache-Control": "max-age=0", }, ) raise_for_status(resp) # The check for archives above only works if the url ends with # something that looks like an archive. However that is not a # requirement of an url. Unless we issue a HEAD request on every # url we cannot know ahead of time for sure if something is a # Simple API response or not. However we can check after we've # downloaded it. _ensure_api_header(resp) logger.debug( "Fetched page %s as %s", redact_auth_from_url(url), resp.headers.get("Content-Type", "Unknown"), ) return resp def _get_encoding_from_headers(headers: ResponseHeaders) -> str | None: """Determine if we have any encoding information in our headers.""" if headers and "Content-Type" in headers: m = email.message.Message() m["content-type"] = headers["Content-Type"] charset = m.get_param("charset") if charset: return str(charset) return None
_NotHTTP
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/postgresql/asyncpg.py
{ "start": 10105, "end": 10309 }
class ____(INTERVAL): render_bind_cast = True @classmethod def adapt_emulated_to_native(cls, interval, **kw): return AsyncPgInterval(precision=interval.second_precision)
AsyncPgInterval
python
urllib3__urllib3
src/urllib3/contrib/emscripten/fetch.py
{ "start": 2900, "end": 3071 }
class ____(_RequestError): pass def _obj_from_dict(dict_val: dict[str, Any]) -> JsProxy: return to_js(dict_val, dict_converter=js.Object.fromEntries)
_TimeoutError
python
sympy__sympy
sympy/stats/matrix_distributions.py
{ "start": 7087, "end": 8806 }
class ____(Distribution, NamedArgsMixin): """ Abstract class for Matrix Distribution. """ def __new__(cls, *args): args = [ImmutableMatrix(arg) if isinstance(arg, list) else _sympify(arg) for arg in args] return Basic.__new__(cls, *args) @staticmethod def check(*args): pass def __call__(self, expr): if isinstance(expr, list): expr = ImmutableMatrix(expr) return self.pdf(expr) def sample(self, size=(), library='scipy', seed=None): """ Internal sample method Returns dictionary mapping RandomSymbol to realization value. """ libraries = ['scipy', 'numpy', 'pymc3', 'pymc'] if library not in libraries: raise NotImplementedError("Sampling from %s is not supported yet." % str(library)) if not import_module(library): raise ValueError("Failed to import %s" % library) samps = _get_sample_class_matrixrv[library](self, size, seed) if samps is not None: return samps raise NotImplementedError( "Sampling for %s is not currently implemented from %s" % (self.__class__.__name__, library) ) ################################################################################ #------------------------Matrix Distribution Types-----------------------------# ################################################################################ #------------------------------------------------------------------------------- # Matrix Gamma distribution ----------------------------------------------------
MatrixDistribution
python
PyCQA__pylint
tests/functional/n/not_async_context_manager.py
{ "start": 425, "end": 511 }
class ____: def __aexit__(self, *args): pass
SecondPartialAsyncContextManager
python
pyca__cryptography
tests/hazmat/primitives/decrepit/test_algorithms.py
{ "start": 4194, "end": 4672 }
class ____: test_ofb = generate_encrypt_test( load_nist_vectors, os.path.join("ciphers", "Blowfish"), ["bf-ofb.txt"], lambda key, **kwargs: Blowfish(binascii.unhexlify(key)), lambda iv, **kwargs: OFB(binascii.unhexlify(iv)), ) @pytest.mark.supported( only_if=lambda backend: backend.cipher_supported( Blowfish(b"\x00" * 56), CFB(b"\x00" * 8) ), skip_message="Does not support Blowfish CFB", )
TestBlowfishModeOFB
python
ray-project__ray
python/ray/tune/tests/_test_trial_runner_pg.py
{ "start": 9392, "end": 11882 }
class ____(unittest.TestCase): def tearDown(self) -> None: if ray.is_initialized: ray.shutdown() def testResourceDeadlock(self): """Tests that resource deadlock is avoided for heterogeneous PGFs. We start 4 trials in a cluster with 2 CPUs. The first two trials require 1 CPU each, the third trial 2 CPUs, the fourth trial 1 CPU. The second trial needs a bit more time to finish. This means that the resources from the first trial will be freed, and the PG of the _fourth_ trial becomes ready (not that of the third trial, because that requires 2 CPUs - however, one is still occupied by trial 2). After the first two trials finished, the FIFOScheduler tries to start the third trial. However, it can't be started because its placement group is not ready. Instead, the placement group of the fourth trial is ready. Thus, we opt to run the fourth trial instead. """ def train_fn(config): time.sleep(config["sleep"]) return 4 ray.init(num_cpus=2) tune.register_trainable("het", train_fn) pgf1 = PlacementGroupFactory([{"CPU": 1}]) pgf2 = PlacementGroupFactory([{"CPU": 2}]) trial1 = Trial("het", config={"sleep": 0}, placement_group_factory=pgf1) trial2 = Trial("het", config={"sleep": 2}, placement_group_factory=pgf1) trial3 = Trial("het", config={"sleep": 0}, placement_group_factory=pgf2) trial4 = Trial("het", config={"sleep": 0}, placement_group_factory=pgf1) runner = TrialRunner(fail_fast=True) runner.add_trial(trial1) runner.add_trial(trial2) runner.add_trial(trial3) runner.add_trial(trial4) timeout = time.monotonic() + 30 while not runner.is_finished(): # We enforce a timeout here self.assertLess( time.monotonic(), timeout, msg="Ran into a resource deadlock" ) runner.step() def test_placement_group_no_cpu_trainer(): """Bundles with only GPU:1 but no CPU should work""" ray.init(num_gpus=1, num_cpus=1) pgf = PlacementGroupFactory([{"GPU": 1, "CPU": 0}, {"CPU": 1}]) def train_fn(config): time.sleep(1) return 5 tune.run(train_fn, resources_per_trial=pgf) if __name__ == "__main__": import pytest sys.exit(pytest.main(["-v", __file__]))
TrialRunnerPlacementGroupHeterogeneousTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1534290, "end": 1535064 }
class ____(sgqlc.types.Type, Node): """Represents a 'transferred' event on a given issue or pull request.""" __schema__ = github_schema __field_names__ = ("actor", "created_at", "from_repository", "issue") actor = sgqlc.types.Field(Actor, graphql_name="actor") """Identifies the actor who performed the event.""" created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" from_repository = sgqlc.types.Field(Repository, graphql_name="fromRepository") """The repository this came from""" issue = sgqlc.types.Field(sgqlc.types.non_null(Issue), graphql_name="issue") """Identifies the issue associated with the event."""
TransferredEvent
python
getsentry__sentry
tests/sentry/integrations/test_client.py
{ "start": 576, "end": 13824 }
class ____(TestCase): @responses.activate def test_get(self) -> None: responses.add(responses.GET, "http://example.com", json={}) resp = ApiClient().get("http://example.com") assert isinstance(resp, BaseApiResponse) assert resp.status_code == 200 @responses.activate def test_post(self) -> None: responses.add(responses.POST, "http://example.com", json={}) resp = ApiClient().post("http://example.com") assert isinstance(resp, BaseApiResponse) assert resp.status_code == 200 @responses.activate def test_delete(self) -> None: responses.add(responses.DELETE, "http://example.com", json={}) resp = ApiClient().delete("http://example.com") assert isinstance(resp, BaseApiResponse) assert resp.status_code == 200 @responses.activate def test_put(self) -> None: responses.add(responses.PUT, "http://example.com", json={}) resp = ApiClient().put("http://example.com") assert isinstance(resp, BaseApiResponse) assert resp.status_code == 200 @responses.activate def test_patch(self) -> None: responses.add(responses.PATCH, "http://example.com", json={}) resp = ApiClient().patch("http://example.com") assert isinstance(resp, BaseApiResponse) assert resp.status_code == 200 @mock.patch("sentry.shared_integrations.client.base.cache") @responses.activate def test_cache_mocked(self, cache: mock.MagicMock) -> None: cache.get.return_value = None responses.add(responses.GET, "http://example.com", json={"key": "value1"}) resp = ApiClient().get_cached("http://example.com") assert resp == {"key": "value1"} key = "integration.undefined.client:41c2952996340270af611f0d7fad7286" cache.get.assert_called_with(key) cache.set.assert_called_with(key, {"key": "value1"}, 900) @responses.activate def test_get_cached_basic(self) -> None: responses.add(responses.GET, "http://example.com", json={"key": "value1"}) resp = ApiClient().get_cached("http://example.com") assert resp == {"key": "value1"} assert len(responses.calls) == 1 # should still return old value responses.replace(responses.GET, "http://example.com", json={"key": "value2"}) resp = ApiClient().get_cached("http://example.com") assert resp == {"key": "value1"} assert len(responses.calls) == 1 # make sure normal get isn't impacted resp = ApiClient().get("http://example.com") assert resp == {"key": "value2"} assert len(responses.calls) == 2 @responses.activate def test_get_cached_query_param(self) -> None: responses.add(responses.GET, "http://example.com?param=val", json={}) responses.add(responses.GET, "http://example.com?param=different", json={}) ApiClient().get_cached("http://example.com", params={"param": "val"}) assert len(responses.calls) == 1 ApiClient().get_cached("http://example.com", params={"param": "val"}) assert len(responses.calls) == 1 ApiClient().get_cached("http://example.com", params={"param": "different"}) assert len(responses.calls) == 2 @responses.activate def test_head_cached_query_param(self) -> None: responses.add(responses.HEAD, "http://example.com?param=val", json={}) responses.add(responses.HEAD, "http://example.com?param=different", json={}) ApiClient().head_cached("http://example.com", params={"param": "val"}) assert len(responses.calls) == 1 ApiClient().head_cached("http://example.com", params={"param": "val"}) assert len(responses.calls) == 1 ApiClient().head_cached("http://example.com", params={"param": "different"}) assert len(responses.calls) == 2 @responses.activate def test_get_and_head_cached(self) -> None: # Same URL, different HTTP method url = "http://example.com" responses.add( responses.GET, url, json={"key": "response-for-get"}, 
adding_headers={"x-method": "GET"}, ) responses.add( responses.HEAD, url, json={}, adding_headers={"x-method": "HEAD"}, ) resp = ApiClient().head_cached(url) assert resp.headers["x-method"] == "HEAD" assert len(responses.calls) == 1 resp = ApiClient().head_cached(url) assert resp.headers["x-method"] == "HEAD" assert len(responses.calls) == 1 resp = ApiClient().get_cached(url, raw_response=True) assert resp.headers["x-method"] == "GET" assert resp.json() == {"key": "response-for-get"} assert len(responses.calls) == 2 resp = ApiClient().get_cached(url, raw_response=True) assert resp.headers["x-method"] == "GET" assert resp.json() == {"key": "response-for-get"} assert len(responses.calls) == 2 @responses.activate def test_default_redirect_behaviour(self) -> None: destination_url = "http://example.com/destination" destination_status = 202 destination_headers = {"Location": destination_url} responses.add(responses.GET, destination_url, status=destination_status, json={}) responses.add(responses.DELETE, destination_url, status=destination_status, json={}) responses.add( responses.GET, "http://example.com/1", status=301, headers=destination_headers ) resp = ApiClient().get("http://example.com/1") assert isinstance(resp, BaseApiResponse) assert resp.status_code == destination_status # By default, non GET requests are not allowed to redirect responses.add( responses.DELETE, "http://example.com/2", status=301, headers=destination_headers, json={}, ) resp = ApiClient().delete("http://example.com/2") assert isinstance(resp, BaseApiResponse) assert resp.status_code == 301 responses.add( responses.DELETE, "http://example.com/3", status=301, headers=destination_headers, json={}, ) resp = ApiClient().delete("http://example.com/3", allow_redirects=True) assert isinstance(resp, BaseApiResponse) assert resp.status_code == destination_status def test_connection_error_handling(self) -> None: """ Test handling of `ConnectionError`s raised by the `requests` library. (It's worth specifying because we also handle built-in `ConnectionError`s (specifically, `ConnectionResetError`s`).) """ client = ApiClient() with mock.patch.object( client, "track_response_data", wraps=client.track_response_data ) as track_response_data_spy: with mock.patch( "requests.sessions.Session.send", side_effect=ConnectionError("foo"), ): with pytest.raises(ApiHostError): client.get("http://example.com") assert track_response_data_spy.call_args.args[0] == "connection_error" def test_timeout_handling(self) -> None: """Test handling of `Timeout` errors""" client = ApiClient() with mock.patch.object( client, "track_response_data", wraps=client.track_response_data ) as track_response_data_spy: with mock.patch( "requests.sessions.Session.send", side_effect=Timeout("foo"), ): with pytest.raises(ApiTimeoutError): client.get("http://example.com") assert track_response_data_spy.call_args.args[0] == "timeout" def test_http_error_handling_with_response(self) -> None: """ Test handling of `HTTPError`s raised by the `requests` library. (It's worth specifying because we also handle `HTTPError`s (specifically, `InvalidChunkLength` errors) from `urllib3`.) 
""" client = ApiClient() mock_error_response = Response() mock_error_response.status_code = 500 with mock.patch.object( client, "track_response_data", wraps=client.track_response_data ) as track_response_data_spy: with mock.patch( "requests.sessions.Session.send", side_effect=HTTPError("foo", response=mock_error_response), ): with pytest.raises(ApiError): client.get("http://example.com") assert track_response_data_spy.call_args.args[0] == 500 def test_http_error_handling_without_response(self) -> None: """ Test handling of `HTTPError`s raised by the `requests` library. (It's worth specifying because we also handle `HTTPError`s (specifically, `InvalidChunkLength` errors) from `urllib3`.) """ client = ApiClient() with mock.patch.object( client, "track_response_data", wraps=client.track_response_data ) as track_response_data_spy: with mock.patch( "requests.sessions.Session.send", side_effect=HTTPError("foo", response=None), ): with pytest.raises(ApiError): client.get("http://example.com") assert track_response_data_spy.call_args.args[0] == "unknown" def test_chained_connection_reset_error_handling(self) -> None: """Test handling of errors caused by `ConnectionResetError` errors""" client = ApiClient() with mock.patch.object( client, "track_response_data", wraps=client.track_response_data ) as track_response_data_spy: chained_error = ConnectionResetError(errno.ECONNRESET, "Connection reset by peer") caught_error = Exception( errno.ECONNRESET, 'ConnectionResetError(104, "Connection reset by peer")' ) caught_error.__cause__ = chained_error with mock.patch( "requests.sessions.Session.send", side_effect=caught_error, ): with pytest.raises(ApiConnectionResetError): client.get("http://example.com") assert track_response_data_spy.call_args.args[0] == "connection_reset_error" def test_chained_invalid_chunk_length_error_handling(self) -> None: """Test handling of errors caused by `InvalidChunkLength` errors""" client = ApiClient() mock_error_response = HTTPResponse() with mock.patch.object( client, "track_response_data", wraps=client.track_response_data ) as track_response_data_spy: chained_error = InvalidChunkLength(mock_error_response, b"") caught_error = Exception( "Connection broken: InvalidChunkLength(got length b'', 0 bytes read)" ) caught_error.__cause__ = chained_error with mock.patch( "requests.sessions.Session.send", side_effect=caught_error, ): with pytest.raises(ApiError): client.get("http://example.com") assert ( track_response_data_spy.call_args.args[0] == "Connection broken: invalid chunk length" ) @responses.activate def test_verify_ssl_handling(self) -> None: """ Test handling of `verify_ssl` parameter when setting REQUESTS_CA_BUNDLE. 
""" responses.add(responses.GET, "https://example.com", json={}) requests_ca_bundle = "/some/path/to/certs" with mock.patch.dict(os.environ, {"REQUESTS_CA_BUNDLE": requests_ca_bundle}): client = ApiClient() with mock.patch( "requests.sessions.Session.send", wraps=Session().send ) as session_send_mock: client.get("https://example.com") session_send_mock.assert_called_once_with( mock.ANY, timeout=30, allow_redirects=True, proxies={}, stream=False, verify=requests_ca_bundle, cert=None, ) @responses.activate def test_parameters_passed_correctly(self) -> None: responses.add(responses.GET, "https://example.com", json={}) client = ApiClient(verify_ssl=False) with mock.patch( "requests.sessions.Session.send", wraps=Session().send ) as session_send_mock: client.get("https://example.com", timeout=50, allow_redirects=False) session_send_mock.assert_called_once_with( mock.ANY, timeout=50, allow_redirects=False, proxies={}, stream=False, verify=False, cert=None, )
ApiClientTest
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-qdrant/llama_index/readers/qdrant/base.py
{ "start": 169, "end": 6913 }
class ____(BaseReader): """ Qdrant reader. Retrieve documents from existing Qdrant collections. Args: location: If `:memory:` - use in-memory Qdrant instance. If `str` - use it as a `url` parameter. If `None` - use default values for `host` and `port`. url: either host or str of "Optional[scheme], host, Optional[port], Optional[prefix]". Default: `None` port: Port of the REST API interface. Default: 6333 grpc_port: Port of the gRPC interface. Default: 6334 prefer_grpc: If `true` - use gPRC interface whenever possible in custom methods. https: If `true` - use HTTPS(SSL) protocol. Default: `false` api_key: API key for authentication in Qdrant Cloud. Default: `None` prefix: If not `None` - add `prefix` to the REST URL path. Example: `service/v1` will result in `http://localhost:6333/service/v1/{qdrant-endpoint}` for REST API. Default: `None` timeout: Timeout for REST and gRPC API requests. Default: 5.0 seconds for REST and unlimited for gRPC host: Host name of Qdrant service. If url and host are None, set to 'localhost'. Default: `None` """ def __init__( self, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, ): """Initialize with parameters.""" import_err_msg = ( "`qdrant-client` package not found, please run `pip install qdrant-client`" ) try: import qdrant_client except ImportError: raise ImportError(import_err_msg) self._client = qdrant_client.QdrantClient( location=location, url=url, port=port, grpc_port=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key, prefix=prefix, timeout=timeout, host=host, path=path, ) def load_data( self, collection_name: str, query_vector: List[float], should_search_mapping: Optional[Dict[str, str]] = None, must_search_mapping: Optional[Dict[str, str]] = None, must_not_search_mapping: Optional[Dict[str, str]] = None, rang_search_mapping: Optional[Dict[str, Dict[str, float]]] = None, limit: int = 10, ) -> List[Document]: """ Load data from Qdrant. Args: collection_name (str): Name of the Qdrant collection. query_vector (List[float]): Query vector. should_search_mapping (Optional[Dict[str, str]]): Mapping from field name to query string. must_search_mapping (Optional[Dict[str, str]]): Mapping from field name to query string. must_not_search_mapping (Optional[Dict[str, str]]): Mapping from field name to query string. rang_search_mapping (Optional[Dict[str, Dict[str, float]]]): Mapping from field name to range query. limit (int): Number of results to return. Example: reader = QdrantReader() reader.load_data( collection_name="test_collection", query_vector=[0.1, 0.2, 0.3], should_search_mapping={"text_field": "text"}, must_search_mapping={"text_field": "text"}, must_not_search_mapping={"text_field": "text"}, # gte, lte, gt, lt supported rang_search_mapping={"text_field": {"gte": 0.1, "lte": 0.2}}, limit=10 ) Returns: List[Document]: A list of documents. 
""" from qdrant_client.http.models import ( FieldCondition, Filter, MatchText, MatchValue, Range, ) from qdrant_client.http.models.models import Payload should_search_mapping = should_search_mapping or {} must_search_mapping = must_search_mapping or {} must_not_search_mapping = must_not_search_mapping or {} rang_search_mapping = rang_search_mapping or {} should_search_conditions = [ FieldCondition(key=key, match=MatchText(text=value)) for key, value in should_search_mapping.items() if should_search_mapping ] must_search_conditions = [ FieldCondition(key=key, match=MatchValue(value=value)) for key, value in must_search_mapping.items() if must_search_mapping ] must_not_search_conditions = [ FieldCondition(key=key, match=MatchValue(value=value)) for key, value in must_not_search_mapping.items() if must_not_search_mapping ] rang_search_conditions = [ FieldCondition( key=key, range=Range( gte=value.get("gte"), lte=value.get("lte"), gt=value.get("gt"), lt=value.get("lt"), ), ) for key, value in rang_search_mapping.items() if rang_search_mapping ] should_search_conditions.extend(rang_search_conditions) response = self._client.search( collection_name=collection_name, query_vector=query_vector, query_filter=Filter( must=must_search_conditions, must_not=must_not_search_conditions, should=should_search_conditions, ), with_vectors=True, with_payload=True, limit=limit, ) documents = [] for point in response: payload = cast(Payload, point.payload) try: vector = cast(List[float], point.vector) except ValueError as e: raise ValueError("Could not cast vector to List[float].") from e document = Document( id_=payload.get("doc_id"), text=payload.get("text"), metadata=payload.get("metadata"), embedding=vector, ) documents.append(document) return documents
QdrantReader
python
cython__cython
Cython/Shadow.py
{ "start": 11226, "end": 11949 }
class ____(ArrayType): # Implemented as class to support both 'array(int, 5)' and 'array[int, 5]'. def __new__(cls, basetype, n): class ArrayInstance(ArrayType): _basetype = basetype _n = n return ArrayInstance def __class_getitem__(cls, item): basetype, n = item return cls(basetype, item) def struct(**members): class StructInstance(StructType): _members = members for key in members: setattr(StructInstance, key, None) return StructInstance def union(**members): class UnionInstance(UnionType): _members = members for key in members: setattr(UnionInstance, key, None) return UnionInstance
array
python
cython__cython
Demos/benchmarks/bm_async_generators.py
{ "start": 240, "end": 1354 }
class ____: def __init__(self, left: Tree | None, value: int, right: Tree | None) -> None: self.left = left self.value = value self.right = right async def __aiter__(self) -> AsyncIterator[int]: if self.left: async for i in self.left: yield i yield self.value if self.right: async for i in self.right: yield i def tree(input: range) -> Tree | None: n = len(input) if n == 0: return None i = n // 2 return Tree(tree(input[:i]), input[i], tree(input[i + 1:])) async def bench_async_generators(async_tree) -> None: async for _ in async_tree: pass def run_benchmark(repeat=True, scale: cython.long = 1): from util import repeat_to_accuracy async_tree = tree(range(1000)) def single_run(scale, timer): s: cython.long t = timer() for s in range(scale): asyncio.run(bench_async_generators(async_tree)) t = timer() - t return t return repeat_to_accuracy(single_run, scale=scale, repeat=repeat)[0]
Tree
python
tensorflow__tensorflow
tensorflow/python/ops/nccl_ops_test.py
{ "start": 5267, "end": 5538 }
class ____(NcclTestCase): def testSum(self): self._Test(partial(_NcclReduce, nccl_ops.reduce_sum), lambda x, y: x + y) def testSumGrad(self): self._TestGradient(partial(_NcclReduce, nccl_ops.reduce_sum), lambda x, y: x)
SingleReduceTest
python
davidhalter__jedi
test/completion/recursion.py
{ "start": 609, "end": 801 }
class ____(): def __init__(self): self.recursive = [1] def annoying(self): self.recursive = [x for x in self.recursive] #? int() FooListComp().recursive[0]
FooListComp
python
scrapy__scrapy
tests/test_utils_datatypes.py
{ "start": 8353, "end": 9000 }
class ____: def test_cache_with_limit(self): cache = LocalCache(limit=2) cache["a"] = 1 cache["b"] = 2 cache["c"] = 3 assert len(cache) == 2 assert "a" not in cache assert "b" in cache assert "c" in cache assert cache["b"] == 2 assert cache["c"] == 3 def test_cache_without_limit(self): maximum = 10**4 cache = LocalCache() for x in range(maximum): cache[str(x)] = x assert len(cache) == maximum for x in range(maximum): assert str(x) in cache assert cache[str(x)] == x
TestLocalCache
python
pydantic__pydantic
tests/test_forward_ref.py
{ "start": 12122, "end": 12187 }
class ____(Spec): p: PSpec | None # PSpec.model_rebuild()
GSpec
python
dagster-io__dagster
python_modules/libraries/dagster-aws/dagster_aws/s3/compute_log_manager.py
{ "start": 905, "end": 12451 }
class ____(TruncatingCloudStorageComputeLogManager, ConfigurableClass): """Logs compute function stdout and stderr to S3. Users should not instantiate this class directly. Instead, use a YAML block in ``dagster.yaml`` such as the following: .. code-block:: YAML compute_logs: module: dagster_aws.s3.compute_log_manager class: S3ComputeLogManager config: bucket: "mycorp-dagster-compute-logs" local_dir: "/tmp/cool" prefix: "dagster-test-" use_ssl: true verify: true verify_cert_path: "/path/to/cert/bundle.pem" endpoint_url: "http://alternate-s3-host.io" skip_empty_files: true upload_interval: 30 upload_extra_args: ServerSideEncryption: "AES256" show_url_only: false region: "us-west-1" Args: bucket (str): The name of the s3 bucket to which to log. local_dir (Optional[str]): Path to the local directory in which to stage logs. Default: ``dagster_shared.seven.get_system_temp_directory()``. prefix (Optional[str]): Prefix for the log file keys. use_ssl (Optional[bool]): Whether or not to use SSL. Default True. verify (Optional[bool]): Whether or not to verify SSL certificates. Default True. verify_cert_path (Optional[str]): A filename of the CA cert bundle to use. Only used if `verify` set to False. endpoint_url (Optional[str]): Override for the S3 endpoint url. skip_empty_files: (Optional[bool]): Skip upload of empty log files. upload_interval: (Optional[int]): Interval in seconds to upload partial log files to S3. By default, will only upload when the capture is complete. upload_extra_args: (Optional[dict]): Extra args for S3 file upload show_url_only: (Optional[bool]): Only show the URL of the log file in the UI, instead of fetching and displaying the full content. Default False. region: (Optional[str]): The region of the S3 bucket. If not specified, will use the default region of the AWS session. inst_data (Optional[ConfigurableClassData]): Serializable representation of the compute log manager when newed up from config. 
""" def __init__( self, bucket, local_dir=None, inst_data: Optional[ConfigurableClassData] = None, prefix="dagster", use_ssl=True, verify=True, verify_cert_path=None, endpoint_url=None, skip_empty_files=False, upload_interval=None, upload_extra_args=None, show_url_only=False, region=None, ): _verify = False if not verify else verify_cert_path self._s3_session = boto3.resource( "s3", use_ssl=use_ssl, verify=_verify, endpoint_url=endpoint_url ).meta.client self._s3_bucket = check.str_param(bucket, "bucket") self._s3_prefix = self._clean_prefix(check.str_param(prefix, "prefix")) # proxy calls to local compute log manager (for subscriptions, etc) if not local_dir: local_dir = seven.get_system_temp_directory() self._local_manager = LocalComputeLogManager(local_dir) self._subscription_manager = PollingComputeLogSubscriptionManager(self) self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData) self._skip_empty_files = check.bool_param(skip_empty_files, "skip_empty_files") self._upload_interval = check.opt_int_param(upload_interval, "upload_interval") check.opt_dict_param(upload_extra_args, "upload_extra_args") self._upload_extra_args = upload_extra_args self._show_url_only = show_url_only if region is None: # if unspecified, use the current session name self._region = self._s3_session.meta.region_name else: self._region = region super().__init__() @property def inst_data(self): return self._inst_data @classmethod def config_type(cls): return { "bucket": StringSource, "local_dir": Field(StringSource, is_required=False), "prefix": Field(StringSource, is_required=False, default_value="dagster"), "use_ssl": Field(bool, is_required=False, default_value=True), "verify": Field(bool, is_required=False, default_value=True), "verify_cert_path": Field(StringSource, is_required=False), "endpoint_url": Field(StringSource, is_required=False), "skip_empty_files": Field(bool, is_required=False, default_value=False), "upload_interval": Field(Noneable(int), is_required=False, default_value=None), "upload_extra_args": Field( Permissive(), is_required=False, description="Extra args for S3 file upload" ), "show_url_only": Field(bool, is_required=False, default_value=False), "region": Field(StringSource, is_required=False), } @classmethod def from_config_value( cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any] ) -> Self: return cls(inst_data=inst_data, **config_value) @property def local_manager(self) -> LocalComputeLogManager: return self._local_manager @property def upload_interval(self) -> Optional[int]: return self._upload_interval if self._upload_interval else None def _clean_prefix(self, prefix): parts = prefix.split("/") return "/".join([part for part in parts if part]) def _resolve_path_for_namespace(self, namespace): return [self._s3_prefix, "storage", *namespace] def _s3_key(self, log_key, io_type, partial=False): check.inst_param(io_type, "io_type", ComputeIOType) extension = IO_TYPE_EXTENSION[io_type] [*namespace, filebase] = log_key filename = f"{filebase}.{extension}" if partial: filename = f"{filename}.partial" paths = [*self._resolve_path_for_namespace(namespace), filename] return "/".join(paths) # s3 path delimiter @contextmanager def capture_logs(self, log_key: Sequence[str]) -> Iterator[CapturedLogContext]: with super().capture_logs(log_key) as local_context: if not self._show_url_only: yield local_context else: out_key = self._s3_key(log_key, ComputeIOType.STDOUT) err_key = self._s3_key(log_key, ComputeIOType.STDERR) s3_base = 
f"https://s3.console.aws.amazon.com/s3/object/{self._s3_bucket}?region={self._region}" yield CapturedLogContext( local_context.log_key, external_stdout_url=f"{s3_base}&prefix={out_key}", external_stderr_url=f"{s3_base}&prefix={err_key}", ) def delete_logs( self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None ): self.local_manager.delete_logs(log_key=log_key, prefix=prefix) s3_keys_to_remove = None if log_key: s3_keys_to_remove = [ self._s3_key(log_key, ComputeIOType.STDOUT), self._s3_key(log_key, ComputeIOType.STDERR), self._s3_key(log_key, ComputeIOType.STDOUT, partial=True), self._s3_key(log_key, ComputeIOType.STDERR, partial=True), ] elif prefix: # add the trailing '' to make sure that ['a'] does not match ['apple'] s3_prefix = "/".join([self._s3_prefix, "storage", *prefix, ""]) matching = self._s3_session.list_objects(Bucket=self._s3_bucket, Prefix=s3_prefix) s3_keys_to_remove = [obj["Key"] for obj in matching.get("Contents", [])] else: check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs") if s3_keys_to_remove: to_delete = [{"Key": key} for key in s3_keys_to_remove] self._s3_session.delete_objects(Bucket=self._s3_bucket, Delete={"Objects": to_delete}) def download_url_for_type(self, log_key: Sequence[str], io_type: ComputeIOType): if not self.is_capture_complete(log_key): return None s3_key = self._s3_key(log_key, io_type) return self._s3_session.generate_presigned_url( ClientMethod="get_object", Params={"Bucket": self._s3_bucket, "Key": s3_key} ) def display_path_for_type(self, log_key: Sequence[str], io_type: ComputeIOType): # pyright: ignore[reportIncompatibleMethodOverride] if not self.is_capture_complete(log_key): return None s3_key = self._s3_key(log_key, io_type) return f"s3://{self._s3_bucket}/{s3_key}" def cloud_storage_has_logs( self, log_key: Sequence[str], io_type: ComputeIOType, partial: bool = False ) -> bool: s3_key = self._s3_key(log_key, io_type, partial=partial) try: # https://stackoverflow.com/a/38376288/14656695 self._s3_session.head_object(Bucket=self._s3_bucket, Key=s3_key) except ClientError: return False return True def _upload_file_obj( self, data: IO[bytes], log_key: Sequence[str], io_type: ComputeIOType, partial=False ): path = self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type]) if (self._skip_empty_files or partial) and os.stat(path).st_size == 0: return s3_key = self._s3_key(log_key, io_type, partial=partial) extra_args = { "ContentType": "text/plain", **(self._upload_extra_args if self._upload_extra_args else {}), } self._s3_session.upload_fileobj(data, self._s3_bucket, s3_key, ExtraArgs=extra_args) def download_from_cloud_storage( self, log_key: Sequence[str], io_type: ComputeIOType, partial=False ): path = self._local_manager.get_captured_local_path( log_key, IO_TYPE_EXTENSION[io_type], partial=partial ) ensure_dir(os.path.dirname(path)) s3_key = self._s3_key(log_key, io_type, partial=partial) with open(path, "wb") as fileobj: self._s3_session.download_fileobj(self._s3_bucket, s3_key, fileobj) def get_log_keys_for_log_key_prefix( self, log_key_prefix: Sequence[str], io_type: ComputeIOType ) -> Sequence[Sequence[str]]: directory = self._resolve_path_for_namespace(log_key_prefix) objects = self._s3_session.list_objects_v2( Bucket=self._s3_bucket, Prefix="/".join(directory) ) results = [] list_key_prefix = list(log_key_prefix) if objects["KeyCount"] == 0: return [] for obj in objects["Contents"]: full_key = obj["Key"] filename, obj_io_type = 
full_key.split("/")[-1].split(".") if obj_io_type != IO_TYPE_EXTENSION[io_type]: continue results.append(list_key_prefix + [filename]) return results def on_subscribe(self, subscription): self._subscription_manager.add_subscription(subscription) def on_unsubscribe(self, subscription): self._subscription_manager.remove_subscription(subscription) def dispose(self): self._subscription_manager.dispose() self._local_manager.dispose()
S3ComputeLogManager
python
openai__openai-python
src/openai/types/batch_request_counts.py
{ "start": 155, "end": 408 }
class ____(BaseModel): completed: int """Number of requests that have been completed successfully.""" failed: int """Number of requests that have failed.""" total: int """Total number of requests in the batch."""
BatchRequestCounts
python
PyCQA__pylint
tests/functional/i/invalid/invalid_index_returned.py
{ "start": 194, "end": 302 }
class ____: """__index__ returns <type 'int'>""" def __index__(self): return 1
FirstGoodIndex
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 186962, "end": 187467 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("ref_id", "oid", "force", "client_mutation_id") ref_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="refId") oid = sgqlc.types.Field(sgqlc.types.non_null(GitObjectID), graphql_name="oid") force = sgqlc.types.Field(Boolean, graphql_name="force") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
UpdateRefInput
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramSpec24.py
{ "start": 582, "end": 804 }
class ____(Protocol[P, T]): foo: int = 0 val: T def __init__(self, val: T) -> None: self.val = val def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: return self.val
_callable_cache
python
getsentry__sentry
src/sentry/migrations/0996_add_dashboard_field_link_model.py
{ "start": 269, "end": 2678 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("sentry", "0995_add_date_updated_to_grouphash_metadata"), ] operations = [ migrations.CreateModel( name="DashboardFieldLink", fields=[ ( "id", sentry.db.models.fields.bounded.BoundedBigAutoField( primary_key=True, serialize=False ), ), ("date_updated", models.DateTimeField(auto_now=True)), ("date_added", models.DateTimeField(auto_now_add=True)), ("field", models.TextField()), ( "dashboard", sentry.db.models.fields.foreignkey.FlexibleForeignKey( on_delete=django.db.models.deletion.CASCADE, to="sentry.dashboard" ), ), ( "dashboard_widget_query", sentry.db.models.fields.foreignkey.FlexibleForeignKey( on_delete=django.db.models.deletion.CASCADE, to="sentry.dashboardwidgetquery", ), ), ], options={ "db_table": "sentry_dashboardfieldlink", "unique_together": {("dashboard_widget_query", "field")}, }, ), ]
Migration
python
scipy__scipy
scipy/io/matlab/_miobase.py
{ "start": 356, "end": 434 }
class ____(Exception): """Exception indicating a read issue."""
MatReadError
python
kamyu104__LeetCode-Solutions
Python/longest-substring-of-one-repeating-character.py
{ "start": 3755, "end": 5110 }
class ____(object): def longestRepeating(self, s, queryCharacters, queryIndices): """ :type s: str :type queryCharacters: str :type queryIndices: List[int] :rtype: List[int] """ LEFT, RIGHT, LEFT_LEN, RIGHT_LEN, LEN, MAX_LEN, SIZE = xrange(7) def build(i): return update(s[i]) def update(y): result = [0]*SIZE result[LEN] = result[LEFT_LEN] = result[RIGHT_LEN] = result[MAX_LEN] = 1 result[LEFT] = result[RIGHT] = y return result def query(x, y): return y if x is None else x if y is None else \ [x[LEFT], y[RIGHT], x[LEFT_LEN]+(y[LEFT_LEN] if x[LEFT_LEN] == x[LEN] and x[RIGHT] == y[LEFT] else 0), y[RIGHT_LEN]+(x[RIGHT_LEN] if y[RIGHT_LEN] == y[LEN] and y[LEFT] == x[RIGHT] else 0), x[LEN]+y[LEN], max(x[MAX_LEN], y[MAX_LEN], x[RIGHT_LEN]+y[LEFT_LEN] if x[RIGHT] == y[LEFT] else 0)] result = [] st = SegmentTree2(len(s), build_fn=build, query_fn=query, update_fn=update) for c, i in itertools.izip(queryCharacters, queryIndices): st.update(i, c) result.append(st.query(0, len(s)-1)[MAX_LEN]) return result
Solution2
python
huggingface__transformers
src/transformers/models/esm/modeling_esmfold.py
{ "start": 40215, "end": 40831 }
class ____(nn.Module): def __init__(self, pairwise_state_dim, num_heads): super().__init__() self.layernorm = nn.LayerNorm(pairwise_state_dim) self.linear = nn.Linear(pairwise_state_dim, num_heads, bias=False) def forward(self, pairwise_state): """ Inputs: pairwise_state: B x L x L x pairwise_state_dim Output: pairwise_bias: B x L x L x num_heads """ assert len(pairwise_state.shape) == 4 z = self.layernorm(pairwise_state) pairwise_bias = self.linear(z) return pairwise_bias
EsmFoldPairToSequence
python
Netflix__metaflow
metaflow/plugins/argo/argo_workflows_deployer.py
{ "start": 212, "end": 4444 }
class ____(DeployerImpl): """ Deployer implementation for Argo Workflows. Parameters ---------- name : str, optional, default None Argo workflow name. The flow name is used instead if this option is not specified. """ TYPE: ClassVar[Optional[str]] = "argo-workflows" def __init__(self, deployer_kwargs: Dict[str, str], **kwargs): """ Initialize the ArgoWorkflowsDeployer. Parameters ---------- deployer_kwargs : Dict[str, str] The deployer-specific keyword arguments. **kwargs : Any Additional arguments to pass to the superclass constructor. """ self._deployer_kwargs = deployer_kwargs super().__init__(**kwargs) @property def deployer_kwargs(self) -> Dict[str, Any]: return self._deployer_kwargs @staticmethod def deployed_flow_type() -> ( Type[ "metaflow.plugins.argo.argo_workflows_deployer_objects.ArgoWorkflowsDeployedFlow" ] ): from .argo_workflows_deployer_objects import ArgoWorkflowsDeployedFlow return ArgoWorkflowsDeployedFlow def create( self, **kwargs ) -> "metaflow.plugins.argo.argo_workflows_deployer_objects.ArgoWorkflowsDeployedFlow": """ Create a new ArgoWorkflow deployment. Parameters ---------- authorize : str, optional, default None Authorize using this production token. Required when re-deploying an existing flow for the first time. The token is cached in METAFLOW_HOME. generate_new_token : bool, optional, default False Generate a new production token for this flow. Moves the production flow to a new namespace. given_token : str, optional, default None Use the given production token for this flow. Moves the production flow to the given namespace. tags : List[str], optional, default None Annotate all objects produced by Argo Workflows runs with these tags. user_namespace : str, optional, default None Change the namespace from the default (production token) to the given tag. only_json : bool, optional, default False Only print out JSON sent to Argo Workflows without deploying anything. max_workers : int, optional, default 100 Maximum number of parallel processes. workflow_timeout : int, optional, default None Workflow timeout in seconds. workflow_priority : int, optional, default None Workflow priority as an integer. Higher priority workflows are processed first if Argo Workflows controller is configured to process limited parallel workflows. auto_emit_argo_events : bool, optional, default True Auto emits Argo Events when the run completes successfully. notify_on_error : bool, optional, default False Notify if the workflow fails. notify_on_success : bool, optional, default False Notify if the workflow succeeds. notify_slack_webhook_url : str, optional, default '' Slack incoming webhook url for workflow success/failure notifications. notify_pager_duty_integration_key : str, optional, default '' PagerDuty Events API V2 Integration key for workflow success/failure notifications. enable_heartbeat_daemon : bool, optional, default False Use a daemon container to broadcast heartbeats. deployer_attribute_file : str, optional, default None Write the workflow name to the specified file. Used internally for Metaflow's Deployer API. enable_error_msg_capture : bool, optional, default True Capture stack trace of first failed task in exit hook. Returns ------- ArgoWorkflowsDeployedFlow The Flow deployed to Argo Workflows. """ # Prevent circular import from .argo_workflows_deployer_objects import ArgoWorkflowsDeployedFlow return self._create(ArgoWorkflowsDeployedFlow, **kwargs) _addl_stubgen_modules = ["metaflow.plugins.argo.argo_workflows_deployer_objects"]
ArgoWorkflowsDeployer
python
charliermarsh__ruff
crates/ty_python_semantic/resources/corpus/95_annotation_class_no_value.py
{ "start": 0, "end": 23 }
class ____(): z: int
F
python
davidhalter__jedi
sith.py
{ "start": 2224, "end": 7351 }
class ____(object): def __init__(self, operation, path, line, column, traceback=None): if operation not in self.operations: raise ValueError("%s is not a valid operation" % operation) # Set other attributes self.operation = operation self.path = path self.line = line self.column = column self.traceback = traceback @classmethod def from_cache(cls, record): with open(record) as f: args = json.load(f) return cls(*args) # Changing this? Also update the module docstring above. operations = ['complete', 'goto', 'infer', 'get_references', 'get_signatures'] @classmethod def generate(cls, file_path): operation = random.choice(cls.operations) path = random.choice(SourceFinder.files(file_path)) with open(path) as f: source = f.read() lines = source.splitlines() if not lines: lines = [''] line = random.randint(1, len(lines)) line_string = lines[line - 1] line_len = len(line_string) if line_string.endswith('\r\n'): line_len -= 1 if line_string.endswith('\n'): line_len -= 1 column = random.randint(0, line_len) return cls(operation, path, line, column) def run(self, debugger, record=None, print_result=False): try: with open(self.path) as f: self.script = jedi.Script(f.read(), path=self.path) kwargs = {} if self.operation == 'goto': kwargs['follow_imports'] = random.choice([False, True]) self.objects = getattr(self.script, self.operation)(self.line, self.column, **kwargs) if print_result: print("{path}: Line {line} column {column}".format(**self.__dict__)) self.show_location(self.line, self.column) self.show_operation() except Exception: self.traceback = traceback.format_exc() if record is not None: call_args = (self.operation, self.path, self.line, self.column, self.traceback) with open(record, 'w') as f: json.dump(call_args, f) self.show_errors() if debugger: einfo = sys.exc_info() pdb = __import__(debugger) if debugger == 'pudb': pdb.post_mortem(einfo[2], einfo[0], einfo[1]) else: pdb.post_mortem(einfo[2]) exit(1) def show_location(self, lineno, column, show=3): # Three lines ought to be enough lower = lineno - show if lineno - show > 0 else 0 prefix = ' |' for i, line in enumerate(self.script._code.split('\n')[lower:lineno]): print(prefix, lower + i + 1, line) print(prefix, ' ' * (column + len(str(lineno))), '^') def show_operation(self): print("%s:\n" % self.operation.capitalize()) if self.operation == 'complete': self.show_completions() else: self.show_definitions() def show_completions(self): for completion in self.objects: print(completion.name) def show_definitions(self): for completion in self.objects: print(completion.full_name) if completion.module_path is None: continue if os.path.abspath(completion.module_path) == os.path.abspath(self.path): self.show_location(completion.line, completion.column) def show_errors(self): sys.stderr.write(self.traceback) print(("Error with running Script(...).{operation}() with\n" "\tpath: {path}\n" "\tline: {line}\n" "\tcolumn: {column}").format(**self.__dict__)) def main(arguments): debugger = 'pdb' if arguments['--pdb'] else \ 'ipdb' if arguments['--ipdb'] else \ 'pudb' if arguments['--pudb'] else None record = arguments['--record'] jedi.settings.use_filesystem_cache = arguments['--fs-cache'] if arguments['--debug']: jedi.set_debug_function() if arguments['redo'] or arguments['show']: t = TestCase.from_cache(record) if arguments['show']: t.show_errors() else: t.run(debugger) elif arguments['run']: TestCase( arguments['<operation>'], arguments['<path>'], int(arguments['<line>']), int(arguments['<column>']) ).run(debugger, print_result=True) else: for _ in 
range(int(arguments['--maxtries'])): t = TestCase.generate(arguments['<path>'] or '.') if arguments['-s']: print('%s %s %s %s ' % (t.operation, t.path, t.line, t.column)) sys.stdout.flush() else: print('.', end='') t.run(debugger, record) sys.stdout.flush() print() if __name__ == '__main__': arguments = docopt(__doc__) main(arguments)
TestCase
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/generator10.py
{ "start": 171, "end": 384 }
class ____(Awaitable): def __await__(self): yield from (sleep(0.1).__await__()) async def func1(): x: None = await MyAwaitable() loop = get_event_loop() loop.run_until_complete(func1())
MyAwaitable
python
pytorch__pytorch
test/distributed/checkpoint/test_utils.py
{ "start": 2162, "end": 4920 }
class ____(TestCase): def test_init_convert_offset(self): a = MetadataIndex("foo", [1, 2]) b = MetadataIndex("foo", torch.Size([1, 2])) self.assertEqual(a, b) def test_index_hint_ignored_on_equals(self): a = MetadataIndex("foo") b = MetadataIndex("foo", index=99) self.assertEqual(a, b) def test_index_hint_ignored_on_hash(self): a = MetadataIndex("foo") b = MetadataIndex("foo", index=99) self.assertEqual(hash(a), hash(b)) def test_flat_data(self): state_dict = { "a": torch.rand(10), "b": [1, 2, 3], } a = find_state_dict_object(state_dict, MetadataIndex("a")) self.assertEqual(a, state_dict["a"]) a = find_state_dict_object(state_dict, MetadataIndex("a", [0])) self.assertEqual(a, state_dict["a"]) a = find_state_dict_object(state_dict, MetadataIndex("a", index=99)) self.assertEqual(a, state_dict["a"]) b = find_state_dict_object(state_dict, MetadataIndex("b")) self.assertEqual(b, state_dict["b"]) b = find_state_dict_object(state_dict, MetadataIndex("b", index=1)) self.assertEqual(b, state_dict["b"]) with self.assertRaisesRegex(ValueError, "FQN"): find_state_dict_object(state_dict, MetadataIndex("c")) with self.assertRaisesRegex(ValueError, "ShardedTensor"): find_state_dict_object(state_dict, MetadataIndex("b", [1])) @with_fake_comms(rank=0, world_size=2) def test_sharded_tensor_lookup(self): st = create_sharded_tensor(rank=0, world_size=2, shards_per_rank=3) state_dict = {"st": st} obj = find_state_dict_object(state_dict, MetadataIndex("st", [8])) self.assertEqual(obj, st.local_shards()[1].tensor) # good hint obj = find_state_dict_object(state_dict, MetadataIndex("st", [8], index=1)) self.assertEqual(obj, st.local_shards()[1].tensor) # bad hint obj = find_state_dict_object(state_dict, MetadataIndex("st", [8], index=2)) self.assertEqual(obj, st.local_shards()[1].tensor) # broken hint obj = find_state_dict_object(state_dict, MetadataIndex("st", [8], index=99)) self.assertEqual(obj, st.local_shards()[1].tensor) with self.assertRaisesRegex(ValueError, "no offset was provided"): find_state_dict_object(state_dict, MetadataIndex("st")) with self.assertRaisesRegex(ValueError, "Could not find shard"): find_state_dict_object(state_dict, MetadataIndex("st", [1])) def test_dcp_logger(self): self.assertTrue(_c10d_logger is not _dcp_logger) self.assertEqual(1, len(_c10d_logger.handlers))
TestMedatadaIndex
python
django__django
tests/filtered_relation/models.py
{ "start": 1357, "end": 1445 }
class ____(models.Model): name = models.CharField(max_length=50, unique=True)
Borrower
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_object_position07.py
{ "start": 315, "end": 933 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("object_position07.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"object_position": 4}) worksheet.set_row(8, 30, None, {"hidden": True}) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pydantic__pydantic
tests/mypy/modules/final_with_default.py
{ "start": 137, "end": 193 }
class ____(BaseModel): f: Final[int] = 1 Model()
Model
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_ignore_params.py
{ "start": 2232, "end": 6223 }
class ____(nn.Module): def __init__(self) -> None: super().__init__() q = torch.randn(10, device=device_type) self.q = nn.Parameter(q) self.y = Y() def _append_prefix(prefix: str, name: str) -> str: if prefix != "" and name != "": return prefix + "." + name else: return prefix + name def _generate_model_and_input() -> nn.Module: dim = 8 torch.manual_seed(42) addend = torch.randn((dim, dim), device=device_type) torch.manual_seed(70) subend = torch.randn((dim, dim), device=device_type) model = A(dim, addend, subend).to(device_type) torch.manual_seed(84) inp = torch.randn((dim, dim), device=device_type) return model, inp def _find_name_param_mappings(module: torch.nn.Module, prefix: str): name_to_param_map = {} param_to_name_map = {} for name, param in module.named_parameters(prefix): name_to_param_map[name] = param param_to_name_map[param] = name return name_to_param_map, param_to_name_map def _discover_ddp_ignored_params(module: torch.nn.Module, prefix: str): ddp_ignore_parameters: list[str] = [] if isinstance(module, FSDP2): ddp_ignore_parameters = [name for name, _ in module.named_parameters(prefix)] else: for name, child in list(module.named_children()): # post order traversal path = _append_prefix(prefix, name) ignored_params = _discover_ddp_ignored_params(child, path) ddp_ignore_parameters.extend(ignored_params) return ddp_ignore_parameters def _modify_ddp_ignored_params( ddp_ignored_param_names: list[str], fsdp_ignored_params: set[torch.nn.Parameter], name_to_param_map: dict, ): modified_list = [] for name in ddp_ignored_param_names: assert name in name_to_param_map param = name_to_param_map[name] if param not in fsdp_ignored_params: # DDP can ignore only if it is not ignored by FSDP modified_list.append(name) return modified_list def _get_full_tensor(name, param): if isinstance(param, DTensor): return param.full_tensor() else: return param def _discover_fsdp_ignored_params( module: torch.nn.Module, ignored_path, path: str ) -> set[torch.nn.Parameter]: total_ignored_params = set() if ignored_path == path: # Ignore all parameters inside module name_parameters = dict(module.named_parameters(path)) total_ignored_params = set(name_parameters.values()) for _ in module.buffers(recurse=True): # yet to handle ignoring buffers raise AssertionError("Yet to handle ignoring buffers") else: for name, sub_module in list(module.named_children()): child_path = _append_prefix(path, name) child_ignored_params = _discover_fsdp_ignored_params( sub_module, ignored_path, child_path ) total_ignored_params = total_ignored_params | child_ignored_params return total_ignored_params def _post_order_wrap_fsdp( module: torch.nn.Module, mesh, path: str, ignored_path: str, ignored_params: set[torch.nn.Parameter], ) -> torch.nn.Module: if ignored_path != path: for name, sub_module in list(module.named_children()): child_path = _append_prefix(path, name) _post_order_wrap_fsdp( sub_module, mesh, child_path, ignored_path, ignored_params ) fully_shard(module, mesh=mesh, ignored_params=ignored_params) return module def _find_all_fsdped_modules(module: torch.nn.Module, path) -> set[str]: result = set() for name, child in list(module.named_children()): child_path = _append_prefix(path, name) child_result = _find_all_fsdped_modules(child, child_path) result = result | child_result if isinstance(module, FSDP2): result.add(path) return result
X
python
numba__llvmlite
llvmlite/ir/types.py
{ "start": 5846, "end": 6282 }
class ____(Type): """ The type for empty values (e.g. a function returning no value). """ def _to_string(self): return 'void' def __eq__(self, other): return isinstance(other, VoidType) def __hash__(self): return hash(VoidType) @classmethod def from_llvm(cls, typeref, ir_ctx): """ Create from a llvmlite.binding.TypeRef """ return cls()
VoidType
python
scikit-learn__scikit-learn
sklearn/feature_selection/_univariate_selection.py
{ "start": 20928, "end": 24522 }
class ____(_BaseFilter): """Select features according to a percentile of the highest scores. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. Default is f_classif (see below "See Also"). The default function only works with classification tasks. .. versionadded:: 0.18 percentile : int, default=10 Percent of features to keep. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores, None if `score_func` returned only scores. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. mutual_info_classif : Mutual information for a discrete target. chi2 : Chi-squared stats of non-negative features for classification tasks. f_regression : F-value between label/feature for regression tasks. mutual_info_regression : Mutual information for a continuous target. SelectKBest : Select features based on the k highest scores. SelectFpr : Select features based on a false positive rate test. SelectFdr : Select features based on an estimated false discovery rate. SelectFwe : Select features based on family-wise error rate. GenericUnivariateSelect : Univariate feature selector with configurable mode. Notes ----- Ties between features with equal scores will be broken in an unspecified way. This filter supports unsupervised feature selection that only requests `X` for computing the scores. Examples -------- >>> from sklearn.datasets import load_digits >>> from sklearn.feature_selection import SelectPercentile, chi2 >>> X, y = load_digits(return_X_y=True) >>> X.shape (1797, 64) >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y) >>> X_new.shape (1797, 7) """ _parameter_constraints: dict = { **_BaseFilter._parameter_constraints, "percentile": [Interval(Real, 0, 100, closed="both")], } def __init__(self, score_func=f_classif, *, percentile=10): super().__init__(score_func=score_func) self.percentile = percentile def _get_support_mask(self): check_is_fitted(self) # Cater for NaNs if self.percentile == 100: return np.ones(len(self.scores_), dtype=bool) elif self.percentile == 0: return np.zeros(len(self.scores_), dtype=bool) scores = _clean_nans(self.scores_) threshold = np.percentile(scores, 100 - self.percentile) mask = scores > threshold ties = np.where(scores == threshold)[0] if len(ties): max_feats = int(len(scores) * self.percentile / 100) kept_ties = ties[: max_feats - mask.sum()] mask[kept_ties] = True return mask def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.target_tags.required = False return tags
SelectPercentile
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py
{ "start": 18808, "end": 20601 }
class ____(test.Benchmark): def benchmarkParameterizedOpVsNaiveOpCpu(self): self._benchmarkParameterizedOpVsNaiveOp(False) def benchmarkParameterizedOpVsNaiveOpGpu(self): self._benchmarkParameterizedOpVsNaiveOp(True) def _benchmarkParameterizedOpVsNaiveOp(self, use_gpu): num_iters = 50 print(("Composition of new ParameterizedTruncatedNormalOp vs. " "naive TruncatedNormalOp [%d iters]") % num_iters) print("Shape\tsec(parameterized)\tsec(naive)\tspeedup") for shape in [[10000, 100], [1000, 1000], [1000000], [100, 100, 100], [20, 20, 20, 20]]: p_dt, n_dt = parameterized_vs_naive(shape, num_iters, use_gpu) print("%s\t%.3f\t%.3f\t%.2f" % (shape, p_dt, n_dt, p_dt / n_dt)) shape_str = "-".join(map(str, shape)) self.report_benchmark( name="parameterized_shape" + shape_str, iters=num_iters, wall_time=p_dt) self.report_benchmark( name="naive_shape" + shape_str, iters=num_iters, wall_time=n_dt) def benchmarkRandnSamplerCPU(self): self._benchmarkRandnSampler(False) def benchmarkRandnSamplerGPU(self): self._benchmarkRandnSampler(True) def _benchmarkRandnSampler(self, use_gpu): num_iters = 100 shape = [int(1e6)] randn_dt, uniform_dt = randn_sampler_switchover(shape, num_iters, use_gpu) print(("Randn Sampler vs uniform samplers [%d iters]\t%.4f\t%.4f") % (num_iters, randn_dt, uniform_dt)) gpu_str = "_gpu" if use_gpu else "_cpu" self.report_benchmark( name="randn_sampler" + gpu_str, iters=num_iters, wall_time=randn_dt) self.report_benchmark( name="uniform_sampler" + gpu_str, iters=num_iters, wall_time=uniform_dt) if __name__ == "__main__": test.main()
TruncatedNormalBenchmark
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 10274, "end": 10790 }
class ____: def setup_method(self): class TestSerializer(serializers.Serializer): initial_field = serializers.IntegerField(initial=123) blank_field = serializers.IntegerField() self.serializer = TestSerializer() def test_initial(self): """ Initial values should be included when serializing a new representation. """ assert self.serializer.data == { 'initial_field': 123, 'blank_field': None }
TestInitial
python
doocs__leetcode
lcp/LCP 56. 信物传送/Solution.py
{ "start": 0, "end": 876 }
class ____: def conveyorBelt(self, matrix: List[str], start: List[int], end: List[int]) -> int: dirs = (-1, 0, 1, 0, -1) d = {"^": 0, "v": 2, "<": 3, ">": 1} i, j = start q = deque([(i, j)]) m, n = len(matrix), len(matrix[0]) dist = [[inf] * n for _ in range(m)] dist[i][j] = 0 while 1: i, j = q.popleft() if i == end[0] and j == end[1]: return int(dist[i][j]) for k in range(4): x, y = i + dirs[k], j + dirs[k + 1] t = dist[i][j] + int(k != d[matrix[i][j]]) if 0 <= x < m and 0 <= y < n and t < dist[x][y]: dist[x][y] = t if dist[x][y] == dist[i][j]: q.appendleft((x, y)) else: q.append((x, y))
Solution
python
facelessuser__pymdown-extensions
tests/test_extensions/test_blocks/test_captions.py
{ "start": 20446, "end": 36635 }
class ____(util.MdCase): """Test Blocks caption cases with enabled `auto`.""" extension = ['pymdownx.blocks.caption', 'md_in_html'] extension_configs = { 'pymdownx.blocks.caption': { } } def test_caption_number_and_id(self): """Test captions with IDs and number.""" self.check_markdown( R''' A paragraph with a caption. /// figure-caption | 3 #id This is the caption. /// A paragraph with a caption. /// figure-caption | < 4 #id2 This is the caption. /// ''', R''' <figure id="id"> <p>A paragraph with a caption.</p> <figcaption> <p><span class="caption-prefix">Figure 3.</span> This is the caption.</p> </figcaption> </figure> <figure id="id2"> <figcaption> <p><span class="caption-prefix">Figure 4.</span> This is the caption.</p> </figcaption> <p>A paragraph with a caption.</p> </figure> ''', True ) def test_caption(self): """Test basic caption with `auto`.""" self.check_markdown( R''' A paragraph with a caption. /// figure-caption This is the caption. /// ''', R''' <figure id="__figure-caption_1"> <p>A paragraph with a caption.</p> <figcaption> <p><span class="caption-prefix">Figure 1.</span> This is the caption.</p> </figcaption> </figure> ''', True ) def test_consecutive_captions(self): """Test consecutive captions with `auto`.""" self.check_markdown( R''' A paragraph with a caption. /// figure-caption This is the caption. /// A paragraph with a caption. /// figure-caption This is the caption. /// ''', R''' <figure id="__figure-caption_1"> <p>A paragraph with a caption.</p> <figcaption> <p><span class="caption-prefix">Figure 1.</span> This is the caption.</p> </figcaption> </figure> <figure id="__figure-caption_2"> <p>A paragraph with a caption.</p> <figcaption> <p><span class="caption-prefix">Figure 2.</span> This is the caption.</p> </figcaption> </figure> ''', True ) def test_nested_captions(self): """Test nested captions with `auto`.""" self.check_markdown( R''' A paragraph with a caption. /// figure-caption Level 3 caption. /// /// figure-caption Level 2 caption. /// /// figure-caption Level 1 caption. /// ''', R''' <figure id="__figure-caption_1"> <figure id="__figure-caption_1_1"> <figure id="__figure-caption_1_1_1"> <p>A paragraph with a caption.</p> <figcaption> <p><span class="caption-prefix">Figure 1.1.1.</span> Level 3 caption.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.1.</span> Level 2 caption.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.</span> Level 1 caption.</p> </figcaption> </figure> ''', True ) def test_nested_consecutive_captions(self): """Test nested captions with `auto`.""" self.check_markdown( R''' A paragraph with a caption. /// figure-caption Level 3 caption. /// /// figure-caption Level 2 caption. /// /// figure-caption Level 1 caption. /// A paragraph with a caption. /// figure-caption Level 2 caption. /// /// figure-caption Level 1 caption. 
/// ''', R''' <figure id="__figure-caption_1"> <figure id="__figure-caption_1_1"> <figure id="__figure-caption_1_1_1"> <p>A paragraph with a caption.</p> <figcaption> <p><span class="caption-prefix">Figure 1.1.1.</span> Level 3 caption.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.1.</span> Level 2 caption.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.</span> Level 1 caption.</p> </figcaption> </figure> <figure id="__figure-caption_2"> <figure id="__figure-caption_2_1"> <p>A paragraph with a caption.</p> <figcaption> <p><span class="caption-prefix">Figure 2.1.</span> Level 2 caption.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 2.</span> Level 1 caption.</p> </figcaption> </figure> ''', True ) def test_manual_prepend(self): """Test manual prepend.""" self.check_markdown( R""" Text /// figure-caption | < 2 Prepended and number ignored /// Text /// figure-caption | > Appended /// """, R""" <figure id="__figure-caption_2"> <figcaption> <p><span class="caption-prefix">Figure 2.</span> Prepended and number ignored</p> </figcaption> <p>Text</p> </figure> <figure id="__figure-caption_3"> <p>Text</p> <figcaption> <p><span class="caption-prefix">Figure 3.</span> Appended</p> </figcaption> </figure> """, True ) def test_mixed_captions(self): """Test mixed captions with `auto`.""" self.check_markdown( R''' Paragraph /// caption Not numbered /// Paragraph /// figure-caption Numbered level 2 caption. /// /// caption Not numbered level 1. /// /// figure-caption Numbered level 1 caption. /// ''', R''' <figure> <p>Paragraph</p> <figcaption> <p>Not numbered</p> </figcaption> </figure> <figure id="__figure-caption_1"> <figure> <figure id="__figure-caption_1_1"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 1.1.</span> Numbered level 2 caption.</p> </figcaption> </figure> <figcaption> <p>Not numbered level 1.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.</span> Numbered level 1 caption.</p> </figcaption> </figure> ''', True ) def test_existing_fig_caption(self): """Test when a figure has a figure caption and we don't know what type it is.""" self.check_markdown( R""" <figure markdown> Some text. <figcaption markdown> Caption. </figcaption> </figure> """, R""" <figure> <p>Some text.</p> <figcaption> <p>Caption.</p> </figcaption> </figure> """, True ) def test_inject_new_p_in_caption(self): """Test `auto` cases that require the prefix to be injected in a new paragraph.""" self.check_markdown( R""" Test /// figure-caption /// Test /// figure-caption > blockquote /// """, R""" <figure id="__figure-caption_1"> <p>Test</p> <figcaption><p><span class="caption-prefix">Figure 1.</span></p></figcaption> </figure> <figure id="__figure-caption_2"> <p>Test</p> <figcaption><p><span class="caption-prefix">Figure 2.</span></p> <blockquote> <p>blockquote</p> </blockquote> </figcaption> </figure> """, True ) def test_empty_paragraph(self): """Test `auto` cases that require prefix to inject a new paragraph.""" self.check_markdown( R""" Test /// figure-caption <p markdown></p> /// """, R""" <figure id="__figure-caption_1"> <p>Test</p> <figcaption> <p><span class="caption-prefix">Figure 1.</span></p> </figcaption> </figure> """, True ) def test_nested_captions_manual_id(self): """Test nested captions with `auto` and `auto` with manual IDs.""" self.check_markdown( R''' A paragraph with a caption. /// figure-caption Level 4 caption. 
/// /// figure-caption attrs: {id: test} Level 3 caption. /// /// figure-caption Level 2 caption. /// /// figure-caption Level 1 caption. /// ''', R''' <figure id="__figure-caption_1"> <figure id="__figure-caption_1_1"> <figure id="test"> <figure id="__figure-caption_1_1_1_1"> <p>A paragraph with a caption.</p> <figcaption> <p><span class="caption-prefix">Figure 1.1.1.1.</span> Level 4 caption.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.1.1.</span> Level 3 caption.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.1.</span> Level 2 caption.</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.</span> Level 1 caption.</p> </figcaption> </figure> ''', True ) def test_depth(self): """Test level depth.""" self.check_markdown( R""" Paragraph /// figure-caption Caption 1 /// Paragraph /// figure-caption Caption 1.1.1 /// /// figure-caption | ^1 Caption 1.1 /// Paragraph /// figure-caption | ^1 Caption 2.1 /// /// figure-caption Caption 2 /// """, """ <figure id="__figure-caption_1"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 1.</span> Caption 1</p> </figcaption> </figure> <figure id="__figure-caption_1_1"> <figure id="__figure-caption_1_1_1"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 1.1.1.</span> Caption 1.1.1</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 1.1.</span> Caption 1.1</p> </figcaption> </figure> <figure id="__figure-caption_2"> <figure id="__figure-caption_2_1_1"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 2.1.1.</span> Caption 2.1</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 2.</span> Caption 2</p> </figcaption> </figure> """, True ) def test_manual_number(self): """Test manual number.""" self.check_markdown( R""" Paragraph /// figure-caption Caption 4.2.1 /// /// figure-caption | 4.2 Caption 4.2 /// Paragraph /// figure-caption Caption 5.2.1 /// /// figure-caption | 5.2 Caption 5.2 /// /// figure-caption Caption 5 /// """, """ <figure id="__figure-caption_4_2"> <figure id="__figure-caption_4_2_1"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 4.2.1.</span> Caption 4.2.1</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 4.2.</span> Caption 4.2</p> </figcaption> </figure> <figure id="__figure-caption_5"> <figure id="__figure-caption_5_2"> <figure id="__figure-caption_5_2_1"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 5.2.1.</span> Caption 5.2.1</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 5.2.</span> Caption 5.2</p> </figcaption> </figure> <figcaption> <p><span class="caption-prefix">Figure 5.</span> Caption 5</p> </figcaption> </figure> """, True ) def test_manual_number_increment_levels(self): """Test that forced levels and manual numbers with auto works.""" self.check_markdown( R""" Paragraph /// figure-caption | 1.1 Caption 1.1 /// Paragraph /// figure-caption | ^1 Caption 1.2 /// Paragraph /// figure-caption | 2.1 Caption 2.1 /// Paragraph /// figure-caption | ^1 Caption 2.2 /// """, """ <figure id="__figure-caption_1_1"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 1.1.</span> Caption 1.1</p> </figcaption> </figure> <figure id="__figure-caption_1_2"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 1.2.</span> Caption 1.2</p> </figcaption> </figure> <figure 
id="__figure-caption_2_1"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 2.1.</span> Caption 2.1</p> </figcaption> </figure> <figure id="__figure-caption_2_2"> <p>Paragraph</p> <figcaption> <p><span class="caption-prefix">Figure 2.2.</span> Caption 2.2</p> </figcaption> </figure> """, True )
TestBlocksCaptionAutoPrefix
python
PrefectHQ__prefect
src/prefect/server/schemas/core.py
{ "start": 2487, "end": 3836 }
class ____(PrefectBaseModel): """Defines of how a flow run should retry.""" max_retries: int = Field( default=0, description=( "The maximum number of retries. Field is not used. Please use `retries`" " instead." ), deprecated=True, ) retry_delay_seconds: float = Field( default=0, description=( "The delay between retries. Field is not used. Please use `retry_delay`" " instead." ), deprecated=True, ) retries: Optional[int] = Field(default=None, description="The number of retries.") retry_delay: Optional[int] = Field( default=None, description="The delay time between retries, in seconds." ) pause_keys: Optional[set[str]] = Field( default_factory=set, description="Tracks pauses this run has observed." ) resuming: Optional[bool] = Field( default=False, description="Indicates if this run is resuming from a pause." ) retry_type: Optional[Literal["in_process", "reschedule"]] = Field( default=None, description="The type of retry this run is undergoing." ) @model_validator(mode="before") def populate_deprecated_fields(cls, values: dict[str, Any]) -> dict[str, Any]: return set_run_policy_deprecated_fields(values)
FlowRunPolicy
python
prabhupant__python-ds
data_structures/binary_trees/print_full_nodes.py
{ "start": 105, "end": 552 }
class ____: def __init__(self, val): self.val = val self.right = None self.left = None def print_full(root): if not root: return if root.left and root.right: print(root.val, end=' ') print_full(root.left) print_full(root.right) root = Node(10) root.left = Node(8) root.right = Node(2) root.left.left = Node(3) root.left.right = Node(5) root.right.left = Node(7) print_full(root)
Node
python
getsentry__sentry
src/sentry/integrations/jira_server/client.py
{ "start": 8903, "end": 13503 }
class ____(ApiClient): """ Client for making requests to JiraServer to follow OAuth1 flow. Jira OAuth1 docs: https://developer.atlassian.com/server/jira/platform/oauth/ """ request_token_url = "{}/plugins/servlet/oauth/request-token" access_token_url = "{}/plugins/servlet/oauth/access-token" authorize_url = "{}/plugins/servlet/oauth/authorize?oauth_token={}" integration_name = "jira_server_setup" SERVER_INFO_URL = "/rest/api/2/serverInfo" WEBHOOK_URL = "/rest/jira-webhook/1.0/webhooks" LEGACY_WEBHOOK_URL = "/rest/webhooks/1.0/webhook" @control_silo_function def __init__(self, base_url, consumer_key, private_key, verify_ssl=True): self.base_url = base_url self.consumer_key = consumer_key self.private_key = private_key self.verify_ssl = verify_ssl def get_request_token(self): """ Step 1 of the oauth flow. Get a request token that we can have the user verify. """ url = self.request_token_url.format(self.base_url) resp = self.post(url, allow_text=True) return dict(parse_qsl(resp.text)) def get_authorize_url(self, request_token): """ Step 2 of the oauth flow. Get a URL that the user can verify our request token at. """ return self.authorize_url.format(self.base_url, request_token["oauth_token"]) def get_access_token(self, request_token, verifier): """ Step 3 of the oauth flow. Use the verifier and request token from step 1 to get an access token. """ if not verifier: raise ApiError("Missing OAuth token verifier") auth = OAuth1( client_key=self.consumer_key, resource_owner_key=request_token["oauth_token"], resource_owner_secret=request_token["oauth_token_secret"], verifier=verifier, rsa_key=self.private_key, signature_method=SIGNATURE_RSA, signature_type="auth_header", decoding=None, ) url = self.access_token_url.format(self.base_url) resp = self.post(url, auth=auth, allow_text=True) return dict(parse_qsl(resp.text)) def create_issue_webhook(self, external_id, secret, credentials): auth = OAuth1( client_key=credentials["consumer_key"], rsa_key=credentials["private_key"], resource_owner_key=credentials["access_token"], resource_owner_secret=credentials["access_token_secret"], signature_method=SIGNATURE_RSA, signature_type="auth_header", decoding=None, ) # Create a JWT token that we can add to the webhook URL # so we can locate the matching integration later. token = jwt.encode({"id": external_id}, secret) path = reverse("sentry-extensions-jiraserver-issue-updated", kwargs={"token": token}) data = { "name": "Sentry Issue Sync", "url": absolute_uri(path), "events": ["jira:issue_created", "jira:issue_updated"], } with IntegrationPipelineViewEvent( interaction_type=IntegrationPipelineViewType.WEBHOOK_CREATION, domain=IntegrationDomain.PROJECT_MANAGEMENT, provider_key=self.integration_name, ).capture() as lifecycle: webhook_url = self.WEBHOOK_URL # Query the server version to determine which webhook endpoint to use server_info = self.get(self.SERVER_INFO_URL, auth=auth) server_version = server_info.get("version") server_major_version = server_version.split(".")[0] if server_version else None lifecycle.add_extra("server_major_version", server_major_version) if server_major_version and int(server_major_version) >= 10: webhook_url = self.WEBHOOK_URL else: # Fallback to legacy webhook endpoint if we encounter an error webhook_url = self.LEGACY_WEBHOOK_URL return self.post(webhook_url, auth=auth, data=data) def request(self, *args, **kwargs): """ Add OAuth1 RSA signatures. 
""" if "auth" not in kwargs: kwargs["auth"] = OAuth1( client_key=self.consumer_key, rsa_key=self.private_key, signature_method=SIGNATURE_RSA, signature_type="auth_header", decoding=None, ) return self._request(*args, **kwargs)
JiraServerSetupClient
python
tensorflow__tensorflow
tensorflow/python/tpu/tpu_embedding_v3.py
{ "start": 3951, "end": 5819 }
class ____(control_flow_ops.ControlFlowContext): """Sets the _embedding_pipelining attribute on all ops created in the scope.""" def __init__(self, mode: str, enable: bool): super().__init__() self._name = "EmbeddingPipelinigContext" self._mode = attr_value_pb2.AttrValue(s=compat.as_bytes(mode)) self._enable = enable recording_summaries = summary_ops_v2.is_recording_summaries() if not isinstance(recording_summaries, bool): # We can't handle predicate functions at this point. So, we'll ignore the # special casing of summary recording because, presumably, this is not # a single step loop so pipelining is still valid. recording_summaries = False if enable and ( recording_summaries or not ecu.embedding_pipelining_state.enabled ): # We'll still flag these ops for the SC forward/backward pass, but we'll # run them sequentially. This has to be handled in the MLIR passes # embedding_pipelining.cc and embedding_sequencing.cc. disable_reason = ( "Summary recording" if recording_summaries else "_embedding_pipelining_state.enabled = False" ) logging.info("%s detected, disabling pipelining.", disable_reason) self._mode = attr_value_pb2.AttrValue( s=compat.as_bytes(mode + _PIPELINE_MODEL_SEQUENTIAL) ) def to_control_flow_context_def( self, context_def: Any, export_scope: Any = None ): # pylint: disable=useless-super-delegation # The method is required by `ControlFlowContext`. super().to_control_flow_context_def(context_def, export_scope) def AddOp(self, op: ops.Operation): # pylint: disable=protected-access if self._enable: op._set_attr(_PIPELINE_ATTRIBUTE, self._mode) if self._outer_context: self._outer_context.AddOp(op)
EmbeddingPipeliningContext
python
joke2k__faker
faker/providers/phone_number/bs_BA/__init__.py
{ "start": 49, "end": 879 }
class ____(PhoneNumberProvider): formats = ( "030 ### ###", "031 ### ###", "032 ### ###", "033 ### ###", "034 ### ###", "035 ### ###", "036 ### ###", "037 ### ###", "038 ### ###", "039 ### ###", "049 ### ###", "050 ### ###", "051 ### ###", "052 ### ###", "053 ### ###", "054 ### ###", "055 ### ###", "056 ### ###", "057 ### ###", "058 ### ###", "059 ### ###", "060 ### ###", "060 #### ###", "061 ### ###", "062 ### ###", "063 ### ###", "064 ### ###", "065 ### ###", "066 ### ###", "067 ### ###", "070 20# ###", "+387 61 ### ###", "+387 (0)61 ### ###", )
Provider
python
huggingface__transformers
tests/cli/test_serve.py
{ "start": 26803, "end": 30722 }
class ____(ServeCompletionsMixin, unittest.TestCase): """Tests the `continuous_batching` version of the Completions API.""" @classmethod def setUpClass(cls): """Starts a server for tests to connect to.""" cls.port = 8002 cls.server = Serve( port=cls.port, continuous_batching=True, attn_implementation="sdpa", default_seed=42, non_blocking=True ) @classmethod def tearDownClass(cls): cls.server.kill_server() def test_full_request(self): """Tests that an inference using the Responses API and Continuous Batching works""" request = { "model": "Qwen/Qwen2.5-0.5B-Instruct", "messages": [ {"role": "system", "content": "You are a sports assistant designed to craft sports programs."}, {"role": "user", "content": "Tell me what you can do."}, ], "stream": True, "max_tokens": 30, } all_payloads = self.run_server(request) full_text = "" for token in all_payloads: if isinstance(token, ChatCompletionStreamOutput) and token.choices and len(token.choices) > 0: content = token.choices[0].delta.get("content", "") full_text += content if content is not None else "" # Verify that the system prompt went through. self.assertTrue( full_text.startswith( "I can assist you with a wide range of tasks, from answering questions to providing information on various sports topics." ) ) def test_max_tokens_not_set_in_req(self): request = { "model": "Qwen/Qwen2.5-0.5B-Instruct", "messages": [ {"role": "system", "content": "You are a sports assistant designed to craft sports programs."}, {"role": "user", "content": "Tell me what you can do."}, ], "stream": True, } all_payloads = self.run_server(request) full_text = "" for token in all_payloads: if isinstance(token, ChatCompletionStreamOutput) and token.choices and len(token.choices) > 0: content = token.choices[0].delta.get("content", "") full_text += content if content is not None else "" # Verify that the system prompt went through. self.assertTrue( full_text.startswith( "I can assist you with a wide range of tasks, from answering questions to providing information on various sports topics." ) ) def test_request_cancellation(self): """Tests that a request can be cancelled.""" base_url = f"http://127.0.0.1:{self.port}" request_id = "test-cancel" # Ensure the server is up before sending a request response = _call_healthcheck(base_url) self.assertIsNotNone(response, "Failed to connect to the server health endpoint.") self.assertEqual(response.status_code, 200) _open_stream_and_cancel(base_url, request_id) scheduler = _get_scheduler(self.server) # Because cancellation is non-blocking, poll for a short, bounded time. deadline = time.time() + 8.0 # generous but still CI-friendly last_seen = None while time.time() < deadline: is_cancelled = scheduler.request_is_cancelled(request_id) if is_cancelled: break last_seen = time.time() time.sleep(0.1) # don't spin the CPU is_cancelled = scheduler.request_is_cancelled(request_id) self.assertTrue( is_cancelled, f"Request {request_id} still present in scheduler after cancellation " f"(last seen at {last_seen}). Check cancellation propagation.", ) @require_openai
ServeCompletionsContinuousBatchingIntegrationTest
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 62916, "end": 63639 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("title", "summary", "text", "annotations", "images") title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title") summary = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="summary") text = sgqlc.types.Field(String, graphql_name="text") annotations = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(CheckAnnotationData)), graphql_name="annotations", ) images = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null("CheckRunOutputImage")), graphql_name="images", )
CheckRunOutput
python
wandb__wandb
wandb/vendor/promise-2.3.0/tests/test_extra.py
{ "start": 646, "end": 905 }
class ____(Thread): def __init__(self, d, p, r): self.delay = d self.promise = p self.reason = r Thread.__init__(self) def run(self): sleep(self.delay) self.promise.do_reject(self.reason)
DelayedRejection
python
django-extensions__django-extensions
tests/test_logging_filters.py
{ "start": 219, "end": 2636 }
class ____(TestCase): """Tests for RateLimiterFilter.""" def setUp(self): self.rate_limiter_filter = RateLimiterFilter() self.record = mock.Mock(msg=TEST_SUBJECT) self.record.getMessage.return_value = TEST_SUBJECT.encode() self.time_patch = mock.patch.object(time, "time", return_value="test_time") self.time_patch.start() def tearDown(self): self.time_patch.stop() @override_settings(RATE_LIMITER_FILTER_PREFIX="test_prefix") @mock.patch("django.core.cache.cache") def test_should_incr_cache_with_custom_prefix_and_return_False(self, m_cache): m_cache.get_many.return_value = { "test_prefix:114392702498ad1d75c1829b9519b8c7": 10, "test_prefix:114392702498ad1d75c1829b9519b8c7:count": 1, } result = self.rate_limiter_filter.filter(self.record) self.assertIs(m_cache.set.called, False) m_cache.incr.assert_called_once_with( "test_prefix:114392702498ad1d75c1829b9519b8c7:count" ) self.assertIs(result, False) @override_settings(RATE_LIMITER_FILTER_RATE=1) @mock.patch("django.core.cache.cache") def test_should_set_cache_key_with_custom_rate_and_return_True(self, m_cache): m_cache.get_many.return_value = {} expected_calls = [ mock.call( "ratelimiterfilter:114392702498ad1d75c1829b9519b8c7:count", 1, 61 ), mock.call( "ratelimiterfilter:114392702498ad1d75c1829b9519b8c7", "test_time", 1 ), ] result = self.rate_limiter_filter.filter(self.record) self.assertEqual(self.record.msg, "[1x] test_subect") m_cache.set.assert_has_calls(expected_calls, any_order=False) self.assertIs(result, True) @mock.patch("django.core.cache.cache") def test_should_modify_record_msg_and_return_True(self, m_cache): """Default rate and prefix values.""" m_cache.get_many.return_value = { "ratelimiterfilter:114392702498ad1d75c1829b9519b8c7:count": 999, } result = self.rate_limiter_filter.filter(self.record) self.assertEqual(self.record.msg, "[999x] test_subect") m_cache.set.assert_called_once_with( "ratelimiterfilter:114392702498ad1d75c1829b9519b8c7", "test_time", 10 ) self.assertIs(result, True)
RateLimiterFilterTests
python
matplotlib__matplotlib
lib/matplotlib/sphinxext/roles.py
{ "start": 1467, "end": 4912 }
class ____(nodes.Inline, nodes.TextElement): """ Wraps a reference or pending reference to add a query string. The query string is generated from the attributes added to this node. Also equivalent to a `~docutils.nodes.literal` node. """ def to_query_string(self): """Generate query string from node attributes.""" return '&'.join(f'{name}={value}' for name, value in self.attlist()) def _visit_query_reference_node(self, node): """ Resolve *node* into query strings on its ``reference`` children. Then act as if this is a `~docutils.nodes.literal`. """ query = node.to_query_string() for refnode in node.findall(nodes.reference): uri = urlsplit(refnode['refuri'])._replace(query=query) refnode['refuri'] = urlunsplit(uri) self.visit_literal(node) def _depart_query_reference_node(self, node): """ Act as if this is a `~docutils.nodes.literal`. """ self.depart_literal(node) def _rcparam_role(name, rawtext, text, lineno, inliner, options=None, content=None): """ Sphinx role ``:rc:`` to highlight and link ``rcParams`` entries. Usage: Give the desired ``rcParams`` key as parameter. :code:`:rc:`figure.dpi`` will render as: :rc:`figure.dpi` """ # Generate a pending cross-reference so that Sphinx will ensure this link # isn't broken at some point in the future. title = f'rcParams["{text}"]' target = 'matplotlibrc-sample' ref_nodes, messages = inliner.interpreted(title, f'{title} <{target}>', 'ref', lineno) qr = _QueryReference(rawtext, highlight=text) qr += ref_nodes node_list = [qr] # The default backend would be printed as "agg", but that's not correct (as # the default is actually determined by fallback). if text in rcParamsDefault and text != "backend": node_list.extend([ nodes.Text(' (default: '), nodes.literal('', repr(rcParamsDefault[text])), nodes.Text(')'), ]) return node_list, messages def _mpltype_role(name, rawtext, text, lineno, inliner, options=None, content=None): """ Sphinx role ``:mpltype:`` for custom matplotlib types. In Matplotlib, there are a number of type-like concepts that do not have a direct type representation; example: color. This role allows to properly highlight them in the docs and link to their definition. Currently supported values: - :code:`:mpltype:`color`` will render as: :mpltype:`color` """ mpltype = text type_to_link_target = { 'color': 'colors_def', 'hatch': 'hatch_def', } if mpltype not in type_to_link_target: raise ValueError(f"Unknown mpltype: {mpltype!r}") node_list, messages = inliner.interpreted( mpltype, f'{mpltype} <{type_to_link_target[mpltype]}>', 'ref', lineno) return node_list, messages def setup(app): app.add_role("rc", _rcparam_role) app.add_role("mpltype", _mpltype_role) app.add_node( _QueryReference, html=(_visit_query_reference_node, _depart_query_reference_node), latex=(_visit_query_reference_node, _depart_query_reference_node), text=(_visit_query_reference_node, _depart_query_reference_node), ) return {"version": matplotlib.__version__, "parallel_read_safe": True, "parallel_write_safe": True}
_QueryReference
python
huggingface__transformers
src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py
{ "start": 18316, "end": 20098 }
class ____(Qwen3Config): def __init__( self, vocab_size: Optional[int] = 2048, hidden_size: Optional[int] = 1024, intermediate_size: Optional[int] = 3072, num_hidden_layers: Optional[int] = 5, num_attention_heads: Optional[int] = 16, num_key_value_heads: Optional[int] = 8, head_dim: Optional[int] = 128, hidden_act: Optional[str] = "silu", max_position_embeddings: Optional[int] = 32768, initializer_range: Optional[float] = 0.02, rms_norm_eps: Optional[float] = 0.000001, use_cache: Optional[bool] = True, tie_word_embeddings: Optional[bool] = False, rope_parameters: Optional[int] = None, attention_bias: Optional[bool] = False, sliding_window: Optional[int] = None, layer_types: Optional[list[str]] = None, attention_dropout: Optional[int] = 0, num_code_groups: Optional[int] = 32, **kwargs, ): super().__init__( vocab_size, hidden_size, intermediate_size, num_hidden_layers, num_attention_heads, num_key_value_heads, head_dim, hidden_act, max_position_embeddings, initializer_range, rms_norm_eps, use_cache, tie_word_embeddings, rope_parameters, attention_bias, False, sliding_window, None, layer_types, attention_dropout, **kwargs, ) del self.use_sliding_window del self.max_window_layers self.sliding_window = sliding_window self.num_code_groups = num_code_groups
Qwen3OmniMoeTalkerCodePredictorConfig
python
pytorch__pytorch
torch/mtia/__init__.py
{ "start": 8813, "end": 9449 }
class ____: r"""Context-manager that changes the selected device. Args: device (torch.device or int): device index to select. It's a no-op if this argument is a negative integer or ``None``. """ def __init__(self, device: Any): self.idx = _get_device_index(device, optional=True) self.prev_idx = -1 def __enter__(self): self.prev_idx = torch._C._accelerator_hooks_maybe_exchange_device(self.idx) def __exit__(self, type: Any, value: Any, traceback: Any): self.idx = torch._C._accelerator_hooks_maybe_exchange_device(self.prev_idx) return False
device
python
pdm-project__pdm
src/pdm/cli/commands/python.py
{ "start": 677, "end": 1537 }
class ____(BaseCommand): """Manage installed Python interpreters""" arguments = () def add_arguments(self, parser: ArgumentParser) -> None: self.parser = parser subparsers = parser.add_subparsers(title="commands", metavar="") ListCommand.register_to(subparsers, name="list") RemoveCommand.register_to(subparsers, name="remove") InstallCommand.register_to(subparsers, name="install") LinkCommand.register_to(subparsers, name="link") FindCommand.register_to(subparsers, name="find") @classmethod def register_to(cls, subparsers: _SubParsersAction, name: str | None = None, **kwargs: Any) -> None: return super().register_to(subparsers, name, aliases=["py"], **kwargs) def handle(self, project: Project, options: Namespace) -> None: self.parser.print_help()
Command
python
ray-project__ray
python/ray/train/v2/_internal/execution/worker_group/poll.py
{ "start": 1269, "end": 3866 }
class ____: worker_statuses: Dict[int, WorkerStatus] @property def errors(self) -> Dict[int, Exception]: return { world_rank: status.error for world_rank, status in self.worker_statuses.items() if status.error is not None } def get_worker_group_error(self) -> WorkerGroupError: return WorkerGroupError( error_message=self.get_error_string(), worker_failures=self.errors, ) @property def finished(self) -> bool: return self.worker_statuses and all( not status.running for status in self.worker_statuses.values() ) def get_error_string(self) -> str: """ Returns a string representation of worker group errors. Groups similar errors (ignoring numbers) and shows original error examples. """ # Group errors by normalized strings (ignoring numbers) normalized_error_to_ranks = defaultdict(list) normalized_error_to_original = {} show_full_error = set() for world_rank, status in self.worker_statuses.items(): if status.error: error_str = str(status.error) normalized_error = _normalize_error_string(error_str) normalized_error_to_ranks[normalized_error].append(str(world_rank)) # Store the first original error for this normalized group if normalized_error not in normalized_error_to_original: normalized_error_to_original[normalized_error] = error_str # Fully show errors for non-graceful worker failures or running workers if ( isinstance(status.error, WorkerHealthCheckFailedError) or status.running ): show_full_error.add(normalized_error) errors = [] for normalized_error, ranks in normalized_error_to_ranks.items(): # Show the original error orig_error = normalized_error_to_original[normalized_error] # Convert rank list to comma-separated strings ranks_str = ",".join(ranks) if normalized_error in show_full_error: errors.append(f"[Rank {ranks_str} Error Snippet]:\n{orig_error}") else: errors.append( f"[Rank {ranks_str} Error Snippet]:\n{_truncate_error_string(orig_error)}" ) error_str = "\n".join(errors) return error_str @dataclass(frozen=True)
WorkerGroupPollStatus
python
rapidsai__cudf
python/cudf/cudf/pandas/_logger.py
{ "start": 331, "end": 2681 }
class ____: # https://docs.python.org/3/howto/logging-cookbook.html#implementing-structured-logging def __init__(self, debug_type: str, /, **kwargs) -> None: self.debug_type = debug_type self.kwargs = kwargs def __str__(self) -> str: log = {"debug_type": self.debug_type} return json.dumps({**log, **self.kwargs}) def reprify(arg) -> str: """Attempt to return arg's repr for logging.""" try: return repr(arg) except Exception: return "<REPR FAILED>" def log_fallback( slow_args: tuple, slow_kwargs: dict, exception: Exception ) -> None: """Log when a fast call falls back to the slow path.""" caller = slow_args[0] module = getattr(caller, "__module__", "") obj_name = getattr(caller, "__qualname__", type(caller).__qualname__) if module: slow_object = f"{module}.{obj_name}" else: slow_object = obj_name # TODO: Maybe use inspect.signature to map called args and kwargs # to their keyword names, but a user calling an API incorrectly would # break this. caller_args = slow_args[1] args_passed = ", ".join((reprify(arg) for arg in caller_args)) args_types_passed = ", ".join((type(arg).__name__ for arg in caller_args)) kwargs_passed = {} kwargs_types_passed = "" if len(slow_args) == 3: caller_kwargs = slow_args[2] if caller_kwargs: fmt_kwargs = ", ".join( f"{kwarg}={reprify(value)}" for kwarg, value in caller_kwargs.items() ) kwargs_types_passed = ", ".join( f"{kwarg}={type(value).__name__}" for kwarg, value in caller_kwargs.items() ) args_passed = f"{args_passed}, {fmt_kwargs}" kwargs_passed = { kwarg: reprify(value) for kwarg, value in caller_kwargs.items() } message = StructuredMessage( "LOG_FAST_FALLBACK", failed_call=f"{slow_object}({args_passed})", exception=type(exception).__name__, exception_message=str(exception), slow_object=slow_object, args_passed=args_passed, kwargs_passed=kwargs_passed, args_types_passed=args_types_passed, kwargs_types_passed=kwargs_types_passed, ) logger.info(message)
StructuredMessage
python
django__django
tests/staticfiles_tests/test_liveserver.py
{ "start": 886, "end": 2019 }
class ____(LiveServerBase): @classmethod def setUpClass(cls): # If contrib.staticfiles isn't configured properly, the exception # should bubble up to the main thread. old_STATIC_URL = TEST_SETTINGS["STATIC_URL"] TEST_SETTINGS["STATIC_URL"] = None try: cls.raises_exception() finally: TEST_SETTINGS["STATIC_URL"] = old_STATIC_URL @classmethod def tearDownClass(cls): # skip it, as setUpClass doesn't call its parent either pass @classmethod def raises_exception(cls): try: super().setUpClass() except ImproperlyConfigured: # This raises ImproperlyConfigured("You're using the staticfiles # app without having set the required STATIC_URL setting.") pass else: raise Exception("setUpClass() should have raised an exception.") def test_test_test(self): # Intentionally empty method so that the test is picked up by the # test runner and the overridden setUpClass() method is executed. pass
StaticLiveServerChecks
python
allegroai__clearml
clearml/backend_api/services/v2_9/queues.py
{ "start": 14098, "end": 15236 }
class ____(Response): """ Response of queues.add_task endpoint. :param added: Number of tasks added (0 or 1) :type added: int """ _service = "queues" _action = "add_task" _version = "2.9" _schema = { "definitions": {}, "properties": { "added": { "description": "Number of tasks added (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], } }, "type": "object", } def __init__(self, added: Optional[int] = None, **kwargs: Any) -> None: super(AddTaskResponse, self).__init__(**kwargs) self.added = added @schema_property("added") def added(self) -> Optional[int]: return self._property_added @added.setter def added(self, value: Optional[int]) -> None: if value is None: self._property_added = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "added", six.integer_types) self._property_added = value
AddTaskResponse
python
kamyu104__LeetCode-Solutions
Python/maximize-subarray-sum-after-removing-all-occurrences-of-one-element.py
{ "start": 639, "end": 1221 }
class ____(object): def maxSubarraySum(self, nums): """ :type nums: List[int] :rtype: int """ result = float("-inf") curr = mn = mn0 = 0 mn1 = collections.defaultdict(int) for x in nums: curr += x result = max(result, curr-mn) if x < 0: mn1[x] = min(mn1[x], mn0)+x mn = min(mn, mn1[x]) mn0 = min(mn0, curr) mn = min(mn, mn0) return result # Time: O(nlogn) # Space: O(n) import collections # segment tree
Solution2
python
gevent__gevent
src/gevent/baseserver.py
{ "start": 1241, "end": 16716 }
class ____(object): """ An abstract base class that implements some common functionality for the servers in gevent. :param listener: Either be an address that the server should bind on or a :class:`gevent.socket.socket` instance that is already bound (and put into listening mode in case of TCP socket). :keyword handle: If given, the request handler. The request handler can be defined in a few ways. Most commonly, subclasses will implement a ``handle`` method as an instance method. Alternatively, a function can be passed as the ``handle`` argument to the constructor. In either case, the handler can later be changed by calling :meth:`set_handle`. When the request handler returns, the socket used for the request will be closed. Therefore, the handler must not return if the socket is still in use (for example, by manually spawned greenlets). :keyword spawn: If provided, is called to create a new greenlet to run the handler. By default, :func:`gevent.spawn` is used (meaning there is no artificial limit on the number of concurrent requests). Possible values for *spawn*: - a :class:`gevent.pool.Pool` instance -- ``handle`` will be executed using :meth:`gevent.pool.Pool.spawn` only if the pool is not full. While it is full, no new connections are accepted; - :func:`gevent.spawn_raw` -- ``handle`` will be executed in a raw greenlet which has a little less overhead then :class:`gevent.Greenlet` instances spawned by default; - ``None`` -- ``handle`` will be executed right away, in the :class:`Hub` greenlet. ``handle`` cannot use any blocking functions as it would mean switching to the :class:`Hub`. - an integer -- a shortcut for ``gevent.pool.Pool(integer)`` .. versionchanged:: 1.1a1 When the *handle* function returns from processing a connection, the client socket will be closed. This resolves the non-deterministic closing of the socket, fixing ResourceWarnings under Python 3 and PyPy. .. versionchanged:: 1.5 Now a context manager that returns itself and calls :meth:`stop` on exit. """ # pylint: disable=too-many-instance-attributes,bare-except,broad-except #: The number of seconds to sleep in case there was an error in accept() call. #: For consecutive errors the delay will double until it reaches max_delay. #: When accept() finally succeeds the delay will be reset to min_delay again. min_delay = 0.01 #: The maximum number of seconds to sleep in case there was an error in #: accept() call. max_delay = 1 #: Sets the maximum number of consecutive accepts that a process may perform on #: a single wake up. High values give higher priority to high connection rates, #: while lower values give higher priority to already established connections. #: Default is 100. #: #: Note that, in case of multiple working processes on the same #: listening socket, it should be set to a lower value. (pywsgi.WSGIServer sets it #: to 1 when ``environ["wsgi.multiprocess"]`` is true) #: #: This is equivalent to libuv's `uv_tcp_simultaneous_accepts #: <http://docs.libuv.org/en/v1.x/tcp.html#c.uv_tcp_simultaneous_accepts>`_ #: value. Setting the environment variable UV_TCP_SINGLE_ACCEPT to a true value #: (usually 1) changes the default to 1 (in libuv only; this does not affect gevent). 
max_accept = 100 _spawn = Greenlet.spawn #: the default timeout that we wait for the client connections to close in stop() stop_timeout = 1 fatal_errors = (errno.EBADF, errno.EINVAL, errno.ENOTSOCK) def __init__(self, listener, handle=None, spawn='default'): self._stop_event = Event() self._stop_event.set() self._watcher = None self._timer = None self._handle = None # XXX: FIXME: Subclasses rely on the presence or absence of the # `socket` attribute to determine whether we are open/should be opened. # Instead, have it be None. # XXX: In general, the state management here is confusing. Lots of stuff is # deferred until the various ``set_`` methods are called, and it's not documented # when it's safe to call those self.pool = None # can be set from ``spawn``; overrides self.full() try: self.set_listener(listener) self.set_spawn(spawn) self.set_handle(handle) self.delay = self.min_delay self.loop = get_hub().loop if self.max_accept < 1: raise ValueError('max_accept must be positive int: %r' % (self.max_accept, )) except: self.close() raise def __enter__(self): return self def __exit__(self, *args): self.stop() def set_listener(self, listener): if hasattr(listener, 'accept'): if hasattr(listener, 'do_handshake'): raise TypeError('Expected a regular socket, not SSLSocket: %r' % (listener, )) self.family = listener.family self.address = listener.getsockname() self.socket = listener else: self.family, self.address = parse_address(listener) def set_spawn(self, spawn): if spawn == 'default': self.pool = None self._spawn = self._spawn elif hasattr(spawn, 'spawn'): self.pool = spawn self._spawn = spawn.spawn elif isinstance(spawn, integer_types): from gevent.pool import Pool self.pool = Pool(spawn) self._spawn = self.pool.spawn else: self.pool = None self._spawn = spawn if hasattr(self.pool, 'full'): self.full = self.pool.full if self.pool is not None: self.pool._semaphore.rawlink(self._start_accepting_if_started) def set_handle(self, handle): if handle is not None: self.handle = handle if hasattr(self, 'handle'): self._handle = self.handle else: raise TypeError("'handle' must be provided") def _start_accepting_if_started(self, _event=None): if self.started: self.start_accepting() def start_accepting(self): if self._watcher is None: # just stop watcher without creating a new one? 
self._watcher = self.loop.io(self.socket.fileno(), 1) self._watcher.start(self._do_read) def stop_accepting(self): if self._watcher is not None: self._watcher.stop() self._watcher.close() self._watcher = None if self._timer is not None: self._timer.stop() self._timer.close() self._timer = None def do_handle(self, *args): spawn = self._spawn handle = self._handle close = self.do_close try: if spawn is None: _handle_and_close_when_done(handle, close, args) else: spawn(_handle_and_close_when_done, handle, close, args) except: close(*args) raise def do_close(self, *args): pass def do_read(self): raise NotImplementedError() def _do_read(self): for _ in xrange(self.max_accept): if self.full(): self.stop_accepting() if self.pool is not None: self.pool._semaphore.rawlink(self._start_accepting_if_started) return try: args = self.do_read() self.delay = self.min_delay if not args: return except: self.loop.handle_error(self, *sys.exc_info()) ex = sys.exc_info()[1] if self.is_fatal_error(ex): self.close() sys.stderr.write('ERROR: %s failed with %s\n' % (self, str(ex) or repr(ex))) return if self.delay >= 0: self.stop_accepting() self._timer = self.loop.timer(self.delay) self._timer.start(self._start_accepting_if_started) self.delay = min(self.max_delay, self.delay * 2) break else: try: self.do_handle(*args) except: self.loop.handle_error((args[1:], self), *sys.exc_info()) if self.delay >= 0: self.stop_accepting() self._timer = self.loop.timer(self.delay) self._timer.start(self._start_accepting_if_started) self.delay = min(self.max_delay, self.delay * 2) break def full(self): # pylint: disable=method-hidden # If a Pool is given for to ``set_spawn`` (the *spawn* argument # of the constructor) it will replace this method. return False def __repr__(self): return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._formatinfo()) def __str__(self): return '<%s %s>' % (type(self).__name__, self._formatinfo()) def _formatinfo(self): if hasattr(self, 'socket'): try: fileno = self.socket.fileno() except Exception as ex: fileno = str(ex) result = 'fileno=%s ' % fileno else: result = '' try: if isinstance(self.address, tuple) and len(self.address) == 2: result += 'address=%s:%s' % self.address else: result += 'address=%s' % (self.address, ) except Exception as ex: result += str(ex) or '<error>' handle = self.__dict__.get('handle') if handle is not None: fself = getattr(handle, '__self__', None) try: if fself is self: # Checks the __self__ of the handle in case it is a bound # method of self to prevent recursively defined reprs. handle_repr = '<bound method %s.%s of self>' % ( self.__class__.__name__, handle.__name__, ) else: handle_repr = repr(handle) result += ' handle=' + handle_repr except Exception as ex: result += str(ex) or '<error>' return result @property def server_host(self): """IP address that the server is bound to (string).""" if isinstance(self.address, tuple): return self.address[0] @property def server_port(self): """Port that the server is bound to (an integer).""" if isinstance(self.address, tuple): return self.address[1] def init_socket(self): """ If the user initialized the server with an address rather than socket, then this function must create a socket, bind it, and put it into listening mode. It is not supposed to be called by the user, it is called by :meth:`start` before starting the accept loop. """ @property def started(self): return not self._stop_event.is_set() def start(self): """Start accepting the connections. 
If an address was provided in the constructor, then also create a socket, bind it and put it into the listening mode. """ self.init_socket() self._stop_event.clear() try: self.start_accepting() except: self.close() raise def close(self): """Close the listener socket and stop accepting.""" self._stop_event.set() try: self.stop_accepting() finally: try: self.socket.close() except Exception: pass finally: self.__dict__.pop('socket', None) self.__dict__.pop('handle', None) self.__dict__.pop('_handle', None) self.__dict__.pop('_spawn', None) self.__dict__.pop('full', None) if self.pool is not None: self.pool._semaphore.unlink(self._start_accepting_if_started) # If the pool's semaphore had a notifier already started, # there's a reference cycle we're a part of # (self->pool->semaphere-hub callback->semaphore) # But we can't destroy self.pool, because self.stop() # calls this method, and then wants to join self.pool() @property def closed(self): return not hasattr(self, 'socket') def stop(self, timeout=None): """ Stop accepting the connections and close the listening socket. If the server uses a pool to spawn the requests, then :meth:`stop` also waits for all the handlers to exit. If there are still handlers executing after *timeout* has expired (default 1 second, :attr:`stop_timeout`), then the currently running handlers in the pool are killed. If the server does not use a pool, then this merely stops accepting connections; any spawned greenlets that are handling requests continue running until they naturally complete. """ self.close() if timeout is None: timeout = self.stop_timeout if self.pool: self.pool.join(timeout=timeout) self.pool.kill(block=True, timeout=1) def serve_forever(self, stop_timeout=None): """Start the server if it hasn't been already started and wait until it's stopped.""" # add test that serve_forever exists on stop() if not self.started: self.start() try: self._stop_event.wait() finally: Greenlet.spawn(self.stop, timeout=stop_timeout).join() def is_fatal_error(self, ex): return isinstance(ex, _socket.error) and ex.args[0] in self.fatal_errors def _extract_family(host): if host.startswith('[') and host.endswith(']'): host = host[1:-1] return _socket.AF_INET6, host return _socket.AF_INET, host def _parse_address(address): if isinstance(address, tuple): if not address[0] or ':' in address[0]: return _socket.AF_INET6, address return _socket.AF_INET, address if ((isinstance(address, string_types) and ':' not in address) or isinstance(address, integer_types)): # noqa (pep8 E129) # Just a port return _socket.AF_INET6, ('', int(address)) if not isinstance(address, string_types): raise TypeError('Expected tuple or string, got %s' % type(address)) host, port = address.rsplit(':', 1) family, host = _extract_family(host) if host == '*': host = '' return family, (host, int(port)) def parse_address(address): try: return _parse_address(address) except ValueError as ex: # pylint:disable=try-except-raise raise ValueError('Failed to parse address %r: %s' % (address, ex))
BaseServer
python
getsentry__sentry
tests/sentry/sentry_apps/external_requests/test_issue_link_requester.py
{ "start": 896, "end": 10253 }
class ____(TestCase): def setUp(self) -> None: super().setUp() self.user = self.create_user(name="foo") self.org = self.create_organization(owner=self.user) self.project = self.create_project(slug="boop", organization=self.org) self.group = self.create_group(project=self.project) self.sentry_app = self.create_sentry_app( name="foo", organization=self.org, webhook_url="https://example.com", scopes=() ) self.orm_install = self.create_sentry_app_installation( slug="foo", organization=self.org, user=self.user ) self.rpc_user = serialize_rpc_user(self.user) self.install = app_service.get_many(filter=dict(installation_ids=[self.orm_install.id]))[0] @responses.activate @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_makes_request(self, mock_record: MagicMock) -> None: fields = {"title": "An Issue", "description": "a bug was found", "assignee": "user-1"} responses.add( method=responses.POST, url="https://example.com/link-issue", json={ "project": "ProjectName", "webUrl": "https://example.com/project/issue-id", "identifier": "issue-1", }, status=200, content_type="application/json", ) result = IssueLinkRequester( install=self.install, group=self.group, uri="/link-issue", fields=fields, user=self.rpc_user, action=IssueRequestActionType("create"), ).run() assert result == { "project": "ProjectName", "webUrl": "https://example.com/project/issue-id", "identifier": "issue-1", } request = responses.calls[0].request data = { "fields": {"title": "An Issue", "description": "a bug was found", "assignee": "user-1"}, "issueId": self.group.id, "installationId": self.install.uuid, "webUrl": self.group.get_absolute_url(), "project": {"id": self.project.id, "slug": self.project.slug}, "actor": {"type": "user", "id": self.user.id, "name": self.user.name}, } payload = json.loads(request.body) assert payload == data assert request.headers["Sentry-App-Signature"] == self.sentry_app.build_signature( json.dumps(payload) ) buffer = SentryAppWebhookRequestsBuffer(self.sentry_app) requests = buffer.get_requests() assert len(requests) == 1 assert requests[0]["response_code"] == 200 assert requests[0]["event_type"] == "external_issue.created" # SLO assertions assert_success_metric(mock_record) # EXTERNAL_REQUEST (success) -> EXTERNAL_REQUEST (success) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=2 ) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=2 ) @responses.activate @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_invalid_response_format(self, mock_record: MagicMock) -> None: # missing 'identifier' invalid_format = { "project": "ProjectName", "webUrl": "https://example.com/project/issue-id", } responses.add( method=responses.POST, url="https://example.com/link-issue", json=invalid_format, status=200, content_type="application/json", ) with pytest.raises(SentryAppIntegratorError) as exception_info: IssueLinkRequester( install=self.install, group=self.group, uri="/link-issue", fields={}, user=self.rpc_user, action=IssueRequestActionType("create"), ).run() assert exception_info.value.webhook_context == { "error_type": FAILURE_REASON_BASE.format( SentryAppExternalRequestHaltReason.BAD_RESPONSE ), "uri": "/link-issue", "installation_uuid": self.install.uuid, "sentry_app_slug": self.sentry_app.slug, "project_slug": self.group.project.slug, "group_id": self.group.id, "response": invalid_format, } # SLO assertions assert_halt_metric( mock_record, 
f"{SentryAppEventType.EXTERNAL_ISSUE_LINKED}.{SentryAppExternalRequestHaltReason.BAD_RESPONSE}", ) # EXTERNAL_REQUEST (halt) -> EXTERNAL_REQUEST (success) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=2 ) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=1 ) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=1 ) @responses.activate @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_500_response(self, mock_record: MagicMock) -> None: responses.add( method=responses.POST, url="https://example.com/link-issue", body="Something failed", status=500, ) with pytest.raises(SentryAppIntegratorError) as exception_info: IssueLinkRequester( install=self.install, group=self.group, uri="/link-issue", fields={}, user=self.rpc_user, action=IssueRequestActionType("create"), ).run() assert exception_info.value.webhook_context == { "error_type": FAILURE_REASON_BASE.format( SentryAppExternalRequestHaltReason.BAD_RESPONSE ), "uri": "/link-issue", "installation_uuid": self.install.uuid, "sentry_app_slug": self.sentry_app.slug, "project_slug": self.group.project.slug, "group_id": self.group.id, "error_message": "500 Server Error: Internal Server Error for url: https://example.com/link-issue", } buffer = SentryAppWebhookRequestsBuffer(self.sentry_app) requests = buffer.get_requests() assert len(requests) == 1 assert requests[0]["response_code"] == 500 assert requests[0]["event_type"] == "external_issue.created" # SLO assertions # We recieved back a 500 response from 3p assert_many_halt_metrics(mock_record, [HTTPError(), HTTPError()]) # EXTERNAL_REQUEST (halt) -> EXTERNAL_REQUEST (halt) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=2 ) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=2 ) @responses.activate @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_invalid_json_response(self, mock_record: MagicMock) -> None: responses.add( method=responses.POST, url="https://example.com/link-issue", body="not valid json}", status=200, content_type="application/json", ) with pytest.raises(SentryAppIntegratorError) as exception_info: IssueLinkRequester( install=self.install, group=self.group, uri="/link-issue", fields={}, user=self.rpc_user, action=IssueRequestActionType("create"), ).run() assert exception_info.value.webhook_context == { "error_type": FAILURE_REASON_BASE.format( SentryAppExternalRequestHaltReason.BAD_RESPONSE ), "uri": "/link-issue", "installation_uuid": self.install.uuid, "sentry_app_slug": self.sentry_app.slug, "project_slug": self.group.project.slug, "group_id": self.group.id, "response_body": b"not valid json}", } buffer = SentryAppWebhookRequestsBuffer(self.sentry_app) requests = buffer.get_requests() assert len(requests) == 1 assert requests[0]["response_code"] == 200 assert requests[0]["event_type"] == "external_issue.created" # SLO assertions assert_halt_metric( mock_record, json.JSONDecodeError("Expecting value", "not valid json}", 0), ) # EXTERNAL_REQUEST (halt) -> EXTERNAL_REQUEST (success) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=2 ) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=1 ) assert_count_of_metric( mock_record=mock_record, 
outcome=EventLifecycleOutcome.HALTED, outcome_count=1 )
TestIssueLinkRequester
python
pypa__warehouse
warehouse/accounts/models.py
{ "start": 13598, "end": 14235 }
class ____(db.Model): __tablename__ = "prohibited_email_domains" __repr__ = make_repr("domain") created: Mapped[datetime_now] domain: Mapped[str] = mapped_column(unique=True) is_mx_record: Mapped[bool_false] = mapped_column( comment="Prohibit any domains that have this domain as an MX record?" ) _prohibited_by: Mapped[UUID | None] = mapped_column( "prohibited_by", PG_UUID(as_uuid=True), ForeignKey("users.id"), index=True, ) prohibited_by: Mapped[User] = orm.relationship(User) comment: Mapped[str] = mapped_column(server_default="")
ProhibitedEmailDomain
python
getsentry__sentry
src/sentry/migrations/0998_add_prebuilt_id_to_dashboards.py
{ "start": 250, "end": 2644 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("sentry", "0997_add_has_trace_metrics_bit_to_project_model"), ] operations = [ migrations.AddField( model_name="dashboard", name="prebuilt_id", field=sentry.db.models.fields.bounded.BoundedPositiveIntegerField( db_default=None, null=True ), ), migrations.AlterField( model_name="dashboard", name="created_by_id", field=sentry.db.models.fields.hybrid_cloud_foreign_key.HybridCloudForeignKey( "sentry.User", db_index=True, null=True, on_delete="CASCADE" ), ), migrations.AddConstraint( model_name="dashboard", constraint=models.UniqueConstraint( condition=models.Q(("prebuilt_id__isnull", False)), fields=("organization", "prebuilt_id"), name="sentry_dashboard_organization_prebuilt_id_uniq", ), ), migrations.AddConstraint( model_name="dashboard", constraint=models.CheckConstraint( condition=models.Q( ("prebuilt_id__isnull", True), ("created_by_id__isnull", True), _connector="OR" ), name="sentry_dashboard_prebuilt_null_created_by", ), ), ]
Migration
python
ray-project__ray
python/ray/autoscaler/_private/node_provider_availability_tracker.py
{ "start": 383, "end": 573 }
class ____: node_type: str is_available: bool last_checked_timestamp: float unavailable_node_information: Optional[UnavailableNodeInformation] @dataclass
NodeAvailabilityRecord
python
vyperlang__vyper
vyper/semantics/types/primitives.py
{ "start": 10549, "end": 12212 }
class ____(NumericT): typeclass = "decimal" _bits = 168 # TODO generalize _decimal_places = 10 # TODO generalize _id = "decimal" _is_signed = True _invalid_ops = ( vy_ast.Pow, vy_ast.FloorDiv, vy_ast.BitAnd, vy_ast.BitOr, vy_ast.BitXor, vy_ast.Not, ) _valid_literal = (vy_ast.Decimal,) _equality_attrs = ("_bits", "_decimal_places") ast_type = Decimal def validate_numeric_op(self, node) -> None: try: super().validate_numeric_op(node) except VyperException as e: raise _add_div_hint(node, e) from None @cached_property def abi_type(self) -> ABIType: return ABI_GIntM(self._bits, self._is_signed) @cached_property def decimals(self) -> int: # Alias for API compatibility with codegen return self._decimal_places @cached_property def divisor(self) -> int: return 10**self.decimals @cached_property def epsilon(self) -> Decimal: return 1 / Decimal(self.divisor) @cached_property def ast_bounds(self) -> Tuple[Decimal, Decimal]: return self.decimal_bounds @cached_property def decimal_bounds(self) -> Tuple[Decimal, Decimal]: lo, hi = int_bounds(signed=self.is_signed, bits=self.bits) DIVISOR = Decimal(self.divisor) return lo / DIVISOR, hi / DIVISOR def to_abi_arg(self, name: str = "") -> dict[str, Any]: ret = super().to_abi_arg(name) ret["internalType"] = repr(self) return ret # maybe this even deserves its own module, address.py # should inherit from uint160?
DecimalT
python
pytorch__pytorch
torch/utils/data/datapipes/iter/selecting.py
{ "start": 569, "end": 3308 }
class ____(IterDataPipe[_T_co]): r""" Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``). Args: datapipe: Iterable DataPipe being filtered filter_fn: Customized function mapping an element to a boolean. input_col: Index or indices of data which ``filter_fn`` is applied, such as: - ``None`` as default to apply ``filter_fn`` to the data directly. - Integer(s) is used for list/tuple. - Key(s) is used for dict. Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import IterableWrapper >>> def is_even(n): ... return n % 2 == 0 >>> dp = IterableWrapper(range(5)) >>> filter_dp = dp.filter(filter_fn=is_even) >>> list(filter_dp) [0, 2, 4] """ datapipe: IterDataPipe[_T_co] filter_fn: Callable def __init__( self, datapipe: IterDataPipe[_T_co], filter_fn: Callable, input_col=None, ) -> None: super().__init__() self.datapipe = datapipe _check_unpickable_fn(filter_fn) self.filter_fn = filter_fn # type: ignore[assignment] self.input_col = input_col validate_input_col(filter_fn, input_col) def _apply_filter_fn(self, data) -> bool: if self.input_col is None: return self.filter_fn(data) elif isinstance(self.input_col, (list, tuple)): args = tuple(data[col] for col in self.input_col) return self.filter_fn(*args) else: return self.filter_fn(data[self.input_col]) def __iter__(self) -> Iterator[_T_co]: for data in self.datapipe: condition, filtered = self._returnIfTrue(data) if condition: yield filtered else: StreamWrapper.close_streams(data) def _returnIfTrue(self, data: _T) -> tuple[bool, _T]: condition = self._apply_filter_fn(data) if df_wrapper.is_column(condition): # We are operating on DataFrames filter here result = [] for idx, mask in enumerate(df_wrapper.iterate(condition)): if mask: result.append(df_wrapper.get_item(data, idx)) if result: return True, df_wrapper.concat(result) else: return False, None # type: ignore[return-value] if not isinstance(condition, bool): raise ValueError( "Boolean output is required for `filter_fn` of FilterIterDataPipe, got", type(condition), ) return condition, data
FilterIterDataPipe
python
graphql-python__graphene
graphene/types/tests/test_decimal.py
{ "start": 112, "end": 2165 }
class ____(ObjectType): decimal = Decimal(input=Decimal()) def resolve_decimal(self, info, input): return input schema = Schema(query=Query) def test_decimal_string_query(): decimal_value = decimal.Decimal("1969.1974") result = schema.execute("""{ decimal(input: "%s") }""" % decimal_value) assert not result.errors assert result.data == {"decimal": str(decimal_value)} assert decimal.Decimal(result.data["decimal"]) == decimal_value def test_decimal_string_query_variable(): decimal_value = decimal.Decimal("1969.1974") result = schema.execute( """query Test($decimal: Decimal){ decimal(input: $decimal) }""", variables={"decimal": decimal_value}, ) assert not result.errors assert result.data == {"decimal": str(decimal_value)} assert decimal.Decimal(result.data["decimal"]) == decimal_value def test_bad_decimal_query(): not_a_decimal = "Nobody expects the Spanish Inquisition!" result = schema.execute("""{ decimal(input: "%s") }""" % not_a_decimal) assert result.errors assert len(result.errors) == 1 assert result.data is None assert ( result.errors[0].message == "Expected value of type 'Decimal', found \"Nobody expects the Spanish Inquisition!\"." ) result = schema.execute("{ decimal(input: true) }") assert result.errors assert len(result.errors) == 1 assert result.data is None assert result.errors[0].message == "Expected value of type 'Decimal', found true." result = schema.execute("{ decimal(input: 1.2) }") assert result.errors assert len(result.errors) == 1 assert result.data is None assert result.errors[0].message == "Expected value of type 'Decimal', found 1.2." def test_decimal_string_query_integer(): decimal_value = 1 result = schema.execute("""{ decimal(input: %s) }""" % decimal_value) assert not result.errors assert result.data == {"decimal": str(decimal_value)} assert decimal.Decimal(result.data["decimal"]) == decimal_value
Query
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_alloy_db.py
{ "start": 82370, "end": 88047 }
class ____: def setup_method(self): self.operator = AlloyDBUpdateBackupOperator( task_id=TEST_TASK_ID, backup_id=TEST_BACKUP_ID, backup_configuration=TEST_BACKUP, update_mask=TEST_UPDATE_MASK, allow_missing=TEST_ALLOW_MISSING, project_id=TEST_GCP_PROJECT, location=TEST_GCP_REGION, gcp_conn_id=TEST_GCP_CONN_ID, request_id=TEST_REQUEST_ID, validate_request=TEST_VALIDATE_ONLY, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, impersonation_chain=TEST_IMPERSONATION_CHAIN, ) def test_init(self): assert self.operator.backup_id == TEST_BACKUP_ID assert self.operator.backup_configuration == TEST_BACKUP assert self.operator.update_mask == TEST_UPDATE_MASK assert self.operator.allow_missing == TEST_ALLOW_MISSING def test_template_fields(self): expected_template_fields = { "backup_id", "backup_configuration", "update_mask", "allow_missing", } | set(AlloyDBWriteBaseOperator.template_fields) assert set(AlloyDBUpdateBackupOperator.template_fields) == expected_template_fields @mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Backup.to_dict")) @mock.patch(UPDATE_BACKUP_OPERATOR_PATH.format("get_operation_result")) @mock.patch(UPDATE_BACKUP_OPERATOR_PATH.format("log")) @mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock) def test_execute(self, mock_hook, mock_log, mock_get_operation_result, mock_to_dict): mock_update_backup = mock_hook.return_value.update_backup mock_operation = mock_update_backup.return_value mock_operation_result = mock_get_operation_result.return_value expected_result = mock_to_dict.return_value mock_context = mock.MagicMock() result = self.operator.execute(context=mock_context) mock_update_backup.assert_called_once_with( backup_id=TEST_BACKUP_ID, project_id=TEST_GCP_PROJECT, location=TEST_GCP_REGION, backup=TEST_BACKUP, update_mask=TEST_UPDATE_MASK, allow_missing=TEST_ALLOW_MISSING, request_id=TEST_REQUEST_ID, validate_only=TEST_VALIDATE_ONLY, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_operation_result.assert_called_once_with(mock_operation) mock_to_dict.assert_called_once_with(mock_operation_result) assert result == expected_result mock_log.info.assert_has_calls( [ call("Updating an AlloyDB backup."), call("AlloyDB backup %s was successfully updated.", TEST_BACKUP_ID), ] ) @mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Backup.to_dict")) @mock.patch(UPDATE_BACKUP_OPERATOR_PATH.format("get_operation_result")) @mock.patch(UPDATE_BACKUP_OPERATOR_PATH.format("log")) @mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock) def test_execute_validate_request(self, mock_hook, mock_log, mock_get_operation_result, mock_to_dict): mock_update_ackup = mock_hook.return_value.update_backup mock_operation = mock_update_ackup.return_value mock_get_operation_result.return_value = None expected_message = "Validating an Update AlloyDB backup request." 
mock_context = mock.MagicMock() self.operator.validate_request = True result = self.operator.execute(context=mock_context) mock_log.info.assert_called_once_with(expected_message) mock_update_ackup.assert_called_once_with( backup_id=TEST_BACKUP_ID, backup=TEST_BACKUP, project_id=TEST_GCP_PROJECT, location=TEST_GCP_REGION, update_mask=TEST_UPDATE_MASK, allow_missing=TEST_ALLOW_MISSING, request_id=TEST_REQUEST_ID, validate_only=True, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_operation_result.assert_called_once_with(mock_operation) assert not mock_to_dict.called assert result is None @mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Backup.to_dict")) @mock.patch(UPDATE_BACKUP_OPERATOR_PATH.format("get_operation_result")) @mock.patch(UPDATE_BACKUP_OPERATOR_PATH.format("log")) @mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock) def test_execute_exception(self, mock_hook, mock_log, mock_get_operation_result, mock_to_dict): mock_update_backup = mock_hook.return_value.update_backup mock_update_backup.side_effect = Exception mock_context = mock.MagicMock() with pytest.raises(AirflowException): self.operator.execute(context=mock_context) mock_update_backup.assert_called_once_with( backup_id=TEST_BACKUP_ID, backup=TEST_BACKUP, project_id=TEST_GCP_PROJECT, location=TEST_GCP_REGION, update_mask=TEST_UPDATE_MASK, allow_missing=TEST_ALLOW_MISSING, request_id=TEST_REQUEST_ID, validate_only=TEST_VALIDATE_ONLY, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) assert not mock_get_operation_result.called assert not mock_to_dict.called mock_log.info.assert_called_once_with("Updating an AlloyDB backup.")
TestAlloyDBUpdateBackupOperator
python
pypa__hatch
src/hatch/publish/plugin/interface.py
{ "start": 74, "end": 3353 }
class ____(ABC): """ Example usage: ```python tab="plugin.py" from hatch.publish.plugin.interface import PublisherInterface class SpecialPublisher(PublisherInterface): PLUGIN_NAME = 'special' ... ``` ```python tab="hooks.py" from hatchling.plugin import hookimpl from .plugin import SpecialPublisher @hookimpl def hatch_register_publisher(): return SpecialPublisher ``` """ PLUGIN_NAME = "" """The name used for selection.""" def __init__(self, app, root, cache_dir, project_config, plugin_config): self.__app = app self.__root = root self.__cache_dir = cache_dir self.__project_config = project_config self.__plugin_config = plugin_config self.__disable = None @property def app(self): """ An instance of [Application](../utilities.md#hatchling.bridge.app.Application). """ return self.__app @property def root(self): """ The root of the project tree as a path-like object. """ return self.__root @property def cache_dir(self): """ The directory reserved exclusively for this plugin as a path-like object. """ return self.__cache_dir @property def project_config(self) -> dict: """ ```toml config-example [tool.hatch.publish.<PLUGIN_NAME>] ``` """ return self.__project_config @property def plugin_config(self) -> dict: """ This is defined in Hatch's [config file](../../config/hatch.md). ```toml tab="config.toml" [publish.<PLUGIN_NAME>] ``` """ return self.__plugin_config @property def disable(self): """ Whether this plugin is disabled, thus requiring confirmation when publishing. Local [project configuration](reference.md#hatch.publish.plugin.interface.PublisherInterface.project_config) takes precedence over global [plugin configuration](reference.md#hatch.publish.plugin.interface.PublisherInterface.plugin_config). """ if self.__disable is None: if "disable" in self.project_config: disable = self.project_config["disable"] if not isinstance(disable, bool): message = f"Field `tool.hatch.publish.{self.PLUGIN_NAME}.disable` must be a boolean" raise TypeError(message) else: disable = self.plugin_config.get("disable", False) if not isinstance(disable, bool): message = f"Global plugin configuration `publish.{self.PLUGIN_NAME}.disable` must be a boolean" raise TypeError(message) self.__disable = disable return self.__disable @abstractmethod def publish(self, artifacts: list[str], options: dict): """ :material-align-horizontal-left: **REQUIRED** :material-align-horizontal-right: This is called directly by the [`publish`](../../cli/reference.md#hatch-publish) command with the arguments and options it receives. """
PublisherInterface
python
nedbat__coveragepy
coverage/plugin_support.py
{ "start": 7025, "end": 8458 }
class ____(FileTracer): """A debugging `FileTracer`.""" def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None: self.tracer = tracer self.debug = debug def _show_frame(self, frame: FrameType) -> str: """A short string identifying a frame, for debug messages.""" filename = os.path.basename(frame.f_code.co_filename) return f"{filename}@{frame.f_lineno}" def source_filename(self) -> str: sfilename = self.tracer.source_filename() self.debug.write(f"source_filename() --> {sfilename!r}") return sfilename def has_dynamic_source_filename(self) -> bool: has = self.tracer.has_dynamic_source_filename() self.debug.write(f"has_dynamic_source_filename() --> {has!r}") return has def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None: dyn = self.tracer.dynamic_source_filename(filename, frame) self.debug.write( "dynamic_source_filename({!r}, {}) --> {!r}".format( filename, self._show_frame(frame), dyn, ) ) return dyn def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: pair = self.tracer.line_number_range(frame) self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}") return pair
DebugFileTracerWrapper
python
huggingface__transformers
src/transformers/models/chameleon/modeling_chameleon.py
{ "start": 33776, "end": 34668 }
class ____(PreTrainedModel): config: ChameleonConfig base_model_prefix = "model" input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["ChameleonDecoderLayer", "ChameleonSwinDecoderLayer"] _skip_keys_device_placement = ["past_key_values", "causal_mask"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_flex_attn = True _supports_attention_backend = True @auto_docstring( custom_intro=""" The VQ-VAE model used in Chameleon for encoding/decoding images into discrete tokens. This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from [ Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman](https://huggingface.co/papers/2203.13131). """ )
ChameleonPreTrainedModel
python
pyinstaller__pyinstaller
tests/unit/test_modulegraph/test_imports.py
{ "start": 19928, "end": 20640 }
class ____ (unittest.TestCase): if not hasattr(unittest.TestCase, 'assertIsInstance'): def assertIsInstance(self, value, types): if not isinstance(value, types): self.fail("%r is not an instance of %r"%(value, types)) def test_extended_args_import(): source = "".join(f"dummy_var{i} = {i}\n" for i in range(300)) + "import os\n" code = compile(source, "", "exec") node = modulegraph.Node("dummy_module") node._deferred_imports = [] node.code = code graph = modulegraph.ModuleGraph() graph._scan_bytecode(node, code, True) assert node._deferred_imports[0][1][0] == "os" if __name__ == "__main__": unittest.main()
TestInvalidAsyncFunction
python
ray-project__ray
rllib/examples/envs/classes/multi_agent/footsies/encoder.py
{ "start": 296, "end": 8564 }
class ____: """Encoder class to generate observations from the game state""" def __init__(self, observation_delay: int): self._encoding_history = { agent_id: collections.deque(maxlen=int(observation_delay)) for agent_id in ["p1", "p2"] } self.observation_delay = observation_delay self._last_common_state: Optional[np.ndarray] = None self._action_id_values = list(constants.FOOTSIES_ACTION_IDS.values()) @staticmethod def encode_common_state(game_state: footsies_pb2.GameState) -> np.ndarray: p1_state, p2_state = game_state.player1, game_state.player2 dist_x = np.abs(p1_state.player_position_x - p2_state.player_position_x) / 8.0 return np.array( [ dist_x, ], dtype=np.float32, ) @staticmethod def _encode_input_buffer( input_buffer: list[int], last_n: Optional[int] = None ) -> np.ndarray: """Encodes the input buffer into a one-hot vector. :param input_buffer: The input buffer to encode :type input_buffer: list[int] :return: The encoded one-hot vector :rtype: np.ndarray """ if last_n is not None: input_buffer = input_buffer[last_n:] ib_encoding = [] for action_id in input_buffer: arr = [0] * (len(constants.ACTION_TO_BITS) + 1) arr[action_id] = 1 ib_encoding.extend(arr) input_buffer_vector = np.asarray(ib_encoding, dtype=np.float32) return input_buffer_vector def encode( self, game_state: footsies_pb2.GameState, ) -> dict[str, Any]: """Encodes the game state into observations for all agents. :param game_state: The game state to encode :type game_state: footsies_pb2.GameState :return: The encoded observations for all agents. :rtype: dict[str, Any] """ common_state = self.encode_common_state(game_state) p1_encoding = self.encode_player_state(game_state.player1) p2_encoding = self.encode_player_state(game_state.player2) observation_delay = min( self.observation_delay, len(self._encoding_history["p1"]) ) if observation_delay > 0: p1_delayed_encoding = self._encoding_history["p1"][-observation_delay] p2_delayed_encoding = self._encoding_history["p2"][-observation_delay] else: p1_delayed_encoding = copy.deepcopy(p1_encoding) p2_delayed_encoding = copy.deepcopy(p2_encoding) self._encoding_history["p1"].append(p1_encoding) self._encoding_history["p2"].append(p2_encoding) self._last_common_state = common_state # Create features dictionary features = {} current_index = 0 # Common state features["common_state"] = { "start": current_index, "length": len(common_state), } current_index += len(common_state) # Concatenate the observations for the undelayed encoding p1_encoding = np.hstack(list(p1_encoding.values()), dtype=np.float32) p2_encoding = np.hstack(list(p2_encoding.values()), dtype=np.float32) # Concatenate the observations for the delayed encoding p1_delayed_encoding = np.hstack( list(p1_delayed_encoding.values()), dtype=np.float32 ) p2_delayed_encoding = np.hstack( list(p2_delayed_encoding.values()), dtype=np.float32 ) p1_centric_observation = np.hstack( [common_state, p1_encoding, p2_delayed_encoding] ) p2_centric_observation = np.hstack( [common_state, p2_encoding, p1_delayed_encoding] ) return {"p1": p1_centric_observation, "p2": p2_centric_observation} def encode_player_state( self, player_state: footsies_pb2.PlayerState, ) -> dict[str, Union[int, float, list, np.ndarray]]: """Encodes the player state into observations. 
:param player_state: The player state to encode :type player_state: footsies_pb2.PlayerState :return: The encoded observations for the player :rtype: dict[str, Any] """ feature_dict = { "player_position_x": player_state.player_position_x / constants.FeatureDictNormalizers.PLAYER_POSITION_X, "velocity_x": player_state.velocity_x / constants.FeatureDictNormalizers.VELOCITY_X, "is_dead": int(player_state.is_dead), "vital_health": player_state.vital_health, "guard_health": one_hot_encoder(player_state.guard_health, [0, 1, 2, 3]), "current_action_id": self._encode_action_id(player_state.current_action_id), "current_action_frame": player_state.current_action_frame / constants.FeatureDictNormalizers.CURRENT_ACTION_FRAME, "current_action_frame_count": player_state.current_action_frame_count / constants.FeatureDictNormalizers.CURRENT_ACTION_FRAME_COUNT, "current_action_remaining_frames": ( player_state.current_action_frame_count - player_state.current_action_frame ) / constants.FeatureDictNormalizers.CURRENT_ACTION_REMAINING_FRAMES, "is_action_end": int(player_state.is_action_end), "is_always_cancelable": int(player_state.is_always_cancelable), "current_action_hit_count": player_state.current_action_hit_count, "current_hit_stun_frame": player_state.current_hit_stun_frame / constants.FeatureDictNormalizers.CURRENT_HIT_STUN_FRAME, "is_in_hit_stun": int(player_state.is_in_hit_stun), "sprite_shake_position": player_state.sprite_shake_position, "max_sprite_shake_frame": player_state.max_sprite_shake_frame / constants.FeatureDictNormalizers.MAX_SPRITE_SHAKE_FRAME, "is_face_right": int(player_state.is_face_right), "current_frame_advantage": player_state.current_frame_advantage / constants.FeatureDictNormalizers.CURRENT_FRAME_ADVANTAGE, # The below features leak some information about the opponent! "would_next_forward_input_dash": int( player_state.would_next_forward_input_dash ), "would_next_backward_input_dash": int( player_state.would_next_backward_input_dash ), "special_attack_progress": min(player_state.special_attack_progress, 1.0), } return feature_dict def get_last_encoding(self) -> Optional[dict[str, np.ndarray]]: if self._last_common_state is None: return None return { "common_state": self._last_common_state.reshape(-1), "p1": np.hstack( list(self._encoding_history["p1"][-1].values()), dtype=np.float32, ), "p2": np.hstack( list(self._encoding_history["p2"][-1].values()), dtype=np.float32, ), } def reset(self): self._encoding_history = { agent_id: collections.deque(maxlen=int(self.observation_delay)) for agent_id in ["p1", "p2"] } def _encode_action_id(self, action_id: int) -> np.ndarray: """Encodes the action id into a one-hot vector. :param action_id: The action id to encode :type action_id: int :return: The encoded one-hot vector :rtype: np.ndarray """ action_vector = np.zeros(len(self._action_id_values), dtype=np.float32) # Get the index of the action id in constants.ActionID action_index = self._action_id_values.index(action_id) action_vector[action_index] = 1 assert action_vector.max() == 1 and action_vector.min() == 0 return action_vector def one_hot_encoder( value: Union[int, float, str], collection: list[Union[int, float, str]] ) -> np.ndarray: vector = np.zeros(len(collection), dtype=np.float32) vector[collection.index(value)] = 1 return vector
FootsiesEncoder
python
scipy__scipy
scipy/optimize/_differentialevolution.py
{ "start": 85674, "end": 90022 }
class ____: """Object to wrap/evaluate user defined constraints. Very similar in practice to `PreparedConstraint`, except that no evaluation of jac/hess is performed (explicit or implicit). If created successfully, it will contain the attributes listed below. Parameters ---------- constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`} Constraint to check and prepare. x0 : array_like Initial vector of independent variables, shape (N,) Attributes ---------- fun : callable Function defining the constraint wrapped by one of the convenience classes. bounds : 2-tuple Contains lower and upper bounds for the constraints --- lb and ub. These are converted to ndarray and have a size equal to the number of the constraints. Notes ----- _ConstraintWrapper.fun and _ConstraintWrapper.violation can get sent arrays of shape (N, S) or (N,), where S is the number of vectors of shape (N,) to consider constraints for. """ def __init__(self, constraint, x0): self.constraint = constraint if isinstance(constraint, NonlinearConstraint): def fun(x): x = np.asarray(x) return np.atleast_1d(constraint.fun(x)) elif isinstance(constraint, LinearConstraint): def fun(x): if issparse(constraint.A): A = constraint.A else: A = np.atleast_2d(constraint.A) res = A.dot(x) # x either has shape (N, S) or (N) # (M, N) x (N, S) --> (M, S) # (M, N) x (N,) --> (M,) # However, if (M, N) is a matrix then: # (M, N) * (N,) --> (M, 1), we need this to be (M,) if x.ndim == 1 and res.ndim == 2: # deal with case that constraint.A is an np.matrix # see gh20041 res = np.asarray(res)[:, 0] return res elif isinstance(constraint, Bounds): def fun(x): return np.asarray(x) else: raise ValueError("`constraint` of an unknown type is passed.") self.fun = fun lb = np.asarray(constraint.lb, dtype=float) ub = np.asarray(constraint.ub, dtype=float) x0 = np.asarray(x0) # find out the number of constraints f0 = fun(x0) self.num_constr = m = f0.size self.parameter_count = x0.size if lb.ndim == 0: lb = np.resize(lb, m) if ub.ndim == 0: ub = np.resize(ub, m) self.bounds = (lb, ub) def __call__(self, x): return np.atleast_1d(self.fun(x)) def violation(self, x): """How much the constraint is exceeded by. Parameters ---------- x : array-like Vector of independent variables, (N, S), where N is number of parameters and S is the number of solutions to be investigated. Returns ------- excess : array-like How much the constraint is exceeded by, for each of the constraints specified by `_ConstraintWrapper.fun`. Has shape (M, S) where M is the number of constraint components. """ # expect ev to have shape (num_constr, S) or (num_constr,) ev = self.fun(np.asarray(x)) try: excess_lb = np.maximum(self.bounds[0] - ev.T, 0) excess_ub = np.maximum(ev.T - self.bounds[1], 0) except ValueError as e: raise RuntimeError("An array returned from a Constraint has" " the wrong shape. If `vectorized is False`" " the Constraint should return an array of" " shape (M,). If `vectorized is True` then" " the Constraint must return an array of" " shape (M, S), where S is the number of" " solution vectors and M is the number of" " constraint components in a given" " Constraint object.") from e v = (excess_lb + excess_ub).T return v
_ConstraintWrapper
python
pennersr__django-allauth
allauth/mfa/stages.py
{ "start": 1406, "end": 2302 }
class ____(LoginStage): key = LoginStageKey.MFA_TRUST.value urlname = "mfa_trust" def handle(self): lbc_stage = self.controller.get_stage(AccountLoginStageKey.LOGIN_BY_CODE) auth_stage = self.controller.get_stage(AuthenticateStage.key) if ( not app_settings.TRUST_ENABLED or not auth_stage or not auth_stage.state.get("authentication_required") ) and ( not account_settings.LOGIN_BY_CODE_TRUST_ENABLED or not lbc_stage or not lbc_stage.state.get("login_by_code_required") ): return None, True client = is_headless_request(self.request) if client and client == "app": # Trust-this-browser relies on cookies. return None, True response = headed_redirect_response("mfa_trust") return response, True
TrustStage
python
kubernetes-client__python
kubernetes/client/models/v1beta2_capacity_request_policy_range.py
{ "start": 383, "end": 6247 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'max': 'str', 'min': 'str', 'step': 'str' } attribute_map = { 'max': 'max', 'min': 'min', 'step': 'step' } def __init__(self, max=None, min=None, step=None, local_vars_configuration=None): # noqa: E501 """V1beta2CapacityRequestPolicyRange - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._max = None self._min = None self._step = None self.discriminator = None if max is not None: self.max = max self.min = min if step is not None: self.step = step @property def max(self): """Gets the max of this V1beta2CapacityRequestPolicyRange. # noqa: E501 Max defines the upper limit for capacity that can be requested. Max must be less than or equal to the capacity value. Min and requestPolicy.default must be less than or equal to the maximum. # noqa: E501 :return: The max of this V1beta2CapacityRequestPolicyRange. # noqa: E501 :rtype: str """ return self._max @max.setter def max(self, max): """Sets the max of this V1beta2CapacityRequestPolicyRange. Max defines the upper limit for capacity that can be requested. Max must be less than or equal to the capacity value. Min and requestPolicy.default must be less than or equal to the maximum. # noqa: E501 :param max: The max of this V1beta2CapacityRequestPolicyRange. # noqa: E501 :type: str """ self._max = max @property def min(self): """Gets the min of this V1beta2CapacityRequestPolicyRange. # noqa: E501 Min specifies the minimum capacity allowed for a consumption request. Min must be greater than or equal to zero, and less than or equal to the capacity value. requestPolicy.default must be more than or equal to the minimum. # noqa: E501 :return: The min of this V1beta2CapacityRequestPolicyRange. # noqa: E501 :rtype: str """ return self._min @min.setter def min(self, min): """Sets the min of this V1beta2CapacityRequestPolicyRange. Min specifies the minimum capacity allowed for a consumption request. Min must be greater than or equal to zero, and less than or equal to the capacity value. requestPolicy.default must be more than or equal to the minimum. # noqa: E501 :param min: The min of this V1beta2CapacityRequestPolicyRange. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and min is None: # noqa: E501 raise ValueError("Invalid value for `min`, must not be `None`") # noqa: E501 self._min = min @property def step(self): """Gets the step of this V1beta2CapacityRequestPolicyRange. # noqa: E501 Step defines the step size between valid capacity amounts within the range. Max (if set) and requestPolicy.default must be a multiple of Step. Min + Step must be less than or equal to the capacity value. # noqa: E501 :return: The step of this V1beta2CapacityRequestPolicyRange. # noqa: E501 :rtype: str """ return self._step @step.setter def step(self, step): """Sets the step of this V1beta2CapacityRequestPolicyRange. Step defines the step size between valid capacity amounts within the range. Max (if set) and requestPolicy.default must be a multiple of Step. Min + Step must be less than or equal to the capacity value. 
# noqa: E501 :param step: The step of this V1beta2CapacityRequestPolicyRange. # noqa: E501 :type: str """ self._step = step def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1beta2CapacityRequestPolicyRange): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1beta2CapacityRequestPolicyRange): return True return self.to_dict() != other.to_dict()
V1beta2CapacityRequestPolicyRange
python
protocolbuffers__protobuf
python/google/protobuf/json_format.py
{ "start": 5360, "end": 16285 }
class ____(object): """JSON format printer for protocol message.""" def __init__( self, preserving_proto_field_name=False, use_integers_for_enums=False, descriptor_pool=None, always_print_fields_with_no_presence=False, ): self.always_print_fields_with_no_presence = ( always_print_fields_with_no_presence ) self.preserving_proto_field_name = preserving_proto_field_name self.use_integers_for_enums = use_integers_for_enums self.descriptor_pool = descriptor_pool def ToJsonString(self, message, indent, sort_keys, ensure_ascii): js = self._MessageToJsonObject(message) return json.dumps( js, indent=indent, sort_keys=sort_keys, ensure_ascii=ensure_ascii ) def _MessageToJsonObject(self, message): """Converts message to an object according to ProtoJSON Specification.""" message_descriptor = message.DESCRIPTOR full_name = message_descriptor.full_name if _IsWrapperMessage(message_descriptor): return self._WrapperMessageToJsonObject(message) if full_name in _WKTJSONMETHODS: return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self) js = {} return self._RegularMessageToJsonObject(message, js) def _RegularMessageToJsonObject(self, message, js): """Converts normal message according to ProtoJSON Specification.""" fields = message.ListFields() try: for field, value in fields: if field.is_extension: name = '[%s]' % field.full_name elif self.preserving_proto_field_name: name = field.name else: name = field.json_name if _IsMapEntry(field): # Convert a map field. v_field = field.message_type.fields_by_name['value'] js_map = {} for key in value: if isinstance(key, bool): if key: recorded_key = 'true' else: recorded_key = 'false' else: recorded_key = str(key) js_map[recorded_key] = self._FieldToJsonObject(v_field, value[key]) js[name] = js_map elif field.is_repeated: # Convert a repeated field. js[name] = [self._FieldToJsonObject(field, k) for k in value] else: js[name] = self._FieldToJsonObject(field, value) # Serialize default value if including_default_value_fields is True. if ( self.always_print_fields_with_no_presence ): message_descriptor = message.DESCRIPTOR for field in message_descriptor.fields: # always_print_fields_with_no_presence doesn't apply to # any field which supports presence. if self.always_print_fields_with_no_presence and field.has_presence: continue if self.preserving_proto_field_name: name = field.name else: name = field.json_name if name in js: # Skip the field which has been serialized already. continue if _IsMapEntry(field): js[name] = {} elif field.is_repeated: js[name] = [] else: js[name] = self._FieldToJsonObject(field, field.default_value) except ValueError as e: raise SerializeToJsonError( 'Failed to serialize {0} field: {1}.'.format(field.name, e) ) from e return js def _FieldToJsonObject(self, field, value): """Converts field value according to ProtoJSON Specification.""" if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: return self._MessageToJsonObject(value) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: if self.use_integers_for_enums: return value if field.enum_type.full_name == 'google.protobuf.NullValue': return None enum_value = field.enum_type.values_by_number.get(value, None) if enum_value is not None: return enum_value.name else: if field.enum_type.is_closed: raise SerializeToJsonError( 'Enum field contains an integer value ' 'which can not mapped to an enum value.' 
) else: return value elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: if field.type == descriptor.FieldDescriptor.TYPE_BYTES: # Use base64 Data encoding for bytes return base64.b64encode(value).decode('utf-8') else: return str(value) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: return bool(value) elif field.cpp_type in _INT64_TYPES: return str(value) elif field.cpp_type in _FLOAT_TYPES: if math.isinf(value): if value < 0.0: return _NEG_INFINITY else: return _INFINITY if math.isnan(value): return _NAN elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: return type_checkers.ToShortestFloat(value) return value def _AnyMessageToJsonObject(self, message): """Converts Any message according to ProtoJSON Specification.""" if not message.ListFields(): return {} # Must print @type first, use OrderedDict instead of {} js = OrderedDict() type_url = message.type_url js['@type'] = type_url sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool) sub_message.ParseFromString(message.value) message_descriptor = sub_message.DESCRIPTOR full_name = message_descriptor.full_name if _IsWrapperMessage(message_descriptor): js['value'] = self._WrapperMessageToJsonObject(sub_message) return js if full_name in _WKTJSONMETHODS: js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0], sub_message)( self ) return js return self._RegularMessageToJsonObject(sub_message, js) def _GenericMessageToJsonObject(self, message): """Converts message according to ProtoJSON Specification.""" # Duration, Timestamp and FieldMask have ToJsonString method to do the # convert. Users can also call the method directly. return message.ToJsonString() def _ValueMessageToJsonObject(self, message): """Converts Value message according to ProtoJSON Specification.""" which = message.WhichOneof('kind') # If the Value message is not set treat as null_value when serialize # to JSON. The parse back result will be different from original message. 
if which is None or which == 'null_value': return None if which == 'list_value': return self._ListValueMessageToJsonObject(message.list_value) if which == 'number_value': value = message.number_value if math.isinf(value): raise ValueError( 'Fail to serialize Infinity for Value.number_value, ' 'which would parse as string_value' ) if math.isnan(value): raise ValueError( 'Fail to serialize NaN for Value.number_value, ' 'which would parse as string_value' ) else: value = getattr(message, which) oneof_descriptor = message.DESCRIPTOR.fields_by_name[which] return self._FieldToJsonObject(oneof_descriptor, value) def _ListValueMessageToJsonObject(self, message): """Converts ListValue message according to ProtoJSON Specification.""" return [self._ValueMessageToJsonObject(value) for value in message.values] def _StructMessageToJsonObject(self, message): """Converts Struct message according to ProtoJSON Specification.""" fields = message.fields ret = {} for key in fields: ret[key] = self._ValueMessageToJsonObject(fields[key]) return ret def _WrapperMessageToJsonObject(self, message): return self._FieldToJsonObject( message.DESCRIPTOR.fields_by_name['value'], message.value ) def _IsWrapperMessage(message_descriptor): return message_descriptor.file.name == 'google/protobuf/wrappers.proto' def _DuplicateChecker(js): result = {} for name, value in js: if name in result: raise ParseError('Failed to load JSON: duplicate key {0}.'.format(name)) result[name] = value return result def _CreateMessageFromTypeUrl(type_url, descriptor_pool): """Creates a message from a type URL.""" db = symbol_database.Default() pool = db.pool if descriptor_pool is None else descriptor_pool type_name = type_url.split('/')[-1] try: message_descriptor = pool.FindMessageTypeByName(type_name) except KeyError as e: raise TypeError( 'Can not find message descriptor by type_url: {0}'.format(type_url) ) from e message_class = message_factory.GetMessageClass(message_descriptor) return message_class() def Parse( text, message, ignore_unknown_fields=False, descriptor_pool=None, max_recursion_depth=100, ): """Parses a JSON representation of a protocol message into a message. Args: text: Message JSON representation. message: A protocol buffer message to merge into. ignore_unknown_fields: If True, do not raise errors for unknown fields. descriptor_pool: A Descriptor Pool for resolving types. If None use the default. max_recursion_depth: max recursion depth of JSON message to be deserialized. JSON messages over this depth will fail to be deserialized. Default value is 100. Returns: The same message passed as argument. Raises:: ParseError: On JSON parsing problems. """ if not isinstance(text, str): text = text.decode('utf-8') try: js = json.loads(text, object_pairs_hook=_DuplicateChecker) except Exception as e: raise ParseError('Failed to load JSON: {0}.'.format(str(e))) from e try: return ParseDict( js, message, ignore_unknown_fields, descriptor_pool, max_recursion_depth ) except ParseError as e: raise e except Exception as e: raise ParseError( 'Failed to parse JSON: {0}: {1}.'.format(type(e).__name__, str(e)) ) from e def ParseDict( js_dict, message, ignore_unknown_fields=False, descriptor_pool=None, max_recursion_depth=100, ): """Parses a JSON dictionary representation into a message. Args: js_dict: Dict representation of a JSON message. message: A protocol buffer message to merge into. ignore_unknown_fields: If True, do not raise errors for unknown fields. descriptor_pool: A Descriptor Pool for resolving types. If None use the default. 
max_recursion_depth: max recursion depth of JSON message to be deserialized. JSON messages over this depth will fail to be deserialized. Default value is 100. Returns: The same message passed as argument. """ parser = _Parser(ignore_unknown_fields, descriptor_pool, max_recursion_depth) parser.ConvertMessage(js_dict, message, '') return message _INT_OR_FLOAT = (int, float) _LIST_LIKE = (list, tuple)
_Printer
python
eventlet__eventlet
tests/greendns_test.py
{ "start": 7735, "end": 10921 }
class ____(tests.LimitedTestCase): def setUp(self): # Store this so we can reuse it for each test self.query = greendns.dns.message.Message() self.query.flags = greendns.dns.flags.QR self.query_wire = self.query.to_wire() super().setUp() def test_udp_ipv4(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('127.0.0.1', 53))): greendns.udp(self.query, '127.0.0.1') def test_udp_ipv4_timeout(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', side_effect=socket.timeout): with tests.assert_raises(dns.exception.Timeout): greendns.udp(self.query, '127.0.0.1', timeout=0.1) def test_udp_ipv4_wrong_addr_ignore(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', side_effect=socket.timeout): with tests.assert_raises(dns.exception.Timeout): greendns.udp(self.query, '127.0.0.1', timeout=0.1, ignore_unexpected=True) def test_udp_ipv4_wrong_addr(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('127.0.0.2', 53))): with tests.assert_raises(dns.query.UnexpectedSource): greendns.udp(self.query, '127.0.0.1') def test_udp_ipv6(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('::1', 53, 0, 0))): greendns.udp(self.query, '::1') def test_udp_ipv6_timeout(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', side_effect=socket.timeout): with tests.assert_raises(dns.exception.Timeout): greendns.udp(self.query, '::1', timeout=0.1) def test_udp_ipv6_addr_zeroes(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('0:00:0000::1', 53, 0, 0))): greendns.udp(self.query, '::1') def test_udp_ipv6_wrong_addr_ignore(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', side_effect=socket.timeout): with tests.assert_raises(dns.exception.Timeout): greendns.udp(self.query, '::1', timeout=0.1, ignore_unexpected=True) def test_udp_ipv6_wrong_addr(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('ffff:0000::1', 53, 0, 0))): with tests.assert_raises(dns.query.UnexpectedSource): greendns.udp(self.query, '::1')
TestUdp
python
xlwings__xlwings
xlwings/_xlwindows.py
{ "start": 2421, "end": 3472 }
class ____: def __init__(self, method): self.__method = method def __call__(self, *args, **kwargs): n_attempt = 1 while True: try: v = self.__method(*args, **kwargs) if isinstance(v, (CDispatch, CoClassBaseClass, DispatchBaseClass)): return COMRetryObjectWrapper(v) elif isinstance(v, types.MethodType): return COMRetryMethodWrapper(v) else: return v except pywintypes.com_error as e: if ( not N_COM_ATTEMPTS or n_attempt < N_COM_ATTEMPTS ) and e.hresult == -2147418111: n_attempt += 1 continue else: raise except AttributeError: if not N_COM_ATTEMPTS or n_attempt < N_COM_ATTEMPTS: n_attempt += 1 continue else: raise
COMRetryMethodWrapper
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 100270, "end": 101754 }
class ____(GeneratedAirbyteSource): @public def __init__( self, name: str, shop: str, start_date: str, api_key: str, api_secret: str, conversion_window_days: Optional[int] = None, ): """Airbyte Source for Woocommerce. Documentation can be found at https://docs.airbyte.com/integrations/sources/woocommerce Args: name (str): The name of the destination. shop (str): The name of the store. For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'. start_date (str): The date you would like to replicate data. Format: YYYY-MM-DD. api_key (str): The CUSTOMER KEY for API in WooCommerce shop. api_secret (str): The CUSTOMER SECRET for API in WooCommerce shop. conversion_window_days (Optional[int]): A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. """ self.shop = check.str_param(shop, "shop") self.start_date = check.str_param(start_date, "start_date") self.api_key = check.str_param(api_key, "api_key") self.api_secret = check.str_param(api_secret, "api_secret") self.conversion_window_days = check.opt_int_param( conversion_window_days, "conversion_window_days" ) super().__init__("Woocommerce", name)
WoocommerceSource
python
astropy__astropy
astropy/samp/client.py
{ "start": 450, "end": 25371 }
class ____: """ Utility class which provides facilities to create and manage a SAMP compliant XML-RPC server that acts as SAMP callable client application. Parameters ---------- hub : :class:`~astropy.samp.SAMPHubProxy` An instance of :class:`~astropy.samp.SAMPHubProxy` to be used for messaging with the SAMP Hub. name : str, optional Client name (corresponding to ``samp.name`` metadata keyword). description : str, optional Client description (corresponding to ``samp.description.text`` metadata keyword). metadata : dict, optional Client application metadata in the standard SAMP format. addr : str, optional Listening address (or IP). This defaults to 127.0.0.1 if the internet is not reachable, otherwise it defaults to the host name. port : int, optional Listening XML-RPC server socket port. If left set to 0 (the default), the operating system will select a free port. callable : bool, optional Whether the client can receive calls and notifications. If set to `False`, then the client can send notifications and calls, but can not receive any. """ # TODO: define what is meant by callable def __init__( self, hub, name=None, description=None, metadata=None, addr=None, port=0, callable=True, ): # GENERAL self._is_running = False self._is_registered = False if metadata is None: metadata = {} if name is not None: metadata["samp.name"] = name if description is not None: metadata["samp.description.text"] = description self._metadata = metadata self._addr = addr self._port = port self._xmlrpcAddr = None self._callable = callable # HUB INTERACTION self.client = None self._public_id = None self._private_key = None self._hub_id = None self._notification_bindings = {} self._call_bindings = { "samp.app.ping": [self._ping, {}], "client.env.get": [self._client_env_get, {}], } self._response_bindings = {} self._host_name = "127.0.0.1" if internet_on(): try: self._host_name = socket.getfqdn() socket.getaddrinfo(self._addr or self._host_name, self._port or 0) except OSError: self._host_name = "127.0.0.1" self.hub = hub if self._callable: self._thread = threading.Thread(target=self._serve_forever) self._thread.daemon = True self.client = ThreadingXMLRPCServer( (self._addr or self._host_name, self._port), logRequests=False, allow_none=True, ) self.client.register_introspection_functions() self.client.register_function( self.receive_notification, "samp.client.receiveNotification" ) self.client.register_function(self.receive_call, "samp.client.receiveCall") self.client.register_function( self.receive_response, "samp.client.receiveResponse" ) # If the port was set to zero, then the operating system has # selected a free port. We now check what this port number is. if self._port == 0: self._port = self.client.socket.getsockname()[1] protocol = "http" self._xmlrpcAddr = urlunparse( ( protocol, f"{self._addr or self._host_name}:{self._port}", "", "", "", "", ) ) def start(self): """ Start the client in a separate thread (non-blocking). This only has an effect if ``callable`` was set to `True` when initializing the client. """ if self._callable: self._is_running = True self._run_client() def stop(self, timeout=10.0): """ Stop the client. Parameters ---------- timeout : float Timeout after which to give up if the client cannot be cleanly shut down. """ # Setting _is_running to False causes the loop in _serve_forever to # exit. The thread should then stop running. We wait for the thread to # terminate until the timeout, then we continue anyway. 
self._is_running = False if self._callable and self._thread.is_alive(): self._thread.join(timeout) if self._thread.is_alive(): raise SAMPClientError( f"Client was not shut down successfully (timeout={timeout}s)" ) @property def is_running(self): """ Whether the client is currently running. """ return self._is_running @property def is_registered(self): """ Whether the client is currently registered. """ return self._is_registered def _run_client(self): if self._callable: self._thread.start() def _serve_forever(self): while self._is_running: try: read_ready = select.select([self.client.socket], [], [], 0.1)[0] except OSError as exc: warnings.warn( f"Call to select in SAMPClient failed: {exc}", SAMPWarning ) else: if read_ready: self.client.handle_request() self.client.server_close() def _ping(self, private_key, sender_id, msg_id, msg_mtype, msg_params, message): reply = {"samp.status": SAMP_STATUS_OK, "samp.result": {}} self.hub.reply(private_key, msg_id, reply) def _client_env_get( self, private_key, sender_id, msg_id, msg_mtype, msg_params, message ): if msg_params["name"] in os.environ: reply = { "samp.status": SAMP_STATUS_OK, "samp.result": {"value": os.environ[msg_params["name"]]}, } else: reply = { "samp.status": SAMP_STATUS_WARNING, "samp.result": {"value": ""}, "samp.error": {"samp.errortxt": "Environment variable not defined."}, } self.hub.reply(private_key, msg_id, reply) def _handle_notification(self, private_key, sender_id, message): if private_key == self.get_private_key() and "samp.mtype" in message: msg_mtype = message["samp.mtype"] del message["samp.mtype"] msg_params = message["samp.params"] del message["samp.params"] msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype) for mtype in msubs: if mtype in self._notification_bindings: bound_func = self._notification_bindings[mtype][0] if get_num_args(bound_func) == 5: bound_func( private_key, sender_id, msg_mtype, msg_params, message ) else: bound_func( private_key, sender_id, None, msg_mtype, msg_params, message ) return "" def receive_notification(self, private_key, sender_id, message): """ Standard callable client ``receive_notification`` method. This method is automatically handled when the :meth:`~astropy.samp.client.SAMPClient.bind_receive_notification` method is used to bind distinct operations to MTypes. In case of a customized callable client implementation that inherits from the :class:`~astropy.samp.SAMPClient` class this method should be overwritten. .. note:: When overwritten, this method must always return a string result (even empty). Parameters ---------- private_key : str Client private key. sender_id : str Sender public ID. message : dict Received message. Returns ------- confirmation : str Any confirmation string. """ return self._handle_notification(private_key, sender_id, message) def _handle_call(self, private_key, sender_id, msg_id, message): if private_key == self.get_private_key() and "samp.mtype" in message: msg_mtype = message["samp.mtype"] del message["samp.mtype"] msg_params = message["samp.params"] del message["samp.params"] msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype) for mtype in msubs: if mtype in self._call_bindings: self._call_bindings[mtype][0]( private_key, sender_id, msg_id, msg_mtype, msg_params, message ) return "" def receive_call(self, private_key, sender_id, msg_id, message): """ Standard callable client ``receive_call`` method. 
This method is automatically handled when the :meth:`~astropy.samp.client.SAMPClient.bind_receive_call` method is used to bind distinct operations to MTypes. In case of a customized callable client implementation that inherits from the :class:`~astropy.samp.SAMPClient` class this method should be overwritten. .. note:: When overwritten, this method must always return a string result (even empty). Parameters ---------- private_key : str Client private key. sender_id : str Sender public ID. msg_id : str Message ID received. message : dict Received message. Returns ------- confirmation : str Any confirmation string. """ return self._handle_call(private_key, sender_id, msg_id, message) def _handle_response(self, private_key, responder_id, msg_tag, response): if private_key == self.get_private_key() and msg_tag in self._response_bindings: self._response_bindings[msg_tag]( private_key, responder_id, msg_tag, response ) return "" def receive_response(self, private_key, responder_id, msg_tag, response): """ Standard callable client ``receive_response`` method. This method is automatically handled when the :meth:`~astropy.samp.client.SAMPClient.bind_receive_response` method is used to bind distinct operations to MTypes. In case of a customized callable client implementation that inherits from the :class:`~astropy.samp.SAMPClient` class this method should be overwritten. .. note:: When overwritten, this method must always return a string result (even empty). Parameters ---------- private_key : str Client private key. responder_id : str Responder public ID. msg_tag : str Response message tag. response : dict Received response. Returns ------- confirmation : str Any confirmation string. """ return self._handle_response(private_key, responder_id, msg_tag, response) def bind_receive_message(self, mtype, function, declare=True, metadata=None): """ Bind a specific MType to a function or class method, being intended for a call or a notification. The function must be of the form:: def my_function_or_method(<self,> private_key, sender_id, msg_id, mtype, params, extra) where ``private_key`` is the client private-key, ``sender_id`` is the notification sender ID, ``msg_id`` is the Hub message-id (calls only, otherwise is `None`), ``mtype`` is the message MType, ``params`` is the message parameter set (content of ``"samp.params"``) and ``extra`` is a dictionary containing any extra message map entry. The client is automatically declared subscribed to the MType by default. Parameters ---------- mtype : str MType to be caught. function : callable Application function to be used when ``mtype`` is received. declare : bool, optional Specify whether the client must be automatically declared as subscribed to the MType (see also :meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`). metadata : dict, optional Dictionary containing additional metadata to declare associated with the MType subscribed to (see also :meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`). """ self.bind_receive_call(mtype, function, declare=declare, metadata=metadata) self.bind_receive_notification( mtype, function, declare=declare, metadata=metadata ) def bind_receive_notification(self, mtype, function, declare=True, metadata=None): """ Bind a specific MType notification to a function or class method. 
The function must be of the form:: def my_function_or_method(<self,> private_key, sender_id, mtype, params, extra) where ``private_key`` is the client private-key, ``sender_id`` is the notification sender ID, ``mtype`` is the message MType, ``params`` is the notified message parameter set (content of ``"samp.params"``) and ``extra`` is a dictionary containing any extra message map entry. The client is automatically declared subscribed to the MType by default. Parameters ---------- mtype : str MType to be caught. function : callable Application function to be used when ``mtype`` is received. declare : bool, optional Specify whether the client must be automatically declared as subscribed to the MType (see also :meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`). metadata : dict, optional Dictionary containing additional metadata to declare associated with the MType subscribed to (see also :meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`). """ if self._callable: if not metadata: metadata = {} self._notification_bindings[mtype] = [function, metadata] if declare: self._declare_subscriptions() else: raise SAMPClientError("Client not callable.") def bind_receive_call(self, mtype, function, declare=True, metadata=None): """ Bind a specific MType call to a function or class method. The function must be of the form:: def my_function_or_method(<self,> private_key, sender_id, msg_id, mtype, params, extra) where ``private_key`` is the client private-key, ``sender_id`` is the notification sender ID, ``msg_id`` is the Hub message-id, ``mtype`` is the message MType, ``params`` is the message parameter set (content of ``"samp.params"``) and ``extra`` is a dictionary containing any extra message map entry. The client is automatically declared subscribed to the MType by default. Parameters ---------- mtype : str MType to be caught. function : callable Application function to be used when ``mtype`` is received. declare : bool, optional Specify whether the client must be automatically declared as subscribed to the MType (see also :meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`). metadata : dict, optional Dictionary containing additional metadata to declare associated with the MType subscribed to (see also :meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`). """ if self._callable: if not metadata: metadata = {} self._call_bindings[mtype] = [function, metadata] if declare: self._declare_subscriptions() else: raise SAMPClientError("Client not callable.") def bind_receive_response(self, msg_tag, function): """ Bind a specific msg-tag response to a function or class method. The function must be of the form:: def my_function_or_method(<self,> private_key, responder_id, msg_tag, response) where ``private_key`` is the client private-key, ``responder_id`` is the message responder ID, ``msg_tag`` is the message-tag provided at call time and ``response`` is the response received. Parameters ---------- msg_tag : str Message-tag to be caught. function : callable Application function to be used when ``msg_tag`` is received. """ if self._callable: self._response_bindings[msg_tag] = function else: raise SAMPClientError("Client not callable.") def unbind_receive_notification(self, mtype, declare=True): """ Remove from the notifications binding table the specified MType and unsubscribe the client from it (if required). Parameters ---------- mtype : str MType to be removed. 
declare : bool Specify whether the client must be automatically declared as unsubscribed from the MType (see also :meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`). """ if self._callable: del self._notification_bindings[mtype] if declare: self._declare_subscriptions() else: raise SAMPClientError("Client not callable.") def unbind_receive_call(self, mtype, declare=True): """ Remove from the calls binding table the specified MType and unsubscribe the client from it (if required). Parameters ---------- mtype : str MType to be removed. declare : bool Specify whether the client must be automatically declared as unsubscribed from the MType (see also :meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`). """ if self._callable: del self._call_bindings[mtype] if declare: self._declare_subscriptions() else: raise SAMPClientError("Client not callable.") def unbind_receive_response(self, msg_tag): """ Remove from the responses binding table the specified message-tag. Parameters ---------- msg_tag : str Message-tag to be removed. """ if self._callable: del self._response_bindings[msg_tag] else: raise SAMPClientError("Client not callable.") def declare_subscriptions(self, subscriptions=None): """ Declares the MTypes the client wishes to subscribe to, implicitly defined with the MType binding methods :meth:`~astropy.samp.client.SAMPClient.bind_receive_notification` and :meth:`~astropy.samp.client.SAMPClient.bind_receive_call`. An optional ``subscriptions`` map can be added to the final map passed to the :meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions` method. Parameters ---------- subscriptions : dict, optional Dictionary containing the list of MTypes to subscribe to, with the same format of the ``subscriptions`` map passed to the :meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions` method. """ if self._callable: self._declare_subscriptions(subscriptions) else: raise SAMPClientError("Client not callable.") def register(self): """ Register the client to the SAMP Hub. """ if self.hub.is_connected: if self._private_key is not None: raise SAMPClientError("Client already registered") result = self.hub.register(self.hub.lockfile["samp.secret"]) if result["samp.self-id"] == "": raise SAMPClientError( "Registration failed - samp.self-id was not set by the hub." ) if result["samp.private-key"] == "": raise SAMPClientError( "Registration failed - samp.private-key was not set by the hub." ) self._public_id = result["samp.self-id"] self._private_key = result["samp.private-key"] self._hub_id = result["samp.hub-id"] if self._callable: self._set_xmlrpc_callback() self._declare_subscriptions() if self._metadata != {}: self.declare_metadata() self._is_registered = True else: raise SAMPClientError( "Unable to register to the SAMP Hub. Hub proxy not connected." ) def unregister(self): """ Unregister the client from the SAMP Hub. """ if self.hub.is_connected: self._is_registered = False self.hub.unregister(self._private_key) self._hub_id = None self._public_id = None self._private_key = None else: raise SAMPClientError( "Unable to unregister from the SAMP Hub. Hub proxy not connected." 
) def _set_xmlrpc_callback(self): if self.hub.is_connected and self._private_key is not None: self.hub.set_xmlrpc_callback(self._private_key, self._xmlrpcAddr) def _declare_subscriptions(self, subscriptions=None): if self.hub.is_connected and self._private_key is not None: mtypes_dict = {} # Collect notification mtypes and metadata for mtype in self._notification_bindings.keys(): mtypes_dict[mtype] = copy.deepcopy( self._notification_bindings[mtype][1] ) # Collect notification mtypes and metadata for mtype in self._call_bindings.keys(): mtypes_dict[mtype] = copy.deepcopy(self._call_bindings[mtype][1]) # Add optional subscription map if subscriptions: mtypes_dict.update(copy.deepcopy(subscriptions)) self.hub.declare_subscriptions(self._private_key, mtypes_dict) else: raise SAMPClientError( "Unable to declare subscriptions. Hub " "unreachable or not connected or client " "not registered." ) def declare_metadata(self, metadata=None): """ Declare the client application metadata supported. Parameters ---------- metadata : dict, optional Dictionary containing the client application metadata as defined in the SAMP definition document. If omitted, then no metadata are declared. """ if self.hub.is_connected and self._private_key is not None: if metadata is not None: self._metadata.update(metadata) self.hub.declare_metadata(self._private_key, self._metadata) else: raise SAMPClientError( "Unable to declare metadata. Hub " "unreachable or not connected or client " "not registered." ) def get_private_key(self): """ Return the client private key used for the Standard Profile communications obtained at registration time (``samp.private-key``). Returns ------- key : str Client private key. """ return self._private_key def get_public_id(self): """ Return public client ID obtained at registration time (``samp.self-id``). Returns ------- id : str Client public ID. """ return self._public_id
SAMPClient
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/genericType28.py
{ "start": 940, "end": 979 }
class ____(Generic[T_contra]): ...
Class4
python
streamlit__streamlit
lib/tests/streamlit/elements/lib/options_selector_utils_test.py
{ "start": 1178, "end": 2240 }
class ____: def test_check_and_convert_to_indices_none_default(self): res = check_and_convert_to_indices(["a"], None) assert res is None def test_check_and_convert_to_indices_single_default(self): res = check_and_convert_to_indices(["a", "b"], "a") assert res == [0] def test_check_and_convert_to_indices_default_is_numpy_array(self): res = check_and_convert_to_indices(["a", "b"], np.array(["b"])) assert res == [1] def test_check_and_convert_to_indices_default_is_tuple(self): res = check_and_convert_to_indices(["a", "b"], ("b",)) assert res == [1] def test_check_and_convert_to_indices_default_is_set(self): res = check_and_convert_to_indices( ["a", "b"], set( "b", ), ) assert res == [1] def test_check_and_convert_to_indices_default_not_in_opts(self): with pytest.raises(StreamlitAPIException): check_and_convert_to_indices(["a", "b"], "c")
TestCheckAndConvertToIndices
python
streamlit__streamlit
lib/streamlit/vendor/pympler/asizeof.py
{ "start": 50203, "end": 50439 }
class ____(dict): """Internal obj visits counter.""" def again(self, key): try: s = self[key] + 1 except KeyError: s = 1 if s > 0: self[key] = s # Public classes
_Seen
python
huggingface__transformers
src/transformers/models/cwm/modeling_cwm.py
{ "start": 15547, "end": 19056 }
class ____(CwmPreTrainedModel): config_class = CwmConfig def __init__(self, config: CwmConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = torch.nn.ModuleList( [CwmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = CwmRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = CwmRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @check_model_inputs() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, cache_position: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> CwmModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds: torch.Tensor = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position: torch.Tensor = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) if not isinstance(causal_mask_mapping := attention_mask, dict): mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } sliding_mask_kwargs = mask_kwargs.copy() causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**sliding_mask_kwargs), } hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return CwmModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) @auto_docstring
CwmModel
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 1138546, "end": 1139205 }
class ____(ScaleInvalidDataShowAsradius): """ ScaleInvalidDataShowAsValueradius schema wrapper. Parameters ---------- value : float For arc mark, the primary (outer) radius in pixels. For text marks, polar coordinate radial offset, in pixels, of the text from the origin determined by the ``x`` and ``y`` properties. **Default value:** ``min(plot_width, plot_height)/2`` """ _schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"radius">'} def __init__(self, value: Optional[float] = Undefined, **kwds): super().__init__(value=value, **kwds)
ScaleInvalidDataShowAsValueradius
python
google__flatbuffers
tests/monster_test_generated.py
{ "start": 97485, "end": 103957 }
class ____(object): __slots__ = ['_tab'] @classmethod def GetRootAs(cls, buf, offset=0): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = TypeAliases() x.Init(buf, n + offset) return x @classmethod def GetRootAsTypeAliases(cls, buf, offset=0): """This method is deprecated. Please switch to GetRootAs.""" return cls.GetRootAs(buf, offset) @classmethod def TypeAliasesBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed) # TypeAliases def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # TypeAliases def I8(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # TypeAliases def U8(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) return 0 # TypeAliases def I16(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos) return 0 # TypeAliases def U16(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos) return 0 # TypeAliases def I32(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # TypeAliases def U32(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) return 0 # TypeAliases def I64(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) return 0 # TypeAliases def U64(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos) return 0 # TypeAliases def F32(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # TypeAliases def F64(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos) return 0.0 # TypeAliases def V8(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) return 0 # TypeAliases def V8AsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int8Flags, o) return 0 # TypeAliases def V8Length(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) if o != 0: return self._tab.VectorLen(o) return 0 # TypeAliases def V8IsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) return o == 0 # TypeAliases def Vf64(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26)) if o != 0: a = self._tab.Vector(o) return 
self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) return 0 # TypeAliases def Vf64AsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o) return 0 # TypeAliases def Vf64Length(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26)) if o != 0: return self._tab.VectorLen(o) return 0 # TypeAliases def Vf64IsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26)) return o == 0 def TypeAliasesStart(builder): builder.StartObject(12) def TypeAliasesAddI8(builder, i8): builder.PrependInt8Slot(0, i8, 0) def TypeAliasesAddU8(builder, u8): builder.PrependUint8Slot(1, u8, 0) def TypeAliasesAddI16(builder, i16): builder.PrependInt16Slot(2, i16, 0) def TypeAliasesAddU16(builder, u16): builder.PrependUint16Slot(3, u16, 0) def TypeAliasesAddI32(builder, i32): builder.PrependInt32Slot(4, i32, 0) def TypeAliasesAddU32(builder, u32): builder.PrependUint32Slot(5, u32, 0) def TypeAliasesAddI64(builder, i64): builder.PrependInt64Slot(6, i64, 0) def TypeAliasesAddU64(builder, u64): builder.PrependUint64Slot(7, u64, 0) def TypeAliasesAddF32(builder, f32): builder.PrependFloat32Slot(8, f32, 0.0) def TypeAliasesAddF64(builder, f64): builder.PrependFloat64Slot(9, f64, 0.0) def TypeAliasesAddV8(builder, v8): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(v8), 0) def TypeAliasesStartV8Vector(builder, numElems): return builder.StartVector(1, numElems, 1) def TypeAliasesAddVf64(builder, vf64): builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(vf64), 0) def TypeAliasesStartVf64Vector(builder, numElems): return builder.StartVector(8, numElems, 8) def TypeAliasesEnd(builder): return builder.EndObject() try: from typing import List except: pass
TypeAliases
python
apache__airflow
providers/pinecone/tests/unit/pinecone/hooks/test_pinecone.py
{ "start": 992, "end": 8295 }
class ____: @pytest.fixture(autouse=True) def setup_connections(self, create_connection_without_db): create_connection_without_db( Connection( host="pinecone.io", conn_id="pinecone_default", conn_type="pinecone", login="us-west1-gcp", password="test_password", extra='{"region": "us-east-1", "debug_curl": true}', ) ) self.pinecone_hook = PineconeHook() self.index_name = "test_index" @patch("airflow.providers.pinecone.hooks.pinecone.Pinecone.Index") def test_upsert(self, mock_index): """Test the upsert_data_async method of PineconeHook for correct data insertion asynchronously.""" data = [("id1", [1.0, 2.0, 3.0], {"meta": "data"})] mock_upsert = Mock() mock_index.return_value.upsert = mock_upsert self.pinecone_hook.upsert(self.index_name, data) mock_upsert.assert_called_once_with(vectors=data, namespace="", batch_size=None, show_progress=True) @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.list_indexes") def test_list_indexes(self, mock_list_indexes): """Test that the list_indexes method of PineconeHook is called correctly.""" self.pinecone_hook.list_indexes() mock_list_indexes.assert_called_once() def test_debug_curl_setting(self): """Test that the PINECONE_DEBUG_CURL environment variable is set when initializing Pinecone Object.""" self.pinecone_hook.pinecone_client assert os.environ.get("PINECONE_DEBUG_CURL") == "true" @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.create_index") def test_create_index_for_pod_based(self, mock_create_index): """Test that the create_index method of PineconeHook is called with correct arguments for pod based index.""" pod_spec = self.pinecone_hook.get_pod_spec_obj() self.pinecone_hook.create_index(index_name=self.index_name, dimension=128, spec=pod_spec) mock_create_index.assert_called_once_with(index_name="test_index", dimension=128, spec=pod_spec) @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.create_index") def test_create_index_for_serverless_based(self, mock_create_index): """Test that the create_index method of PineconeHook is called with correct arguments for serverless index.""" serverless_spec = self.pinecone_hook.get_serverless_spec_obj(cloud="aws") self.pinecone_hook.create_index(index_name=self.index_name, dimension=128, spec=serverless_spec) mock_create_index.assert_called_once_with( index_name="test_index", dimension=128, spec=serverless_spec ) def test_get_pod_spec_obj(self): """Test that the get_pod_spec_obj method of PineconeHook returns the correct pod spec object.""" pod_spec = self.pinecone_hook.get_pod_spec_obj() assert pod_spec.environment == "us-west1-gcp" def test_get_serverless_spec_obj(self): """Test that the get_serverless_spec_obj method of PineconeHook returns the correct serverless spec object.""" serverless_spec = self.pinecone_hook.get_serverless_spec_obj(cloud="gcp") assert serverless_spec.region == "us-east-1" @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.describe_index") def test_describe_index(self, mock_describe_index): """Test that the describe_index method of PineconeHook is called with correct arguments.""" self.pinecone_hook.describe_index(index_name=self.index_name) mock_describe_index.assert_called_once_with(index_name=self.index_name) @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.delete_index") def test_delete_index(self, mock_delete_index): """Test that the delete_index method of PineconeHook is called with the correct index name.""" self.pinecone_hook.delete_index(index_name="test_index") 
mock_delete_index.assert_called_once_with(index_name="test_index") @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.create_collection") def test_create_collection(self, mock_create_collection): """ Test that the create_collection method of PineconeHook is called correctly. """ self.pinecone_hook.create_collection(collection_name="test_collection") mock_create_collection.assert_called_once_with(collection_name="test_collection") @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.configure_index") def test_configure_index(self, mock_configure_index): """ Test that the configure_index method of PineconeHook is called correctly. """ self.pinecone_hook.configure_index(index_configuration={}) mock_configure_index.assert_called_once_with(index_configuration={}) @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.describe_collection") def test_describe_collection(self, mock_describe_collection): """ Test that the describe_collection method of PineconeHook is called correctly. """ self.pinecone_hook.describe_collection(collection_name="test_collection") mock_describe_collection.assert_called_once_with(collection_name="test_collection") @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.list_collections") def test_list_collections(self, mock_list_collections): """ Test that the list_collections method of PineconeHook is called correctly. """ self.pinecone_hook.list_collections() mock_list_collections.assert_called_once() @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.query_vector") def test_query_vector(self, mock_query_vector): """ Test that the query_vector method of PineconeHook is called correctly. """ self.pinecone_hook.query_vector(vector=[1.0, 2.0, 3.0]) mock_query_vector.assert_called_once_with(vector=[1.0, 2.0, 3.0]) def test__chunks(self): """ Test that the _chunks method of PineconeHook behaves as expected. """ data = list(range(10)) chunked_data = list(self.pinecone_hook._chunks(data, 3)) assert chunked_data == [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)] @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.upsert_data_async") def test_upsert_data_async_correctly(self, mock_upsert_data_async): """ Test that the upsert_data_async method of PineconeHook is called correctly. """ data = [("id1", [1.0, 2.0, 3.0], {"meta": "data"})] self.pinecone_hook.upsert_data_async(index_name="test_index", data=data) mock_upsert_data_async.assert_called_once_with(index_name="test_index", data=data) @patch("airflow.providers.pinecone.hooks.pinecone.PineconeHook.describe_index_stats") def test_describe_index_stats(self, mock_describe_index_stats): """ Test that the describe_index_stats method of PineconeHook is called correctly. """ self.pinecone_hook.describe_index_stats(index_name="test_index") mock_describe_index_stats.assert_called_once_with(index_name="test_index")
TestPineconeHook
python
getsentry__sentry
src/sentry/ingest/consumer/processors.py
{ "start": 1484, "end": 15426 }
class ____(Exception): pass def trace_func(**span_kwargs): def wrapper(f): @functools.wraps(f) def inner(*args, **kwargs): # First we check a local env var # if that's not set we check our conf file # if neither are set use 0 sample_rate = float( os.getenv( "SENTRY_INGEST_CONSUMER_APM_SAMPLING", default=getattr(settings, "SENTRY_INGEST_CONSUMER_APM_SAMPLING", 0), ) ) # New behavior is to add a custom `sample_rate` that is picked up by `traces_sampler` span_kwargs.setdefault( "custom_sampling_context", {"sample_rate": sample_rate}, ) with sentry_sdk.start_transaction(**span_kwargs): return f(*args, **kwargs) return inner return wrapper @trace_func(name="ingest_consumer.process_event") @metrics.wraps("ingest_consumer.process_event") def process_event( consumer_type: str, message: IngestMessage, project: Project, reprocess_only_stuck_events: bool = False, ) -> None: """ Perform some initial filtering and deserialize the message payload. """ payload = message["payload"] start_time = float(message["start_time"]) event_id = message["event_id"] project_id = int(message["project_id"]) remote_addr = message.get("remote_addr") attachments = message.get("attachments") or () sentry_sdk.set_extra("event_id", event_id) sentry_sdk.set_extra("len_attachments", len(attachments)) # check that we haven't already processed this event (a previous instance of the forwarder # died before it could commit the event queue offset) # # XXX(markus): I believe this code is extremely broken: # # * it practically uses memcached in prod which has no consistency # guarantees (no idea how we don't run into issues there) # # * a TTL of 1h basically doesn't guarantee any deduplication at all. It # just guarantees a good error message... for one hour. # # This code has been ripped from the old python store endpoint. We're # keeping it around because it does provide some protection against # reprocessing good events if a single consumer is in a restart loop. with sentry_sdk.start_span(op="deduplication_check"): deduplication_key = f"ev:{project_id}:{event_id}" try: cached_value = cache.get(deduplication_key) except Exception as exc: raise Retriable(exc) if cached_value is not None: logger.warning( "pre-process-forwarder detected a duplicated event" " with id:%s for project:%s.", event_id, project_id, ) return # message already processed do not reprocess with sentry_sdk.start_span( op="killswitch_matches_context", name="store.load-shed-pipeline-projects" ): if killswitch_matches_context( "store.load-shed-pipeline-projects", { "project_id": project_id, "event_id": event_id, "has_attachments": bool(attachments), }, ): # This killswitch is for the worst of scenarios and should probably not # cause additional load on our logging infrastructure return # Parse the JSON payload. This is required to compute the cache key and # call process_event. The payload will be put into Kafka raw, to avoid # serializing it again. with sentry_sdk.start_span(op="orjson.loads"): data = orjson.loads(payload) # We also need to check "type" as transactions are also sent to ingest-attachments # along with other event types if they have attachments. 
if consumer_type == ConsumerType.Transactions or data.get("type") == "transaction": processing_store = transaction_processing_store else: processing_store = event_processing_store sentry_sdk.set_extra("event_type", data.get("type")) with sentry_sdk.start_span( op="killswitch_matches_context", name="store.load-shed-parsed-pipeline-projects" ): if killswitch_matches_context( "store.load-shed-parsed-pipeline-projects", { "organization_id": project.organization_id, "project_id": project.id, "event_type": data.get("type") or "null", "has_attachments": bool(attachments), "event_id": event_id, }, ): return # Raise the retriable exception and skip DLQ if anything below this point fails as it may be caused by # intermittent network issue try: # If we only want to reprocess "stuck" events, we check if this event is already in the # `processing_store`. We only continue here if the event *is* present, as that will eventually # process and consume the event from the `processing_store`, whereby getting it "unstuck". if reprocess_only_stuck_events: with sentry_sdk.start_span(op="event_processing_store.exists"): if not processing_store.exists(data): return attachment_objects = [ CachedAttachment(type=attachment.pop("attachment_type"), **attachment) for attachment in attachments ] if attachment_objects: store_attachments_for_event(project, data, attachment_objects, timeout=CACHE_TIMEOUT) with metrics.timer("ingest_consumer._store_event"): cache_key = processing_store.store(data) if consumer_type == ConsumerType.Transactions: track_sampled_event( data["event_id"], ConsumerType.Transactions, TransactionStageStatus.REDIS_PUT ) try: # Records rc-processing usage broken down by # event type. event_type = data.get("type") if event_type == "error": app_feature = "errors" elif event_type == "transaction": app_feature = "transactions" else: app_feature = None if app_feature is not None: record(settings.EVENT_PROCESSING_STORE, app_feature, len(payload), UsageUnit.BYTES) except Exception: pass try: project.set_cached_field_value( "organization", Organization.objects.get_from_cache(id=project.organization_id) ) except Organization.DoesNotExist: logger.warning( "Organization does not exist", extra={ "project_id": project_id, "organization_id": project.organization_id, }, ) return if data.get("type") == "transaction": assert cache_key is not None # No need for preprocess/process for transactions thus submit # directly transaction specific save_event task. save_event_transaction.delay( cache_key=cache_key, data=None, start_time=start_time, event_id=event_id, project_id=project_id, ) try: collect_span_metrics(project, data) except Exception: pass elif data.get("type") == "feedback": if not is_in_feedback_denylist(project.organization): save_event_feedback.delay( cache_key=None, # no need to cache as volume is low data=data, start_time=start_time, event_id=event_id, project_id=project_id, ) else: metrics.incr("feedback.ingest.denylist") else: # Preprocess this event, which spawns either process_event or # save_event. Pass data explicitly to avoid fetching it again from the # cache. 
with sentry_sdk.start_span(op="ingest_consumer.process_event.preprocess_event"): preprocess_event( cache_key=cache_key or "", data=data, start_time=start_time, event_id=event_id, project=project, has_attachments=bool(attachments), ) # remember for an 1 hour that we saved this event (deduplication protection) with sentry_sdk.start_span(op="cache.set"): cache.set(deduplication_key, "", CACHE_TIMEOUT) # emit event_accepted once everything is done with sentry_sdk.start_span(op="event_accepted.send_robust"): event_accepted.send_robust( ip=remote_addr, data=data, project=project, sender=process_event ) except Exception as exc: if isinstance(exc, KeyError): # ex: missing event_id in message["payload"] raise raise Retriable(exc) @trace_func(name="ingest_consumer.process_attachment_chunk") @metrics.wraps("ingest_consumer.process_attachment_chunk") def process_attachment_chunk(message: IngestMessage) -> None: payload = message["payload"] event_id = message["event_id"] project_id = message["project_id"] id = message["id"] chunk_index = message["chunk_index"] cache_key = cache_key_for_event({"event_id": event_id, "project": project_id}) attachment_cache.set_chunk( key=cache_key, id=id, chunk_index=chunk_index, chunk_data=payload, timeout=CACHE_TIMEOUT ) @trace_func(name="ingest_consumer.process_individual_attachment") @metrics.wraps("ingest_consumer.process_individual_attachment") def process_individual_attachment(message: IngestMessage, project: Project) -> None: event_id = message["event_id"] cache_key = cache_key_for_event({"event_id": event_id, "project": project.id}) if not features.has("organizations:event-attachments", project.organization, actor=None): logger.info("Organization has no event attachments: %s", project.id) return if killswitch_matches_context( "store.load-shed-pipeline-projects", { "project_id": project.id, "event_id": event_id, "has_attachments": True, }, ): # This killswitch is for the worst of scenarios and should probably not # cause additional load on our logging infrastructure return try: # Attachments may be uploaded for events that already exist. Fetch the # existing group_id, so that the attachment can be fetched by group-level # APIs. This is inherently racy. # # This is not guaranteed to provide correct results. Eventstore runs queries # against Snuba. This is problematic on the critical path on the ingestion # pipeline as Snuba can rate limit queries for specific projects when they # are above their quota. There is no guarantee that, when a project is within # their ingestion quota, they are also within the snuba queries quota. # Since there is no dead letter queue on this consumer, the only way to # prevent the consumer to crash as of now is to ignore the error and proceed. event = eventstore.backend.get_event_by_id(project.id, event_id) except RateLimitExceeded as e: event = None logger.exception(str(e)) group_id = None if event is not None: group_id = event.group_id attachment_msg = message["attachment"] attachment_type = attachment_msg.pop("attachment_type") # NOTE: `get_from_chunks` will avoid the cache if `attachment_msg` contains `data` inline, # or if the attachment has already been stored with a `stored_id`. 
attachment = attachment_cache.get_from_chunks( key=cache_key, type=attachment_type, **attachment_msg ) if attachment_type in ("event.attachment", "event.view_hierarchy"): save_attachment( cache_key, attachment, project, event_id, key_id=None, # TODO: Inject this from Relay group_id=group_id, start_time=None, # TODO: Inject this from Relay ) else: logger.error("invalid individual attachment type: %s", attachment_type) attachment.delete() @trace_func(name="ingest_consumer.process_userreport") @metrics.wraps("ingest_consumer.process_userreport") def process_userreport(message: IngestMessage, project: Project) -> bool: start_time = to_datetime(message["start_time"]) feedback = orjson.loads(message["payload"]) try: save_userreport( project, feedback, FeedbackCreationSource.USER_REPORT_ENVELOPE, start_time=start_time, ) return True except Conflict as e: logger.info("Invalid userreport: %s", e) return False except Exception: # XXX(markus): Hotfix because we have broken data in kafka # If you want to remove this make sure to have triaged all errors in Sentry logger.exception("userreport.save.crash") return False def collect_span_metrics( project: Project, data: MutableMapping[str, Any], ): if not features.has("organizations:am3-tier", project.organization) and not features.has( "organizations:dynamic-sampling", project.organization ): amount = ( len(data.get("spans", [])) + 1 ) # Segment spans also get added to the total span count. metrics.incr( "event.save_event.unsampled.spans.count", amount=amount, tags={"organization": project.organization.slug}, )
Retriable
python
django__django
tests/signals/tests.py
{ "start": 391, "end": 1187 }
class ____: def setUp(self): # Save up the number of connected signals so that we can check at the # end that all the signals we register get properly unregistered # (#9989) self.pre_signals = ( len(signals.pre_save.receivers), len(signals.post_save.receivers), len(signals.pre_delete.receivers), len(signals.post_delete.receivers), ) def tearDown(self): # All our signals got disconnected properly. post_signals = ( len(signals.pre_save.receivers), len(signals.post_save.receivers), len(signals.pre_delete.receivers), len(signals.post_delete.receivers), ) self.assertEqual(self.pre_signals, post_signals)
BaseSignalSetup
python
django__django
tests/file_storage/test_inmemory_storage.py
{ "start": 9446, "end": 11975 }
class ____(SimpleTestCase): def test_deconstruction(self): storage = InMemoryStorage() path, args, kwargs = storage.deconstruct() self.assertEqual(path, "django.core.files.storage.InMemoryStorage") self.assertEqual(args, ()) self.assertEqual(kwargs, {}) kwargs_orig = { "location": "/custom_path", "base_url": "http://myfiles.example.com/", "file_permissions_mode": "0o755", "directory_permissions_mode": "0o600", } storage = InMemoryStorage(**kwargs_orig) path, args, kwargs = storage.deconstruct() self.assertEqual(kwargs, kwargs_orig) @override_settings( MEDIA_ROOT="media_root", MEDIA_URL="media_url/", FILE_UPLOAD_PERMISSIONS=0o777, FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777, ) def test_setting_changed(self): """ Properties using settings values as defaults should be updated on referenced settings change while specified values should be unchanged. """ storage = InMemoryStorage( location="explicit_location", base_url="explicit_base_url/", file_permissions_mode=0o666, directory_permissions_mode=0o666, ) defaults_storage = InMemoryStorage() settings = { "MEDIA_ROOT": "overridden_media_root", "MEDIA_URL": "/overridden_media_url/", "FILE_UPLOAD_PERMISSIONS": 0o333, "FILE_UPLOAD_DIRECTORY_PERMISSIONS": 0o333, } with self.settings(**settings): self.assertEqual(storage.base_location, "explicit_location") self.assertIn("explicit_location", storage.location) self.assertEqual(storage.base_url, "explicit_base_url/") self.assertEqual(storage.file_permissions_mode, 0o666) self.assertEqual(storage.directory_permissions_mode, 0o666) self.assertEqual(defaults_storage.base_location, settings["MEDIA_ROOT"]) self.assertIn(settings["MEDIA_ROOT"], defaults_storage.location) self.assertEqual(defaults_storage.base_url, settings["MEDIA_URL"]) self.assertEqual( defaults_storage.file_permissions_mode, settings["FILE_UPLOAD_PERMISSIONS"], ) self.assertEqual( defaults_storage.directory_permissions_mode, settings["FILE_UPLOAD_DIRECTORY_PERMISSIONS"], )
InMemoryStorageTests
python
ray-project__ray
python/ray/util/metrics.py
{ "start": 5820, "end": 8304 }
class ____(Metric): """A cumulative metric that is monotonically increasing. This corresponds to Prometheus' counter metric: https://prometheus.io/docs/concepts/metric_types/#counter Before Ray 2.10, this exports a Prometheus gauge metric instead of a counter metric, which is wrong. Since 2.10, this exports both counter (with a suffix "_total") and gauge metrics (for bug compatibility). Use `RAY_EXPORT_COUNTER_AS_GAUGE=0` to disable exporting the gauge metric. Args: name: Name of the metric. description: Description of the metric. tag_keys: Tag keys of the metric. """ def __init__( self, name: str, description: str = "", tag_keys: Optional[Tuple[str, ...]] = None, ): super().__init__(name, description, tag_keys) if self._discard_metric: self._metric = None else: if env_bool("RAY_enable_open_telemetry", False): """ For the previous opencensus implementation, we used Sum to support exporting Counter as a gauge metric. We'll drop that feature in the new opentelemetry implementation. """ self._metric = CythonSum(self._name, self._description, self._tag_keys) else: """ For the new opentelemetry implementation, we'll correctly use Counter rather than Sum. """ self._metric = CythonCount( self._name, self._description, self._tag_keys ) def __reduce__(self): deserializer = self.__class__ serialized_data = (self._name, self._description, self._tag_keys) return deserializer, serialized_data def inc(self, value: Union[int, float] = 1.0, tags: Dict[str, str] = None): """Increment the counter by `value` (defaults to 1). Tags passed in will take precedence over the metric's default tags. Args: value(int, float): Value to increment the counter by (default=1). tags(Dict[str, str]): Tags to set or override for this counter. """ if not isinstance(value, (int, float)): raise TypeError(f"value must be int or float, got {type(value)}.") if value <= 0: raise ValueError(f"value must be >0, got {value}") self._record(value, tags=tags) @DeveloperAPI
Counter
python
django__django
django/db/models/manager.py
{ "start": 6678, "end": 6866 }
class ____(Manager): def __init__(self, model): super().__init__() self.model = model def get_queryset(self): return super().get_queryset().none()
EmptyManager