language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
getsentry__sentry
tests/sentry/db/models/fields/test_slug.py
{ "start": 255, "end": 1049 }
class ____(TestCase): def setUp(self) -> None: self.field = SentrySlugField() def test_combination_of_numeric_and_non_numeric(self) -> None: # Valid slug value should not raise a ValidationError self.field.run_validators("49dk20sk5-34fas") def test_non_numeric(self) -> None: self.field.run_validators("abc") def test_numeric(self) -> None: # Numeric value should raise a ValidationError with pytest.raises(ValidationError): self.field.run_validators("123") def test_capitalized(self) -> None: with pytest.raises(ValidationError): self.field.run_validators("rjkl29FRJF-dh439") def test_underscore(self) -> None: self.field.run_validators("sdjkh2390_dhj3290-")
TestSentrySlugField
python
encode__django-rest-framework
tests/test_validation_error.py
{ "start": 400, "end": 527 }
class ____(serializers.Serializer): char = serializers.CharField() integer = serializers.IntegerField()
ExampleSerializer
python
geekcomputers__Python
PongPong_Game/pong/ball.py
{ "start": 82, "end": 1313 }
class ____(pyglet.shapes.Circle): def __init__(self, *args, **kwargs): super(BallObject, self).__init__(*args, **kwargs) self.color = (255, 180, 0) self.velocity_x, self.velocity_y = 0.0, 0.0 def update(self, win_size: Tuple, border: Tuple, other_object, dt) -> None: speed = [ 2.37, 2.49, 2.54, 2.62, 2.71, 2.85, 2.96, 3.08, 3.17, 3.25, ] # more choices more randomness rn = random.choice(speed) newx = self.x + self.velocity_x newy = self.y + self.velocity_y if newx < border + self.radius or newx > win_size[0] - border - self.radius: self.velocity_x = -(self.velocity_x / abs(self.velocity_x)) * rn elif newy > win_size[1] - border - self.radius: self.velocity_y = -(self.velocity_y / abs(self.velocity_y)) * rn elif (newy - self.radius < other_object.height) and ( other_object.x <= newx <= other_object.rightx ): self.velocity_y = -(self.velocity_y / abs(self.velocity_y)) * rn else: self.x = newx self.y = newy
BallObject
python
jazzband__django-formtools
tests/wizard/wizardtests/tests.py
{ "start": 12681, "end": 13932 }
class ____(TestCase): wizard_url = '/wiz_other_template/' wizard_step_1_data = { 'cookie_contact_wizard-current_step': 'form1', } wizard_step_data = ( { 'form1-name': 'Pony', 'form1-thirsty': '2', 'cookie_contact_wizard-current_step': 'form1', }, { 'form2-address1': '123 Main St', 'form2-address2': 'Djangoland', 'cookie_contact_wizard-current_step': 'form2', }, { 'form3-random_crap': 'blah blah', 'cookie_contact_wizard-current_step': 'form3', }, { 'form4-INITIAL_FORMS': '0', 'form4-TOTAL_FORMS': '2', 'form4-MAX_NUM_FORMS': '0', 'form4-0-random_crap': 'blah blah', 'form4-1-random_crap': 'blah blah', 'cookie_contact_wizard-current_step': 'form4', } ) def setUp(self): self.testuser, created = User.objects.get_or_create(username='testuser1') self.wizard_step_data[0]['form1-user'] = self.testuser.pk def test_template(self): response = self.client.get(self.wizard_url) self.assertTemplateUsed(response, 'other_wizard_form.html')
WizardTestKwargs
python
modin-project__modin
modin/core/execution/ray/generic/partitioning/partition_manager.py
{ "start": 1041, "end": 2273 }
class ____(PandasDataframePartitionManager): """The class implements the interface in `PandasDataframePartitionManager`.""" @classmethod def to_numpy(cls, partitions, **kwargs): """ Convert `partitions` into a NumPy array. Parameters ---------- partitions : NumPy array A 2-D array of partitions to convert to local NumPy array. **kwargs : dict Keyword arguments to pass to each partition ``.to_numpy()`` call. Returns ------- NumPy array """ if partitions.shape[1] == 1: parts = cls.get_objects_from_partitions(partitions.flatten()) parts = [part.to_numpy(**kwargs) for part in parts] else: parts = RayWrapper.materialize( [ obj.apply( lambda df, **kwargs: df.to_numpy(**kwargs) ).list_of_blocks[0] for row in partitions for obj in row ] ) rows, cols = partitions.shape parts = [parts[i * cols : (i + 1) * cols] for i in range(rows)] return np.block(parts)
GenericRayDataframePartitionManager
python
weaviate__weaviate-python-client
weaviate/rbac/models.py
{ "start": 10215, "end": 10275 }
class ____(_GroupsPermission): pass
GroupsPermissionOutput
python
joke2k__faker
faker/providers/credit_card/uk_UA/__init__.py
{ "start": 210, "end": 2110 }
class ____(CreditCardProvider): """Implement credit card provider for ``uk_UA`` locale. https://blog.ipay.ua/uk/sekrety-bankovskix-kart-kak-identificirovat-bank-po-nomeru-karty/ """ prefix_visa = ["4"] prefix_mastercard = ["51", "52", "53", "54"] prefix_prostir = ["9"] prefix_maestro = ["6762"] credit_card_types = OrderedDict( ( ("visa", CreditCard("Visa", prefix_visa, security_code="CVV2")), ("mastercard", CreditCard("Mastercard", prefix_mastercard, security_code="CVC2")), ("prostir", CreditCard("ПРОСТІР", prefix_prostir, security_code="CVC2")), ("maestro", CreditCard("Maestro", prefix_maestro, security_code="CVV")), ) ) def credit_card_full(self, card_type: Optional[CardType] = None) -> str: """Generate UA Credit Card: Supported card types 'visa', 'mastercard', 'prostir', 'maestro' :sample: :sample: card_type="prostir" :sample: card_type="mastercard" """ card = self._credit_card_type(card_type) tpl = "{provider}\n{owner}\n{number} {expire_date}\n{security}: {security_nb}\n{issuer}" tpl = tpl.format( provider=card.name, owner=translit( self.generator.parse( self.random_element( [ "{{first_name_male}} {{last_name_male}}", "{{first_name_female}} {{last_name_female}}", ] ) ) ), number=self.credit_card_number(card), expire_date=self.credit_card_expire(), security=card.security_code, security_nb=self.credit_card_security_code(card), issuer=self.generator.parse("{{bank}}"), ) return self.generator.parse(tpl)
Provider
python
Textualize__textual
src/textual/css/_style_properties.py
{ "start": 18379, "end": 20239 }
class ____: """Descriptor for getting and setting spacing properties (e.g. padding and margin).""" def __set_name__(self, owner: StylesBase, name: str) -> None: self.name = name def __get__( self, obj: StylesBase, objtype: type[StylesBase] | None = None ) -> Spacing: """Get the Spacing. Args: obj: The ``Styles`` object. objtype: The ``Styles`` class. Returns: The Spacing. If unset, returns the null spacing ``(0, 0, 0, 0)``. """ return obj.get_rule(self.name, NULL_SPACING) # type: ignore[return-value] def __set__(self, obj: StylesBase, spacing: SpacingDimensions | None): """Set the Spacing. Args: obj: The ``Styles`` object. style: You can supply the ``Style`` directly, or a string (e.g. ``"blue on #f0f0f0"``). Raises: ValueError: When the value is malformed, e.g. a ``tuple`` with a length that is not 1, 2, or 4. """ _rich_traceback_omit = True if spacing is None: if obj.clear_rule(self.name): obj.refresh(layout=True) else: try: unpacked_spacing = Spacing.unpack(spacing) except ValueError as error: raise StyleValueError( str(error), help_text=spacing_wrong_number_of_values_help_text( property_name=self.name, num_values_supplied=( 1 if isinstance(spacing, int) else len(spacing) ), context="inline", ), ) if obj.set_rule(self.name, unpacked_spacing): obj.refresh(layout=True)
SpacingProperty
python
numba__numba
numba/core/typing/builtins.py
{ "start": 15539, "end": 15611 }
class ____(CmpOpIdentity): pass @infer_global(operator.is_not)
CmpOpIs
python
oauthlib__oauthlib
oauthlib/oauth1/rfc5849/__init__.py
{ "start": 2014, "end": 16771 }
class ____: """A client used to sign OAuth 1.0 RFC 5849 requests.""" SIGNATURE_METHODS = { SIGNATURE_HMAC_SHA1: signature.sign_hmac_sha1_with_client, SIGNATURE_HMAC_SHA256: signature.sign_hmac_sha256_with_client, SIGNATURE_HMAC_SHA512: signature.sign_hmac_sha512_with_client, SIGNATURE_RSA_SHA1: signature.sign_rsa_sha1_with_client, SIGNATURE_RSA_SHA256: signature.sign_rsa_sha256_with_client, SIGNATURE_RSA_SHA512: signature.sign_rsa_sha512_with_client, SIGNATURE_PLAINTEXT: signature.sign_plaintext_with_client } @classmethod def register_signature_method(cls, method_name, method_callback): cls.SIGNATURE_METHODS[method_name] = method_callback def __init__(self, client_key, client_secret=None, resource_owner_key=None, resource_owner_secret=None, callback_uri=None, signature_method=SIGNATURE_HMAC_SHA1, signature_type=SIGNATURE_TYPE_AUTH_HEADER, rsa_key=None, verifier=None, realm=None, encoding='utf-8', decoding=None, nonce=None, timestamp=None): """Create an OAuth 1 client. :param client_key: Client key (consumer key), mandatory. :param resource_owner_key: Resource owner key (oauth token). :param resource_owner_secret: Resource owner secret (oauth token secret). :param callback_uri: Callback used when obtaining request token. :param signature_method: SIGNATURE_HMAC, SIGNATURE_RSA or SIGNATURE_PLAINTEXT. :param signature_type: SIGNATURE_TYPE_AUTH_HEADER (default), SIGNATURE_TYPE_QUERY or SIGNATURE_TYPE_BODY depending on where you want to embed the oauth credentials. :param rsa_key: RSA key used with SIGNATURE_RSA. :param verifier: Verifier used when obtaining an access token. :param realm: Realm (scope) to which access is being requested. :param encoding: If you provide non-unicode input you may use this to have oauthlib automatically convert. :param decoding: If you wish that the returned uri, headers and body from sign be encoded back from unicode, then set decoding to your preferred encoding, i.e. utf-8. :param nonce: Use this nonce instead of generating one. 
(Mainly for testing) :param timestamp: Use this timestamp instead of using current. (Mainly for testing) """ # Convert to unicode using encoding if given, else assume unicode def encode(x): return to_unicode(x, encoding) if encoding else x self.client_key = encode(client_key) self.client_secret = encode(client_secret) self.resource_owner_key = encode(resource_owner_key) self.resource_owner_secret = encode(resource_owner_secret) self.signature_method = encode(signature_method) self.signature_type = encode(signature_type) self.callback_uri = encode(callback_uri) self.rsa_key = encode(rsa_key) self.verifier = encode(verifier) self.realm = encode(realm) self.encoding = encode(encoding) self.decoding = encode(decoding) self.nonce = encode(nonce) self.timestamp = encode(timestamp) def __repr__(self): attrs = vars(self).copy() attrs['client_secret'] = '****' if attrs['client_secret'] else None attrs['rsa_key'] = '****' if attrs['rsa_key'] else None attrs[ 'resource_owner_secret'] = '****' if attrs['resource_owner_secret'] else None attribute_str = ', '.join('{}={}'.format(k, v) for k, v in attrs.items()) return '<{} {}>'.format(self.__class__.__name__, attribute_str) def get_oauth_signature(self, request): """Get an OAuth signature to be used in signing a request To satisfy `section 3.4.1.2`_ item 2, if the request argument's headers dict attribute contains a Host item, its value will replace any netloc part of the request argument's uri attribute value. .. 
_`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2 """ if self.signature_method == SIGNATURE_PLAINTEXT: # fast-path return signature.sign_plaintext(self.client_secret, self.resource_owner_secret) uri, headers, body = self._render(request) collected_params = signature.collect_parameters( uri_query=urlparse.urlparse(uri).query, body=body, headers=headers) log.debug("Collected params: {}".format(collected_params)) normalized_params = signature.normalize_parameters(collected_params) normalized_uri = signature.base_string_uri(uri, headers.get('Host', None)) log.debug("Normalized params: {}".format(normalized_params)) log.debug("Normalized URI: {}".format(normalized_uri)) base_string = signature.signature_base_string(request.http_method, normalized_uri, normalized_params) log.debug("Signing: signature base string: {}".format(base_string)) if self.signature_method not in self.SIGNATURE_METHODS: raise ValueError('Invalid signature method.') sig = self.SIGNATURE_METHODS[self.signature_method](base_string, self) log.debug("Signature: {}".format(sig)) return sig def get_oauth_params(self, request): """Get the basic OAuth parameters to be used in generating a signature. """ nonce = (generate_nonce() if self.nonce is None else self.nonce) timestamp = (generate_timestamp() if self.timestamp is None else self.timestamp) params = [ ('oauth_nonce', nonce), ('oauth_timestamp', timestamp), ('oauth_version', '1.0'), ('oauth_signature_method', self.signature_method), ('oauth_consumer_key', self.client_key), ] if self.resource_owner_key: params.append(('oauth_token', self.resource_owner_key)) if self.callback_uri: params.append(('oauth_callback', self.callback_uri)) if self.verifier: params.append(('oauth_verifier', self.verifier)) # providing body hash for requests other than x-www-form-urlencoded # as described in https://tools.ietf.org/html/draft-eaton-oauth-bodyhash-00#section-4.1.1 # 4.1.1. When to include the body hash # * [...] 
MUST NOT include an oauth_body_hash parameter on requests with form-encoded request bodies # * [...] SHOULD include the oauth_body_hash parameter on all other requests. # Note that SHA-1 is vulnerable. The spec acknowledges that in https://tools.ietf.org/html/draft-eaton-oauth-bodyhash-00#section-6.2 # At this time, no further effort has been made to replace SHA-1 for the OAuth Request Body Hash extension. content_type = request.headers.get('Content-Type', None) content_type_eligible = content_type and content_type.find('application/x-www-form-urlencoded') < 0 if request.body is not None and content_type_eligible: params.append(('oauth_body_hash', base64.b64encode(hashlib.sha1(request.body.encode('utf-8')).digest()).decode('utf-8'))) # noqa: S324 return params def _render(self, request, formencode=False, realm=None): """Render a signed request according to signature type Returns a 3-tuple containing the request URI, headers, and body. If the formencode argument is True and the body contains parameters, it is escaped and returned as a valid formencoded string. """ # TODO what if there are body params on a header-type auth? # TODO what if there are query params on a body-type auth? uri, headers, body = request.uri, request.headers, request.body # TODO: right now these prepare_* methods are very narrow in scope--they # only affect their little thing. In some cases (for example, with # header auth) it might be advantageous to allow these methods to touch # other parts of the request, like the headers—so the prepare_headers # method could also set the Content-Type header to x-www-form-urlencoded # like the spec requires. This would be a fundamental change though, and # I'm not sure how I feel about it. 
if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER: headers = parameters.prepare_headers( request.oauth_params, request.headers, realm=realm) elif self.signature_type == SIGNATURE_TYPE_BODY and request.decoded_body is not None: body = parameters.prepare_form_encoded_body( request.oauth_params, request.decoded_body) if formencode: body = urlencode(body) headers['Content-Type'] = 'application/x-www-form-urlencoded' elif self.signature_type == SIGNATURE_TYPE_QUERY: uri = parameters.prepare_request_uri_query( request.oauth_params, request.uri) else: raise ValueError('Unknown signature type specified.') return uri, headers, body def sign(self, uri, http_method='GET', body=None, headers=None, realm=None): """Sign a request Signs an HTTP request with the specified parts. Returns a 3-tuple of the signed request's URI, headers, and body. Note that http_method is not returned as it is unaffected by the OAuth signing process. Also worth noting is that duplicate parameters will be included in the signature, regardless of where they are specified (query, body). The body argument may be a dict, a list of 2-tuples, or a formencoded string. The Content-Type header must be 'application/x-www-form-urlencoded' if it is present. If the body argument is not one of the above, it will be returned verbatim as it is unaffected by the OAuth signing process. Attempting to sign a request with non-formencoded data using the OAuth body signature type is invalid and will raise an exception. If the body does contain parameters, it will be returned as a properly- formatted formencoded string. Body may not be included if the http_method is either GET or HEAD as this changes the semantic meaning of the request. All string data MUST be unicode or be encoded with the same encoding scheme supplied to the Client constructor, default utf-8. This includes strings inside body dicts, for example. 
""" # normalize request data request = Request(uri, http_method, body, headers, encoding=self.encoding) # sanity check content_type = request.headers.get('Content-Type', None) multipart = content_type and content_type.startswith('multipart/') should_have_params = content_type == CONTENT_TYPE_FORM_URLENCODED has_params = request.decoded_body is not None # 3.4.1.3.1. Parameter Sources # [Parameters are collected from the HTTP request entity-body, but only # if [...]: # * The entity-body is single-part. if multipart and has_params: raise ValueError( "Headers indicate a multipart body but body contains parameters.") # * The entity-body follows the encoding requirements of the # "application/x-www-form-urlencoded" content-type as defined by # [W3C.REC-html40-19980424]. elif should_have_params and not has_params: raise ValueError( "Headers indicate a formencoded body but body was not decodable.") # * The HTTP request entity-header includes the "Content-Type" # header field set to "application/x-www-form-urlencoded". elif not should_have_params and has_params: raise ValueError( "Body contains parameters but Content-Type header was {} " "instead of {}".format(content_type or "not set", CONTENT_TYPE_FORM_URLENCODED)) # 3.5.2. Form-Encoded Body # Protocol parameters can be transmitted in the HTTP request entity- # body, but only if the following REQUIRED conditions are met: # o The entity-body is single-part. # o The entity-body follows the encoding requirements of the # "application/x-www-form-urlencoded" content-type as defined by # [W3C.REC-html40-19980424]. # o The HTTP request entity-header includes the "Content-Type" header # field set to "application/x-www-form-urlencoded". 
elif self.signature_type == SIGNATURE_TYPE_BODY and not ( should_have_params and has_params and not multipart): raise ValueError( 'Body signatures may only be used with form-urlencoded content') # We amend https://tools.ietf.org/html/rfc5849#section-3.4.1.3.1 # with the clause that parameters from body should only be included # in non GET or HEAD requests. Extracting the request body parameters # and including them in the signature base string would give semantic # meaning to the body, which it should not have according to the # HTTP 1.1 spec. elif http_method.upper() in ('GET', 'HEAD') and has_params: raise ValueError('GET/HEAD requests should not include body.') # generate the basic OAuth parameters request.oauth_params = self.get_oauth_params(request) # generate the signature request.oauth_params.append( ('oauth_signature', self.get_oauth_signature(request))) # render the signed request and return it uri, headers, body = self._render(request, formencode=True, realm=(realm or self.realm)) if self.decoding: log.debug('Encoding URI, headers and body to %s.', self.decoding) uri = uri.encode(self.decoding) body = body.encode(self.decoding) if body else body new_headers = {} for k, v in headers.items(): new_headers[k.encode(self.decoding)] = v.encode(self.decoding) headers = new_headers return uri, headers, body
Client
python
getsentry__sentry
tests/acceptance/test_performance_summary.py
{ "start": 760, "end": 8563 }
class ____(AcceptanceTestCase, SnubaTestCase): def setUp(self) -> None: super().setUp() self.org = self.create_organization(owner=self.user, name="Rowdy Tiger") self.team = self.create_team( organization=self.org, name="Mariachi Band", members=[self.user] ) self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal") self.group = self.create_group(project=self.project) self.login_as(self.user) self.path = "/organizations/{}/performance/summary/?{}".format( self.org.slug, urlencode({"transaction": "/country_by_code/", "project": self.project.id}), ) AssistantActivity.objects.create( user=self.user, guide_id=20, viewed_ts=before_now(minutes=1) ) self.page = TransactionSummaryPage(self.browser) @patch("django.utils.timezone.now") def test_with_data(self, mock_now: MagicMock) -> None: mock_now.return_value = before_now() # Create a transaction event = make_event(load_data("transaction", timestamp=before_now(minutes=3))) self.store_event(data=event, project_id=self.project.id) self.store_event( data={ "transaction": "/country_by_code/", "message": "This is bad", "event_id": "b" * 32, "timestamp": before_now(minutes=1).isoformat(), }, project_id=self.project.id, ) with self.feature(FEATURES): self.browser.get(self.path) self.page.wait_until_loaded() # We have to wait for this again because there are loaders inside of the table self.page.wait_until_loaded() @patch("django.utils.timezone.now") def test_view_details_from_summary(self, mock_now: MagicMock) -> None: mock_now.return_value = before_now() event = make_event( load_data( "transaction", timestamp=before_now(minutes=3), trace="a" * 32, span_id="ab" * 8 ) ) self.store_event(data=event, project_id=self.project.id) with self.feature(FEATURES): self.browser.get(self.path) self.page.wait_until_loaded() # View the first event details. 
self.browser.element('[data-test-id="view-id"]').click() self.page.wait_until_loaded() @patch("django.utils.timezone.now") def test_tags_page(self, mock_now: MagicMock) -> None: mock_now.return_value = before_now() tags_path = "/organizations/{}/performance/summary/tags/?{}".format( self.org.slug, urlencode({"transaction": "/country_by_code/", "project": self.project.id}), ) # Create a transaction event_data = load_data("transaction", timestamp=before_now(minutes=3)) event = make_event(event_data) self.store_event(data=event, project_id=self.project.id) with self.feature(FEATURES): self.browser.get(tags_path) self.page.wait_until_loaded() @patch("django.utils.timezone.now") def test_transaction_vitals(self, mock_now: MagicMock) -> None: mock_now.return_value = before_now() vitals_path = "/organizations/{}/performance/summary/vitals/?{}".format( self.org.slug, urlencode({"transaction": "/country_by_code/", "project": self.project.id}), ) # Create a transaction event_data = load_data("transaction", timestamp=before_now(minutes=3)) # only frontend pageload transactions can be shown on the vitals tab event_data["contexts"]["trace"]["op"] = "pageload" event_data["measurements"]["fp"]["value"] = 5000 event = make_event(event_data) self.store_event(data=event, project_id=self.project.id) with self.feature(FEATURES): self.browser.get(vitals_path) self.page.wait_until_loaded() @patch("django.utils.timezone.now") def test_transaction_vitals_filtering(self, mock_now: MagicMock) -> None: mock_now.return_value = before_now() vitals_path = "/organizations/{}/performance/summary/vitals/?{}".format( self.org.slug, urlencode( { "transaction": "/country_by_code/", "project": self.project.id, "dataFilter": "exclude_outliers", } ), ) # Create transactions for seconds in range(3): event_data = load_data("transaction", timestamp=before_now(minutes=3)) event_data["contexts"]["trace"]["op"] = "pageload" event_data["contexts"]["trace"]["id"] = ("c" * 31) + hex(seconds)[2:] 
event_data["event_id"] = ("c" * 31) + hex(seconds)[2:] event_data["measurements"]["fp"]["value"] = seconds * 10 event_data["measurements"]["fcp"]["value"] = seconds * 10 event_data["measurements"]["lcp"]["value"] = seconds * 10 event_data["measurements"]["fid"]["value"] = seconds * 10 event_data["measurements"]["cls"]["value"] = seconds / 10.0 self.store_event(data=event_data, project_id=self.project.id) # add anchor point event_data = load_data("transaction", timestamp=before_now(minutes=2)) event_data["contexts"]["trace"]["op"] = "pageload" event_data["contexts"]["trace"]["id"] = "a" * 32 event_data["event_id"] = "a" * 32 event_data["measurements"]["fp"]["value"] = 3000 event_data["measurements"]["fcp"]["value"] = 3000 event_data["measurements"]["lcp"]["value"] = 3000 event_data["measurements"]["fid"]["value"] = 3000 event_data["measurements"]["cls"]["value"] = 0.3 self.store_event(data=event_data, project_id=self.project.id) # add outlier event_data = load_data("transaction", timestamp=before_now(minutes=2)) event_data["contexts"]["trace"]["op"] = "pageload" event_data["contexts"]["trace"]["id"] = "b" * 32 event_data["event_id"] = "b" * 32 event_data["measurements"]["fp"]["value"] = 3000000000 event_data["measurements"]["fcp"]["value"] = 3000000000 event_data["measurements"]["lcp"]["value"] = 3000000000 event_data["measurements"]["fid"]["value"] = 3000000000 event_data["measurements"]["cls"]["value"] = 3000000000 self.store_event(data=event_data, project_id=self.project.id) with self.feature(FEATURES): self.browser.get(vitals_path) self.page.wait_until_loaded() self.browser.element(xpath="//button//span[contains(text(), 'Exclude')]").click() self.browser.element(xpath="//p[contains(text(), 'Include')]").click() self.page.wait_until_loaded() @patch("django.utils.timezone.now") def test_transaction_threshold_modal(self, mock_now: MagicMock) -> None: mock_now.return_value = before_now() # Create a transaction event = make_event(load_data("transaction", 
timestamp=before_now(minutes=3))) self.store_event(data=event, project_id=self.project.id) self.store_event( data={ "transaction": "/country_by_code/", "message": "This is bad", "event_id": "b" * 32, "timestamp": before_now(minutes=3).isoformat(), }, project_id=self.project.id, ) with self.feature(FEATURES): self.browser.get(self.path) self.page.wait_until_loaded() self.browser.click('[data-test-id="set-transaction-threshold"]')
PerformanceSummaryTest
python
walkccc__LeetCode
solutions/3100. Water Bottles II/3100.py
{ "start": 0, "end": 248 }
class ____: def maxBottlesDrunk(self, numBottles: int, numExchange: int) -> int: ans = numBottles while numBottles >= numExchange: numBottles = numBottles - numExchange + 1 numExchange += 1 ans += 1 return ans
Solution
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_bandit/S602.py
{ "start": 748, "end": 1208 }
class ____: def __init__(self): self.shell_defaults = {} def fetch_shell_config(self, username): return {} def run(self, username): Popen("true", shell={**self.shell_defaults, **self.fetch_shell_config(username)}) # Additional truthiness cases for generator, lambda, and f-strings Popen("true", shell=(i for i in ())) Popen("true", shell=lambda: 0) Popen("true", shell=f"{b''}") x = 1 Popen("true", shell=f"{x=}")
ShellConfig
python
pytorch__pytorch
torch/_inductor/autoheuristic/artifacts/_MixedMMH100.py
{ "start": 461, "end": 7869 }
class ____(LearnedHeuristicDecision): def __init__(self) -> None: self.choices: list[Choice] = [] self.fill_choices() def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool: return ( metadata.name == self.get_name() and metadata.shared_memory == 232448 and str(metadata.device_capa) == "(9, 0)" ) def get_confidence_threshold(self) -> float: return 0.0 def get_choice(self, idx: int) -> Optional[str]: if idx < len(self.choices): return self.choices[idx] return None def fill_choices(self) -> None: self.choices.append('extern_fallback_mixed_mm') self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4') self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4') self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8') self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4') self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=32_numstages=2_numwarps=2') self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=2') self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4') self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=256_BLOCK-N=128_numstages=3_numwarps=4') self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=256_BLOCK-N=128_numstages=5_numwarps=8') self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8') self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4') self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4') self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=2_numwarps=4') self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4') self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=8') 
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4') self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4') self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4') self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4') self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=8') def get_name(self) -> str: return 'mixed_mm' def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]: if context.get_value('arith_intensity') <= 15.988086223602295: if context.get_value('n') <= 25280.0: if context.get_value('n') <= 1344.0: if context.get_value('mat1_stride_0') <= 7808.0: return [(0.581, 7), (0.419, 6)] else: if context.get_value('m*n') <= 7680.0: return [(0.875, 0), (0.125, 6)] else: return [(0.833, 0), (0.167, 7)] else: if context.get_value('n') <= 8512.0: if str(context.get_value('mat2_dtype')) != 'torch.int8': return [(0.763, 6), (0.237, 7)] else: return [(0.725, 7), (0.275, 6)] else: if str(context.get_value('mat1_dtype')) != 'torch.bfloat16': return [(0.736, 7), (0.197, 9), (0.048, 6), (0.014, 8), (0.005, 10)] else: return [(0.473, 7), (0.398, 6), (0.097, 9), (0.032, 10)] else: if context.get_value('n') <= 42254.0: if context.get_value('n') <= 33856.0: if context.get_value('k*n') <= 68157440.0: return [(0.370, 4), (0.370, 5), (0.074, 7), (0.074, 8), (0.074, 11), (0.037, 6)] else: return [(0.916, 8), (0.036, 7), (0.036, 9), (0.012, 4)] else: return [(0.659, 5), (0.341, 6)] else: if context.get_value('k*n') <= 326052992.0: if context.get_value('n') <= 55232.0: return [(0.571, 6), (0.321, 7), (0.036, 4), (0.036, 8), (0.036, 9)] else: return [(0.506, 6), (0.325, 8), (0.104, 7), (0.039, 5), (0.026, 9)] else: if context.get_value('n') <= 57024.0: return [(0.462, 9), (0.385, 7), (0.115, 6), (0.038, 8)] else: return [(0.598, 8), (0.223, 9), (0.107, 6), (0.071, 7)] else: if 
context.get_value('m*n') <= 543936.0: if str(context.get_value('17LEQmLEQ32')) != 'True': if context.get_value('m*n') <= 262272.0: if context.get_value('n') <= 1592.5: return [(0.860, 0), (0.140, 9)] else: return None else: if context.get_value('m*k') <= 1294336.0: return [(0.833, 17), (0.150, 18), (0.017, 15)] else: return [(0.917, 17), (0.083, 8)] else: if context.get_value('n') <= 12416.0: if context.get_value('m*n') <= 43008.0: return None else: return [(0.853, 14), (0.147, 9)] else: return [(0.625, 12), (0.375, 14)] else: if context.get_value('m') <= 32.5: if context.get_value('mat2_stride_1') <= 6656.0: if context.get_value('n') <= 69184.0: return [(0.611, 12), (0.361, 14), (0.028, 13)] else: return [(1.000, 12)] else: if context.get_value('mat2_stride_1') <= 20864.0: return [(1.000, 12)] else: return [(0.958, 12), (0.042, 9)] else: if context.get_value('m*n') <= 1085440.0: if context.get_value('n') <= 9152.0: return [(1.000, 18)] else: return [(0.780, 18), (0.160, 16), (0.060, 20)] else: if context.get_value('m') <= 67.0: return [(0.650, 16), (0.203, 19), (0.122, 18), (0.016, 20), (0.008, 1)] else: return [(0.561, 3), (0.185, 16), (0.096, 20), (0.083, 19), (0.076, 2)]
MixedMMH100
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/abstractClass2.py
{ "start": 313, "end": 426 }
class ____(InterfaceA): @abc.abstractmethod def b(self) -> None: print("InterfaceAB.b")
InterfaceAB
python
allegroai__clearml
examples/frameworks/pytorch/pytorch_matplotlib.py
{ "start": 7434, "end": 10469 }
class ____(nn.Module): def __init__(self, target, ): super(ContentLoss, self).__init__() # we 'detach' the target content from the tree used # to dynamically compute the gradient: this is a stated value, # not a variable. Otherwise the forward method of the criterion # will throw an error. self.target = target.detach() def forward(self, input): self.loss = F.mse_loss(input, self.target) return input ###################################################################### # .. Note:: # **Important detail**: although this module is named ``ContentLoss``, it # is not a true PyTorch Loss function. If you want to define your content # loss as a PyTorch Loss function, you have to create a PyTorch autograd function # to recompute/implement the gradient manually in the ``backward`` # method. ###################################################################### # Style Loss # ~~~~~~~~~~ # # The style loss module is implemented similarly to the content loss # module. It will act as a transparent layer in a # network that computes the style loss of that layer. In order to # calculate the style loss, we need to compute the gram matrix :math:`G_{XL}`. A gram # matrix is the result of multiplying a given matrix by its transposed # matrix. In this application the given matrix is a reshaped version of # the feature maps :math:`F_{XL}` of a layer :math:`L`. :math:`F_{XL}` is reshaped to form :math:`\hat{F}_{XL}`, a :math:`K`\ x\ :math:`N` # matrix, where :math:`K` is the number of feature maps at layer :math:`L` and :math:`N` is the # length of any vectorized feature map :math:`F_{XL}^k`. For example, the first line # of :math:`\hat{F}_{XL}` corresponds to the first vectorized feature map :math:`F_{XL}^1`. # # Finally, the gram matrix must be normalized by dividing each element by # the total number of elements in the matrix. This normalization is to # counteract the fact that :math:`\hat{F}_{XL}` matrices with a large :math:`N` dimension yield # larger values in the Gram matrix. 
These larger values will cause the # first layers (before pooling layers) to have a larger impact during the # gradient descent. Style features tend to be in the deeper layers of the # network so this normalization step is crucial. # def gram_matrix(input): a, b, c, d = input.size() # a=batch size(=1) # b=number of feature maps # (c,d)=dimensions of a f. map (N=c*d) features = input.view(a * b, c * d) # resise F_XL into \hat F_XL G = torch.mm(features, features.t()) # compute the gram product # we 'normalize' the values of the gram matrix # by dividing by the number of element in each feature maps. return G.div(a * b * c * d) ###################################################################### # Now the style loss module looks almost exactly like the content loss # module. The style distance is also computed using the mean square # error between :math:`G_{XL}` and :math:`G_{SL}`. #
ContentLoss
python
huggingface__transformers
tests/models/clap/test_feature_extraction_clap.py
{ "start": 3706, "end": 31611 }
class ____(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = ClapFeatureExtractor # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.setUp with Whisper->Clap def setUp(self): self.feat_extract_tester = ClapFeatureExtractionTester(self) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features self.assertTrue(input_features.ndim == 4) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. 
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest._load_datasamples def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration_fusion_short_input(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ # "repeat" [ -20.1049, -19.9764, -20.0731, -19.5055, -27.5018, -22.5761, -26.6071, -29.0091, -26.4659, -26.4236, -28.8808, -31.9190, -32.4848, -34.1186, -34.0340, -32.8803, -30.9895, -37.6238, -38.0347, -40.6263, -36.3496, -42.2533, -32.9132, -27.7068, -29.3704, -30.3208, -22.5972, -27.1494, -30.1975, -31.1005, -29.9372, -27.1917, 
-25.9806, -30.3489, -33.2380, -31.9062, -36.5498, -32.8721, -30.5629, -27.4674, -22.2232, -22.5653, -16.3868, -17.2713, -25.9738, -30.6256, -34.3766, -31.1292, -27.8950, -27.0588, -25.6206, -23.0712, -26.6050, -28.0112, -32.6847, -34.3396, -34.9738, -35.8463, -39.2324, -37.1188, -33.3705, -28.9230, -28.9112, -28.6578 ], [ -36.7233, -30.0587, -24.8431, -18.4611, -16.8149, -23.9319, -32.8580, -34.2264, -27.4332, -26.8027, -29.2721, -33.9033, -39.3403, -35.3232, -26.8076, -28.6460, -35.2780, -36.0738, -35.4996, -37.7631, -39.5056, -34.7112, -36.8741, -34.1066, -32.9474, -33.6604, -27.9937, -30.9594, -26.2928, -32.0485, -29.2151, -29.2917, -32.7308, -29.6542, -31.1454, -37.0088, -32.3388, -37.3086, -31.1024, -27.2889, -19.6788, -21.1488, -19.5144, -14.8889, -21.2006, -24.7488, -27.7940, -31.1058, -27.5068, -21.5737, -22.3780, -21.5151, -26.3086, -30.9223, -33.5043, -32.0307, -37.3806, -41.6188, -45.6650, -40.5131, -32.5023, -26.7385, -26.3709, -26.7761 ] ], [ # "repeatpad" [ -25.7496, -24.9339, -24.1357, -23.1271, -23.7853, -26.1264, -29.1456, -33.2060, -37.8179, -42.4833, -41.9386, -41.2164, -42.3566, -44.2575, -40.0217, -36.6794, -36.6974, -38.7819, -42.0880, -45.5560, -39.9368, -36.3219, -35.5981, -36.6434, -35.1851, -33.0684, -30.0437, -30.2010, -34.3476, -42.1373, -38.8039, -37.3355, -40.4576, -41.0485, -40.6377, -38.2275, -42.7481, -34.6084, -34.7048, -29.5149, -26.3935, -26.8952, -34.1336, -26.2904, -28.2571, -32.5642, -36.7240, -35.5334, -38.2451, -34.8177, -28.9754, -25.1096, -27.9768, -32.3184, -37.0269, -40.5136, -40.8061, -36.4948, -40.3767, -38.9671, -38.3552, -34.1250, -30.9035, -31.6112 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., 
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. ] ], [ # None, same as "repeatpad" [ -25.7496, -24.9339, -24.1357, -23.1271, -23.7853, -26.1264, -29.1456, -33.2060, -37.8179, -42.4833, -41.9386, -41.2164, -42.3566, -44.2575, -40.0217, -36.6794, -36.6974, -38.7819, -42.0880, -45.5560, -39.9368, -36.3219, -35.5981, -36.6434, -35.1851, -33.0684, -30.0437, -30.2010, -34.3476, -42.1373, -38.8039, -37.3355, -40.4576, -41.0485, -40.6377, -38.2275, -42.7481, -34.6084, -34.7048, -29.5149, -26.3935, -26.8952, -34.1336, -26.2904, -28.2571, -32.5642, -36.7240, -35.5334, -38.2451, -34.8177, -28.9754, -25.1096, -27.9768, -32.3184, -37.0269, -40.5136, -40.8061, -36.4948, -40.3767, -38.9671, -38.3552, -34.1250, -30.9035, -31.6112 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
] ], [ # "pad" [ -58.5260, -58.1155, -57.8623, -57.5059, -57.9178, -58.7171, -59.2343, -59.9833, -60.9764, -62.0722, -63.5723, -65.7111, -67.5153, -68.7088, -69.8325, -70.2987, -70.1548, -70.6233, -71.5702, -72.5159, -72.3821, -70.1817, -67.0315, -64.1387, -62.2202, -61.0717, -60.4951, -61.6005, -63.7358, -67.1400, -67.6185, -65.5635, -64.3593, -63.7138, -63.6209, -66.4950, -72.6284, -63.3961, -56.8334, -52.7319, -50.6310, -51.3728, -53.5619, -51.9190, -50.9708, -52.8684, -55.8073, -58.8227, -60.6991, -57.0547, -52.7611, -51.4388, -54.4892, -60.8950, -66.1024, -72.4352, -67.8538, -65.1463, -68.7588, -72.3080, -68.4864, -60.4688, -57.1516, -60.9460 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
] ] ] ) # fmt: on MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]] input_speech = self._load_datasamples(1) feature_extractor = ClapFeatureExtractor() for padding, EXPECTED_VALUES, idx_in_mel in zip( ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, MEL_BIN ): input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features self.assertEqual(input_features.shape, (1, 4, 1001, 64)) torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4) torch.testing.assert_close(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], rtol=1e-4, atol=1e-4) self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 1])) self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 2])) self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 3])) def test_integration_rand_trunc_short_input(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ # "repeat" [ -35.0483, -35.7865, -38.2884, -40.0220, -42.5349, -44.9489, -43.2228, -44.6499, -47.6253, -49.6983, -50.2127, -52.5483, -52.2223, -51.9157, -49.4082, -51.2024, -57.0476, -56.2803, -58.1618, -60.7474, -55.0389, -60.9514, -59.3080, -50.4419, -47.8172, -48.7570, -55.2552, -44.5036, -44.1148, -50.8218, -51.0968, -52.9408, -51.1037, -48.9789, -47.5897, -52.0915, -55.4216, -54.1529, -58.0149, -58.0866, -52.7798, -52.6154, -45.9144, -46.2008, -40.7603, -41.1703, -50.2250, -55.4112, -59.4818, -54.5795, -53.5552, -51.3668, -49.8358, -50.3186, -54.0452, -57.6030, -61.1589, -61.6415, -63.2756, -66.5890, -62.8543, -58.0665, -56.7203, -56.7632 ], [ -47.1320, -37.9961, -34.0076, -36.7109, -47.9057, -48.4924, -43.8371, -44.9728, -48.1689, -52.9141, -57.6077, -52.8520, -44.8502, -45.6764, -51.8389, -56.4284, -54.6972, -53.4889, -55.6077, -58.7149, -60.3760, -54.0136, -56.0730, -55.9870, -54.4017, -53.1094, -53.5640, -50.3064, -49.9520, -49.3239, -48.1668, -53.4852, -50.4561, -50.8688, -55.1970, -51.5538, 
-53.0260, -59.6933, -54.8183, -59.5895, -55.9589, -50.3761, -44.1282, -44.1463, -43.8540, -39.1168, -45.3893, -49.5542, -53.1505, -55.2870, -50.3921, -46.8511, -47.4444, -49.5633, -56.0034, -59.0815, -59.0018, -63.7589, -69.5745, -71.5789, -64.0498, -56.0558, -54.3475, -54.7004 ] ], [ # "repeatpad" [ -40.3184, -39.7186, -39.8807, -41.6508, -45.3613, -50.4785, -57.0297, -60.4944, -59.1642, -58.9495, -60.4661, -62.5300, -58.4759, -55.2865, -54.8973, -56.0780, -57.5482, -59.6557, -64.3309, -65.0330, -59.4941, -56.8552, -55.0519, -55.9817, -56.9739, -55.2827, -54.5312, -51.4141, -50.4289, -51.9131, -57.5821, -63.9979, -59.9180, -58.9489, -62.3247, -62.6975, -63.7948, -60.5250, -64.6107, -58.7905, -57.0229, -54.3084, -49.8445, -50.4459, -57.0172, -50.6425, -52.5992, -57.4207, -61.6358, -60.6540, -63.1968, -57.4360, -52.3263, -51.7695, -57.1946, -62.9610, -66.7359, -67.0335, -63.7440, -68.1775, -66.3798, -62.8650, -59.8972, -59.3139 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
] ], [ # None, same as "repeatpad" [ -40.3184, -39.7186, -39.8807, -41.6508, -45.3613, -50.4785, -57.0297, -60.4944, -59.1642, -58.9495, -60.4661, -62.5300, -58.4759, -55.2865, -54.8973, -56.0780, -57.5482, -59.6557, -64.3309, -65.0330, -59.4941, -56.8552, -55.0519, -55.9817, -56.9739, -55.2827, -54.5312, -51.4141, -50.4289, -51.9131, -57.5821, -63.9979, -59.9180, -58.9489, -62.3247, -62.6975, -63.7948, -60.5250, -64.6107, -58.7905, -57.0229, -54.3084, -49.8445, -50.4459, -57.0172, -50.6425, -52.5992, -57.4207, -61.6358, -60.6540, -63.1968, -57.4360, -52.3263, -51.7695, -57.1946, -62.9610, -66.7359, -67.0335, -63.7440, -68.1775, -66.3798, -62.8650, -59.8972, -59.3139 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
] ], [ # "pad" [ -73.3190, -73.6349, -74.1451, -74.8539, -75.7476, -76.5438, -78.5540, -80.1339, -81.8911, -83.7560, -85.5387, -86.7466, -88.2072, -88.6090, -88.8243, -89.0784, -89.4364, -89.8179, -91.3146, -92.2833, -91.7221, -90.9440, -88.1315, -86.2425, -84.2281, -82.4893, -81.5993, -81.1328, -81.5759, -83.1068, -85.6525, -88.9520, -88.9187, -87.2703, -86.3052, -85.7188, -85.8802, -87.9996, -95.0464, -88.0133, -80.8561, -76.5597, -74.2816, -74.8109, -77.3615, -76.0719, -75.3426, -77.6428, -80.9663, -84.5275, -84.9907, -80.5205, -77.2851, -78.6259, -84.7740, -91.4535, -98.1894, -94.3872, -92.3735, -97.6807, -98.1501, -91.4344, -85.2842, -88.4338 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
] ] ] ) # fmt: on MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]] input_speech = self._load_datasamples(1) feature_extractor = ClapFeatureExtractor() for padding, EXPECTED_VALUES, idx_in_mel in zip( ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, MEL_BIN ): input_features = feature_extractor( input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding ).input_features self.assertEqual(input_features.shape, (1, 1, 1001, 64)) torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4) torch.testing.assert_close(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], rtol=1e-4, atol=1e-4) def test_integration_fusion_long_input(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ -11.1830, -10.1894, -8.6051, -4.8578, -1.3268, -8.4606, -14.5453, -9.2017, 0.5781, 16.2129, 14.8289, 3.6326, -3.8794, -6.5544, -2.4408, 1.9531, 6.0967, 1.7590, -7.6730, -6.1571, 2.0052, 16.6694, 20.6447, 21.2145, 13.4972, 15.9043, 16.8987, 4.1766, 11.9428, 21.2372, 12.3016, 4.8604, 6.7241, 1.8543, 4.9235, 5.3188, -0.9897, -1.2416, -6.5864, 2.9529, 2.9274, 6.4753, 10.2300, 11.2127, 3.4042, -1.0055, -6.0475, -6.7524, -3.9801, -1.4434, 0.4740, -0.1584, -4.5457, -8.5746, -8.8428, -13.1475, -9.6079, -8.5798, -4.1143, -3.7966, -7.1651, -6.1517, -8.0258, -12.1486 ], [ -10.2017, -7.9924, -5.9517, -3.9372, -1.9735, -4.3130, 16.1647, 25.0592, 23.5532, 14.4974, -7.0778, -10.2262, 6.4782, 20.3454, 19.4269, 1.7976, -16.5070, 4.9380, 12.3390, 6.9285, -13.6325, -8.5298, 1.0839, -5.9629, -8.4812, 3.1331, -2.0963, -16.6046, -14.0070, -17.5707, -13.2080, -17.2168, -17.7770, -12.1111, -18.6184, -17.1897, -13.9801, -12.0426, -23.5400, -25.6823, -23.5813, -18.7847, -20.5473, -25.6458, -19.7585, -27.6007, -28.9276, -24.8948, -25.4458, -22.2807, -19.6613, -19.2669, -15.7813, -19.6821, -24.3439, -22.2598, -28.2631, -30.1017, -32.7646, -33.6525, -27.5639, -22.0548, -27.8054, -29.6947 ], [ -9.2078, -7.2963, -6.2095, 
-7.9959, -2.9280, -11.1843, -6.1490, 5.0733, 19.2957, 21.4578, 14.6803, -3.3153, -6.3334, -2.3542, 6.9509, 15.2965, 14.6620, 5.2075, -0.0873, 1.1919, 18.1986, 20.8470, 10.8035, 2.2516, 7.6905, 7.7427, -1.2543, -5.0018, 0.9809, -2.1584, -5.4580, -5.4760, -11.8888, -9.0605, -8.4638, -9.9897, -0.0540, -5.1629, 0.0483, -4.1504, -4.8140, -7.8236, -9.0622, -10.1742, -8.9597, -11.5380, -16.5603, -17.1858, -17.5032, -20.9326, -23.9543, -25.2602, -25.3429, -27.4536, -26.8859, -22.7852, -25.8288, -24.8399, -23.8893, -24.2096, -26.5415, -23.7281, -25.6851, -22.3629 ], [ 1.3448, 2.9883, 4.0366, -0.8019, -10.4191, -10.0883, -4.3812, 0.8136, 2.1579, 0.0832, 1.0949, -0.9759, -5.5319, -4.6009, -6.5452, -14.9155, -20.1584, -9.3611, -2.4271, 1.4031, 4.9910, 8.6916, 8.6785, 10.1973, 9.9029, 5.3840, 7.5336, 5.2803, 2.8144, -0.3138, 2.2216, 5.7328, 7.5574, 7.7402, 1.0681, 3.1049, 7.0742, 6.5588, 7.3712, 5.7881, 8.6874, 8.7725, 2.8133, -4.5809, -6.1317, -5.1719, -5.0192, -9.0977, -10.9391, -6.0769, 1.6016, -0.8965, -7.2252, -7.8632, -11.4468, -11.7446, -10.7447, -7.0601, -2.7748, -4.1798, -2.8433, -3.1352, 0.8097, 6.4212 ] ] ) # fmt: on MEL_BIN = 963 input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)]) feature_extractor = ClapFeatureExtractor() for padding, EXPECTED_VALUES, block_idx in zip( ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, [1, 2, 0, 3] ): set_seed(987654321) input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features self.assertEqual(input_features.shape, (1, 4, 1001, 64)) torch.testing.assert_close(input_features[0, block_idx, MEL_BIN], EXPECTED_VALUES, rtol=1e-3, atol=1e-3) def test_integration_rand_trunc_long_input(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ -35.4022, -32.7555, -31.2004, -32.7764, -42.5770, -41.6339, -43.1630, -44.5080, -44.3029, -48.9628, -39.5022, -39.2105, -43.1350, -43.2195, -48.4894, -52.2344, -57.6891, -52.2228, -45.5155, -44.2893, -43.4697, 
-46.6702, -43.7490, -40.4819, -42.7275, -46.3434, -46.8412, -41.2003, -43.1681, -46.2948, -46.1925, -47.8333, -45.6812, -44.9182, -41.7786, -43.3809, -44.3199, -42.8814, -45.4771, -46.7114, -46.9746, -42.7090, -41.6057, -38.3965, -40.1980, -41.0263, -34.1256, -28.3289, -29.0201, -30.4453, -29.5561, -30.1734, -25.9406, -19.0897, -15.8452, -20.1351, -23.6515, -23.1194, -17.1845, -19.4399, -23.6527, -22.8768, -20.7279, -22.7864 ], [ -35.7719, -27.2566, -23.6964, -27.5521, 0.2510, 7.4391, 1.3917, -13.3417, -28.1758, -17.0856, -5.7723, -0.8000, -7.8832, -15.5548, -30.5935, -24.7571, -13.7009, -10.3432, -21.2464, -24.8118, -19.4080, -14.9779, -11.7991, -18.4485, -20.1982, -17.3652, -20.6328, -28.2967, -25.7819, -21.8962, -28.5083, -29.5719, -30.2120, -35.7033, -31.8218, -34.0408, -37.7744, -33.9653, -31.3009, -30.9063, -28.6153, -32.2202, -28.5456, -28.8579, -32.5170, -37.9152, -43.0052, -46.4849, -44.0786, -39.1933, -33.2757, -31.6313, -42.6386, -52.3679, -53.5785, -55.6444, -47.0050, -47.6459, -56.6361, -60.6781, -61.5244, -55.8272, -60.4832, -58.1897 ], [ -38.2686, -36.6285, -32.5835, -35.1693, -37.7938, -37.4035, -35.3132, -35.6083, -36.3609, -40.9472, -36.7846, -36.1544, -38.9076, -39.3618, -35.4953, -34.2809, -39.9466, -39.7433, -34.8347, -37.5674, -41.5689, -38.9161, -34.3947, -30.2924, -30.4841, -34.5831, -28.9261, -24.8849, -31.2324, -27.1622, -27.2107, -25.9385, -30.1691, -30.9223, -23.9495, -25.6047, -26.7119, -28.5523, -27.7481, -32.8427, -35.4650, -31.0399, -31.2073, -30.5163, -22.9819, -20.8892, -19.2510, -24.7905, -28.9426, -28.1998, -26.7386, -25.0140, -27.9223, -32.9913, -33.1864, -34.9742, -38.5995, -39.6990, -29.3203, -22.4697, -25.6415, -33.5608, -33.0945, -27.1716 ], [ -33.2015, -28.7741, -21.9457, -23.4888, -32.1072, -8.6307, 3.2724, 5.9157, -0.9221, -30.1814, -31.0015, -27.4508, -27.0477, -9.5342, 0.3221, 0.6511, -7.1596, -25.9707, -32.8924, -32.2300, -13.8974, -0.4895, 0.9168, -10.7663, -27.1176, -35.0829, -11.6859, -4.8855, -11.8898, -26.6167, 
-5.6192, -3.8443, -19.7947, -14.4101, -8.6236, -21.2458, -21.0801, -17.9136, -24.4663, -18.6333, -24.8085, -15.5854, -15.4344, -11.5046, -22.3625, -27.3387, -32.4353, -30.9670, -31.3789, -35.4044, -34.4591, -25.2433, -28.0773, -33.8736, -33.0224, -33.3155, -38.5302, -39.2741, -36.6395, -34.7729, -32.4483, -42.4001, -49.2857, -39.1682 ] ] ) # fmt: on MEL_BIN = 963 SEEDS = [987654321, 1234, 666, 5555] input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)]) feature_extractor = ClapFeatureExtractor() for padding, EXPECTED_VALUES, seed in zip( ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, SEEDS ): set_seed(seed) input_features = feature_extractor( input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding ).input_features self.assertEqual(input_features.shape, (1, 1, 1001, 64)) torch.testing.assert_close(input_features[0, 0, MEL_BIN], EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
ClapFeatureExtractionTest
python
google__python-fire
fire/console/console_attr.py
{ "start": 3623, "end": 3977 }
class ____(BoxLineCharacters): """ASCII Box/line drawing characters.""" dl = '+' dr = '+' h = '-' hd = '+' hu = '+' ul = '+' ur = '+' v = '|' vh = '+' vl = '+' vr = '+' d_dl = '#' d_dr = '#' d_h = '=' d_hd = '#' d_hu = '#' d_ul = '#' d_ur = '#' d_v = '#' d_vh = '#' d_vl = '#' d_vr = '#'
BoxLineCharactersAscii
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/hooks/bedrock.py
{ "start": 2079, "end": 2697 }
class ____(AwsBaseHook): """ Interact with the Amazon Agents for Bedrock API. Provide thin wrapper around :external+boto3:py:class:`boto3.client("bedrock-agent") <AgentsforBedrock.Client>`. Additional arguments (such as ``aws_conn_id``) may be specified and are passed down to the underlying AwsBaseHook. .. seealso:: - :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook` """ client_type = "bedrock-agent" def __init__(self, *args, **kwargs) -> None: kwargs["client_type"] = self.client_type super().__init__(*args, **kwargs)
BedrockAgentHook
python
PyCQA__pylint
tests/functional/r/regression/regression_property_no_member_844.py
{ "start": 135, "end": 268 }
class ____: def __init__(self): self.__thing = 'foo' @property def thing(self): return self.__thing
Parent
python
sanic-org__sanic
sanic/cli/executor.py
{ "start": 683, "end": 2805 }
class ____: def __init__(self, app: Sanic, kwargs: dict) -> None: self.app = app self.kwargs = kwargs self.commands = self._make_commands() self.parser = self._make_parser() def run(self, command: str, args: list[str]) -> None: if command == "exec": args = ["--help"] parsed_args = self.parser.parse_args(args) if command not in self.commands: raise ValueError(f"Unknown command: {command}") parsed_kwargs = vars(parsed_args) parsed_kwargs.pop("command") run(self.commands[command](**parsed_kwargs)) def _make_commands(self) -> dict[str, Callable]: commands = {c.name: c.func for c in self.app._future_commands} return commands def _make_parser(self) -> SanicArgumentParser: width = shutil.get_terminal_size().columns parser = SanicArgumentParser( prog="sanic", description=get_logo(True), formatter_class=lambda prog: SanicHelpFormatter( prog, max_help_position=36 if width > 96 else 24, indent_increment=4, width=None, ), ) subparsers = parser.add_subparsers( dest="command", title=" Commands", parser_class=ExecutorSubParser, ) for command in self.app._future_commands: sub = subparsers.add_parser( command.name, help=command.func.__doc__ or f"Execute {command.name}", formatter_class=SanicHelpFormatter, ) self._add_arguments(sub, command.func) return parser def _add_arguments(self, parser: ArgumentParser, func: Callable) -> None: sig = signature(func) for param in sig.parameters.values(): kwargs = {} if param.default is not param.empty: kwargs["default"] = param.default parser.add_argument( f"--{param.name}", help=param.annotation, **kwargs, )
Executor
python
openai__openai-python
src/openai/types/beta/realtime/response_audio_transcript_delta_event.py
{ "start": 211, "end": 773 }
class ____(BaseModel): content_index: int """The index of the content part in the item's content array.""" delta: str """The transcript delta.""" event_id: str """The unique ID of the server event.""" item_id: str """The ID of the item.""" output_index: int """The index of the output item in the response.""" response_id: str """The ID of the response.""" type: Literal["response.audio_transcript.delta"] """The event type, must be `response.audio_transcript.delta`."""
ResponseAudioTranscriptDeltaEvent
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dataplex.py
{ "start": 119371, "end": 122473 }
class ____(DataplexCatalogBaseOperator): """ Get an EntryType resource. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:DataplexCatalogGetEntryTypeOperator` :param entry_type_id: Required. EntryType identifier. :param project_id: Required. The ID of the Google Cloud project where the service is used. :param location: Required. The ID of the Google Cloud region where the service is used. :param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud. :param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: Optional. The amount of time, in seconds, to wait for the request to complete. Note that if `retry` is specified, the timeout applies to each individual attempt. :param metadata: Optional. Additional metadata that is provided to the method. :param impersonation_chain: Optional. Service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = tuple( {"entry_type_id"} | set(DataplexCatalogBaseOperator.template_fields) ) operator_extra_links = (DataplexCatalogEntryTypeLink(),) def __init__( self, entry_type_id: str, *args, **kwargs, ) -> None: super().__init__(*args, **kwargs) self.entry_type_id = entry_type_id @property def extra_links_params(self) -> dict[str, Any]: return { **super().extra_links_params, "entry_type_id": self.entry_type_id, } def execute(self, context: Context): DataplexCatalogEntryTypeLink.persist(context=context) self.log.info( "Retrieving Dataplex Catalog EntryType %s.", self.entry_type_id, ) try: entry_type = self.hook.get_entry_type( entry_type_id=self.entry_type_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) except NotFound: self.log.info( "Dataplex Catalog EntryType %s not found.", self.entry_type_id, ) raise AirflowException(NotFound) except Exception as ex: raise AirflowException(ex) return EntryType.to_dict(entry_type)
DataplexCatalogGetEntryTypeOperator
python
airbytehq__airbyte
airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py
{ "start": 13889, "end": 13988 }
class ____(EventsMixin, RetargetingPartnersReport): pass # Source
RetargetingPartnersEventsReport
python
allegroai__clearml
clearml/backend_api/utils.py
{ "start": 3027, "end": 8211 }
class ____(requests.Session): """requests.Session with a send timeout for requests with a content length header""" write_timeout = (300.0, 300.0) request_size_threshold = 15000 def __init__(self, *args: Any, **kwargs: Any) -> None: super(SessionWithTimeout, self).__init__(*args, **kwargs) def send(self, request: requests.models.PreparedRequest, **kwargs: Any) -> requests.Response: """Overrides the send request in case a Content-Length headers exists""" if ( isinstance(request, requests.models.PreparedRequest) and request.headers and request.headers.get("Content-Length") ): try: if int(request.headers["Content-Length"]) > self.request_size_threshold: timeout = kwargs.get("timeout", 0) kwargs["timeout"] = ( ( max(self.write_timeout[0], timeout[0]), max(self.write_timeout[1], timeout[1]), ) if isinstance(timeout, (list, tuple)) else max(self.write_timeout[0], timeout) ) except (TypeError, ValueError, NameError): pass return super(SessionWithTimeout, self).send(request, **kwargs) def get_http_session_with_retry( total: int = 0, connect: Optional[int] = None, read: Optional[int] = None, redirect: Optional[int] = None, status: Optional[int] = None, status_forcelist: Optional[List[int]] = None, backoff_factor: float = 0, backoff_max: Optional[float] = None, pool_connections: Optional[int] = None, pool_maxsize: Optional[int] = None, config: Optional[Any] = None, ) -> SessionWithTimeout: """ Returns a requests.Session-derived object that supports a retry behavior. """ global __disable_certificate_verification_warning if not all(isinstance(x, (int, type(None))) for x in (total, connect, read, redirect, status)): raise ValueError("Bad configuration. All retry count values must be null or int") if status_forcelist and not all(isinstance(x, int) for x in status_forcelist): raise ValueError("Bad configuration. 
Retry status_forcelist must be null or list of ints") config = config or get_config() pool_maxsize = pool_maxsize if pool_maxsize is not None else config.get("api.http.pool_maxsize", 512) pool_connections = ( pool_connections if pool_connections is not None else config.get("api.http.pool_connections", 512) ) session = SessionWithTimeout() # HACK: with python 2.7 there is a potential race condition that can cause # a deadlock when importing "netrc", inside the get_netrc_auth() function # setting 'session.trust_env' to False will make sure the `get_netrc_auth` is not called # see details: https://github.com/psf/requests/issues/2925 if six.PY2: session.trust_env = False if backoff_max is not None: if "BACKOFF_MAX" in vars(Retry): Retry.BACKOFF_MAX = backoff_max else: Retry.DEFAULT_BACKOFF_MAX = backoff_max retry = Retry( total=total, connect=connect, read=read, redirect=redirect, status=status, status_forcelist=status_forcelist, backoff_factor=backoff_factor, ) adapter = TLSv1HTTPAdapter(max_retries=retry, pool_connections=pool_connections, pool_maxsize=pool_maxsize) session.mount("http://", adapter) session.mount("https://", adapter) # update verify host certificate verify = ENV_HOST_VERIFY_CERT.get(default=config.get("api.verify_certificate", True)) try: session.verify = bool(strtobool(verify) if isinstance(verify, str) else verify) except (ValueError, AttributeError): session.verify = verify if not session.verify and __disable_certificate_verification_warning < 2: # show warning __disable_certificate_verification_warning += 1 logging.getLogger("clearml").warning( msg="InsecureRequestWarning: Certificate verification is disabled! Adding " "certificate verification is strongly advised. 
See: " "https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings" ) # make sure we only do not see the warning import urllib3 # noinspection PyBroadException try: urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) except Exception: pass return session def get_response_cls(request_cls: type) -> type: """Extract a request's response class using the mapping found in the module defining the request's service""" for req_cls in request_cls.mro(): module = sys.modules[req_cls.__module__] if hasattr(module, "action_mapping"): return module.action_mapping[(request_cls._action, request_cls._version)][1] elif hasattr(module, "response_mapping"): return module.response_mapping[req_cls] raise TypeError("no response class!")
SessionWithTimeout
python
pennersr__django-allauth
tests/apps/socialaccount/providers/drip/tests.py
{ "start": 236, "end": 684 }
class ____(OAuth2TestsMixin, TestCase): provider_id = DripProvider.id def get_mocked_response(self): return MockedResponse( HTTPStatus.OK, """{ "users":[{ "email": "john@acme.com", "name": "John Doe", "time_zone": "America/Los_Angeles" }] }""", ) def get_expected_to_str(self): return "john@acme.com"
DripTests
python
scikit-learn__scikit-learn
asv_benchmarks/benchmarks/manifold.py
{ "start": 116, "end": 820 }
class ____(Estimator, Benchmark): """ Benchmarks for t-SNE. """ param_names = ["method"] params = (["exact", "barnes_hut"],) def setup_cache(self): super().setup_cache() def make_data(self, params): (method,) = params n_samples = 500 if method == "exact" else None return _digits_dataset(n_samples=n_samples) def make_estimator(self, params): (method,) = params estimator = TSNE(random_state=0, method=method) return estimator def make_scorers(self): self.train_scorer = lambda _, __: self.estimator.kl_divergence_ self.test_scorer = lambda _, __: self.estimator.kl_divergence_
TSNEBenchmark
python
pandas-dev__pandas
pandas/util/version/__init__.py
{ "start": 5691, "end": 13142 }
class ____(_BaseVersion): _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) _key: CmpKey def __init__(self, version: str) -> None: # Validate the version and parse it into pieces match = self._regex.search(version) if not match: raise InvalidVersion(f"Invalid version: '{version}'") # Store the parsed out pieces of the version self._version = _Version( epoch=int(match.group("epoch")) if match.group("epoch") else 0, release=tuple(int(i) for i in match.group("release").split(".")), pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")), post=_parse_letter_version( match.group("post_l"), match.group("post_n1") or match.group("post_n2") ), dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")), local=_parse_local_version(match.group("local")), ) # Generate a key which will be used for sorting self._key = _cmpkey( self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local, ) def __repr__(self) -> str: return f"<Version('{self}')>" def __str__(self) -> str: parts = [] # Epoch if self.epoch != 0: parts.append(f"{self.epoch}!") # Release segment parts.append(".".join(str(x) for x in self.release)) # Pre-release if self.pre is not None: parts.append("".join(str(x) for x in self.pre)) # Post-release if self.post is not None: parts.append(f".post{self.post}") # Development release if self.dev is not None: parts.append(f".dev{self.dev}") # Local version segment if self.local is not None: parts.append(f"+{self.local}") return "".join(parts) @property def epoch(self) -> int: return self._version.epoch @property def release(self) -> tuple[int, ...]: return self._version.release @property def pre(self) -> tuple[str, int] | None: return self._version.pre @property def post(self) -> int | None: return self._version.post[1] if self._version.post else None @property def dev(self) -> int | None: return self._version.dev[1] if self._version.dev else None 
@property def local(self) -> str | None: if self._version.local: return ".".join(str(x) for x in self._version.local) else: return None @property def public(self) -> str: return str(self).split("+", 1)[0] @property def base_version(self) -> str: parts = [] # Epoch if self.epoch != 0: parts.append(f"{self.epoch}!") # Release segment parts.append(".".join(str(x) for x in self.release)) return "".join(parts) @property def is_prerelease(self) -> bool: return self.dev is not None or self.pre is not None @property def is_postrelease(self) -> bool: return self.post is not None @property def is_devrelease(self) -> bool: return self.dev is not None @property def major(self) -> int: return self.release[0] if len(self.release) >= 1 else 0 @property def minor(self) -> int: return self.release[1] if len(self.release) >= 2 else 0 @property def micro(self) -> int: return self.release[2] if len(self.release) >= 3 else 0 def _parse_letter_version( letter: str | None, number: str | bytes | SupportsInt | None ) -> tuple[str, int] | None: if letter: # We consider there to be an implicit 0 in a pre-release if there is # not a numeral associated with it. if number is None: number = 0 # We normalize any letters to their lower case form letter = letter.lower() # We consider some words to be alternate spellings of other words and # in those cases we want to normalize the spellings to our preferred # spelling. if letter == "alpha": letter = "a" elif letter == "beta": letter = "b" elif letter in ["c", "pre", "preview"]: letter = "rc" elif letter in ["rev", "r"]: letter = "post" return letter, int(number) if not letter and number: # We assume if we are given a number, but we are not given a letter # then this is using the implicit post release syntax (e.g. 
1.0-1) letter = "post" return letter, int(number) return None _local_version_separators = re.compile(r"[\._-]") def _parse_local_version(local: str | None) -> LocalType | None: if local is not None: return tuple( part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local) ) return None def _cmpkey( epoch: int, release: tuple[int, ...], pre: tuple[str, int] | None, post: tuple[str, int] | None, dev: tuple[str, int] | None, local: LocalType | None, ) -> CmpKey: # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll use a reverse the list, drop all the now # leading zeros until we come to something non zero, then take the rest # re-reverse it back into the correct order and make it a tuple and use # that for our sorting key. _release = tuple( reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) ) # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. # We'll do this by abusing the pre segment, but we _only_ want to do this # if there is not a pre or a post segment. If we have one of those then # the normal sorting rules will handle this case correctly. if pre is None and post is None and dev is not None: _pre: CmpPrePostDevType = NegativeInfinity # Versions without a pre-release (except as noted above) should sort after # those with one. elif pre is None: _pre = Infinity else: _pre = pre # Versions without a post segment should sort before those with one. if post is None: _post: CmpPrePostDevType = NegativeInfinity else: _post = post # Versions without a development segment should sort after those with one. if dev is None: _dev: CmpPrePostDevType = Infinity else: _dev = dev if local is None: # Versions without a local segment should sort before those with one. _local: CmpLocalType = NegativeInfinity else: # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP440. 
# - Alpha numeric segments sort before numeric segments # - Alpha numeric segments sort lexicographically # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly _local = tuple( (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local ) return epoch, _release, _pre, _post, _dev, _local
Version
python
getsentry__sentry
tests/sentry/mail/test_adapter.py
{ "start": 6738, "end": 7180 }
class ____(BaseMailAdapterTest): def test_default_prefix(self) -> None: assert build_subject_prefix(self.project) == "[Sentry]" def test_project_level_prefix(self) -> None: prefix = "[Example prefix]" ProjectOption.objects.set_value( project=self.project, key="mail:subject_prefix", value=prefix ) assert build_subject_prefix(self.project) == prefix
MailAdapterBuildSubjectPrefixTest
python
huggingface__transformers
src/transformers/models/moonshine/modeling_moonshine.py
{ "start": 21555, "end": 22442 }
class ____(PreTrainedModel): config: MoonshineConfig base_model_prefix = "model" main_input_name = "input_values" input_modalities = "audio" supports_gradient_checkpointing = True _no_split_modules = ["MoonshineEncoderLayer", "MoonshineDecoderLayer"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True # TODO arthur, how do we separate when it cross / self coming from different layer? def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ output_conv1_length = int((input_lengths - 127) / 64 + 1) output_conv2_length = int((output_conv1_length - 7) / 3 + 1) output_conv3_length = int((output_conv2_length - 3) / 2 + 1) return output_conv3_length
MoonshinePreTrainedModel
python
getsentry__sentry
tests/sentry/grouping/seer_similarity/test_get_seer_similar_issues.py
{ "start": 21648, "end": 50364 }
class ____(TestCase): @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_simple(self, mock_incr: MagicMock, mock_distribution: MagicMock) -> None: existing_event = save_new_event({"message": "Dogs are great!"}, self.project) existing_hash = existing_event.get_primary_hash() existing_grouphash = GroupHash.objects.filter( hash=existing_hash, project_id=self.project.id ).first() assert existing_event.group_id existing_event2 = save_new_event({"message": "Adopt, don't shop"}, self.project) existing_hash2 = existing_event2.get_primary_hash() assert existing_event2.group_id new_event, new_variants, new_grouphash, _ = create_new_event(self.project) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_hash, parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_hash2, parent_group_id=existing_event2.group_id, stacktrace_distance=0.02, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): # It picks the first, more similar match assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == ( 0.01, existing_grouphash, ) assert_metrics_call( mock_incr, "get_seer_similar_issues", "match_found", {"is_hybrid": False, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "match_found", {"is_hybrid": False, "training_mode": False}, value=2, ) # Ensure we're not recording things we don't want to be. (The metrics we're checking # should only be recorded for events or parent grouphashes with hybrid fingerprints.) 
incr_metrics_recorded = {call.args[0] for call in mock_incr.mock_calls} distribution_metrics_recorded = {call.args[0] for call in mock_distribution.mock_calls} assert "grouping.similarity.hybrid_fingerprint_match_check" not in incr_metrics_recorded assert ( "grouping.similarity.hybrid_fingerprint_results_checked" not in distribution_metrics_recorded ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_hybrid_fingerprint_match_first( self, mock_incr: MagicMock, mock_distribution: MagicMock ) -> None: existing_event = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) existing_hash = existing_event.get_primary_hash() existing_grouphash = GroupHash.objects.filter( hash=existing_hash, project_id=self.project.id ).first() assert existing_event.group_id existing_event2 = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "charlie"]}, self.project, ) existing_hash2 = existing_event2.get_primary_hash() assert existing_event2.group_id new_event, new_variants, new_grouphash, _ = create_new_event( self.project, fingerprint=["{{ default }}", "maisey"], ) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_hash, parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_hash2, parent_group_id=existing_event2.group_id, stacktrace_distance=0.01, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): # It picks the first result because the fingerprint matches the new event assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == ( 0.01, existing_grouphash, ) # Metric from `_should_use_seer_match_for_grouping` assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "fingerprint_match") # Metrics from `get_seer_similar_issues` assert_metrics_call( 
mock_incr, "get_seer_similar_issues", "match_found", {"is_hybrid": True, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "match_found", {"is_hybrid": True, "training_mode": False}, value=2, ) assert_metrics_call( mock_distribution, "hybrid_fingerprint_results_checked", "match_found", value=1, # It only does one check because it stops once it's found a match ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_hybrid_fingerprint_match_second( self, mock_incr: MagicMock, mock_distribution: MagicMock ) -> None: existing_event = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "charlie"]}, self.project, ) existing_hash = existing_event.get_primary_hash() assert existing_event.group_id existing_event2 = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) existing_hash2 = existing_event2.get_primary_hash() existing_grouphash2 = GroupHash.objects.filter( hash=existing_hash2, project_id=self.project.id ).first() assert existing_event2.group_id new_event, new_variants, new_grouphash, _ = create_new_event( self.project, fingerprint=["{{ default }}", "maisey"], ) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_hash, parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_hash2, parent_group_id=existing_event2.group_id, stacktrace_distance=0.02, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): # It picks the second result even though it's less similar because the fingerprint # matches the new event assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == ( 0.02, existing_grouphash2, ) # Metrics from `_should_use_seer_match_for_grouping` assert_metrics_call(mock_incr, 
"hybrid_fingerprint_match_check", "no_fingerprint_match") assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "fingerprint_match") # Metrics from `get_seer_similar_issues` assert_metrics_call( mock_incr, "get_seer_similar_issues", "match_found", {"is_hybrid": True, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "match_found", {"is_hybrid": True, "training_mode": False}, value=2, ) assert_metrics_call( mock_distribution, "hybrid_fingerprint_results_checked", "match_found", value=2, # It does two checks because the first result isn't a match ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_hybrid_fingerprint_mismatch( self, mock_incr: MagicMock, mock_distribution: MagicMock ) -> None: existing_event = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) assert existing_event.group_id existing_event2 = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "cory"]}, self.project, ) assert existing_event2.group_id new_event, new_variants, new_grouphash, _ = create_new_event( self.project, fingerprint=["{{ default }}", "charlie"], ) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_event.get_primary_hash(), parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_event2.get_primary_hash(), parent_group_id=existing_event2.group_id, stacktrace_distance=0.01, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == (None, None) # Metric from `_should_use_seer_match_for_grouping` assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "no_fingerprint_match") # Metrics from `get_seer_similar_issues` assert_metrics_call( 
mock_incr, "get_seer_similar_issues", "no_matches_usable", {"is_hybrid": True, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "no_matches_usable", {"is_hybrid": True, "training_mode": False}, value=2, ) assert_metrics_call( mock_distribution, "hybrid_fingerprint_results_checked", "no_matches_usable", value=2, ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_hybrid_fingerprint_on_new_event_only( self, mock_incr: MagicMock, mock_distribution: MagicMock ) -> None: existing_event = save_new_event( {"message": "Dogs are great!"}, self.project, ) assert existing_event.group_id existing_event2 = save_new_event( {"message": "Adopt, don't shop"}, self.project, ) assert existing_event2.group_id new_event, new_variants, new_grouphash, _ = create_new_event( self.project, fingerprint=["{{ default }}", "charlie"], ) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_event.get_primary_hash(), parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_event2.get_primary_hash(), parent_group_id=existing_event2.group_id, stacktrace_distance=0.02, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == (None, None) # Metric from `_should_use_seer_match_for_grouping` assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "only_event_hybrid") # Metrics from `get_seer_similar_issues` assert_metrics_call( mock_incr, "get_seer_similar_issues", "no_matches_usable", {"is_hybrid": True, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "no_matches_usable", {"is_hybrid": True, "training_mode": False}, value=2, ) assert_metrics_call( mock_distribution, "hybrid_fingerprint_results_checked", 
"no_matches_usable", value=2, ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_hybrid_fingerprint_on_parent_groups_only( self, mock_incr: MagicMock, mock_distribution: MagicMock ) -> None: existing_event = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) assert existing_event.group_id existing_event2 = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "charlie"]}, self.project, ) assert existing_event2.group_id new_event, new_variants, new_grouphash, _ = create_new_event(self.project) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_event.get_primary_hash(), parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_event2.get_primary_hash(), parent_group_id=existing_event2.group_id, stacktrace_distance=0.02, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == (None, None) # Metric from `_should_use_seer_match_for_grouping` assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "only_parent_hybrid") # Metrics from `get_seer_similar_issues` assert_metrics_call( mock_incr, "get_seer_similar_issues", "no_matches_usable", {"is_hybrid": True, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "no_matches_usable", {"is_hybrid": True, "training_mode": False}, value=2, ) assert_metrics_call( mock_distribution, "hybrid_fingerprint_results_checked", "no_matches_usable", value=2, ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_hybrid_fingerprint_on_first_parent_group_only( self, mock_incr: MagicMock, mock_distribution: MagicMock ) -> None: existing_event = 
save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) assert existing_event.group_id existing_event2 = save_new_event({"message": "Adopt, don't shop"}, self.project) existing_hash2 = existing_event2.get_primary_hash() existing_grouphash2 = GroupHash.objects.filter( hash=existing_hash2, project_id=self.project.id ).first() assert existing_event2.group_id new_event, new_variants, new_grouphash, _ = create_new_event(self.project) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_event.get_primary_hash(), parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_event2.get_primary_hash(), parent_group_id=existing_event2.group_id, stacktrace_distance=0.02, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): # It picks the second result even though it's less similar because it has to find a # match which isn't hybrid, since the new event isn't hybrid assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == ( 0.02, existing_grouphash2, ) # Metrics from `_should_use_seer_match_for_grouping` assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "only_parent_hybrid") assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "non-hybrid") # Metrics from `get_seer_similar_issues` assert_metrics_call( mock_incr, "get_seer_similar_issues", "match_found", {"is_hybrid": True, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "match_found", {"is_hybrid": True, "training_mode": False}, value=2, ) assert_metrics_call( mock_distribution, "hybrid_fingerprint_results_checked", "match_found", value=2 ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_hybrid_fingerprint_no_parent_metadata( self, mock_incr: MagicMock, 
mock_distribution: MagicMock ) -> None: existing_event = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) existing_hash = existing_event.get_primary_hash() existing_grouphash = GroupHash.objects.filter( hash=existing_hash, project_id=self.project.id ).first() assert existing_event.group_id assert existing_grouphash existing_event2 = save_new_event( {"message": "Adopt, don't shop", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) existing_hash2 = existing_event2.get_primary_hash() existing_grouphash2 = GroupHash.objects.filter( hash=existing_hash2, project_id=self.project.id ).first() assert existing_event2.group_id assert existing_grouphash2 # Ensure the grouphash for the first existing has no metadata GroupHashMetadata.objects.filter(grouphash=existing_grouphash).delete() assert existing_grouphash.metadata is None new_event, new_variants, new_grouphash, _ = create_new_event( self.project, fingerprint=["{{ default }}", "maisey"], ) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_hash, parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_hash2, parent_group_id=existing_event2.group_id, stacktrace_distance=0.02, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): # It picks the second result even though it's less similar, and even though the first # result has a matching fingerprint, because it has to find a match whose fingerprint it # can retrieve using metadata assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == ( 0.02, existing_grouphash2, ) # Metrics from `_should_use_seer_match_for_grouping` assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "no_parent_metadata") assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "fingerprint_match") # Metrics from 
`get_seer_similar_issues` assert_metrics_call( mock_incr, "get_seer_similar_issues", "match_found", {"is_hybrid": True, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "match_found", {"is_hybrid": True, "training_mode": False}, value=2, ) assert_metrics_call( mock_distribution, "hybrid_fingerprint_results_checked", "match_found", value=2 ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_hybrid_fingerprint_stops_checking_when_match_found( self, mock_incr: MagicMock, mock_distribution: MagicMock ) -> None: existing_event = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "charlie"]}, self.project, ) existing_hash = existing_event.get_primary_hash() assert existing_event.group_id existing_event2 = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) existing_hash2 = existing_event2.get_primary_hash() existing_grouphash2 = GroupHash.objects.filter( hash=existing_hash2, project_id=self.project.id ).first() assert existing_event2.group_id existing_event3 = save_new_event( { "message": "Cats who think they're dogs are great!", "fingerprint": ["{{ default }}", "piper"], }, self.project, ) existing_hash3 = existing_event3.get_primary_hash() assert existing_event3.group_id new_event, new_variants, new_grouphash, _ = create_new_event( self.project, fingerprint=["{{ default }}", "maisey"], ) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_hash, parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_hash2, parent_group_id=existing_event2.group_id, stacktrace_distance=0.02, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_hash3, parent_group_id=existing_event3.group_id, stacktrace_distance=0.03, should_group=True, ), ] with patch( 
"sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): # It picks the second result even though it's less similar because the fingerprint # matches the new event assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == ( 0.02, existing_grouphash2, ) # Metrics from `_should_use_seer_match_for_grouping` assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "no_fingerprint_match") assert_metrics_call(mock_incr, "hybrid_fingerprint_match_check", "fingerprint_match") # Metrics from `get_seer_similar_issues` assert_metrics_call( mock_incr, "get_seer_similar_issues", "match_found", {"is_hybrid": True, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "match_found", {"is_hybrid": True, "training_mode": False}, value=3, ) # It only does two checks because the second result is a match assert_metrics_call( mock_distribution, "hybrid_fingerprint_results_checked", "match_found", value=2 ) @patch("sentry.grouping.ingest.seer.metrics.distribution") @patch("sentry.grouping.ingest.seer.metrics.incr") def test_non_hybrid_fingerprint_uses_first_non_hybrid_result( self, mock_incr: MagicMock, mock_distribution: MagicMock ) -> None: existing_event = save_new_event({"message": "Dogs are great!"}, self.project) existing_hash = existing_event.get_primary_hash() existing_grouphash = GroupHash.objects.filter( hash=existing_hash, project_id=self.project.id ).first() assert existing_event.group_id existing_event2 = save_new_event( {"message": "Dogs are great!", "fingerprint": ["{{ default }}", "maisey"]}, self.project, ) assert existing_event2.group_id new_event, new_variants, new_grouphash, _ = create_new_event(self.project) seer_result_data = [ SeerSimilarIssueData( parent_hash=existing_event.get_primary_hash(), parent_group_id=existing_event.group_id, stacktrace_distance=0.01, should_group=True, ), SeerSimilarIssueData( parent_hash=existing_event2.get_primary_hash(), 
parent_group_id=existing_event2.group_id, stacktrace_distance=0.02, should_group=True, ), ] with patch( "sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=seer_result_data, ): assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == ( 0.01, existing_grouphash, ) # It doesn't consider this a hybrid fingerprint case because neither the incoming event # nor the chosen parent issue is hybrid assert_metrics_call( mock_incr, "get_seer_similar_issues", "match_found", {"is_hybrid": False, "training_mode": False}, ) assert_metrics_call( mock_distribution, "seer_results_returned", "match_found", {"is_hybrid": False, "training_mode": False}, value=2, ) # Ensure we're not recording things we don't want to be. (The metrics we're checking # should only be recorded for events or parent grouphashes with hybrid fingerprints.) incr_metrics_recorded = {call.args[0] for call in mock_incr.mock_calls} distribution_metrics_recorded = {call.args[0] for call in mock_distribution.mock_calls} assert "grouping.similarity.hybrid_fingerprint_match_check" not in incr_metrics_recorded assert ( "grouping.similarity.hybrid_fingerprint_results_checked" not in distribution_metrics_recorded )
MultipleParentGroupsFoundTest
python
walkccc__LeetCode
solutions/3229. Minimum Operations to Make Array Equal to Target/3229.py
{ "start": 0, "end": 638 }
class ____: # Similar to 1526. Minimum Number of Increments on Subarrays to Form a Target Array def minimumOperations(self, nums: list[int], target: list[int]) -> int: ans = abs(nums[0] - target[0]) for (prevNum, prevTarget), (currNum, currTarget) in ( itertools.pairwise(zip(nums, target)) ): currDiff = currTarget - currNum prevDiff = prevTarget - prevNum if currDiff >= 0 and prevDiff >= 0: ans += max(0, currDiff - prevDiff) elif currDiff <= 0 and prevDiff <= 0: ans += max(0, abs(currDiff) - abs(prevDiff)) else: ans += abs(currDiff) return ans
Solution
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-siliconflow/llama_index/llms/siliconflow/base.py
{ "start": 5894, "end": 19848 }
class ____(FunctionCallingLLM): """ SiliconFlow LLM. Visit https://siliconflow.cn/ to get more information about SiliconFlow. Examples: `pip install llama-index-llms-siliconflow` ```python from llama_index.llms.siliconflow import SiliconFlow llm = SiliconFlow(api_key="YOUR API KEY") response = llm.complete("who are you?") print(response) ``` """ model: str = Field( default="deepseek-ai/DeepSeek-V2.5", description="The name of the model to query.", ) api_key: Optional[str] = Field( default=None, description="The API key to use for the SiliconFlow API.", ) base_url: str = Field( default=DEFAULT_SILICONFLOW_API_URL, description="The base URL for the SiliconFlow API.", ) temperature: float = Field( default=0.7, description="Determines the degree of randomness in the response.", ge=0.0, le=1.0, ) max_tokens: int = Field( default=512, description="The maximum number of tokens to generate.", ) frequency_penalty: float = Field(default=0.5) timeout: float = Field( default=DEFAULT_REQUEST_TIMEOUT, description="The timeout for making http request to ZhipuAI API server", ) stop: Optional[str] = Field( default=None, description="Up to 4 sequences where the API will stop generating further tokens.", ) max_retries: int = Field( default=3, description="The maximum number of API retries.", ge=0, ) _headers: Any = PrivateAttr() def __init__( self, api_key: str, model: str = "deepseek-ai/DeepSeek-V2.5", base_url: str = DEFAULT_SILICONFLOW_API_URL, temperature: float = 0.7, max_tokens: int = 512, frequency_penalty: float = 0.5, timeout: float = DEFAULT_REQUEST_TIMEOUT, stop: Optional[str] = None, max_retries: int = 3, **kwargs: Any, ) -> None: super().__init__( model=model, base_url=base_url, temperature=temperature, max_tokens=max_tokens, frequency_penalty=frequency_penalty, timeout=timeout, stop=stop, max_retries=max_retries, **kwargs, ) self._headers = { "Authorization": f"Bearer {api_key}", "Content-Type": "application/json", } @classmethod def class_name(cls) -> str: return 
"SiliconFlow" @property def metadata(self) -> LLMMetadata: """LLM metadata.""" return LLMMetadata( context_window=DEFAULT_CONTEXT_WINDOW, num_output=DEFAULT_NUM_OUTPUTS, model_name=self.model, is_chat_model=True, is_function_calling_model=is_function_calling_llm(self.model), ) @property def model_kwargs(self) -> Dict[str, Any]: return { "temperature": self.temperature, "max_tokens": self.max_tokens, "frequency_penalty": self.frequency_penalty, "stop": self.stop, } def _convert_to_llm_messages(self, messages: Sequence[ChatMessage]) -> List: return [ { "role": message.role.value, "content": message.content or "", } for message in messages ] def _prepare_chat_with_tools( self, tools: List["BaseTool"], user_msg: Optional[Union[str, ChatMessage]] = None, chat_history: Optional[List[ChatMessage]] = None, verbose: bool = False, allow_parallel_tool_calls: bool = False, tool_required: bool = False, # unsupported by SiliconFlow - https://docs.siliconflow.cn/en/api-reference/chat-completions/chat-completions **kwargs: Any, ) -> Dict[str, Any]: tool_specs = [ tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools ] if isinstance(user_msg, str): user_msg = ChatMessage(role=MessageRole.USER, content=user_msg) messages = chat_history or [] if user_msg: messages.append(user_msg) return { "messages": messages, "tools": tool_specs or None, } def _validate_chat_with_tools_response( self, response: ChatResponse, tools: List["BaseTool"], allow_parallel_tool_calls: bool = False, **kwargs: Any, ) -> ChatResponse: """Validate the response from chat_with_tools.""" if not allow_parallel_tool_calls: force_single_tool_call(response) return response def get_tool_calls_from_response( self, response: Union[ChatResponse, CompletionResponse], error_on_no_tool_call: bool = True, **kwargs: Any, ) -> List[ToolSelection]: """Predict and call the tool.""" if isinstance(response, ChatResponse): tool_calls = response.message.additional_kwargs.get("tool_calls", []) else: tool_calls = 
response.additional_kwargs.get("tool_calls", []) if len(tool_calls) < 1: if error_on_no_tool_call: raise ValueError( f"Expected at least one tool call, but got {len(tool_calls)} " "tool calls." ) return [] tool_selections = [] for tool_call in tool_calls: tool_selections.append( ToolSelection( tool_id=tool_call["id"], tool_name=tool_call["function"]["name"], tool_kwargs=json.loads(tool_call["function"]["arguments"]), ) ) return tool_selections @llm_chat_callback() @llm_retry_decorator def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: messages_dict = self._convert_to_llm_messages(messages) response_format = kwargs.get("response_format", {"type": "text"}) with requests.Session() as session: input_json = { "model": self.model, "messages": messages_dict, "stream": False, "n": 1, "tools": kwargs.get("tools"), "response_format": response_format, **self.model_kwargs, } response = session.post( self.base_url, json=input_json, headers=self._headers, timeout=self.timeout, ) response.raise_for_status() response_json = response.json() message: dict = response_json["choices"][0]["message"] return ChatResponse( message=ChatMessage( content=message["content"], role=message["role"], additional_kwargs={"tool_calls": message.get("tool_calls")}, ), raw=response_json, ) @llm_chat_callback() @llm_retry_decorator async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: messages_dict = self._convert_to_llm_messages(messages) response_format = kwargs.get("response_format", {"type": "text"}) async with aiohttp.ClientSession() as session: input_json = { "model": self.model, "messages": messages_dict, "stream": False, "n": 1, "tools": kwargs.get("tools"), "response_format": response_format, **self.model_kwargs, } async with session.post( self.base_url, json=input_json, headers=self._headers, timeout=self.timeout, ) as response: response_json = await response.json() message: dict = response_json["choices"][0]["message"] 
response.raise_for_status() return ChatResponse( message=ChatMessage( content=message["content"], role=message["role"], additional_kwargs={"tool_calls": message.get("tool_calls")}, ), raw=response_json, ) @llm_chat_callback() @llm_retry_decorator def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: messages_dict = self._convert_to_llm_messages(messages) response_format = kwargs.get("response_format", {"type": "text"}) def gen() -> ChatResponseGen: with requests.Session() as session: input_json = { "model": self.model, "messages": messages_dict, "stream": True, "n": 1, "tools": kwargs.get("tools"), "response_format": response_format, **self.model_kwargs, } response = session.post( self.base_url, json=input_json, headers=self._headers, timeout=self.timeout, ) response.raise_for_status() response_txt = "" response_role = "assistant" for line in response.iter_lines(): line = cast(bytes, line).decode("utf-8") if line.startswith("data:"): if line.strip() == "data: [DONE]": break chunk_json = json.loads(line[5:]) delta: dict = chunk_json["choices"][0]["delta"] response_role = delta.get("role") or response_role response_txt += delta["content"] tool_calls = delta.get("tool_calls") yield ChatResponse( message=ChatMessage( content=response_txt, role=response_role, additional_kwargs={"tool_calls": tool_calls}, ), delta=delta["content"], raw=chunk_json, ) return gen() @llm_chat_callback() @llm_retry_decorator async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: messages_dict = self._convert_to_llm_messages(messages) response_format = kwargs.get("response_format", {"type": "text"}) async def gen() -> ChatResponseAsyncGen: async with aiohttp.ClientSession(trust_env=True) as session: input_json = { "model": self.model, "messages": messages_dict, "stream": True, "n": 1, "tools": kwargs.get("tools"), "response_format": response_format, **self.model_kwargs, } async with session.post( 
self.base_url, json=input_json, headers=self._headers, timeout=self.timeout, ) as response: response.raise_for_status() response_txt = "" response_role = "assistant" async for line in response.content.iter_any(): line = cast(bytes, line).decode("utf-8") chunks = list(filter(None, line.split("data: "))) for chunk in chunks: if chunk.strip() == "[DONE]": break chunk_json = json.loads(chunk) delta: dict = chunk_json["choices"][0]["delta"] response_role = delta.get("role") or response_role response_txt += delta["content"] tool_calls = delta.get("tool_calls") yield ChatResponse( message=ChatMessage( content=response_txt, role=response_role, additional_kwargs={"tool_calls": tool_calls}, ), delta=delta["content"], raw=line, ) return gen() @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: return chat_to_completion_decorator(self.chat)(prompt, **kwargs) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: return await achat_to_completion_decorator(self.achat)(prompt, **kwargs) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: return stream_chat_to_completion_decorator(self.stream_chat)(prompt, **kwargs) @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: return await astream_chat_to_completion_decorator(self.astream_chat)( prompt, **kwargs )
SiliconFlow
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/some_virtual_mv/package.py
{ "start": 217, "end": 791 }
class ____(Package): """Package providing a virtual dependency and with a multivalued variant.""" homepage = "http://www.example.com" url = "http://www.example.com/foo-1.0.tar.gz" version("1.0", md5="0123456789abcdef0123456789abcdef") provides("somevirtual") # This multi valued variant is needed to trigger an optimization # criteria for clingo variant( "libs", default="shared,static", values=("shared", "static"), multi=True, description="Build shared libs, static libs or both", )
SomeVirtualMv
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/auto_suggest.py
{ "start": 3746, "end": 4488 }
class ____(AutoSuggest): """ Give suggestions based on the lines in the history. """ def get_suggestion(self, buffer: Buffer, document: Document) -> Suggestion | None: history = buffer.history # Consider only the last line for the suggestion. text = document.text.rsplit("\n", 1)[-1] # Only create a suggestion when this is not an empty line. if text.strip(): # Find first matching line in history. for string in reversed(list(history.get_strings())): for line in reversed(string.splitlines()): if line.startswith(text): return Suggestion(line[len(text) :]) return None
AutoSuggestFromHistory
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/plan/inputs.py
{ "start": 24722, "end": 26561 }
class ____(IHaveNew): """This step input source models being downstream of another unresolved step, for example indirectly downstream from a step with dynamic output. """ unresolved_step_output_handle: UnresolvedStepOutputHandle # deprecated, preserved for back-compat node_handle: NodeHandle input_name: str def __new__( cls, unresolved_step_output_handle: UnresolvedStepOutputHandle, # deprecated, preserved for back-compat node_handle: Optional[NodeHandle] = None, input_name: Optional[str] = None, ): return super().__new__( cls, unresolved_step_output_handle=unresolved_step_output_handle, # add placeholder values for back-compat node_handle=node_handle or NodeHandle("", None), input_name=input_name or "", ) @property def resolved_by_step_key(self) -> str: return self.unresolved_step_output_handle.resolved_by_step_key @property def resolved_by_output_name(self) -> str: return self.unresolved_step_output_handle.resolved_by_output_name def resolve(self, mapping_key: str) -> FromStepOutput: check.str_param(mapping_key, "mapping_key") return FromStepOutput( step_output_handle=self.unresolved_step_output_handle.resolve(mapping_key), fan_in=False, ) def get_step_output_handle_dep_with_placeholder(self) -> StepOutputHandle: return self.unresolved_step_output_handle.get_step_output_handle_with_placeholder() def required_resource_keys( self, _job_def: JobDefinition, op_handle: NodeHandle, op_input_name: str ) -> set[str]: return set() @whitelist_for_serdes(storage_field_names={"node_handle": "solid_handle"}) @record_custom
FromUnresolvedStepOutput
python
wandb__wandb
wandb/vendor/pygments/lexers/forth.py
{ "start": 432, "end": 7144 }
class ____(RegexLexer): """ Lexer for Forth files. .. versionadded:: 2.2 """ name = 'Forth' aliases = ['forth'] filenames = ['*.frt', '*.fs'] mimetypes = ['application/x-forth'] delimiter = r'\s' delimiter_end = r'(?=[%s])' % delimiter valid_name_chars = r'[^%s]' % delimiter valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ (r'\s+', Text), # All comment types (r'\\.*?\n', Comment.Single), (r'\([\s].*?\)', Comment.Single), # defining words. The next word is a new command name (r'(:|variable|constant|value|buffer:)(\s+)', bygroups(Keyword.Namespace, Text), 'worddef'), # strings are rather simple (r'([.sc]")(\s+?)', bygroups(String, Text), 'stringdef'), # keywords from the various wordsets # *** Wordset BLOCK (r'(blk|block|buffer|evaluate|flush|load|save-buffers|update|' # *** Wordset BLOCK-EXT r'empty-buffers|list|refill|scr|thru|' # *** Wordset CORE r'\#s|\*\/mod|\+loop|\/mod|0<|0=|1\+|1-|2!|' r'2\*|2\/|2@|2drop|2dup|2over|2swap|>body|' r'>in|>number|>r|\?dup|abort|abort\"|abs|' r'accept|align|aligned|allot|and|base|begin|' r'bl|c!|c,|c@|cell\+|cells|char|char\+|' r'chars|constant|count|cr|create|decimal|' r'depth|do|does>|drop|dup|else|emit|environment\?|' r'evaluate|execute|exit|fill|find|fm\/mod|' r'here|hold|i|if|immediate|invert|j|key|' r'leave|literal|loop|lshift|m\*|max|min|' r'mod|move|negate|or|over|postpone|quit|' r'r>|r@|recurse|repeat|rot|rshift|s\"|s>d|' r'sign|sm\/rem|source|space|spaces|state|swap|' r'then|type|u\.|u\<|um\*|um\/mod|unloop|until|' r'variable|while|word|xor|\[char\]|\[\'\]|' r'@|!|\#|<\#|\#>|:|;|\+|-|\*|\/|,|<|>|\|1\+|1-|\.|' # *** Wordset CORE-EXT r'\.r|0<>|' r'0>|2>r|2r>|2r@|:noname|\?do|again|c\"|' r'case|compile,|endcase|endof|erase|false|' r'hex|marker|nip|of|pad|parse|pick|refill|' r'restore-input|roll|save-input|source-id|to|' r'true|tuck|u\.r|u>|unused|value|within|' r'\[compile\]|' # *** Wordset CORE-EXT-obsolescent r'\#tib|convert|expect|query|span|' r'tib|' # 
*** Wordset DOUBLE r'2constant|2literal|2variable|d\+|d-|' r'd\.|d\.r|d0<|d0=|d2\*|d2\/|d<|d=|d>s|' r'dabs|dmax|dmin|dnegate|m\*\/|m\+|' # *** Wordset DOUBLE-EXT r'2rot|du<|' # *** Wordset EXCEPTION r'catch|throw|' # *** Wordset EXCEPTION-EXT r'abort|abort\"|' # *** Wordset FACILITY r'at-xy|key\?|page|' # *** Wordset FACILITY-EXT r'ekey|ekey>char|ekey\?|emit\?|ms|time&date|' # *** Wordset FILE r'BIN|CLOSE-FILE|CREATE-FILE|DELETE-FILE|FILE-POSITION|' r'FILE-SIZE|INCLUDE-FILE|INCLUDED|OPEN-FILE|R\/O|' r'R\/W|READ-FILE|READ-LINE|REPOSITION-FILE|RESIZE-FILE|' r'S\"|SOURCE-ID|W/O|WRITE-FILE|WRITE-LINE|' # *** Wordset FILE-EXT r'FILE-STATUS|FLUSH-FILE|REFILL|RENAME-FILE|' # *** Wordset FLOAT r'>float|d>f|' r'f!|f\*|f\+|f-|f\/|f0<|f0=|f<|f>d|f@|' r'falign|faligned|fconstant|fdepth|fdrop|fdup|' r'fliteral|float\+|floats|floor|fmax|fmin|' r'fnegate|fover|frot|fround|fswap|fvariable|' r'represent|' # *** Wordset FLOAT-EXT r'df!|df@|dfalign|dfaligned|dfloat\+|' r'dfloats|f\*\*|f\.|fabs|facos|facosh|falog|' r'fasin|fasinh|fatan|fatan2|fatanh|fcos|fcosh|' r'fe\.|fexp|fexpm1|fln|flnp1|flog|fs\.|fsin|' r'fsincos|fsinh|fsqrt|ftan|ftanh|f~|precision|' r'set-precision|sf!|sf@|sfalign|sfaligned|sfloat\+|' r'sfloats|' # *** Wordset LOCAL r'\(local\)|to|' # *** Wordset LOCAL-EXT r'locals\||' # *** Wordset MEMORY r'allocate|free|resize|' # *** Wordset SEARCH r'definitions|find|forth-wordlist|get-current|' r'get-order|search-wordlist|set-current|set-order|' r'wordlist|' # *** Wordset SEARCH-EXT r'also|forth|only|order|previous|' # *** Wordset STRING r'-trailing|\/string|blank|cmove|cmove>|compare|' r'search|sliteral|' # *** Wordset TOOLS r'.s|dump|see|words|' # *** Wordset TOOLS-EXT r';code|' r'ahead|assembler|bye|code|cs-pick|cs-roll|' r'editor|state|\[else\]|\[if\]|\[then\]|' # *** Wordset TOOLS-EXT-obsolescent r'forget|' # Forth 2012 r'defer|defer@|defer!|action-of|begin-structure|field:|buffer:|' r'parse-name|buffer:|traverse-wordlist|n>r|nr>|2value|fvalue|' 
r'name>interpret|name>compile|name>string|' r'cfield:|end-structure)'+delimiter, Keyword), # Numbers (r'(\$[0-9A-F]+)', Number.Hex), (r'(\#|%|&|\-|\+)?[0-9]+', Number.Integer), (r'(\#|%|&|\-|\+)?[0-9.]+', Keyword.Type), # amforth specific (r'(@i|!i|@e|!e|pause|noop|turnkey|sleep|' r'itype|icompare|sp@|sp!|rp@|rp!|up@|up!|' r'>a|a>|a@|a!|a@+|a@-|>b|b>|b@|b!|b@+|b@-|' r'find-name|1ms|' r'sp0|rp0|\(evaluate\)|int-trap|int!)' + delimiter, Name.Constant), # a proposal (r'(do-recognizer|r:fail|recognizer:|get-recognizers|' r'set-recognizers|r:float|r>comp|r>int|r>post|' r'r:name|r:word|r:dnum|r:num|recognizer|forth-recognizer|' r'rec:num|rec:float|rec:word)' + delimiter, Name.Decorator), # defining words. The next word is a new command name (r'(Evalue|Rvalue|Uvalue|Edefer|Rdefer|Udefer)(\s+)', bygroups(Keyword.Namespace, Text), 'worddef'), (valid_name, Name.Function), # Anything else is executed ], 'worddef': [ (r'\S+', Name.Class, '#pop'), ], 'stringdef': [ (r'[^"]+', String, '#pop'), ], }
ForthLexer
python
scipy__scipy
scipy/signal/tests/test_signaltools.py
{ "start": 190377, "end": 192850 }
class ____: def test_basic(self, xp): detrended = detrend(xp.asarray([1, 2, 3])) detrended_exact = xp.asarray([0, 0, 0]) assert_array_almost_equal(detrended, detrended_exact) @skip_xp_backends("jax.numpy", reason="overwrite_data not implemented") def test_copy(self, xp): x = xp.asarray([1, 1.2, 1.5, 1.6, 2.4]) copy_array = detrend(x, overwrite_data=False) inplace = detrend(x, overwrite_data=True) assert_array_almost_equal(copy_array, inplace) @pytest.mark.parametrize('kind', ['linear', 'constant']) @pytest.mark.parametrize('axis', [0, 1, 2]) def test_axis(self, axis, kind, xp): data = xp.reshape(xp.arange(5*6*7), (5, 6, 7)) detrended = detrend(data, type=kind, axis=axis) assert detrended.shape == data.shape def test_bp(self, xp): data = [0, 1, 2] + [5, 0, -5, -10] data = xp.asarray(data) detrended = detrend(data, type='linear', bp=3) xp_assert_close(detrended, xp.zeros_like(detrended), atol=1e-14) # repeat with ndim > 1 and axis data = xp.asarray(data)[None, :, None] detrended = detrend(data, type="linear", bp=3, axis=1) xp_assert_close(detrended, xp.zeros_like(detrended), atol=1e-14) # breakpoint index > shape[axis]: raises with assert_raises(ValueError): detrend(data, type="linear", bp=3) @pytest.mark.parametrize('bp', [np.array([0, 2]), [0, 2]]) def test_detrend_array_bp(self, bp, xp): # regression test for https://github.com/scipy/scipy/issues/18675 rng = np.random.RandomState(12345) x = rng.rand(10) x = xp.asarray(x, dtype=xp_default_dtype(xp)) if isinstance(bp, np.ndarray) and not is_jax(xp): # JAX expects a static array for bp, so don't call xp.asarray # for JAX. 
bp = xp.asarray(bp) else: if not (is_numpy(xp) or is_jax(xp)): pytest.skip("list bp is currently numpy and jax only") res = detrend(x, bp=bp) res_scipy_191 = xp.asarray([-4.44089210e-16, -2.22044605e-16, -1.11128506e-01, -1.69470553e-01, 1.14710683e-01, 6.35468419e-02, 3.53533144e-01, -3.67877935e-02, -2.00417675e-02, -1.94362049e-01]) atol = 3e-7 if xp_default_dtype(xp) == xp.float32 else 1e-14 xp_assert_close(res, res_scipy_191, atol=atol) @make_xp_test_case(unique_roots)
TestDetrend
python
doocs__leetcode
solution/0400-0499/0461.Hamming Distance/Solution.py
{ "start": 0, "end": 105 }
class ____: def hammingDistance(self, x: int, y: int) -> int: return (x ^ y).bit_count()
Solution
python
skorch-dev__skorch
skorch/tests/test_probabilistic.py
{ "start": 4024, "end": 4684 }
class ____(gpytorch.likelihoods.BernoulliLikelihood): """This class only exists to add a param to BernoulliLikelihood BernoulliLikelihood used to have parameters before gpytorch v1.10, but now it does not have any parameters anymore. This is not an issue per se, but there are a few things we cannot test anymore, e.g. that parameters are passed to the likelihood correctly when using grid search. Therefore, create a custom class with a (pointless) parameter. """ def __init__(self, *args, some_parameter=1, **kwargs): self.some_parameter = some_parameter super().__init__(*args, **kwargs)
MyBernoulliLikelihood
python
numpy__numpy
tools/swig/test/testVector.py
{ "start": 13018, "end": 13291 }
class ____(VectorTestCase): def __init__(self, methodName="runTest"): VectorTestCase.__init__(self, methodName) self.typeStr = "ulongLong" self.typeCode = "Q" ######################################################################
ulongLongTestCase
python
tensorflow__tensorflow
tensorflow/python/ops/lookup_ops.py
{ "start": 34730, "end": 35625 }
class ____(HasherSpec): """A structure to specify a key of the strong keyed hash spec. The strong hash requires a `key`, which is a list of 2 unsigned integer numbers. These should be non-zero; random numbers generated from random.org would be a fine choice. Fields: key: The key to be used by the keyed hashing function. """ __slots__ = () def __new__(cls, key): if len(key) != 2: raise ValueError(f"`key` must have size 2, received {len(key)}") if not isinstance(key[0], compat_util.integral_types) or not isinstance( key[1], compat_util.integral_types): raise TypeError("Invalid key %s. Must be unsigned integer values." % key) return super(cls, StrongHashSpec).__new__(cls, "stronghash", key) def _as_string(tensor): if dtypes.string == tensor.dtype.base_dtype: return tensor return string_ops.as_string(tensor)
StrongHashSpec
python
huggingface__transformers
src/transformers/models/git/modeling_git.py
{ "start": 20614, "end": 22010 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.siglip.modeling_siglip.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
GitVisionMLP
python
pytorch__pytorch
test/profiler/test_profiler.py
{ "start": 95688, "end": 95936 }
class ____(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(10, 5) self.fc2 = nn.Linear(5, 2) def forward(self, x): return self.fc2(self.fc1(x)) @dataclass(frozen=True)
SimpleNet
python
spack__spack
lib/spack/spack/error.py
{ "start": 4879, "end": 5103 }
class ____(PackageError): """Raised when someone tries to build a URL for a package with no URLs.""" def __init__(self, cls): super().__init__("Package %s has no version with a URL." % cls.__name__)
NoURLError
python
PrefectHQ__prefect
tests/blocks/test_core.py
{ "start": 87707, "end": 87751 }
class ____(Block): base: int = 0
BaseBlock
python
allegroai__clearml
clearml/backend_config/defs.py
{ "start": 1432, "end": 2136 }
class ____(object): """Supported environment names""" default = "default" demo = "demo" local = "local" CONFIG_FILE_EXTENSION = ".conf" def is_config_file(path: str) -> bool: return Path(path).suffix == CONFIG_FILE_EXTENSION def get_active_config_file() -> Optional[str]: f = LOCAL_CONFIG_FILE_OVERRIDE_VAR.get() if f and exists(expanduser(expandvars(f))): return f for f in LOCAL_CONFIG_FILES: if exists(expanduser(expandvars(f))): return f return None def get_config_file() -> str: f = LOCAL_CONFIG_FILE_OVERRIDE_VAR.get() f = f if f else LOCAL_CONFIG_FILES[-1] return expanduser(expandvars(f)) if f else None
Environment
python
EpistasisLab__tpot
tpot/search_spaces/nodes/estimator_node.py
{ "start": 1773, "end": 4629 }
class ____(SklearnIndividual): """ Note that ConfigurationSpace does not support None as a parameter. Instead, use the special string "<NONE>". TPOT will automatically replace instances of this string with the Python None. Parameters ---------- method : type The class of the estimator to be used space : ConfigurationSpace|dict The hyperparameter space to be used. If a dict is passed, hyperparameters are fixed and not learned. """ def __init__(self, method: type, space: ConfigurationSpace|dict, #TODO If a dict is passed, hyperparameters are fixed and not learned. Is this confusing? Should we make a second node type? hyperparameter_parser: callable = None, rng=None) -> None: super().__init__() self.method = method self.space = space if hyperparameter_parser is None: self.hyperparameter_parser = default_hyperparameter_parser else: self.hyperparameter_parser = hyperparameter_parser if isinstance(space, dict): self.hyperparameters = space else: rng = np.random.default_rng(rng) self.space.seed(rng.integers(0, 2**32)) self.hyperparameters = dict(self.space.sample_configuration()) def mutate(self, rng=None): if isinstance(self.space, dict): return False rng = np.random.default_rng(rng) self.space.seed(rng.integers(0, 2**32)) self.hyperparameters = dict(self.space.sample_configuration()) return True def crossover(self, other, rng=None): if isinstance(self.space, dict): return False rng = np.random.default_rng(rng) if self.method != other.method: return False #loop through hyperparameters, randomly swap items in self.hyperparameters with items in other.hyperparameters for hyperparameter in self.space: if rng.choice([True, False]): if hyperparameter in other.hyperparameters: self.hyperparameters[hyperparameter] = other.hyperparameters[hyperparameter] return True @final #this method should not be overridden, instead override hyperparameter_parser def export_pipeline(self, **kwargs): return self.method(**self.hyperparameter_parser(self.hyperparameters)) def unique_id(self): 
#return a dictionary of the method and the hyperparameters method_str = self.method.__name__ params = list(self.hyperparameters.keys()) params = sorted(params) id_str = f"{method_str}({', '.join([f'{param}={self.hyperparameters[param]}' for param in params])})" return id_str
EstimatorNodeIndividual
python
scipy__scipy
scipy/io/_harwell_boeing/hb.py
{ "start": 14964, "end": 19395 }
class ____: def __init__(self, file, hb_info=None): """Create a HBFile instance. Parameters ---------- file : file-object StringIO work as well hb_info : HBInfo, optional Should be given as an argument for writing, in which case the file should be writable. """ self._fid = file if hb_info is None: self._hb_info = HBInfo.from_file(file) else: #raise OSError("file %s is not writable, and hb_info " # "was given." % file) self._hb_info = hb_info @property def title(self): return self._hb_info.title @property def key(self): return self._hb_info.key @property def type(self): return self._hb_info.mxtype.value_type @property def structure(self): return self._hb_info.mxtype.structure @property def storage(self): return self._hb_info.mxtype.storage def read_matrix(self): return _read_hb_data(self._fid, self._hb_info) def write_matrix(self, m): return _write_data(m, self._fid, self._hb_info) def hb_read(path_or_open_file, *, spmatrix=True): """Read HB-format file. Parameters ---------- path_or_open_file : path-like or file-like If a file-like object, it is used as-is. Otherwise, it is opened before reading. spmatrix : bool, optional (default: True) If ``True``, return sparse matrix. Otherwise return sparse array. Returns ------- data : csc_array or csc_matrix The data read from the HB file as a sparse array. Notes ----- At the moment not the full Harwell-Boeing format is supported. 
Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format Examples -------- We can read and write a harwell-boeing format file: >>> from scipy.io import hb_read, hb_write >>> from scipy.sparse import csr_array, eye >>> data = csr_array(eye(3)) # create a sparse array >>> hb_write("data.hb", data) # write a hb file >>> print(hb_read("data.hb", spmatrix=False)) # read a hb file <Compressed Sparse Column sparse array of dtype 'float64' with 3 stored elements and shape (3, 3)> Coords Values (0, 0) 1.0 (1, 1) 1.0 (2, 2) 1.0 """ def _get_matrix(fid): hb = HBFile(fid) return hb.read_matrix() if hasattr(path_or_open_file, 'read'): data = _get_matrix(path_or_open_file) else: with open(path_or_open_file) as f: data = _get_matrix(f) if spmatrix: return csc_matrix(data) return data def hb_write(path_or_open_file, m, hb_info=None): """Write HB-format file. Parameters ---------- path_or_open_file : path-like or file-like If a file-like object, it is used as-is. Otherwise, it is opened before writing. m : sparse array or matrix the sparse array to write hb_info : HBInfo contains the meta-data for write Returns ------- None Notes ----- At the moment not the full Harwell-Boeing format is supported. 
Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format Examples -------- We can read and write a harwell-boeing format file: >>> from scipy.io import hb_read, hb_write >>> from scipy.sparse import csr_array, eye >>> data = csr_array(eye(3)) # create a sparse array >>> hb_write("data.hb", data) # write a hb file >>> print(hb_read("data.hb", spmatrix=False)) # read a hb file <Compressed Sparse Column sparse array of dtype 'float64' with 3 stored elements and shape (3, 3)> Coords Values (0, 0) 1.0 (1, 1) 1.0 (2, 2) 1.0 """ m = m.tocsc(copy=False) if hb_info is None: hb_info = HBInfo.from_data(m) def _set_matrix(fid): hb = HBFile(fid, hb_info) return hb.write_matrix(m) if hasattr(path_or_open_file, 'write'): return _set_matrix(path_or_open_file) else: with open(path_or_open_file, 'w') as f: return _set_matrix(f)
HBFile
python
chroma-core__chroma
chromadb/api/collection_configuration.py
{ "start": 6798, "end": 7934 }
class ____(TypedDict, total=False): search_nprobe: int write_nprobe: int space: Space ef_construction: int ef_search: int max_neighbors: int reassign_neighbor_count: int split_threshold: int merge_threshold: int def json_to_create_spann_configuration( json_map: Dict[str, Any] ) -> CreateSpannConfiguration: config: CreateSpannConfiguration = {} if "search_nprobe" in json_map: config["search_nprobe"] = json_map["search_nprobe"] if "write_nprobe" in json_map: config["write_nprobe"] = json_map["write_nprobe"] if "space" in json_map: space_value = json_map["space"] if space_value in get_args(Space): config["space"] = space_value else: raise ValueError(f"not a valid space: {space_value}") if "ef_construction" in json_map: config["ef_construction"] = json_map["ef_construction"] if "ef_search" in json_map: config["ef_search"] = json_map["ef_search"] if "max_neighbors" in json_map: config["max_neighbors"] = json_map["max_neighbors"] return config
CreateSpannConfiguration
python
airbytehq__airbyte
airbyte-integrations/connectors/source-salesforce/unit_tests/salesforce_describe_response_builder.py
{ "start": 153, "end": 559 }
class ____: def __init__(self) -> None: self._fields = [] def field(self, name: str, _type: Optional[str] = None) -> "SalesforceDescribeResponseBuilder": self._fields.append({"name": name, "type": _type if _type else "string"}) return self def build(self) -> HttpResponse: return HttpResponse(json.dumps({"fields": self._fields}))
SalesforceDescribeResponseBuilder
python
pypa__pip
src/pip/_internal/resolution/resolvelib/candidates.py
{ "start": 14324, "end": 19107 }
class ____(Candidate): """A candidate that has 'extras', indicating additional dependencies. Requirements can be for a project with dependencies, something like foo[extra]. The extras don't affect the project/version being installed directly, but indicate that we need additional dependencies. We model that by having an artificial ExtrasCandidate that wraps the "base" candidate. The ExtrasCandidate differs from the base in the following ways: 1. It has a unique name, of the form foo[extra]. This causes the resolver to treat it as a separate node in the dependency graph. 2. When we're getting the candidate's dependencies, a) We specify that we want the extra dependencies as well. b) We add a dependency on the base candidate. See below for why this is needed. 3. We return None for the underlying InstallRequirement, as the base candidate will provide it, and we don't want to end up with duplicates. The dependency on the base candidate is needed so that the resolver can't decide that it should recommend foo[extra1] version 1.0 and foo[extra2] version 2.0. Having those candidates depend on foo=1.0 and foo=2.0 respectively forces the resolver to recognise that this is a conflict. """ def __init__( self, base: BaseCandidate, extras: frozenset[str], *, comes_from: InstallRequirement | None = None, ) -> None: """ :param comes_from: the InstallRequirement that led to this candidate if it differs from the base's InstallRequirement. This will often be the case in the sense that this candidate's requirement has the extras while the base's does not. Unlike the InstallRequirement backed candidates, this requirement is used solely for reporting purposes, it does not do any leg work. 
""" self.base = base self.extras = frozenset(canonicalize_name(e) for e in extras) self._comes_from = comes_from if comes_from is not None else self.base._ireq def __str__(self) -> str: name, rest = str(self.base).split(" ", 1) return "{}[{}] {}".format(name, ",".join(self.extras), rest) def __repr__(self) -> str: return f"{self.__class__.__name__}(base={self.base!r}, extras={self.extras!r})" def __hash__(self) -> int: return hash((self.base, self.extras)) def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): return self.base == other.base and self.extras == other.extras return False @property def project_name(self) -> NormalizedName: return self.base.project_name @property def name(self) -> str: """The normalised name of the project the candidate refers to""" return format_name(self.base.project_name, self.extras) @property def version(self) -> Version: return self.base.version def format_for_error(self) -> str: return "{} [{}]".format( self.base.format_for_error(), ", ".join(sorted(self.extras)) ) @property def is_installed(self) -> bool: return self.base.is_installed @property def is_editable(self) -> bool: return self.base.is_editable @property def source_link(self) -> Link | None: return self.base.source_link def iter_dependencies(self, with_requires: bool) -> Iterable[Requirement | None]: factory = self.base._factory # Add a dependency on the exact base # (See note 2b in the class docstring) yield factory.make_requirement_from_candidate(self.base) if not with_requires: return # The user may have specified extras that the candidate doesn't # support. We ignore any unsupported extras here. 
valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras()) invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras()) for extra in sorted(invalid_extras): logger.warning( "%s %s does not provide the extra '%s'", self.base.name, self.version, extra, ) for r in self.base.dist.iter_dependencies(valid_extras): yield from factory.make_requirements_from_spec( str(r), self._comes_from, valid_extras, ) def get_install_requirement(self) -> InstallRequirement | None: # We don't return anything here, because we always # depend on the base candidate, and we'll get the # install requirement from that. return None
ExtrasCandidate
python
apache__airflow
task-sdk/tests/task_sdk/bases/test_sensor.py
{ "start": 1776, "end": 2034 }
class ____(BaseSensorOperator): def __init__(self, return_value: bool | None = False, **kwargs): super().__init__(**kwargs) self.return_value = return_value def poke(self, context: Context): return self.return_value
DummySensor
python
bokeh__bokeh
src/bokeh/models/filters.py
{ "start": 3890, "end": 4148 }
class ____(CompositeFilter): """ Computes union of indices resulting from other filters. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs)
UnionFilter
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/plan/inputs.py
{ "start": 2999, "end": 6779 }
class ____(StepInputSource): """Load input value from an asset.""" # deprecated, preserved for back-compat node_handle: NodeHandle = NodeHandle("", None) input_name: str = "" def load_input_object( self, step_context: "StepExecutionContext", input_def: InputDefinition, ) -> Iterator[object]: from dagster._core.events import DagsterEvent from dagster._core.execution.context.output import OutputContext asset_layer = step_context.job_def.asset_layer input_asset_key = input_def.hardcoded_asset_key or asset_layer.get_asset_key_for_node_input( step_context.node_handle, input_name=input_def.name ) assert input_asset_key is not None input_manager_key = ( input_def.input_manager_key if input_def.input_manager_key else asset_layer.get(input_asset_key).io_manager_key ) op_config = step_context.resolved_run_config.ops.get(str(step_context.node_handle)) config_data = op_config.inputs.get(input_def.name) if op_config else None loader = getattr(step_context.resources, input_manager_key) resources = build_resources_for_manager(input_manager_key, step_context) resource_config = step_context.resolved_run_config.resources[input_manager_key].config load_input_context = step_context.for_input_manager( input_def.name, config_data, definition_metadata=input_def.metadata, dagster_type=input_def.dagster_type, resource_config=resource_config, resources=resources, artificial_output_context=OutputContext( resources=resources, asset_key=input_asset_key, name=input_asset_key.path[-1], step_key="none", definition_metadata=asset_layer.get(input_asset_key).metadata, resource_config=resource_config, log_manager=step_context.log, step_context=step_context, ), ) yield from _load_input_with_input_manager(loader, load_input_context) metadata = { **load_input_context.definition_metadata, **load_input_context.consume_logged_metadata(), } yield DagsterEvent.loaded_input( step_context, input_name=input_def.name, manager_key=input_manager_key, metadata=metadata, ) def required_resource_keys( self, job_def: 
JobDefinition, op_handle: NodeHandle, op_input_name: str ) -> set[str]: input_asset_key = job_def.asset_layer.get_asset_key_for_node_input(op_handle, op_input_name) if input_asset_key is None: check.failed( f"Must have an asset key associated with input {op_input_name} to load it" " using FromSourceAsset", ) input_def = job_def.get_node(op_handle).input_def_named(op_input_name) if input_def.input_manager_key is not None: input_manager_key = input_def.input_manager_key else: input_manager_key = ( job_def.asset_layer.get(input_asset_key).io_manager_key if job_def.asset_layer.has(input_asset_key) else DEFAULT_IO_MANAGER_KEY ) if input_manager_key is None: check.failed( f"Must have an io_manager associated with asset {input_asset_key} to load it using" " FromSourceAsset" ) return {input_manager_key} @whitelist_for_serdes( storage_name="FromRootInputManager", storage_field_names={"node_handle": "solid_handle"} ) @record
FromLoadableAsset
python
apache__airflow
providers/google/tests/unit/google/cloud/hooks/vertex_ai/test_batch_prediction_job.py
{ "start": 10643, "end": 15829 }
class ____: def setup_method(self): with mock.patch( BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_default_project_id ): self.hook = BatchPredictionJobAsyncHook(gcp_conn_id=TEST_GCP_CONN_ID) @pytest.mark.asyncio @mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobAsyncHook.get_job_service_client")) async def test_get_batch_prediction_job(self, mock_get_job_service_client): mock_client = mock.MagicMock() mock_get_job_service_client.side_effect = mock.AsyncMock(return_value=mock_client) mock_job_name = mock_client.batch_prediction_job_path.return_value mock_job = mock.MagicMock() mock_async_get_batch_prediction_job = mock.AsyncMock(return_value=mock_job) mock_client.get_batch_prediction_job.side_effect = mock_async_get_batch_prediction_job result = await self.hook.get_batch_prediction_job( project_id=TEST_PROJECT_ID, location=TEST_REGION, job_id=TEST_BATCH_PREDICTION_JOB_ID, retry=DEFAULT, timeout=None, metadata=(), ) mock_get_job_service_client.assert_called_once_with(region=TEST_REGION) mock_client.batch_prediction_job_path.assert_called_once_with( TEST_PROJECT_ID, TEST_REGION, TEST_BATCH_PREDICTION_JOB_ID ) mock_async_get_batch_prediction_job.assert_awaited_once_with( request={"name": mock_job_name}, retry=DEFAULT, timeout=None, metadata=(), ) assert result == mock_job @pytest.mark.parametrize( "state", [ JobState.JOB_STATE_CANCELLED, JobState.JOB_STATE_FAILED, JobState.JOB_STATE_PAUSED, JobState.JOB_STATE_SUCCEEDED, ], ) @pytest.mark.asyncio @mock.patch(BATCH_PREDICTION_JOB_STRING.format("asyncio.sleep")) async def test_wait_hyperparameter_tuning_job(self, mock_sleep, state): mock_job = mock.MagicMock(state=state) mock_async_get_batch_prediction_job = mock.AsyncMock(return_value=mock_job) mock_get_batch_prediction_job = mock.MagicMock(side_effect=mock_async_get_batch_prediction_job) await_kwargs = dict( project_id=TEST_PROJECT_ID, location=TEST_REGION, job_id=TEST_BATCH_PREDICTION_JOB_ID, retry=DEFAULT, timeout=None, 
metadata=(), ) with mock.patch.object(self.hook, "get_batch_prediction_job", mock_get_batch_prediction_job): result = await self.hook.wait_batch_prediction_job(**await_kwargs) mock_async_get_batch_prediction_job.assert_awaited_once_with(**await_kwargs) mock_sleep.assert_not_awaited() assert result == mock_job @pytest.mark.parametrize( "state", [ JobState.JOB_STATE_UNSPECIFIED, JobState.JOB_STATE_QUEUED, JobState.JOB_STATE_PENDING, JobState.JOB_STATE_RUNNING, JobState.JOB_STATE_CANCELLING, JobState.JOB_STATE_EXPIRED, JobState.JOB_STATE_UPDATING, JobState.JOB_STATE_PARTIALLY_SUCCEEDED, ], ) @pytest.mark.asyncio @mock.patch(BATCH_PREDICTION_JOB_STRING.format("asyncio.sleep")) async def test_wait_batch_prediction_job_waited(self, mock_sleep, state): mock_job_incomplete = mock.MagicMock(state=state) mock_job_complete = mock.MagicMock(state=JobState.JOB_STATE_SUCCEEDED) mock_async_get_batch_prediction_job = mock.AsyncMock( side_effect=[mock_job_incomplete, mock_job_complete] ) mock_get_batch_prediction_job = mock.MagicMock(side_effect=mock_async_get_batch_prediction_job) await_kwargs = dict( project_id=TEST_PROJECT_ID, location=TEST_REGION, job_id=TEST_BATCH_PREDICTION_JOB_ID, retry=DEFAULT, timeout=None, metadata=(), ) with mock.patch.object(self.hook, "get_batch_prediction_job", mock_get_batch_prediction_job): result = await self.hook.wait_batch_prediction_job(**await_kwargs) mock_async_get_batch_prediction_job.assert_has_awaits( [ mock.call(**await_kwargs), mock.call(**await_kwargs), ] ) mock_sleep.assert_awaited_once() assert result == mock_job_complete @pytest.mark.asyncio async def test_wait_batch_prediction_job_exception(self): mock_get_batch_prediction_job = mock.MagicMock(side_effect=Exception) with mock.patch.object(self.hook, "get_batch_prediction_job", mock_get_batch_prediction_job): with pytest.raises(AirflowException): await self.hook.wait_batch_prediction_job( project_id=TEST_PROJECT_ID, location=TEST_REGION, job_id=TEST_BATCH_PREDICTION_JOB_ID, 
retry=DEFAULT, timeout=None, metadata=(), )
TestBatchPredictionJobAsyncHook
python
ray-project__ray
rllib/utils/actor_manager.py
{ "start": 2168, "end": 4674 }
class ____:
    """Represents a list of results from calls to a set of actors.

    CallResults provides convenient APIs to iterate over the results
    while skipping errors, etc.

    .. testcode::
        :skipif: True

        manager = FaultTolerantActorManager(
            actors,
            max_remote_requests_in_flight_per_actor=2,
        )
        results = manager.foreach_actor(lambda w: w.call())

        # Iterate over all results ignoring errors.
        for result in results.ignore_errors():
            print(result.get())
    """

    class _Iterator:
        """An iterator over the results of a remote call.

        Consumes its backing list from the front as it iterates.
        """

        def __init__(self, call_results: List[CallResult]):
            # The list is consumed (popped) during iteration, so callers must
            # pass a copy if they still need the original.
            self._call_results = call_results

        def __iter__(self) -> Iterator[CallResult]:
            return self

        def __next__(self) -> CallResult:
            if not self._call_results:
                raise StopIteration
            # NOTE(review): pop(0) is O(n) per step; fine for the small result
            # lists expected here, but a deque would be O(1) — confirm sizes.
            return self._call_results.pop(0)

    def __init__(self):
        # Accumulated results, one CallResult per finished remote call.
        self.result_or_errors: List[CallResult] = []

    def add_result(self, actor_id: int, result_or_error: ResultOrError, tag: str):
        """Add index of a remote actor plus the call result to the list.

        Args:
            actor_id: ID of the remote actor.
            result_or_error: The result or error from the call.
            tag: A description to identify the call.
        """
        self.result_or_errors.append(CallResult(actor_id, result_or_error, tag))

    def __iter__(self) -> Iterator[ResultOrError]:
        """Return an iterator over the results."""
        # Shallow copy the list so iteration does not consume the stored
        # results (the _Iterator pops from the list it is given).
        return self._Iterator(copy.copy(self.result_or_errors))

    def __len__(self) -> int:
        """Return the number of recorded call results (including errors)."""
        return len(self.result_or_errors)

    def ignore_errors(self) -> Iterator[ResultOrError]:
        """Return an iterator over the results, skipping all errors."""
        # `r.ok` marks a successful call; errors of any kind are dropped.
        return self._Iterator([r for r in self.result_or_errors if r.ok])

    def ignore_ray_errors(self) -> Iterator[ResultOrError]:
        """Return an iterator over the results, skipping only Ray errors.

        Similar to ignore_errors, but only skips Errors raised because of
        remote actor problems (often get restored automatically).
        This is useful for callers that want to handle application errors
        differently from Ray errors.
        """
        # NOTE(review): assumes ResultOrError.get() returns the stored error
        # object (rather than re-raising it) for failed calls — confirm.
        return self._Iterator(
            [r for r in self.result_or_errors if not isinstance(r.get(), RayError)]
        )


@DeveloperAPI
RemoteCallResults
python
walkccc__LeetCode
solutions/1186. Maximum Subarray Sum with One Deletion/1186.py
{ "start": 0, "end": 507 }
class ____: # Similar to 53. Maximum Subarray def maximumSum(self, arr: list[int]) -> int: # dp[0][i] := the maximum sum subarray ending in i (no deletion) # dp[1][i] := the maximum sum subarray ending in i (at most 1 deletion) dp = [[0] * len(arr) for _ in range(2)] dp[0][0] = arr[0] dp[1][0] = arr[0] for i in range(1, len(arr)): dp[0][i] = max(arr[i], dp[0][i - 1] + arr[i]) dp[1][i] = max(arr[i], dp[1][i - 1] + arr[i], dp[0][i - 1]) return max(dp[1])
Solution
python
pytorch__pytorch
test/dynamo/test_config.py
{ "start": 224, "end": 4268 }
class ____(torch._dynamo.test_case.TestCase): @disable_cache_limit() def test_no_automatic_dynamic(self): def fn(a, b): return a - b * 10 torch._dynamo.reset() cnt_static = torch._dynamo.testing.CompileCounter() with torch._dynamo.config.patch( automatic_dynamic_shapes=False, assume_static_by_default=True ): opt_fn = torch.compile(fn, backend=cnt_static) for i in range(2, 12): opt_fn(torch.randn(i), torch.randn(i)) self.assertEqual(cnt_static.frame_count, 10) @disable_cache_limit() def test_automatic_dynamic(self): def fn(a, b): return a - b * 10 torch._dynamo.reset() cnt_dynamic = torch._dynamo.testing.CompileCounter() with torch._dynamo.config.patch( automatic_dynamic_shapes=True, assume_static_by_default=True ): opt_fn = torch.compile(fn, backend=cnt_dynamic) # NB: must not do 0, 1 as they specialized for i in range(2, 12): opt_fn(torch.randn(i), torch.randn(i)) # two graphs now rather than 10 self.assertEqual(cnt_dynamic.frame_count, 2) @disable_cache_limit() def test_no_assume_static_by_default(self): def fn(a, b): return a - b * 10 torch._dynamo.reset() cnt_dynamic = torch._dynamo.testing.CompileCounter() with torch._dynamo.config.patch( automatic_dynamic_shapes=True, assume_static_by_default=False ): opt_fn = torch.compile(fn, backend=cnt_dynamic) # NB: must not do 0, 1 as they specialized for i in range(2, 12): opt_fn(torch.randn(i), torch.randn(i)) # one graph now, as we didn't wait for recompile self.assertEqual(cnt_dynamic.frame_count, 1) def test_config_compile_ignored(self): # Remove from this list if no longer relevant dynamo_guarded_config_ignorelist = { "log_file_name", "verbose", "verify_correctness", # will not affect model, will raise RuntimeError # (no silent change to compilation behaviour) "recompile_limit", "accumulated_recompile_limit", "replay_record_enabled", "cprofile", # only wraps _compile, not graph "repro_after", "repro_level", "repro_forward_only", "repro_tolerance", "same_two_models_use_fp64", "error_on_recompile", # safe because: 
will throw error "report_guard_failures", "base_dir", # used for minifying / logging "DEBUG_DIR_VAR_NAME", "debug_dir_root", } for k in dynamo_guarded_config_ignorelist: assert k in torch._dynamo.config._compile_ignored_keys, k def test_config_hash(self): config = torch._dynamo.config starting_hash = config.get_hash() with config.patch({"verbose": not config.verbose}): new_hash = config.get_hash() assert "verbose" in config._compile_ignored_keys assert new_hash == starting_hash new_hash = config.get_hash() assert new_hash == starting_hash with config.patch({"suppress_errors": not config.suppress_errors}): changed_hash = config.get_hash() assert "suppress_errors" not in config._compile_ignored_keys assert changed_hash != starting_hash # Test nested patch with config.patch({"verbose": not config.verbose}): inner_changed_hash = config.get_hash() assert inner_changed_hash == changed_hash assert inner_changed_hash != starting_hash newest_hash = config.get_hash() assert changed_hash != newest_hash assert newest_hash == starting_hash if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
ConfigTests
python
pytorch__pytorch
test/test_opaque_obj_v2.py
{ "start": 1637, "end": 1977 }
class ____: def __init__(self, start): self.counter = torch.tensor(start) def increment_counter(self): self.counter += 1 register_opaque_type(OpaqueQueue, "_TestOpaqueObject_OpaqueQueue") register_opaque_type(RNGState, "_TestOpaqueObject_RNGState") register_opaque_type(Counter, "_TestOpaqueObject_Counter")
Counter
python
pytorch__pytorch
torch/nn/utils/spectral_norm.py
{ "start": 370, "end": 8237 }
class ____: # Invariant before and after each forward call: # u = F.normalize(W @ v) # NB: At initialization, this invariant is not enforced _version: int = 1 # At version 1: # made `W` not a buffer, # added `v` as a buffer, and # made eval mode use `W = u @ W_orig @ v` rather than the stored `W`. name: str dim: int n_power_iterations: int eps: float def __init__( self, name: str = "weight", n_power_iterations: int = 1, dim: int = 0, eps: float = 1e-12, ) -> None: self.name = name self.dim = dim if n_power_iterations <= 0: raise ValueError( "Expected n_power_iterations to be positive, but " f"got n_power_iterations={n_power_iterations}" ) self.n_power_iterations = n_power_iterations self.eps = eps def reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor: weight_mat = weight if self.dim != 0: # permute dim to front weight_mat = weight_mat.permute( self.dim, *[d for d in range(weight_mat.dim()) if d != self.dim] ) height = weight_mat.size(0) return weight_mat.reshape(height, -1) def compute_weight(self, module: Module, do_power_iteration: bool) -> torch.Tensor: # NB: If `do_power_iteration` is set, the `u` and `v` vectors are # updated in power iteration **in-place**. This is very important # because in `DataParallel` forward, the vectors (being buffers) are # broadcast from the parallelized module to each module replica, # which is a new module object created on the fly. And each replica # runs its own spectral norm power iteration. So simply assigning # the updated vectors to the module this function runs on will cause # the update to be lost forever. And the next time the parallelized # module is replicated, the same randomly initialized vectors are # broadcast and used! # # Therefore, to make the change propagate back, we rely on two # important behaviors (also enforced via tests): # 1. 
`DataParallel` doesn't clone storage if the broadcast tensor # is already on correct device; and it makes sure that the # parallelized module is already on `device[0]`. # 2. If the out tensor in `out=` kwarg has correct shape, it will # just fill in the values. # Therefore, since the same power iteration is performed on all # devices, simply updating the tensors in-place will make sure that # the module replica on `device[0]` will update the _u vector on the # parallelized module (by shared storage). # # However, after we update `u` and `v` in-place, we need to **clone** # them before using them to normalize the weight. This is to support # backproping through two forward passes, e.g., the common pattern in # GAN training: loss = D(real) - D(fake). Otherwise, engine will # complain that variables needed to do backward for the first forward # (i.e., the `u` and `v` vectors) are changed in the second forward. weight = getattr(module, self.name + "_orig") u = getattr(module, self.name + "_u") v = getattr(module, self.name + "_v") weight_mat = self.reshape_weight_to_matrix(weight) if do_power_iteration: with torch.no_grad(): for _ in range(self.n_power_iterations): # Spectral norm of weight equals to `u^T W v`, where `u` and `v` # are the first left and right singular vectors. # This power iteration produces approximations of `u` and `v`. 
v = F.normalize( torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v ) u = F.normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u) if self.n_power_iterations > 0: # See above on why we need to clone u = u.clone(memory_format=torch.contiguous_format) v = v.clone(memory_format=torch.contiguous_format) sigma = torch.dot(u, torch.mv(weight_mat, v)) weight = weight / sigma return weight def remove(self, module: Module) -> None: with torch.no_grad(): weight = self.compute_weight(module, do_power_iteration=False) delattr(module, self.name) delattr(module, self.name + "_u") delattr(module, self.name + "_v") delattr(module, self.name + "_orig") module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) def __call__(self, module: Module, inputs: Any) -> None: setattr( module, self.name, self.compute_weight(module, do_power_iteration=module.training), ) def _solve_v_and_rescale(self, weight_mat, u, target_sigma): # Tries to returns a vector `v` s.t. `u = F.normalize(W @ v)` # (the invariant at top of this class) and `u @ W @ v = sigma`. # This uses pinverse in case W^T W is not invertible. v = torch.linalg.multi_dot( [weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)] ).squeeze(1) return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v))) @staticmethod def apply( module: Module, name: str, n_power_iterations: int, dim: int, eps: float ) -> "SpectralNorm": for hook in module._forward_pre_hooks.values(): if isinstance(hook, SpectralNorm) and hook.name == name: raise RuntimeError( f"Cannot register two spectral_norm hooks on the same parameter {name}" ) fn = SpectralNorm(name, n_power_iterations, dim, eps) weight = module._parameters[name] if weight is None: raise ValueError( f"`SpectralNorm` cannot be applied as parameter `{name}` is None" ) if isinstance(weight, torch.nn.parameter.UninitializedParameter): raise ValueError( "The module passed to `SpectralNorm` can't have uninitialized parameters. 
" "Make sure to run the dummy forward before applying spectral normalization" ) with torch.no_grad(): weight_mat = fn.reshape_weight_to_matrix(weight) h, w = weight_mat.size() # randomly initialize `u` and `v` u = F.normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps) v = F.normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps) delattr(module, fn.name) module.register_parameter(fn.name + "_orig", weight) # We still need to assign weight back as fn.name because all sorts of # things may assume that it exists, e.g., when initializing weights. # However, we can't directly assign as it could be an nn.Parameter and # gets added as a parameter. Instead, we register weight.data as a plain # attribute. setattr(module, fn.name, weight.data) module.register_buffer(fn.name + "_u", u) module.register_buffer(fn.name + "_v", v) module.register_forward_pre_hook(fn) module._register_state_dict_hook(SpectralNormStateDictHook(fn)) module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn)) return fn # This is a top level class because Py2 pickle doesn't like inner class nor an # instancemethod.
SpectralNorm
python
jazzband__django-model-utils
tests/models.py
{ "start": 4841, "end": 5153 }
class ____(StatusModel): """An abstract status model with a custom manager.""" STATUS = Choices( ("first_choice", _("First choice")), ("second_choice", _("Second choice")), ) objects = StatusCustomManager() class Meta: abstract = True
AbstractCustomManagerStatusModel
python
PrefectHQ__prefect
src/prefect/client/orchestration/_flows/client.py
{ "start": 5735, "end": 10888 }
class ____(BaseAsyncClient): async def create_flow(self, flow: "FlowObject[Any, Any]") -> "UUID": """ Create a flow in the Prefect API. Args: flow: a `Flow` object Raises: httpx.RequestError: if a flow was not created for any reason Returns: the ID of the flow in the backend """ return await self.create_flow_from_name(flow.name) async def create_flow_from_name(self, flow_name: str) -> "UUID": """ Create a flow in the Prefect API. Args: flow_name: the name of the new flow Raises: httpx.RequestError: if a flow was not created for any reason Returns: the ID of the flow in the backend """ from prefect.client.schemas.actions import FlowCreate flow_data = FlowCreate(name=flow_name) response = await self.request( "POST", "/flows/", json=flow_data.model_dump(mode="json") ) flow_id = response.json().get("id") if not flow_id: raise RequestError(f"Malformed response: {response}") # Return the id of the created flow from uuid import UUID return UUID(flow_id) async def read_flow(self, flow_id: "UUID") -> "Flow": """ Query the Prefect API for a flow by id. Args: flow_id: the flow ID of interest Returns: a Flow model representation of the flow """ response = await self.request("GET", "/flows/{id}", path_params={"id": flow_id}) from prefect.client.schemas.objects import Flow return Flow.model_validate(response.json()) async def delete_flow(self, flow_id: "UUID") -> None: """ Delete a flow by UUID. 
Args: flow_id: ID of the flow to be deleted Raises: prefect.exceptions.ObjectNotFound: If request returns 404 httpx.RequestError: If requests fail """ try: await self.request("DELETE", "/flows/{id}", path_params={"id": flow_id}) except HTTPStatusError as e: if e.response.status_code == 404: raise ObjectNotFound(http_exc=e) from e else: raise async def read_flows( self, *, flow_filter: "FlowFilter | None" = None, flow_run_filter: "FlowRunFilter | None" = None, task_run_filter: "TaskRunFilter | None" = None, deployment_filter: "DeploymentFilter | None" = None, work_pool_filter: "WorkPoolFilter | None" = None, work_queue_filter: "WorkQueueFilter | None" = None, sort: "FlowSort | None" = None, limit: int | None = None, offset: int = 0, ) -> list["Flow"]: """ Query the Prefect API for flows. Only flows matching all criteria will be returned. Args: flow_filter: filter criteria for flows flow_run_filter: filter criteria for flow runs task_run_filter: filter criteria for task runs deployment_filter: filter criteria for deployments work_pool_filter: filter criteria for work pools work_queue_filter: filter criteria for work pool queues sort: sort criteria for the flows limit: limit for the flow query offset: offset for the flow query Returns: a list of Flow model representations of the flows """ body: dict[str, Any] = { "flows": flow_filter.model_dump(mode="json") if flow_filter else None, "flow_runs": ( flow_run_filter.model_dump(mode="json", exclude_unset=True) if flow_run_filter else None ), "task_runs": ( task_run_filter.model_dump(mode="json") if task_run_filter else None ), "deployments": ( deployment_filter.model_dump(mode="json") if deployment_filter else None ), "work_pools": ( work_pool_filter.model_dump(mode="json") if work_pool_filter else None ), "work_queues": ( work_queue_filter.model_dump(mode="json") if work_queue_filter else None ), "sort": sort, "limit": limit, "offset": offset, } response = await self.request("POST", "/flows/filter", json=body) from 
prefect.client.schemas.objects import Flow return Flow.model_validate_list(response.json()) async def read_flow_by_name( self, flow_name: str, ) -> "Flow": """ Query the Prefect API for a flow by name. Args: flow_name: the name of a flow Returns: a fully hydrated Flow model """ response = await self.request( "GET", "/flows/name/{name}", path_params={"name": flow_name} ) from prefect.client.schemas.objects import Flow return Flow.model_validate(response.json())
FlowAsyncClient
python
tensorflow__tensorflow
tensorflow/compiler/tests/reduce_ops_test.py
{ "start": 1284, "end": 7471 }
class ____(xla_test.XLATestCase, parameterized.TestCase): def _testReduction(self, tf_reduce_fn, np_reduce_fn, dtype, test_inputs, index_dtype, rtol=1e-4, atol=1e-4): """Tests that the output of 'tf_reduce_fn' matches numpy's output.""" for test_input in test_inputs: with self.session() as sess: with self.test_scope(): a = array_ops.placeholder(dtype) index = array_ops.placeholder(index_dtype) out = tf_reduce_fn(a, index) result = sess.run(out, {a: test_input, index: [0]}) self.assertAllClose( result, np_reduce_fn(test_input, axis=0), rtol=rtol, atol=atol) result = sess.run(out, {a: test_input, index: [1]}) self.assertAllClose( result, np_reduce_fn(test_input, axis=1), rtol=rtol, atol=atol) result = sess.run(out, {a: test_input, index: [-1]}) self.assertAllClose( result, np_reduce_fn(test_input, axis=1), rtol=rtol, atol=atol) # MLIR bridge doesn't return the same error so it can't be matched # directly. if not test_util.is_mlir_bridge_enabled(): with self.assertRaisesWithPredicateMatch( errors_impl.InvalidArgumentError, 'Invalid reduction dim'): sess.run(out, {a: test_input, index: [-33]}) with self.assertRaisesWithPredicateMatch( errors_impl.InvalidArgumentError, 'Invalid reduction dim'): sess.run(out, {a: test_input, index: [2]}) REAL_DATA = [ np.zeros(shape=(2, 0)), np.zeros(shape=(0, 30)), np.arange(1, 7).reshape(2, 3), np.arange(-10, -4).reshape(2, 3), np.arange(-4, 2).reshape(2, 3), ] COMPLEX_DATA = [ np.zeros(shape=(2, 0)).astype(np.complex64), np.zeros(shape=(0, 30)).astype(np.complex64), np.arange(1, 13, dtype=np.float32).view(np.complex64).reshape(2, 3), np.arange(-14, -2, dtype=np.float32).view(np.complex64).reshape(2, 3), np.arange(-4, 8, dtype=np.float32).view(np.complex64).reshape(2, 3), ] NONEMPTY_REAL_DATA = [x for x in REAL_DATA if np.size(x) > 0] NONEMPTY_COMPLEX_DATA = [x for x in COMPLEX_DATA if np.size(x) > 0] BOOL_DATA = [ np.array([], dtype=np.bool_).reshape(2, 0), np.array([], dtype=np.bool_).reshape(0, 3), np.array([[False, True, False], 
[True, True, False]]), ] ONES = [np.ones([34000, 2])] def testReduceSumF32(self, index_dtype): self._testReduction(math_ops.reduce_sum, np.sum, np.float32, self.REAL_DATA, index_dtype) def testReduceSumC64(self, index_dtype): self._testReduction(math_ops.reduce_sum, np.sum, np.complex64, self.COMPLEX_DATA, index_dtype) def testReduceProdF32(self, index_dtype): self._testReduction(math_ops.reduce_prod, np.prod, np.float32, self.REAL_DATA, index_dtype) def testReduceProdC64(self, index_dtype): self._testReduction(math_ops.reduce_prod, np.prod, np.complex64, self.COMPLEX_DATA, index_dtype) def testReduceMin(self, index_dtype): def reference_min(dtype, inp, axis): """Wrapper around np.amin that returns +infinity for an empty input.""" if inp.shape[axis] == 0: if np.issubdtype(dtype, np.floating): return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('inf')) return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], np.iinfo(dtype).max) return np.amin(inp, axis) for dtype in set(self.all_types).intersection( [np.float32, np.int32, np.int64]): self._testReduction(math_ops.reduce_min, functools.partial(reference_min, dtype), dtype, self.REAL_DATA, index_dtype) def testReduceMax(self, index_dtype): def reference_max(dtype, inp, axis): """Wrapper around np.amax that returns -infinity for an empty input.""" if inp.shape[axis] == 0: if np.issubdtype(dtype, np.floating): return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('-inf')) return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], np.iinfo(dtype).min) return np.amax(inp, axis) for dtype in set(self.all_types).intersection( [np.float32, np.int32, np.int64]): self._testReduction(math_ops.reduce_max, functools.partial(reference_max, dtype), dtype, self.REAL_DATA, index_dtype) def testReduceMeanF32(self, index_dtype): # TODO(phawkins): mean on XLA currently returns 0 instead of NaN when # reducing across zero inputs. 
self._testReduction(math_ops.reduce_mean, np.mean, np.float32, self.NONEMPTY_REAL_DATA, index_dtype) def testReduceMeanF16(self, index_dtype): if np.float16 in self.all_types: self._testReduction(math_ops.reduce_mean, np.mean, np.float16, self.ONES, index_dtype) def testReduceMeanC64(self, index_dtype): self._testReduction(math_ops.reduce_mean, np.mean, np.complex64, self.NONEMPTY_COMPLEX_DATA, index_dtype) def testReduceAll(self, index_dtype): self._testReduction(math_ops.reduce_all, np.all, np.bool_, self.BOOL_DATA, index_dtype) def testReduceAny(self, index_dtype): self._testReduction(math_ops.reduce_any, np.any, np.bool_, self.BOOL_DATA, index_dtype) @test_util.disable_mlir_bridge('Error messages differ') def testReduceSumWithDuplicateAxes(self, index_dtype): with self.session() as sess: with self.test_scope(): a = array_ops.placeholder(np.float32) index = array_ops.placeholder(np.int32) out = math_ops.reduce_sum(a, index) with self.assertRaisesWithPredicateMatch( errors_impl.InvalidArgumentError, 'Axes contains duplicate dimension'): sess.run(out, {a: [10, 20, 30], index: [0, 0]})
ReduceOpsTest
python
openai__openai-python
src/openai/_extras/pandas_proxy.py
{ "start": 338, "end": 637 }
class ____(LazyProxy[Any]): @override def __load__(self) -> Any: try: import pandas except ImportError as err: raise MissingDependencyError(PANDAS_INSTRUCTIONS) from err return pandas if not TYPE_CHECKING: pandas = PandasProxy()
PandasProxy
python
dask__dask
dask/dataframe/dask_expr/_accessor.py
{ "start": 3233, "end": 3753 }
class ____(Elemwise): _parameters = ["frame", "accessor", "attr", "args", "kwargs"] @functools.cached_property def _meta(self): args = [ meta_nonempty(op._meta) if isinstance(op, Expr) else op for op in self._args ] return make_meta(self.operation(*args, **self._kwargs)) @staticmethod def operation(obj, accessor, attr, args, kwargs): out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs) return maybe_wrap_pandas(obj, out)
FunctionMap
python
python-poetry__poetry
tests/repositories/fixtures/pypi.org/generate.py
{ "start": 3930, "end": 5982 }
class ____: def __init__(self, locations: list[Path] | None = None) -> None: self.locations = locations or [ RELEASE_FILE_LOCATIONS.dist, RELEASE_FILE_LOCATIONS.stubbed, ] def filename_exists(self, filename: str) -> bool: return any(location.joinpath(filename).exists() for location in self.locations) def find(self, filename: str) -> ReleaseFileMetadata | None: for location in self.locations: if location.joinpath(filename).exists(): return ReleaseFileMetadata(location) return None def list(self, location: Path | None = None) -> Iterator[ReleaseFileMetadata]: locations = [location] if location is not None else self.locations for candidate in locations: for file in candidate.glob("*.tar.*"): yield ReleaseFileMetadata(file) for file in candidate.glob("*.zip"): yield ReleaseFileMetadata(file) for file in candidate.glob("*.whl"): yield ReleaseFileMetadata(file) RELEASE_FILE_COLLECTION = _ReleaseFileCollection() def generate_distribution_hashes_fixture(files: list[ReleaseFileMetadata]) -> None: fixture_py = FIXTURE_PATH_REPOSITORIES / "distribution_hashes.py" files.sort(key=lambda f: f.path.name) text = ",\n".join( [ f' "{file.path.name}": DistributionHash(\n' f' "{file.sha256}",\n' f' "{file.md5}",\n' f" )" for file in files ] ) logger.info( "Generating fixture file at %s", fixture_py.relative_to(FIXTURE_PATH.parent.parent), ) fixture_py.write_text( f"""# this file is generated by {Path(__file__).relative_to(FIXTURE_PATH.parent.parent).as_posix()} from __future__ import annotations import dataclasses from typing import TYPE_CHECKING import pytest if TYPE_CHECKING: from tests.types import DistributionHashGetter @dataclasses.dataclass
_ReleaseFileCollection
python
scikit-learn__scikit-learn
sklearn/externals/array_api_compat/common/_typing.py
{ "start": 1859, "end": 2762 }
class ____(Protocol[_T_co]): @property def shape(self, /) -> _T_co: ... # Return type of `__array_namespace_info__.default_dtypes` Capabilities = TypedDict( "Capabilities", { "boolean indexing": bool, "data-dependent shapes": bool, "max dimensions": int, }, ) # Return type of `__array_namespace_info__.default_dtypes` DefaultDTypes = TypedDict( "DefaultDTypes", { "real floating": DType, "complex floating": DType, "integral": DType, "indexing": DType, }, ) _DTypeKind: TypeAlias = Literal[ "bool", "signed integer", "unsigned integer", "integral", "real floating", "complex floating", "numeric", ] # Type of the `kind` parameter in `__array_namespace_info__.dtypes` DTypeKind: TypeAlias = _DTypeKind | tuple[_DTypeKind, ...] # `__array_namespace_info__.dtypes(kind="bool")`
HasShape
python
pennersr__django-allauth
examples/react-spa/backend/backend/drf_demo/views.py
{ "start": 288, "end": 796 }
class ____(APIView): authentication_classes = [ authentication.SessionAuthentication, XSessionTokenAuthentication, ] permission_classes = [permissions.IsAuthenticated] def get(self, request): serializer = AddSerializer(data=request.GET) serializer.is_valid(raise_exception=True) return Response( data={ "result": serializer.validated_data["x"] + serializer.validated_data["y"] } )
AddAPIView
python
lepture__authlib
authlib/integrations/base_client/errors.py
{ "start": 400, "end": 484 }
class ____(OAuthError): error = "unsupported_token_type"
UnsupportedTokenTypeError
python
numba__llvmlite
llvmlite/binding/context.py
{ "start": 222, "end": 417 }
class ____(ffi.ObjectRef): def __init__(self, context_ptr): super(ContextRef, self).__init__(context_ptr) def _dispose(self): ffi.lib.LLVMPY_ContextDispose(self)
ContextRef
python
run-llama__llama_index
llama-index-integrations/tools/llama-index-tools-box/llama_index/tools/box/text_extract/base.py
{ "start": 418, "end": 2419 }
class ____(BaseToolSpec): """ Box Text Extraction Tool Specification. This class provides a specification for extracting text content from Box files and creating Document objects. It leverages the Box API to retrieve the text representation (if available) of specified Box files. Attributes: _box_client (BoxClient): An instance of the Box client for interacting with the Box API. """ spec_functions = ["extract"] _box_client: BoxClient def __init__(self, box_client: BoxClient) -> None: """ Initializes the Box Text Extraction Tool Specification with the provided Box client instance. Args: box_client (BoxClient): The Box client instance. """ self._box_client = add_extra_header_to_box_client(box_client) def extract( self, file_id: str, ) -> Document: """ Extracts text content from Box files and creates Document objects. This method utilizes the Box API to retrieve the text representation (if available) of the specified Box files. It then creates Document objects containing the extracted text and file metadata. Args: file_id (str): A of Box file ID to extract text from. Returns: List[Document]: A list of Document objects containing the extracted text content and file metadata. """ # Connect to Box box_check_connection(self._box_client) # get payload information box_file = get_box_files_details( box_client=self._box_client, file_ids=[file_id] )[0] box_file = get_text_representation( box_client=self._box_client, box_files=[box_file], )[0] doc = box_file_to_llama_document(box_file) doc.text = box_file.text_representation if box_file.text_representation else "" return doc
BoxTextExtractToolSpec
python
readthedocs__readthedocs.org
readthedocs/settings/proxito/test.py
{ "start": 91, "end": 371 }
class ____( CommunityProxitoSettingsMixin, CommunityTestSettings ): PUBLIC_DOMAIN = "dev.readthedocs.io" RTD_BUILD_MEDIA_STORAGE = "readthedocs.proxito.tests.storage.BuildMediaStorageTest" CommunityProxitoTestSettings.load_settings(__name__)
CommunityProxitoTestSettings
python
Farama-Foundation__Gymnasium
tests/functional/test_functional.py
{ "start": 169, "end": 2508 }
class ____(FuncEnv): """Generic testing functional environment.""" def __init__(self, options: dict[str, Any] | None = None): """Constructor that allows generic options to be set on the environment.""" super().__init__(options) def initial(self, rng: Any, params=None) -> np.ndarray: """Testing initial function.""" return np.array([0, 0], dtype=np.float32) def observation(self, state: np.ndarray, rng: Any, params=None) -> np.ndarray: """Testing observation function.""" return state def transition( self, state: np.ndarray, action: int, rng: None, params=None ) -> np.ndarray: """Testing transition function.""" return state + np.array([0, action], dtype=np.float32) def reward( self, state: np.ndarray, action: int, next_state: np.ndarray, rng: Any, params=None, ) -> float: """Testing reward function.""" return 1.0 if next_state[1] > 0 else 0.0 def terminal(self, state: np.ndarray, rng: Any, params=None) -> bool: """Testing terminal function.""" return state[1] > 0 def test_functional_api(): """Tests the core functional api specification using a generic testing environment.""" env = GenericTestFuncEnv() state = env.initial(None) obs = env.observation(state, None) assert state.shape == (2,) assert state.dtype == np.float32 assert obs.shape == (2,) assert obs.dtype == np.float32 assert np.allclose(obs, state) actions = [-1, -2, -5, 3, 5, 2] for i, action in enumerate(actions): next_state = env.transition(state, action, None) assert next_state.shape == (2,) assert next_state.dtype == np.float32 assert np.allclose(next_state, state + np.array([0, action])) observation = env.observation(next_state, None) assert observation.shape == (2,) assert observation.dtype == np.float32 assert np.allclose(observation, next_state) reward = env.reward(state, action, next_state, None) assert reward == (1.0 if next_state[1] > 0 else 0.0) terminal = env.terminal(next_state, None) assert terminal == (i == 5) # terminal state is in the final action state = next_state
GenericTestFuncEnv
python
django__django
tests/sitemaps_tests/urls/http.py
{ "start": 533, "end": 679 }
class ____(Sitemap): lastmod = date.today() def items(self): return [object() for x in range(Sitemap.limit + 1)]
SimplePagedSitemap
python
PyCQA__pydocstyle
src/tests/test_cases/test.py
{ "start": 257, "end": 3268 }
class ____: expect('meta', 'D419: Docstring is empty') class meta: """""" @expect('D102: Missing docstring in public method') def method(self=None): pass def _ok_since_private(self=None): pass @overload def overloaded_method(self, a: int) -> str: ... @overload def overloaded_method(self, a: str) -> str: """Foo bar documentation.""" ... def overloaded_method(a): """Foo bar documentation.""" return str(a) expect('overloaded_method', "D418: Function/ Method decorated with @overload" " shouldn't contain a docstring") @property def foo(self): """The foo of the thing, which isn't in imperitive mood.""" return "hello" @expect('D102: Missing docstring in public method') def __new__(self=None): pass @expect('D107: Missing docstring in __init__') def __init__(self=None): pass @expect('D105: Missing docstring in magic method') def __str__(self=None): pass @expect('D102: Missing docstring in public method') def __call__(self=None, x=None, y=None, z=None): pass @expect('D419: Docstring is empty') def function(): """ """ def ok_since_nested(): pass @expect('D419: Docstring is empty') def nested(): '' def function_with_nesting(): """Foo bar documentation.""" @overload def nested_overloaded_func(a: int) -> str: ... @overload def nested_overloaded_func(a: str) -> str: """Foo bar documentation.""" ... def nested_overloaded_func(a): """Foo bar documentation.""" return str(a) expect('nested_overloaded_func', "D418: Function/ Method decorated with @overload" " shouldn't contain a docstring") @overload def overloaded_func(a: int) -> str: ... @overload def overloaded_func(a: str) -> str: """Foo bar documentation.""" ... def overloaded_func(a): """Foo bar documentation.""" return str(a) expect('overloaded_func', "D418: Function/ Method decorated with @overload" " shouldn't contain a docstring") @expect('D200: One-line docstring should fit on one line with quotes ' '(found 3)') @expect('D212: Multi-line docstring summary should start at the first line') def asdlkfasd(): """ Wrong. 
""" @expect('D201: No blank lines allowed before function docstring (found 1)') def leading_space(): """Leading space.""" @expect('D202: No blank lines allowed after function docstring (found 1)') def trailing_space(): """Leading space.""" pass @expect('D201: No blank lines allowed before function docstring (found 1)') @expect('D202: No blank lines allowed after function docstring (found 1)') def trailing_and_leading_space(): """Trailing and leading space.""" pass expect('LeadingSpaceMissing', 'D203: 1 blank line required before class docstring (found 0)')
class_
python
huggingface__transformers
src/transformers/models/gpt2/modeling_gpt2.py
{ "start": 34779, "end": 39424 }
class ____(GPT2PreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"} def __init__(self, config): super().__init__(config) self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> Union[tuple, CausalLMOutputWithCrossAttentions]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, cache_position=cache_position, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: # Flatten the tokens loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) @auto_docstring( custom_intro=""" The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """ )
GPT2LMHeadModel
python
dagster-io__dagster
python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/commands/ci/state.py
{ "start": 300, "end": 457 }
class ____(BaseModel, extra=Extra.forbid): strategy: Literal["docker"] = "docker" python_version: Optional[str] = None image: str
DockerBuildOutput
python
numba__numba
numba/tests/test_tuples.py
{ "start": 2408, "end": 3069 }
class ____(unittest.TestCase): ''' issue 4369 raise an error if 'type' is not iterable ''' def test_namedtuple_types_exception(self): with self.assertRaises(errors.TypingError) as raises: types.NamedTuple(types.uint32, 'p') self.assertIn( "Argument 'types' is not iterable", str(raises.exception) ) def test_tuple_types_exception(self): with self.assertRaises(errors.TypingError) as raises: types.Tuple((types.uint32)) self.assertIn( "Argument 'types' is not iterable", str(raises.exception) )
TestTupleTypeNotIterable
python
tensorflow__tensorflow
tensorflow/python/autograph/pyct/cfg.py
{ "start": 5315, "end": 5495 }
class ____(enum.Enum): FORWARD = 1 REVERSE = 2 # TODO(mdan): Rename to DataFlowAnalyzer. # TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
_WalkMode
python
huggingface__transformers
tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py
{ "start": 1917, "end": 5202 }
class ____: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: str | None = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: int | None = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_val_samples: int | None = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." ) }, ) max_test_samples: int | None = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." 
) }, ) train_file: str | None = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: str | None = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: str | None = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys: raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task or a training/validation file.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert validation_extension == train_extension, ( "`validation_file` should have the same extension (csv or json) as `train_file`." ) @dataclass
DataTrainingArguments
python
scikit-learn__scikit-learn
sklearn/kernel_ridge.py
{ "start": 563, "end": 9263 }
class ____(MultiOutputMixin, RegressorMixin, BaseEstimator): """Kernel ridge regression. Kernel ridge regression (KRR) combines ridge regression (linear least squares with l2-norm regularization) with the kernel trick. It thus learns a linear function in the space induced by the respective kernel and the data. For non-linear kernels, this corresponds to a non-linear function in the original space. The form of the model learned by KRR is identical to support vector regression (SVR). However, different loss functions are used: KRR uses squared error loss while support vector regression uses epsilon-insensitive loss, both combined with l2 regularization. In contrast to SVR, fitting a KRR model can be done in closed-form and is typically faster for medium-sized datasets. On the other hand, the learned model is non-sparse and thus slower than SVR, which learns a sparse model for epsilon > 0, at prediction-time. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape [n_samples, n_targets]). Read more in the :ref:`User Guide <kernel_ridge>`. Parameters ---------- alpha : float or array-like of shape (n_targets,), default=1.0 Regularization strength; must be a positive float. Regularization improves the conditioning of the problem and reduces the variance of the estimates. Larger values specify stronger regularization. Alpha corresponds to ``1 / (2C)`` in other linear models such as :class:`~sklearn.linear_model.LogisticRegression` or :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are assumed to be specific to the targets. Hence they must correspond in number. See :ref:`ridge_regression` for formula. kernel : str or callable, default="linear" Kernel mapping used internally. This parameter is directly passed to :class:`~sklearn.metrics.pairwise.pairwise_kernels`. If `kernel` is a string, it must be one of the metrics in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed". 
If `kernel` is "precomputed", X is assumed to be a kernel matrix. Alternatively, if `kernel` is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two rows from X as input and return the corresponding kernel value as a single number. This means that callables from :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on matrices, not single samples. Use the string identifying the kernel instead. gamma : float, default=None Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. degree : float, default=3 Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=1 Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. kernel_params : dict, default=None Additional parameters (keyword arguments) for kernel function passed as callable object. Attributes ---------- dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets) Representation of weight vector(s) in kernel space X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features) Training data, which is also required for prediction. If kernel == "precomputed" this is instead the precomputed training matrix, of shape (n_samples, n_samples). n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- sklearn.gaussian_process.GaussianProcessRegressor : Gaussian Process regressor providing automatic kernel hyperparameters tuning and predictions uncertainty. sklearn.linear_model.Ridge : Linear ridge regression. 
sklearn.linear_model.RidgeCV : Ridge regression with built-in cross-validation. sklearn.svm.SVR : Support Vector Regression accepting a large variety of kernels. References ---------- * Kevin P. Murphy "Machine Learning: A Probabilistic Perspective", The MIT Press chapter 14.4.3, pp. 492-493 Examples -------- >>> from sklearn.kernel_ridge import KernelRidge >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> rng = np.random.RandomState(0) >>> y = rng.randn(n_samples) >>> X = rng.randn(n_samples, n_features) >>> krr = KernelRidge(alpha=1.0) >>> krr.fit(X, y) KernelRidge(alpha=1.0) """ _parameter_constraints: dict = { "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], "kernel": [ StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}), callable, ], "gamma": [Interval(Real, 0, None, closed="left"), None], "degree": [Interval(Real, 0, None, closed="left")], "coef0": [Interval(Real, None, None, closed="neither")], "kernel_params": [dict, None], } def __init__( self, alpha=1, *, kernel="linear", gamma=None, degree=3, coef0=1, kernel_params=None, ): self.alpha = alpha self.kernel = kernel self.gamma = gamma self.degree = degree self.coef0 = coef0 self.kernel_params = kernel_params def _get_kernel(self, X, Y=None): if callable(self.kernel): params = self.kernel_params or {} else: params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0} return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.input_tags.pairwise = self.kernel == "precomputed" return tags @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """Fit Kernel Ridge regression model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. If kernel == "precomputed" this is instead a precomputed kernel matrix, of shape (n_samples, n_samples). 
y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. sample_weight : float or array-like of shape (n_samples,), default=None Individual weights for each sample, ignored if None is passed. Returns ------- self : object Returns the instance itself. """ # Convert data X, y = validate_data( self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True ) if sample_weight is not None and not isinstance(sample_weight, float): sample_weight = _check_sample_weight(sample_weight, X) K = self._get_kernel(X) alpha = np.atleast_1d(self.alpha) ravel = False if len(y.shape) == 1: y = y.reshape(-1, 1) ravel = True copy = self.kernel == "precomputed" self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy) if ravel: self.dual_coef_ = self.dual_coef_.ravel() self.X_fit_ = X return self def predict(self, X): """Predict using the kernel ridge model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. If kernel == "precomputed" this is instead a precomputed kernel matrix, shape = [n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for this estimator. Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values. """ check_is_fitted(self) X = validate_data(self, X, accept_sparse=("csr", "csc"), reset=False) K = self._get_kernel(X, self.X_fit_) return np.dot(K, self.dual_coef_)
KernelRidge
python
scikit-learn__scikit-learn
sklearn/exceptions.py
{ "start": 5007, "end": 6176 }
class ____(UserWarning): """Warning raised when an estimator is unpickled with an inconsistent version. Parameters ---------- estimator_name : str Estimator name. current_sklearn_version : str Current scikit-learn version. original_sklearn_version : str Original scikit-learn version. """ def __init__( self, *, estimator_name, current_sklearn_version, original_sklearn_version ): self.estimator_name = estimator_name self.current_sklearn_version = current_sklearn_version self.original_sklearn_version = original_sklearn_version def __str__(self): return ( f"Trying to unpickle estimator {self.estimator_name} from version" f" {self.original_sklearn_version} when " f"using version {self.current_sklearn_version}. This might lead to breaking" " code or " "invalid results. Use at your own risk. " "For more info please refer to:\n" "https://scikit-learn.org/stable/model_persistence.html" "#security-maintainability-limitations" )
InconsistentVersionWarning
python
openai__openai-python
src/openai/types/beta/threads/required_action_function_tool_call.py
{ "start": 220, "end": 395 }
class ____(BaseModel): arguments: str """The arguments that the model expects you to pass to the function.""" name: str """The name of the function."""
Function
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/_typing.py
{ "start": 2774, "end": 3030 }
class ____(Protocol): """protocol for the :class:`.AliasedInsp._orm_adapt_element` method which is a synonym for :class:`.AliasedInsp._adapt_element`. """ def __call__(self, obj: _CE, key: Optional[str] = None) -> _CE: ...
_ORMAdapterProto
python
charliermarsh__ruff
crates/ruff_python_parser/resources/valid/statement/class.py
{ "start": 648, "end": 689 }
class ____[T, U,](): ... # TypeVarTuple
Test
python
django__django
django/core/files/storage/handler.py
{ "start": 257, "end": 1507 }
class ____: def __init__(self, backends=None): # backends is an optional dict of storage backend definitions # (structured like settings.STORAGES). self._backends = backends self._storages = {} @cached_property def backends(self): if self._backends is None: self._backends = settings.STORAGES.copy() return self._backends def __getitem__(self, alias): try: return self._storages[alias] except KeyError: try: params = self.backends[alias] except KeyError: raise InvalidStorageError( f"Could not find config for '{alias}' in settings.STORAGES." ) storage = self.create_storage(params) self._storages[alias] = storage return storage def create_storage(self, params): params = params.copy() backend = params.pop("BACKEND") options = params.pop("OPTIONS", {}) try: storage_cls = import_string(backend) except ImportError as e: raise InvalidStorageError(f"Could not find backend {backend!r}: {e}") from e return storage_cls(**options)
StorageHandler
python
walkccc__LeetCode
solutions/2817. Minimum Absolute Difference Between Elements With Constraint/2817.py
{ "start": 41, "end": 416 }
class ____: def minAbsoluteDifference(self, nums: list[int], x: int) -> int: ans = math.inf seen = SortedSet() for i in range(x, len(nums)): seen.add(nums[i - x]) it = seen.bisect_left(nums[i]) if it != len(seen): ans = min(ans, seen[it] - nums[i]) if it != 0: ans = min(ans, nums[i] - seen[it - 1]) return ans
Solution
python
dask__dask
dask/dataframe/dask_expr/_groupby.py
{ "start": 32513, "end": 33464 }
class ____(Blockwise, GroupByBase): operation = staticmethod(groupby_get_group) _parameters = ["frame", "get_key", "columns"] _keyword_only = ["get_key", "columns"] @property def _args(self) -> list: return [self.frame] + self.by @property def _kwargs(self) -> dict: cols = self.operand("columns") return { "get_key": self.get_key, "columns": cols if cols is not None else self.frame.columns, } def _median_groupby_aggregate( df, by=None, key=None, group_keys=True, # not used dropna=None, observed=None, numeric_only=False, args=None, **kwargs, ): dropna = {"dropna": dropna} if dropna is not None else {} observed = {"observed": observed} if observed is not None else {} g = df.groupby(by=by, **observed, **dropna) if key is not None: g = g[key] return g.median(numeric_only=numeric_only)
GetGroup
python
keon__algorithms
tests/test_strings.py
{ "start": 16654, "end": 16929 }
class ____(unittest.TestCase): def test_min_distance(self): self.assertEqual(2, min_distance_dp("sea", "eat")) self.assertEqual(6, min_distance_dp("abAlgocrithmf", "Algorithmmd")) self.assertEqual(4, min_distance("acbbd", "aabcd"))
TestMinDistanceDP
python
django__django
django/core/exceptions.py
{ "start": 6623, "end": 6753 }
class ____(Exception): """The user tried to call a sync-only function from an async context.""" pass
SynchronousOnlyOperation
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_uuid.py
{ "start": 2633, "end": 5975 }
class ____(ColumnMapExpectation): """Expect column values to conform to valid UUID format.""" # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "well_formed_uuids": [ # standard random UUIDs "28d12e8e-80aa-4b32-8afb-19da0aa7e3d5", "d711cb07-1f05-4ef6-bc54-3a5ec703a88d", "9d5175ae-4d9e-4370-854c-a5e9bbb9b2c7", "c3eef74b-d977-46e3-ad40-0bfe5dbaf64b", # hyphens may or may not be present "e8a4926e5f7643079e8acdbd49a4e15b", # curly braces may or may not be present "{00010203-0405-1607-8809-0a0b0c0d0e0f}", # leading identifier "urn:uuid:" is allowed "urn:uuid:12345678-1234-5678-9234-567812345678", ], "malformed_uuids": [ # has non-hexidecimal value "5d700619-51de-4e28-b949-f596cddcd25z", # is too long "ff4a6854-79b9-4210-82b3-ca7cd6d03b711", # is too short "19bf8112-a972-4e38-a404-16864cb9d88", # has invalid punctuation "f13cbe4c_05df_4cbf_88f6_3b8c7d2f5cfc", # more invalid punctuation "a82af99c.20d3.4bb4.9a73.b9ec7c6f6a36", # left field "not-even-close", "ValueError('All arrays must be of the same length')", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "well_formed_uuids"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "malformed_uuids"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. 
map_metric = "column_values.valid_uuid" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "tags": ["typed-entities"], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@joshua-stauffer", # Don't forget to add your github handle here! "@asafla", ], } if __name__ == "__main__": ExpectColumnValuesToBeValidUUID().print_diagnostic_checklist()
ExpectColumnValuesToBeValidUUID
python
ansible__ansible
test/lib/ansible_test/_internal/cli/argparsing/parsers.py
{ "start": 1263, "end": 2271 }
class ____(Completion): """Successful argument completion result.""" list_mode: bool consumed: str continuation: str matches: list[str] = dataclasses.field(default_factory=list) @property def preserve(self) -> bool: """ True if argcomplete should not mangle completion values, otherwise False. Only used when more than one completion exists to avoid overwriting the word undergoing completion. """ return len(self.matches) > 1 and self.list_mode @property def completions(self) -> list[str]: """List of completion values to return to argcomplete.""" completions = self.matches continuation = '' if self.list_mode else self.continuation if not self.preserve: # include the existing prefix to avoid rewriting the word undergoing completion completions = [f'{self.consumed}{completion}{continuation}' for completion in completions] return completions
CompletionSuccess
python
django__django
tests/update/models.py
{ "start": 667, "end": 732 }
class ____(models.Model): y = models.IntegerField(default=10)
C
python
pytorch__pytorch
torch/_export/db/examples/autograd_function.py
{ "start": 321, "end": 652 }
class ____(torch.nn.Module): """ TorchDynamo does not keep track of backward() on autograd functions. We recommend to use `allow_in_graph` to mitigate this problem. """ def forward(self, x): return MyAutogradFunction.apply(x) example_args = (torch.randn(3, 2),) model = AutogradFunction()
AutogradFunction
python
django__django
tests/model_forms/models.py
{ "start": 6943, "end": 7160 }
class ____(models.Model): left = models.IntegerField() middle = models.IntegerField() right = models.IntegerField() class Meta: unique_together = (("left", "middle"), ("middle", "right"))
Triple
python
joke2k__faker
tests/providers/test_person.py
{ "start": 28251, "end": 29971 }
class ____(unittest.TestCase): """Tests person in the fr-BE locale""" def setUp(self): self.fake = Faker("fr-BE") self.provider = FrBEProvider Faker.seed(0) def test_first_name(self): # General first name name = self.fake.first_name() assert name self.assertIsInstance(name, str) assert name in self.provider.first_names # Females first name name = self.fake.first_name_female() assert name self.assertIsInstance(name, str) assert name in self.provider.first_names assert name in self.provider.first_names_female # Male first name name = self.fake.first_name_male() assert name self.assertIsInstance(name, str) assert name in self.provider.first_names assert name in self.provider.first_names_male def test_last_name(self): assert not hasattr(self.provider, "last_names_male") assert not hasattr(self.provider, "last_names_female") # All last names apply for all genders. assert hasattr(self.provider, "last_names") # General last name. name = self.fake.last_name() assert name self.assertIsInstance(name, str) assert name in self.provider.last_names # Females last name. name = self.fake.last_name_female() assert name self.assertIsInstance(name, str) assert name in self.provider.last_names assert name in self.provider.last_names # Male last name. name = self.fake.last_name_male() assert name self.assertIsInstance(name, str) assert name in self.provider.last_names
TestFrBE
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_arizona_zip.py
{ "start": 1729, "end": 4054 }
class ____(ColumnMapExpectation): """Expect values in this column to be valid Arizona zipcodes. See https://pypi.org/project/zipcodes/ for more information. """ # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "validarizonazip": ["86511", "85920", "85308", "85001"], "invalidarizonazip": ["-10000", "1234", "99999", "25487"], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "validarizonazip"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "invalidarizonazip"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.validarizonazip" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": [ "hackathon", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@luismdiaz01", "@derekma73", # Don't forget to add your github handle here! ], "requirements": ["zipcodes"], } if __name__ == "__main__": ExpectColumnValuesToBeValidArizonaZip().print_diagnostic_checklist()
ExpectColumnValuesToBeValidArizonaZip
python
neetcode-gh__leetcode
python/0554-brick-wall.py
{ "start": 0, "end": 395 }
class ____: def leastBricks(self, wall: List[List[int]]) -> int: countGap = { 0 : 0 } # { Position : Gap count } for r in wall: total = 0 # Position for b in r[:-1]: total += b countGap[total] = 1 + countGap.get(total, 0) return len(wall) - max(countGap.values()) # Total number of rows - Max gap
Solution
python
pytorch__pytorch
torch/_inductor/codegen/wrapper.py
{ "start": 13353, "end": 14011 }
class ____: def __init__(self): super().__init__() self.reuse_pool: dict[ReuseKey, list[FreeIfNotReusedLine]] = ( collections.defaultdict(list) ) self.total_allocated_buffer_size: int = 0 def __contains__(self, key: ReuseKey) -> bool: return bool(self.reuse_pool.get(key, None)) def pop(self, key: ReuseKey) -> FreeIfNotReusedLine: item = self.reuse_pool[key].pop() assert not item.is_reused return item def push(self, key: ReuseKey, item: FreeIfNotReusedLine) -> None: assert not item.is_reused self.reuse_pool[key].append(item)
MemoryPlanningState