| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
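A minimal sketch of how rows with this schema could be consumed, assuming they are exported as JSON Lines with the columns above; the file name and helper names are placeholders, not part of the dataset.

import json

def iter_records(path="rows.jsonl"):
    # One dict per row: language, repo, path, class_span, source, target.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

def fill_in_class_name(record):
    # Substitute the masked class name ("____") with the target class name.
    return record["source"].replace("class ____", f"class {record['target']}", 1)

for record in iter_records():
    span = record["class_span"]  # e.g. {"start": 12425, "end": 16346}
    print(record["repo"], record["path"], span["start"], span["end"])
    print(fill_in_class_name(record)[:120])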
python
|
ray-project__ray
|
rllib/algorithms/impala/impala_learner.py
|
{
"start": 12425,
"end": 16346
}
|
class ____(threading.Thread):
def __init__(
self,
*,
in_queue: queue.Queue,
out_queue: deque,
device: torch.device,
metrics_logger: MetricsLogger,
):
super().__init__(name="_GPULoaderThread")
self.daemon = True
self._in_queue = in_queue
self._out_queue = out_queue
self._ts_dropped = 0
self._device = device
self.metrics = metrics_logger
self._metrics_impala_gpu_loader_thread_step_time = Histogram(
name="rllib_learner_impala_gpu_loader_thread_step_time",
description="Time taken in seconds for gpu loader thread _step.",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_impala_gpu_loader_thread_step_time.set_default_tags(
{"rllib": "IMPALA/GPULoaderThread"}
)
self._metrics_impala_gpu_loader_thread_step_in_queue_get_time = Histogram(
name="rllib_learner_impala_gpu_loader_thread_step_get_time",
description="Time taken in seconds for gpu loader thread _step _in_queue.get().",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_impala_gpu_loader_thread_step_in_queue_get_time.set_default_tags(
{"rllib": "IMPALA/GPULoaderThread"}
)
self._metrics_impala_gpu_loader_thread_step_load_to_gpu_time = Histogram(
name="rllib_learner_impala_gpu_loader_thread_step_load_to_gpu_time",
description="Time taken in seconds for GPU loader thread _step to load batch to GPU.",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_impala_gpu_loader_thread_step_load_to_gpu_time.set_default_tags(
{"rllib": "IMPALA/GPULoaderThread"}
)
self._metrics_impala_gpu_loader_thread_in_qsize_beginning_of_step = Gauge(
name="rllib_impala_gpu_loader_thread_in_qsize_beginning_of_step",
description="Size of the _GPULoaderThread in-queue size, at the beginning of the step.",
tag_keys=("rllib",),
)
self._metrics_impala_gpu_loader_thread_in_qsize_beginning_of_step.set_default_tags(
{"rllib": "IMPALA/GPULoaderThread"}
)
def run(self) -> None:
while True:
with TimerAndPrometheusLogger(
self._metrics_impala_gpu_loader_thread_step_time
):
self._step()
def _step(self) -> None:
self._metrics_impala_gpu_loader_thread_in_qsize_beginning_of_step.set(
value=self._in_queue.qsize()
)
# Get a new batch from the data (in-queue).
with self.metrics.log_time((ALL_MODULES, GPU_LOADER_QUEUE_WAIT_TIMER)):
with TimerAndPrometheusLogger(
self._metrics_impala_gpu_loader_thread_step_in_queue_get_time
):
ma_batch_on_cpu = self._in_queue.get()
# Load the batch onto the GPU device.
with self.metrics.log_time((ALL_MODULES, GPU_LOADER_LOAD_TO_GPU_TIMER)):
with TimerAndPrometheusLogger(
self._metrics_impala_gpu_loader_thread_step_load_to_gpu_time
):
ma_batch_on_gpu = ma_batch_on_cpu.to_device(
self._device, pin_memory=False
)
if isinstance(self._out_queue, CircularBuffer):
ts_dropped = self._out_queue.add(ma_batch_on_gpu)
self.metrics.log_value(
(ALL_MODULES, LEARNER_THREAD_ENV_STEPS_DROPPED),
ts_dropped,
reduce="sum",
)
else:
# Enqueue to Learner thread's in-queue.
_LearnerThread.enqueue(self._out_queue, ma_batch_on_gpu, self.metrics)
|
_GPULoaderThread
|
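A stripped-down, standard-library-only sketch of the producer/consumer pattern the class above implements (a daemon thread moving batches from an input queue to an output buffer); the torch device transfer and the metrics calls are deliberately omitted, so this is an illustration, not RLlib code.

import queue
import threading
from collections import deque

class LoaderThread(threading.Thread):
    def __init__(self, in_queue: queue.Queue, out_queue: deque):
        super().__init__(name="LoaderThread", daemon=True)
        self._in_queue = in_queue
        self._out_queue = out_queue

    def run(self) -> None:
        while True:
            batch = self._in_queue.get()   # block until a batch arrives
            self._out_queue.append(batch)  # hand it to the consumer side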
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/_ast.py
|
{
"start": 152341,
"end": 153316
}
|
class ____(ASTBase):
def __init__(self, expr: ASTExpression) -> None:
self.expr = expr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTRequiresClause):
return NotImplemented
return self.expr == other.expr
def __hash__(self) -> int:
return hash(self.expr)
def _stringify(self, transform: StringifyTransform) -> str:
return 'requires ' + transform(self.expr)
def describe_signature(
self,
signode: nodes.TextElement,
mode: str,
env: BuildEnvironment,
symbol: Symbol,
) -> None:
signode += addnodes.desc_sig_keyword('requires', 'requires')
signode += addnodes.desc_sig_space()
self.expr.describe_signature(signode, mode, env, symbol)
################################################################################
################################################################################
|
ASTRequiresClause
|
python
|
run-llama__llama_index
|
llama-index-experimental/llama_index/experimental/param_tuner/base.py
|
{
"start": 289,
"end": 470
}
|
class ____(BaseModel):
"""Run result."""
score: float
params: Dict[str, Any]
metadata: Dict[str, Any] = Field(default_factory=dict, description="Metadata.")
|
RunResult
|
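Brief usage sketch for a model of this shape; it redefines an equivalent pydantic model locally in case the llama_index experimental import is unavailable, and the field values are made up.

from typing import Any, Dict
from pydantic import BaseModel, Field

class RunResult(BaseModel):
    """Run result."""
    score: float
    params: Dict[str, Any]
    metadata: Dict[str, Any] = Field(default_factory=dict, description="Metadata.")

best = RunResult(score=0.87, params={"chunk_size": 512, "top_k": 2})
print(best.score, best.params, best.metadata)  # metadata defaults to {}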
python
|
wandb__wandb
|
wandb/integration/tensorflow/estimator_hook.py
|
{
"start": 727,
"end": 1756
}
|
class ____(SessionRunHook):
def __init__(self, summary_op=None, steps_per_log=1000, history=None):
self._summary_op = summary_op
self._steps_per_log = steps_per_log
self._history = history
with telemetry.context() as tel:
tel.feature.estimator_hook = True
def begin(self):
if wandb.run is None:
raise wandb.Error("You must call `wandb.init()` before calling `WandbHook`")
if self._summary_op is None:
self._summary_op = merge_all_summaries()
self._step = -1
def before_run(self, run_context):
return SessionRunArgs(
{"summary": self._summary_op, "global_step": get_global_step()}
)
def after_run(self, run_context, run_values):
step = run_values.results["global_step"]
if step % self._steps_per_log == 0:
wandb.tensorboard._log(
run_values.results["summary"],
history=self._history,
step=step,
)
|
WandbHook
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/validators/actions/test_msteams.py
|
{
"start": 254,
"end": 2155
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.integration, self.org_integration = self.create_provider_integration_for(
provider="msteams", organization=self.organization, user=self.user, name="msteams"
)
self.valid_data = {
"type": Action.Type.MSTEAMS,
"config": {"targetDisplay": "cathy-sentry", "targetType": "specific"},
"data": {},
"integrationId": self.integration.id,
}
@mock.patch("sentry.integrations.msteams.actions.form.find_channel_id")
def test_validate(self, mock_check_for_channel: mock.MagicMock) -> None:
mock_check_for_channel.return_value = "C1234567890"
validator = BaseActionValidator(
data=self.valid_data,
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is True
validator.save()
@mock.patch("sentry.integrations.msteams.actions.form.find_channel_id")
def test_validate__invalid_channel_id(self, mock_find_channel_id: mock.MagicMock) -> None:
mock_find_channel_id.return_value = None
validator = BaseActionValidator(
data={
**self.valid_data,
"config": {
"targetType": "specific",
"targetIdentifier": "C1234567890",
"targetDisplay": "asdf",
},
},
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is False
assert validator.errors == {
"all": [
ErrorDetail(
string='The channel or user "asdf" could not be found in the msteams Team.',
code="invalid",
)
]
}
|
TestMSTeamsActionValidator
|
python
|
mlflow__mlflow
|
tests/gateway/tools.py
|
{
"start": 4068,
"end": 4514
}
|
class ____(mock.Mock):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def __aenter__(self):
return self
async def __aexit__(self, *args):
return
def mock_http_client(mock_response: MockAsyncResponse | MockAsyncStreamingResponse):
mock_http_client = MockHttpClient()
mock_http_client.post = mock.Mock(return_value=mock_response)
return mock_http_client
|
MockHttpClient
|
python
|
mlflow__mlflow
|
mlflow/pyfunc/scoring_server/client.py
|
{
"start": 485,
"end": 1124
}
|
class ____(ABC):
@abstractmethod
def wait_server_ready(self, timeout=30, scoring_server_proc=None):
"""
Wait until the scoring server is ready to accept requests.
"""
@abstractmethod
def invoke(self, data, params: dict[str, Any] | None = None):
"""
Invoke inference on input data. The input data must be a pandas DataFrame, a numpy array, or a dict of numpy arrays.
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
Prediction result.
"""
|
BaseScoringServerClient
|
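A hypothetical concrete subclass of the abstract client above, using `requests` against a locally served model; the endpoint paths and payload shape are assumptions for illustration only.

import time
from typing import Any

import requests

from mlflow.pyfunc.scoring_server.client import BaseScoringServerClient

class HttpScoringServerClient(BaseScoringServerClient):
    def __init__(self, base_url: str):
        self.base_url = base_url.rstrip("/")

    def wait_server_ready(self, timeout=30, scoring_server_proc=None):
        # Poll an assumed health endpoint until it responds or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                if requests.get(f"{self.base_url}/ping", timeout=1).ok:
                    return
            except requests.RequestException:
                pass
            time.sleep(0.5)
        raise RuntimeError("scoring server did not become ready in time")

    def invoke(self, data, params: dict[str, Any] | None = None):
        # Assumed request shape; adapt to the server's actual input format.
        payload = {"inputs": data, "params": params or {}}
        resp = requests.post(f"{self.base_url}/invocations", json=payload, timeout=30)
        resp.raise_for_status()
        return resp.json()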
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_ingress_service_backend.py
|
{
"start": 383,
"end": 4426
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'port': 'V1ServiceBackendPort'
}
attribute_map = {
'name': 'name',
'port': 'port'
}
def __init__(self, name=None, port=None, local_vars_configuration=None): # noqa: E501
"""V1IngressServiceBackend - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._port = None
self.discriminator = None
self.name = name
if port is not None:
self.port = port
@property
def name(self):
"""Gets the name of this V1IngressServiceBackend. # noqa: E501
name is the referenced service. The service must exist in the same namespace as the Ingress object. # noqa: E501
:return: The name of this V1IngressServiceBackend. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1IngressServiceBackend.
name is the referenced service. The service must exist in the same namespace as the Ingress object. # noqa: E501
:param name: The name of this V1IngressServiceBackend. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def port(self):
"""Gets the port of this V1IngressServiceBackend. # noqa: E501
:return: The port of this V1IngressServiceBackend. # noqa: E501
:rtype: V1ServiceBackendPort
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this V1IngressServiceBackend.
:param port: The port of this V1IngressServiceBackend. # noqa: E501
:type: V1ServiceBackendPort
"""
self._port = port
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IngressServiceBackend):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IngressServiceBackend):
return True
return self.to_dict() != other.to_dict()
|
V1IngressServiceBackend
|
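Brief usage sketch of the generated model above; assumes the standard kubernetes Python client is installed.

from kubernetes.client import V1IngressServiceBackend, V1ServiceBackendPort

backend = V1IngressServiceBackend(
    name="my-service",
    port=V1ServiceBackendPort(number=8080),
)
print(backend.to_dict())  # {'name': 'my-service', 'port': {...}}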
python
|
pypa__pip
|
src/pip/_vendor/distlib/util.py
|
{
"start": 33749,
"end": 36067
}
|
class ____(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result)
return result
#
# Simple sequencing
#
|
EventMixin
|
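Quick illustration of the publish/subscribe API defined above; `Build` and the handler are made up, and `EventMixin` is imported from upstream distlib (the module pip vendors at the path shown).

from distlib.util import EventMixin

class Build(EventMixin):
    pass

def on_done(event, **kwargs):
    return f"handled {event} for {kwargs.get('name')}"

builder = Build()
builder.add("done", on_done)
print(builder.publish("done", name="wheel"))   # ['handled done for wheel']
builder.remove("done", on_done)
print(list(builder.get_subscribers("done")))   # []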
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/memberAccess1.py
|
{
"start": 2448,
"end": 2823
}
|
class ____:
@Decorator
async def method1(self, a: int, *, b: str) -> str: ...
def method2(self):
reveal_type(self.method1, expected_text="(a: int, *, b: str) -> Awaitable[str]")
@classmethod
def method3(cls):
reveal_type(
cls.method1,
expected_text="Decorator[Self@ClassF, (a: int, *, b: str), str]",
)
|
ClassF
|
python
|
pyca__cryptography
|
src/cryptography/x509/ocsp.py
|
{
"start": 3587,
"end": 6568
}
|
class ____:
def __init__(
self,
request: tuple[
x509.Certificate, x509.Certificate, hashes.HashAlgorithm
]
| None = None,
request_hash: tuple[bytes, bytes, int, hashes.HashAlgorithm]
| None = None,
extensions: list[x509.Extension[x509.ExtensionType]] = [],
) -> None:
self._request = request
self._request_hash = request_hash
self._extensions = extensions
def add_certificate(
self,
cert: x509.Certificate,
issuer: x509.Certificate,
algorithm: hashes.HashAlgorithm,
) -> OCSPRequestBuilder:
if self._request is not None or self._request_hash is not None:
raise ValueError("Only one certificate can be added to a request")
_verify_algorithm(algorithm)
if not isinstance(cert, x509.Certificate) or not isinstance(
issuer, x509.Certificate
):
raise TypeError("cert and issuer must be a Certificate")
return OCSPRequestBuilder(
(cert, issuer, algorithm), self._request_hash, self._extensions
)
def add_certificate_by_hash(
self,
issuer_name_hash: bytes,
issuer_key_hash: bytes,
serial_number: int,
algorithm: hashes.HashAlgorithm,
) -> OCSPRequestBuilder:
if self._request is not None or self._request_hash is not None:
raise ValueError("Only one certificate can be added to a request")
if not isinstance(serial_number, int):
raise TypeError("serial_number must be an integer")
_verify_algorithm(algorithm)
utils._check_bytes("issuer_name_hash", issuer_name_hash)
utils._check_bytes("issuer_key_hash", issuer_key_hash)
if algorithm.digest_size != len(
issuer_name_hash
) or algorithm.digest_size != len(issuer_key_hash):
raise ValueError(
"issuer_name_hash and issuer_key_hash must be the same length "
"as the digest size of the algorithm"
)
return OCSPRequestBuilder(
self._request,
(issuer_name_hash, issuer_key_hash, serial_number, algorithm),
self._extensions,
)
def add_extension(
self, extval: x509.ExtensionType, critical: bool
) -> OCSPRequestBuilder:
if not isinstance(extval, x509.ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = x509.Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return OCSPRequestBuilder(
self._request, self._request_hash, [*self._extensions, extension]
)
def build(self) -> OCSPRequest:
if self._request is None and self._request_hash is None:
raise ValueError("You must add a certificate before building")
return ocsp.create_ocsp_request(self)
|
OCSPRequestBuilder
|
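Typical builder usage via cryptography's public OCSP API, assuming PEM-encoded certificate files are available; the file names are placeholders.

from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509 import ocsp

with open("cert.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())
with open("issuer.pem", "rb") as f:
    issuer = x509.load_pem_x509_certificate(f.read())

builder = ocsp.OCSPRequestBuilder()
builder = builder.add_certificate(cert, issuer, hashes.SHA256())  # each call returns a new builder
request = builder.build()
print(len(request.public_bytes(Encoding.DER)), "bytes of DER-encoded OCSP request")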
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/vertex_ai/experiment_service.py
|
{
"start": 3870,
"end": 9012
}
|
class ____(GoogleBaseHook):
"""Use the Vertex AI SDK for Python to create and manage your experiment runs."""
@GoogleBaseHook.fallback_to_default_project_id
def create_experiment_run(
self,
experiment_run_name: str,
experiment_name: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
experiment_run_tensorboard: str | None = None,
run_after_creation: bool = False,
) -> None:
"""
Create experiment run for the experiment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param experiment_run_name: Required. The specific run name or ID for this experiment.
:param experiment_run_tensorboard: Optional. A backing TensorBoard resource to enable and store time
series metrics logged to this experiment run.
:param run_after_creation: Optional. Controls the state of the experiment run after creation.
If True, the experiment run will be created in the RUNNING state.
"""
experiment_run_state = (
gca_execution.Execution.State.NEW
if not run_after_creation
else gca_execution.Execution.State.RUNNING
)
experiment_run = aiplatform.ExperimentRun.create(
run_name=experiment_run_name,
experiment=experiment_name,
project=project_id,
location=location,
state=experiment_run_state,
tensorboard=experiment_run_tensorboard,
)
self.log.info(
"Created experiment run with name: %s and status: %s",
experiment_run.name,
experiment_run.state,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_experiment_runs(
self,
experiment_name: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
) -> list[aiplatform.ExperimentRun]:
"""
List experiment runs for the experiment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
"""
experiment_runs = aiplatform.ExperimentRun.list(
experiment=experiment_name,
project=project_id,
location=location,
)
return experiment_runs
@GoogleBaseHook.fallback_to_default_project_id
def update_experiment_run_state(
self,
experiment_run_name: str,
experiment_name: str,
location: str,
new_state: gca_execution.Execution.State,
project_id: str = PROVIDE_PROJECT_ID,
) -> None:
"""
Update state of the experiment run.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param experiment_run_name: Required. The specific run name or ID for this experiment.
:param new_state: Required. New state of the experiment run.
"""
experiment_run = aiplatform.ExperimentRun(
run_name=experiment_run_name,
experiment=experiment_name,
project=project_id,
location=location,
)
self.log.info("State of the %s before update is: %s", experiment_run.name, experiment_run.state)
experiment_run.update_state(new_state)
@GoogleBaseHook.fallback_to_default_project_id
def delete_experiment_run(
self,
experiment_run_name: str,
experiment_name: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
delete_backing_tensorboard_run: bool = False,
) -> None:
"""
Delete experiment run from the experiment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param experiment_run_name: Required. The specific run name or ID for this experiment.
:param delete_backing_tensorboard_run: Whether to delete the backing Vertex AI TensorBoard run
that stores time series metrics for this run.
"""
self.log.info("Next experiment run will be deleted: %s", experiment_run_name)
experiment_run = aiplatform.ExperimentRun(
run_name=experiment_run_name, experiment=experiment_name, project=project_id, location=location
)
experiment_run.delete(delete_backing_tensorboard_run=delete_backing_tensorboard_run)
|
ExperimentRunHook
|
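Hedged sketch of calling the hook above directly (for example from a PythonOperator); the connection id, project, region, and names are placeholders, and the `gcp_conn_id` constructor argument is assumed from GoogleBaseHook.

from airflow.providers.google.cloud.hooks.vertex_ai.experiment_service import ExperimentRunHook

hook = ExperimentRunHook(gcp_conn_id="google_cloud_default")
hook.create_experiment_run(
    experiment_run_name="run-001",
    experiment_name="my-experiment",
    location="us-central1",
    project_id="my-gcp-project",
    run_after_creation=True,
)
for run in hook.list_experiment_runs(
    experiment_name="my-experiment",
    location="us-central1",
    project_id="my-gcp-project",
):
    print(run.name, run.state)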
python
|
oauthlib__oauthlib
|
oauthlib/oauth2/rfc8628/endpoints/device_authorization.py
|
{
"start": 448,
"end": 9721
}
|
class ____(BaseEndpoint):
"""DeviceAuthorization endpoint - used by the client to initiate
the authorization flow by requesting a set of verification codes
from the authorization server by making an HTTP "POST" request to
the device authorization endpoint.
The client authentication requirements of Section 3.2.1 of [RFC6749]
apply to requests on this endpoint, which means that confidential
clients (those that have established client credentials) authenticate
in the same manner as when making requests to the token endpoint, and
public clients provide the "client_id" parameter to identify
themselves.
"""
def __init__(
self,
request_validator,
verification_uri,
expires_in=1800,
interval=None,
verification_uri_complete=None,
user_code_generator: Callable[[None], str] = None,
):
"""
:param request_validator: An instance of RequestValidator.
:type request_validator: oauthlib.oauth2.rfc6749.RequestValidator.
:param verification_uri: a string containing the end-user verification URI on the authorization server
:param expires_in: a number that represents the lifetime in seconds of the `user_code` and `device_code`
:param interval: an optional number that represents the minimum number of seconds between polling requests
:param verification_uri_complete: a string template or a callable, invoked with the `user_code`, that yields a verification URI including the user code
:param user_code_generator: a callable that returns a configurable user code
"""
self.request_validator = request_validator
self._expires_in = expires_in
self._interval = interval
self._verification_uri = verification_uri
self._verification_uri_complete = verification_uri_complete
self.user_code_generator = user_code_generator
BaseEndpoint.__init__(self)
@property
def interval(self):
"""The minimum amount of time in seconds that the client
SHOULD wait between polling requests to the token endpoint. If no
value is provided, clients MUST use 5 as the default.
"""
return self._interval
@property
def expires_in(self):
"""The lifetime in seconds of the "device_code" and "user_code"."""
return self._expires_in
@property
def verification_uri(self):
"""The end-user verification URI on the authorization
server. The URI should be short and easy to remember as end users
will be asked to manually type it into their user agent.
"""
return self._verification_uri
def verification_uri_complete(self, user_code):
if not self._verification_uri_complete:
return None
if isinstance(self._verification_uri_complete, str):
return self._verification_uri_complete.format(user_code=user_code)
if callable(self._verification_uri_complete):
return self._verification_uri_complete(user_code)
return None
@catch_errors_and_unavailability
def validate_device_authorization_request(self, request):
"""Validate the device authorization request.
The client_id is required if the client is not authenticating with the
authorization server as described in `Section 3.2.1. of [RFC6749]`_.
The client identifier as described in `Section 2.2 of [RFC6749]`_.
.. _`Section 3.2.1. of [RFC6749]`: https://www.rfc-editor.org/rfc/rfc6749#section-3.2.1
.. _`Section 2.2 of [RFC6749]`: https://www.rfc-editor.org/rfc/rfc6749#section-2.2
"""
# First check duplicate parameters
for param in ("client_id", "scope"):
try:
duplicate_params = request.duplicate_params
except ValueError:
raise errors.InvalidRequestFatalError(
description="Unable to parse query string", request=request
)
if param in duplicate_params:
raise errors.InvalidRequestFatalError(
description="Duplicate %s parameter." % param, request=request
)
# the "application/x-www-form-urlencoded" format, per Appendix B of [RFC6749]
# https://www.rfc-editor.org/rfc/rfc6749#appendix-B
if request.headers["Content-Type"] != "application/x-www-form-urlencoded":
raise errors.InvalidRequestError(
"Content-Type must be application/x-www-form-urlencoded",
request=request,
)
# REQUIRED. The client identifier as described in Section 2.2.
# https://tools.ietf.org/html/rfc6749#section-2.2
# TODO: extract client_id into a helper validation function.
if not request.client_id:
raise errors.MissingClientIdError(request=request)
if not self.request_validator.validate_client_id(request.client_id, request):
raise errors.InvalidClientIdError(request=request)
# The client authentication requirements of Section 3.2.1 of [RFC6749]
# apply to requests on this endpoint, which means that confidential
# clients (those that have established client credentials) authenticate
# in the same manner as when making requests to the token endpoint, and
# public clients provide the "client_id" parameter to identify
# themselves.
self._raise_on_invalid_client(request)
@catch_errors_and_unavailability
def create_device_authorization_response(
self, uri, http_method="POST", body=None, headers=None
):
"""
Generate a unique device verification code and an end-user code that are valid for a limited time.
Include them in the HTTP response body using the "application/json" format [RFC8259] with a
200 (OK) status code, as described in `Section-3.2`_.
:param uri: The full URI of the token request.
:type uri: str
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param user_code_generator:
A callable that returns a string for the user code.
This allows the caller to decide how the `user_code` should be formatted.
:type user_code_generator: Callable[[], str]
:return: A tuple of three elements:
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
:rtype: tuple
The response contains the following parameters:
device_code
**REQUIRED.** The device verification code.
user_code
**REQUIRED.** The end-user verification code.
verification_uri
**REQUIRED.** The end-user verification URI on the authorization server.
The URI should be short and easy to remember as end users will be asked
to manually type it into their user agent.
verification_uri_complete
**OPTIONAL.** A verification URI that includes the `user_code` (or
other information with the same function as the `user_code`), which is
designed for non-textual transmission.
expires_in
**REQUIRED.** The lifetime in seconds of the `device_code` and `user_code`.
interval
**OPTIONAL.** The minimum amount of time in seconds that the client
SHOULD wait between polling requests to the token endpoint. If no
value is provided, clients MUST use 5 as the default.
**For example:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Cache-Control: no-store
{
"device_code": "GmRhmhcxhwAzkoEqiMEg_DnyEysNkuNhszIySk9eS",
"user_code": "WDJB-MJHT",
"verification_uri": "https://example.com/device",
"verification_uri_complete":
"https://example.com/device?user_code=WDJB-MJHT",
"expires_in": 1800,
"interval": 5
}
.. _`Section-3.2`: https://www.rfc-editor.org/rfc/rfc8628#section-3.2
"""
request = Request(uri, http_method, body, headers)
self.validate_device_authorization_request(request)
log.debug("Pre resource owner authorization validation ok for %r.", request)
headers = {}
user_code = self.user_code_generator() if self.user_code_generator else generate_token()
data = {
"verification_uri": self.verification_uri,
"expires_in": self.expires_in,
"user_code": user_code,
"device_code": generate_token(),
}
if self.interval is not None:
data["interval"] = self.interval
verification_uri_complete = self.verification_uri_complete(user_code)
if verification_uri_complete:
data["verification_uri_complete"] = verification_uri_complete
return headers, data, 200
|
DeviceAuthorizationEndpoint
|
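Sketch of wiring up the endpoint above; `PublicClientValidator` is a minimal stand-in for a real oauthlib RequestValidator, and the client id and URIs are placeholders.

from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2.rfc8628.endpoints.device_authorization import (
    DeviceAuthorizationEndpoint,
)

class PublicClientValidator(RequestValidator):
    # Minimal stand-in: a single, hard-coded public client.
    def client_authentication_required(self, request, *args, **kwargs):
        return False

    def validate_client_id(self, client_id, request, *args, **kwargs):
        return client_id == "my-device-client"

    def authenticate_client_id(self, client_id, request, *args, **kwargs):
        return client_id == "my-device-client"

endpoint = DeviceAuthorizationEndpoint(
    request_validator=PublicClientValidator(),
    verification_uri="https://example.com/device",
    interval=5,
    verification_uri_complete="https://example.com/device?user_code={user_code}",
)

headers, data, status = endpoint.create_device_authorization_response(
    uri="https://example.com/device_authorization",
    http_method="POST",
    body="client_id=my-device-client",
    headers={"Content-Type": "application/x-www-form-urlencoded"},
)
print(status, data["user_code"], data["verification_uri_complete"])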
python
|
pytest-dev__pytest-cov
|
src/pytest_cov/__init__.py
|
{
"start": 436,
"end": 547
}
|
class ____(PytestCovWarning):
"""
Indicates that we failed to generate a report.
"""
|
CovReportWarning
|
python
|
scipy__scipy
|
scipy/stats/tests/test_distributions.py
|
{
"start": 409098,
"end": 409978
}
|
class ____:
@pytest.mark.parametrize("case", [ # a, b, loc, scale, m1, m2, g1, g2
(-0.01, 1.1, 0.02, 0.0001, 0.02000137427557091,
2.1112742956578063e-08, 0.05989781342460999, 20.36324408592951-3),
(2.554395574161155, 2.2482281679651965, 0, 1, -1.54215386737391,
0.7629882028469993, -1.256656139406788, 6.303058419339775-3)])
def test_moment_gh18071(self, case):
# gh-18071 reported an IntegrationWarning emitted by johnsonsu.stats
# Check that the warning is no longer emitted and that the values
# are accurate compared against results from Mathematica.
# Reference values from Mathematica, e.g.
# Mean[JohnsonDistribution["SU",-0.01, 1.1, 0.02, 0.0001]]
res = stats.johnsonsu.stats(*case[:4], moments='mvsk')
assert_allclose(res, case[4:], rtol=1e-14)
|
TestJohnsonSU
|
python
|
getsentry__sentry
|
src/sentry/snuba/entity_subscription.py
|
{
"start": 13194,
"end": 16696
}
|
class ____(BaseEntitySubscription, ABC):
def __init__(
self,
aggregate: str,
time_window: int,
extra_fields: _EntitySpecificParams | None = None,
):
super().__init__(aggregate, time_window, extra_fields)
self.aggregate = aggregate
if not extra_fields or "org_id" not in extra_fields:
raise InvalidQuerySubscription(
"org_id is a required param when "
"building snuba filter for a metrics subscription"
)
self.org_id = extra_fields["org_id"]
self.time_window = time_window
self.use_metrics_layer = False
self.on_demand_metrics_enabled = features.has(
"organizations:on-demand-metrics-extraction",
Organization.objects.get_from_cache(id=self.org_id),
)
@abstractmethod
def get_snql_aggregations(self) -> list[str]:
raise NotImplementedError
@abstractmethod
def get_snql_extra_conditions(self) -> list[Condition]:
raise NotImplementedError
@abstractmethod
def get_granularity(self) -> int:
pass
def get_entity_extra_params(self) -> Mapping[str, Any]:
return {
"organization": self.org_id,
"granularity": self.get_granularity(),
}
def _get_use_case_id(self) -> UseCaseID:
if self.dataset == Dataset.PerformanceMetrics:
return UseCaseID.TRANSACTIONS
else:
return UseCaseID.SESSIONS
def resolve_tag_key_if_needed(self, string: str) -> str:
if self.use_metrics_layer:
return string
return resolve_tag_key(self._get_use_case_id(), self.org_id, string)
def resolve_tag_values_if_needed(self, strings: Sequence[str]) -> Sequence[str | int]:
if self.use_metrics_layer:
return strings
return resolve_tag_values(self._get_use_case_id(), self.org_id, strings)
def build_query_builder(
self,
query: str,
project_ids: list[int],
environment: Environment | None,
params: ParamsType | None = None,
skip_field_validation_for_entity_subscription_deletion: bool = False,
) -> BaseQueryBuilder:
if params is None:
params = {}
if environment:
params["environment"] = environment.name
query = apply_dataset_query_conditions(self.query_type, query, None)
params["project_id"] = project_ids
params["use_case_id"] = self._get_use_case_id().value
qb = AlertMetricsQueryBuilder(
dataset=Dataset(self.dataset.value),
query=query,
selected_columns=self.get_snql_aggregations(),
params=params,
offset=None,
granularity=self.get_granularity(),
time_range_window=self.time_window,
config=QueryBuilderConfig(
skip_time_conditions=True,
use_metrics_layer=self.use_metrics_layer,
on_demand_metrics_enabled=self.on_demand_metrics_enabled,
on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY,
skip_field_validation_for_entity_subscription_deletion=skip_field_validation_for_entity_subscription_deletion,
insights_metrics_override_metric_layer=True,
),
)
extra_conditions = self.get_snql_extra_conditions()
qb.add_conditions(extra_conditions)
return qb
|
BaseMetricsEntitySubscription
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/_backend_pdf_ps.py
|
{
"start": 3476,
"end": 5968
}
|
class ____(RendererBase):
# The following attributes must be defined by the subclasses:
# - _afm_font_dir
# - _use_afm_rc_name
def __init__(self, width, height):
super().__init__()
self.width = width
self.height = height
def flipy(self):
# docstring inherited
return False # y increases from bottom to top.
def option_scale_image(self):
# docstring inherited
return True # PDF and PS support arbitrary image scaling.
def option_image_nocomposite(self):
# docstring inherited
# Decide whether to composite image based on rcParam value.
return not mpl.rcParams["image.composite_image"]
def get_canvas_width_height(self):
# docstring inherited
return self.width * 72.0, self.height * 72.0
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
if ismath == "TeX":
return super().get_text_width_height_descent(s, prop, ismath)
elif ismath:
parse = self._text2path.mathtext_parser.parse(s, 72, prop)
return parse.width, parse.height, parse.depth
elif mpl.rcParams[self._use_afm_rc_name]:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points() / 1000
w *= scale
h *= scale
d *= scale
return w, h, d
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=ft2font.LoadFlags.NO_HINTING)
w, h = font.get_width_height()
d = font.get_descent()
scale = 1 / 64
w *= scale
h *= scale
d *= scale
return w, h, d
def _get_font_afm(self, prop):
fname = font_manager.findfont(
prop, fontext="afm", directory=self._afm_font_dir)
return _cached_get_afm_from_fname(fname)
def _get_font_ttf(self, prop):
fnames = font_manager.fontManager._find_fonts_by_props(prop)
try:
font = font_manager.get_font(fnames)
font.clear()
font.set_size(prop.get_size_in_points(), 72)
return font
except RuntimeError:
logging.getLogger(__name__).warning(
"The PostScript/PDF backend does not currently "
"support the selected font (%s).", fnames)
raise
|
RendererPDFPSBase
|
python
|
getsentry__sentry
|
src/sentry/seer/models.py
|
{
"start": 2490,
"end": 2703
}
|
class ____(Exception):
def __init__(self, message: str):
self.message = message
def __str__(self):
return f"Seer API response validation error: {self.message}"
|
SeerApiResponseValidationError
|
python
|
conda__conda
|
conda/exceptions.py
|
{
"start": 12687,
"end": 13050
}
|
class ____(CondaError):
def __init__(self, environment_name: str):
message = dals(
"""
Could not find conda environment: %(environment_name)s
You can list all discoverable environments with `conda info --envs`.
"""
)
super().__init__(message, environment_name=environment_name)
|
EnvironmentNameNotFound
|
python
|
apache__airflow
|
providers/cncf/kubernetes/tests/unit/cncf/kubernetes/operators/test_job.py
|
{
"start": 3803,
"end": 36307
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_tests(self):
self._default_client_patch = patch(f"{HOOK_CLASS}._get_default_client")
self._default_client_mock = self._default_client_patch.start()
yield
patch.stopall()
def test_templates(self, create_task_instance_of_operator, session):
dag_id = "TestKubernetesJobOperator"
ti = create_task_instance_of_operator(
KubernetesJobOperator,
dag_id=dag_id,
task_id="task-id",
namespace="{{ dag.dag_id }}",
container_resources=k8s.V1ResourceRequirements(
requests={"memory": "{{ dag.dag_id }}", "cpu": "{{ dag.dag_id }}"},
limits={"memory": "{{ dag.dag_id }}", "cpu": "{{ dag.dag_id }}"},
),
volume_mounts=[
k8s.V1VolumeMount(
name="{{ dag.dag_id }}",
mount_path="mount_path",
sub_path="{{ dag.dag_id }}",
)
],
job_template_file="{{ dag.dag_id }}",
config_file="{{ dag.dag_id }}",
labels="{{ dag.dag_id }}",
env_vars=["{{ dag.dag_id }}"],
arguments="{{ dag.dag_id }}",
cmds="{{ dag.dag_id }}",
image="{{ dag.dag_id }}",
annotations={"dag-id": "{{ dag.dag_id }}"},
session=session,
)
session.add(ti)
session.commit()
rendered = ti.render_templates()
assert dag_id == rendered.container_resources.limits["memory"]
assert dag_id == rendered.container_resources.limits["cpu"]
assert dag_id == rendered.container_resources.requests["memory"]
assert dag_id == rendered.container_resources.requests["cpu"]
assert dag_id == rendered.volume_mounts[0].name
assert dag_id == rendered.volume_mounts[0].sub_path
assert dag_id == ti.task.image
assert dag_id == ti.task.cmds
assert dag_id == ti.task.namespace
assert dag_id == ti.task.config_file
assert dag_id == ti.task.labels
assert dag_id == ti.task.job_template_file
assert dag_id == ti.task.arguments
assert dag_id == ti.task.env_vars[0]
assert dag_id == rendered.annotations["dag-id"]
def sanitize_for_serialization(self, obj):
return ApiClient().sanitize_for_serialization(obj)
def test_backoff_limit_correctly_set(self, clean_dags_dagruns_and_dagbundles):
k = KubernetesJobOperator(
task_id="task",
backoff_limit=6,
)
job = k.build_job_request_obj(create_context(k))
assert job.spec.backoff_limit == 6
def test_completion_mode_correctly_set(self, clean_dags_dagruns_and_dagbundles):
k = KubernetesJobOperator(
task_id="task",
completion_mode="NonIndexed",
)
job = k.build_job_request_obj(create_context(k))
assert job.spec.completion_mode == "NonIndexed"
def test_completions_correctly_set(self, clean_dags_dagruns_and_dagbundles):
k = KubernetesJobOperator(
task_id="task",
completions=1,
)
job = k.build_job_request_obj(create_context(k))
assert job.spec.completions == 1
def test_manual_selector_correctly_set(self, clean_dags_dagruns_and_dagbundles):
k = KubernetesJobOperator(
task_id="task",
manual_selector=False,
)
job = k.build_job_request_obj(create_context(k))
assert job.spec.manual_selector is False
def test_parallelism_correctly_set(self, clean_dags_dagruns_and_dagbundles):
k = KubernetesJobOperator(
task_id="task",
parallelism=2,
)
job = k.build_job_request_obj(create_context(k))
assert job.spec.parallelism == 2
def test_selector(self, clean_dags_dagruns_and_dagbundles):
selector = k8s.V1LabelSelector(
match_expressions=[],
match_labels={"foo": "bar", "hello": "airflow"},
)
k = KubernetesJobOperator(
task_id="task",
selector=selector,
)
job = k.build_job_request_obj(create_context(k))
assert isinstance(job.spec.selector, k8s.V1LabelSelector)
assert job.spec.selector == selector
def test_suspend_correctly_set(self, clean_dags_dagruns_and_dagbundles):
k = KubernetesJobOperator(
task_id="task",
suspend=True,
)
job = k.build_job_request_obj(create_context(k))
assert job.spec.suspend is True
def test_ttl_seconds_after_finished_correctly_set(self, clean_dags_dagruns_and_dagbundles):
k = KubernetesJobOperator(task_id="task", ttl_seconds_after_finished=5)
job = k.build_job_request_obj(create_context(k))
assert job.spec.ttl_seconds_after_finished == 5
@pytest.mark.parametrize("randomize", [True, False])
def test_provided_job_name(self, randomize, clean_dags_dagruns_and_dagbundles):
name_base = "test"
k = KubernetesJobOperator(
name=name_base,
random_name_suffix=randomize,
task_id="task",
)
context = create_context(k)
job = k.build_job_request_obj(context)
if randomize:
assert job.metadata.name.startswith(f"job-{name_base}")
assert job.metadata.name != f"job-{name_base}"
else:
assert job.metadata.name == f"job-{name_base}"
@pytest.fixture
def job_spec(self):
return k8s.V1Job(
metadata=k8s.V1ObjectMeta(name="hello", labels={"foo": "bar"}, namespace="jobspecnamespace"),
spec=k8s.V1JobSpec(
template=k8s.V1PodTemplateSpec(
metadata=k8s.V1ObjectMeta(
name="world", labels={"foo": "bar"}, namespace="podspecnamespace"
),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(
name="base",
image="ubuntu:16.04",
command=["something"],
)
]
),
)
),
)
@pytest.mark.parametrize("randomize_name", (True, False))
def test_full_job_spec(self, randomize_name, job_spec, clean_dags_dagruns_and_dagbundles):
job_spec_name_base = job_spec.metadata.name
k = KubernetesJobOperator(
task_id="task",
random_name_suffix=randomize_name,
full_job_spec=job_spec,
)
context = create_context(k)
job = k.build_job_request_obj(context)
if randomize_name:
assert job.metadata.name.startswith(f"job-{job_spec_name_base}")
assert job.metadata.name != f"job-{job_spec_name_base}"
else:
assert job.metadata.name == f"job-{job_spec_name_base}"
assert job.metadata.namespace == job_spec.metadata.namespace
assert job.spec.template.spec.containers[0].image == job_spec.spec.template.spec.containers[0].image
assert (
job.spec.template.spec.containers[0].command == job_spec.spec.template.spec.containers[0].command
)
assert job.metadata.labels == {"foo": "bar"}
@pytest.mark.parametrize("randomize_name", (True, False))
def test_full_job_spec_kwargs(self, randomize_name, job_spec, clean_dags_dagruns_and_dagbundles):
# kwargs take precedence, however
image = "some.custom.image:andtag"
name_base = "world"
k = KubernetesJobOperator(
task_id="task",
random_name_suffix=randomize_name,
full_job_spec=job_spec,
name=name_base,
image=image,
labels={"hello": "world"},
)
job = k.build_job_request_obj(create_context(k))
# make sure the kwargs takes precedence (and that name is randomized when expected)
if randomize_name:
assert job.metadata.name.startswith(f"job-{name_base}")
assert job.metadata.name != f"job-{name_base}"
else:
assert job.metadata.name == f"job-{name_base}"
assert job.spec.template.spec.containers[0].image == image
assert job.metadata.labels == {
"foo": "bar",
"hello": "world",
}
@pytest.fixture
def job_template_file(self, tmp_path):
job_template_yaml = """
apiVersion: batch/v1
kind: Job
metadata:
name: hello
namespace: templatenamespace
labels:
foo: bar
spec:
ttlSecondsAfterFinished: 60
parallelism: 3
completions: 3
suspend: true
template:
spec:
serviceAccountName: foo
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/role
operator: In
values:
- foo
- bar
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/role
operator: In
values:
- foo
- bar
containers:
- name: base
image: ubuntu:16.04
imagePullPolicy: Always
command:
- something
"""
tpl_file = tmp_path / "template.yaml"
tpl_file.write_text(job_template_yaml)
return tpl_file
@pytest.mark.parametrize("randomize_name", (True, False))
def test_job_template_file(self, randomize_name, job_template_file, clean_dags_dagruns_and_dagbundles):
k = KubernetesJobOperator(
task_id="task",
random_name_suffix=randomize_name,
job_template_file=job_template_file,
)
job = k.build_job_request_obj(create_context(k))
if randomize_name:
assert job.metadata.name.startswith("job-hello")
assert job.metadata.name != "job-hello"
else:
assert job.metadata.name == "job-hello"
assert job.metadata.labels == {"foo": "bar"}
assert job.metadata.namespace == "templatenamespace"
assert job.spec.template.spec.containers[0].image == "ubuntu:16.04"
assert job.spec.template.spec.containers[0].image_pull_policy == "Always"
assert job.spec.template.spec.containers[0].command == ["something"]
assert job.spec.template.spec.service_account_name == "foo"
affinity = {
"node_affinity": {
"preferred_during_scheduling_ignored_during_execution": [
{
"preference": {
"match_expressions": [
{"key": "kubernetes.io/role", "operator": "In", "values": ["foo", "bar"]}
],
"match_fields": None,
},
"weight": 1,
}
],
"required_during_scheduling_ignored_during_execution": {
"node_selector_terms": [
{
"match_expressions": [
{"key": "kubernetes.io/role", "operator": "In", "values": ["foo", "bar"]}
],
"match_fields": None,
}
]
},
},
"pod_affinity": None,
"pod_anti_affinity": None,
}
assert job.spec.template.spec.affinity.to_dict() == affinity
@pytest.mark.parametrize("randomize_name", (True, False))
def test_job_template_file_kwargs_override(
self, randomize_name, job_template_file, clean_dags_dagruns_and_dagbundles
):
# kwargs take precedence, however
image = "some.custom.image:andtag"
name_base = "world"
k = KubernetesJobOperator(
task_id="task",
job_template_file=job_template_file,
name=name_base,
random_name_suffix=randomize_name,
image=image,
labels={"hello": "world"},
)
job = k.build_job_request_obj(create_context(k))
# make sure the kwargs takes precedence (and that name is randomized when expected)
if randomize_name:
assert job.metadata.name.startswith(f"job-{name_base}")
assert job.metadata.name != f"job-{name_base}"
else:
assert job.metadata.name == f"job-{name_base}"
assert job.spec.template.spec.containers[0].image == image
assert job.metadata.labels == {
"foo": "bar",
"hello": "world",
}
def test_task_id_as_name(self):
k = KubernetesJobOperator(
task_id=".hi.-_09HI",
random_name_suffix=False,
)
job = k.build_job_request_obj({})
assert job.metadata.name == "job-hi-09hi"
def test_task_id_as_name_with_suffix(self):
k = KubernetesJobOperator(
task_id=".hi.-_09HI",
random_name_suffix=True,
)
job = k.build_job_request_obj({})
expected = "job-hi-09hi"
assert job.metadata.name[: len(expected)] == expected
assert re.match(rf"{expected}-[a-z0-9]{{8}}", job.metadata.name) is not None
def test_task_id_as_name_with_suffix_very_long(self):
k = KubernetesJobOperator(
task_id="a" * 250,
random_name_suffix=True,
)
job = k.build_job_request_obj({})
assert (
re.match(
r"job-a{50}-[a-z0-9]{8}",
job.metadata.name,
)
is not None
)
def test_task_id_as_name_dag_id_is_ignored(self):
dag = DAG(dag_id="this_is_a_dag_name", schedule=None, start_date=pendulum.now())
k = KubernetesJobOperator(
task_id="a_very_reasonable_task_name",
dag=dag,
)
job = k.build_job_request_obj({})
assert re.match(r"job-a-very-reasonable-task-name-[a-z0-9-]+", job.metadata.name) is not None
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.get_or_create_pod"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.build_job_request_obj"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.create_job"))
@patch(HOOK_CLASS)
def test_execute(self, mock_hook, mock_create_job, mock_build_job_request_obj, mock_get_or_create_pod):
mock_hook.return_value.is_job_failed.return_value = False
mock_job_request_obj = mock_build_job_request_obj.return_value
mock_job_expected = mock_create_job.return_value
mock_ti = mock.MagicMock()
context = dict(ti=mock_ti)
op = KubernetesJobOperator(
task_id="test_task_id",
)
with pytest.warns(AirflowProviderDeprecationWarning):
execute_result = op.execute(context=context)
mock_build_job_request_obj.assert_called_once_with(context)
mock_create_job.assert_called_once_with(job_request_obj=mock_job_request_obj)
mock_ti.xcom_push.assert_has_calls(
[
mock.call(key="job_name", value=mock_job_expected.metadata.name),
mock.call(key="job_namespace", value=mock_job_expected.metadata.namespace),
mock.call(key="job", value=mock_job_expected.to_dict.return_value),
]
)
assert op.job_request_obj == mock_job_request_obj
assert op.job == mock_job_expected
assert not op.wait_until_job_complete
assert execute_result is None
assert not mock_hook.wait_until_job_complete.called
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.get_pods"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.build_job_request_obj"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.create_job"))
@patch(HOOK_CLASS)
def test_execute_with_parallelism(
self, mock_hook, mock_create_job, mock_build_job_request_obj, mock_get_pods
):
mock_hook.return_value.is_job_failed.return_value = False
mock_job_request_obj = mock_build_job_request_obj.return_value
mock_job_expected = mock_create_job.return_value
mock_get_pods.return_value = [mock.MagicMock(), mock.MagicMock()]
mock_pods_expected = mock_get_pods.return_value
mock_ti = mock.MagicMock()
context = dict(ti=mock_ti)
op = KubernetesJobOperator(
task_id="test_task_id",
parallelism=2,
)
execute_result = op.execute(context=context)
mock_build_job_request_obj.assert_called_once_with(context)
mock_create_job.assert_called_once_with(job_request_obj=mock_job_request_obj)
mock_ti.xcom_push.assert_has_calls(
[
mock.call(key="job_name", value=mock_job_expected.metadata.name),
mock.call(key="job_namespace", value=mock_job_expected.metadata.namespace),
mock.call(key="job", value=mock_job_expected.to_dict.return_value),
]
)
assert op.job_request_obj == mock_job_request_obj
assert op.job == mock_job_expected
assert op.pods == mock_pods_expected
with pytest.warns(AirflowProviderDeprecationWarning):
assert op.pod is mock_pods_expected[0]
assert not op.wait_until_job_complete
assert execute_result is None
assert not mock_hook.wait_until_job_complete.called
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.get_or_create_pod"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.build_job_request_obj"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.create_job"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.execute_deferrable"))
@patch(HOOK_CLASS)
def test_execute_in_deferrable(
self,
mock_hook,
mock_execute_deferrable,
mock_create_job,
mock_build_job_request_obj,
mock_get_or_create_pod,
):
mock_hook.return_value.is_job_failed.return_value = False
mock_job_request_obj = mock_build_job_request_obj.return_value
mock_job_expected = mock_create_job.return_value
mock_ti = mock.MagicMock()
context = dict(ti=mock_ti)
op = KubernetesJobOperator(
task_id="test_task_id",
wait_until_job_complete=True,
deferrable=True,
)
with pytest.warns(AirflowProviderDeprecationWarning):
actual_result = op.execute(context=context)
mock_build_job_request_obj.assert_called_once_with(context)
mock_create_job.assert_called_once_with(job_request_obj=mock_job_request_obj)
mock_ti.xcom_push.assert_has_calls(
[
mock.call(key="job_name", value=mock_job_expected.metadata.name),
mock.call(key="job_namespace", value=mock_job_expected.metadata.namespace),
]
)
mock_execute_deferrable.assert_called_once()
assert op.job_request_obj == mock_job_request_obj
assert op.job == mock_job_expected
assert actual_result is None
assert not mock_hook.wait_until_job_complete.called
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.get_or_create_pod"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.build_job_request_obj"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.create_job"))
@patch(HOOK_CLASS)
def test_execute_fail(
self, mock_hook, mock_create_job, mock_build_job_request_obj, mock_get_or_create_pod
):
mock_hook.return_value.is_job_failed.return_value = "Error"
op = KubernetesJobOperator(
task_id="test_task_id",
wait_until_job_complete=True,
)
with pytest.warns(AirflowProviderDeprecationWarning):
with pytest.raises(AirflowException):
op.execute(context=dict(ti=mock.MagicMock()))
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.defer"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobTrigger"))
def test_execute_deferrable(self, mock_trigger, mock_execute_deferrable):
mock_cluster_context = mock.MagicMock()
mock_config_file = mock.MagicMock()
mock_in_cluster = mock.MagicMock()
mock_job = mock.MagicMock()
mock_job.metadata.name = JOB_NAME
mock_job.metadata.namespace = JOB_NAMESPACE
mock_pod = mock.MagicMock()
mock_pod.metadata.name = POD_NAME
mock_pod.metadata.namespace = POD_NAMESPACE
mock_trigger_instance = mock_trigger.return_value
op = KubernetesJobOperator(
task_id="test_task_id",
kubernetes_conn_id=KUBERNETES_CONN_ID,
cluster_context=mock_cluster_context,
config_file=mock_config_file,
in_cluster=mock_in_cluster,
job_poll_interval=POLL_INTERVAL,
wait_until_job_complete=True,
deferrable=True,
)
op.job = mock_job
op.pod = mock_pod
op.pods = [
mock_pod,
]
actual_result = op.execute_deferrable()
mock_execute_deferrable.assert_called_once_with(
trigger=mock_trigger_instance,
method_name="execute_complete",
)
mock_trigger.assert_called_once_with(
job_name=JOB_NAME,
job_namespace=JOB_NAMESPACE,
pod_names=[
POD_NAME,
],
pod_namespace=POD_NAMESPACE,
base_container_name=op.BASE_CONTAINER_NAME,
kubernetes_conn_id=KUBERNETES_CONN_ID,
cluster_context=mock_cluster_context,
config_file=mock_config_file,
in_cluster=mock_in_cluster,
poll_interval=POLL_INTERVAL,
get_logs=True,
do_xcom_push=False,
)
assert actual_result is None
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.defer"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobTrigger"))
def test_execute_deferrable_with_parallelism(self, mock_trigger, mock_execute_deferrable):
mock_cluster_context = mock.MagicMock()
mock_config_file = mock.MagicMock()
mock_in_cluster = mock.MagicMock()
mock_job = mock.MagicMock()
mock_job.metadata.name = JOB_NAME
mock_job.metadata.namespace = JOB_NAMESPACE
pod_name_1 = POD_NAME + "-1"
mock_pod_1 = mock.MagicMock()
mock_pod_1.metadata.name = pod_name_1
mock_pod_1.metadata.namespace = POD_NAMESPACE
pod_name_2 = POD_NAME + "-2"
mock_pod_2 = mock.MagicMock()
mock_pod_2.metadata.name = pod_name_2
mock_pod_2.metadata.namespace = POD_NAMESPACE
mock_trigger_instance = mock_trigger.return_value
op = KubernetesJobOperator(
task_id="test_task_id",
kubernetes_conn_id=KUBERNETES_CONN_ID,
cluster_context=mock_cluster_context,
config_file=mock_config_file,
in_cluster=mock_in_cluster,
job_poll_interval=POLL_INTERVAL,
parallelism=2,
wait_until_job_complete=True,
deferrable=True,
)
op.job = mock_job
op.pods = [mock_pod_1, mock_pod_2]
actual_result = op.execute_deferrable()
mock_execute_deferrable.assert_called_once_with(
trigger=mock_trigger_instance,
method_name="execute_complete",
)
mock_trigger.assert_called_once_with(
job_name=JOB_NAME,
job_namespace=JOB_NAMESPACE,
pod_names=[pod_name_1, pod_name_2],
pod_namespace=POD_NAMESPACE,
base_container_name=op.BASE_CONTAINER_NAME,
kubernetes_conn_id=KUBERNETES_CONN_ID,
cluster_context=mock_cluster_context,
config_file=mock_config_file,
in_cluster=mock_in_cluster,
poll_interval=POLL_INTERVAL,
get_logs=True,
do_xcom_push=False,
)
assert actual_result is None
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.get_or_create_pod"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.build_job_request_obj"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.create_job"))
@patch(f"{HOOK_CLASS}.wait_until_job_complete")
def test_wait_until_job_complete(
self,
mock_wait_until_job_complete,
mock_create_job,
mock_build_job_request_obj,
mock_get_or_create_pod,
):
mock_job_expected = mock_create_job.return_value
mock_ti = mock.MagicMock()
op = KubernetesJobOperator(
task_id="test_task_id", wait_until_job_complete=True, job_poll_interval=POLL_INTERVAL
)
with pytest.warns(AirflowProviderDeprecationWarning):
op.execute(context=dict(ti=mock_ti))
assert op.wait_until_job_complete
assert op.job_poll_interval == POLL_INTERVAL
mock_wait_until_job_complete.assert_called_once_with(
job_name=mock_job_expected.metadata.name,
namespace=mock_job_expected.metadata.namespace,
job_poll_interval=POLL_INTERVAL,
)
@pytest.mark.parametrize("do_xcom_push", [True, False])
@pytest.mark.parametrize("get_logs", [True, False])
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator._write_logs"))
def test_execute_complete(self, mocked_write_logs, get_logs, do_xcom_push):
mock_ti = mock.MagicMock()
context = {"ti": mock_ti}
mock_job = mock.MagicMock()
event = {
"job": mock_job,
"status": "success",
"pod_names": [
POD_NAME,
]
if get_logs
else None,
"pod_namespace": POD_NAMESPACE if get_logs else None,
"xcom_result": [
TEST_XCOM_RESULT,
]
if do_xcom_push
else None,
}
KubernetesJobOperator(
task_id="test_task_id", get_logs=get_logs, do_xcom_push=do_xcom_push
).execute_complete(context=context, event=event)
mock_ti.xcom_push.assert_called_once_with(key="job", value=mock_job)
if get_logs:
mocked_write_logs.assert_called_once()
else:
mocked_write_logs.assert_not_called()
@pytest.mark.non_db_test_override
def test_execute_complete_fail(self):
mock_ti = mock.MagicMock()
context = {"ti": mock_ti}
mock_job = mock.MagicMock()
event = {"job": mock_job, "status": "error", "message": "error message"}
with pytest.raises(AirflowException):
KubernetesJobOperator(task_id="test_task_id").execute_complete(context=context, event=event)
mock_ti.xcom_push.assert_called_once_with(key="job", value=mock_job)
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.job_client"))
def test_on_kill(self, mock_client):
mock_job = mock.MagicMock()
mock_job.metadata.name = JOB_NAME
mock_job.metadata.namespace = JOB_NAMESPACE
op = KubernetesJobOperator(task_id="test_task_id")
op.job = mock_job
op.on_kill()
mock_client.delete_namespaced_job.assert_called_once_with(
name=JOB_NAME,
namespace=JOB_NAMESPACE,
propagation_policy=ON_KILL_PROPAGATION_POLICY,
)
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.job_client"))
def test_on_kill_termination_grace_period(self, mock_client):
mock_job = mock.MagicMock()
mock_job.metadata.name = JOB_NAME
mock_job.metadata.namespace = JOB_NAMESPACE
mock_termination_grace_period = mock.MagicMock()
op = KubernetesJobOperator(
task_id="test_task_id", termination_grace_period=mock_termination_grace_period
)
op.job = mock_job
op.on_kill()
mock_client.delete_namespaced_job.assert_called_once_with(
name=JOB_NAME,
namespace=JOB_NAMESPACE,
propagation_policy=ON_KILL_PROPAGATION_POLICY,
grace_period_seconds=mock_termination_grace_period,
)
@pytest.mark.non_db_test_override
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.client"))
@patch(HOOK_CLASS)
def test_on_kill_none_job(self, mock_hook, mock_client):
mock_serialize = mock_hook.return_value.batch_v1_client.api_client.sanitize_for_serialization
op = KubernetesJobOperator(task_id="test_task_id")
op.on_kill()
mock_client.delete_namespaced_job.assert_not_called()
mock_serialize.assert_not_called()
@pytest.mark.parametrize("parallelism", [None, 2])
@pytest.mark.parametrize("do_xcom_push", [True, False])
@pytest.mark.parametrize("get_logs", [True, False])
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.extract_xcom"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.get_pods"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.get_or_create_pod"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.build_job_request_obj"))
@patch(JOB_OPERATORS_PATH.format("KubernetesJobOperator.create_job"))
@patch(f"{POD_MANAGER_CLASS}.fetch_requested_container_logs")
@patch(f"{POD_MANAGER_CLASS}.await_xcom_sidecar_container_start")
@patch(f"{POD_MANAGER_CLASS}.await_container_completion")
@patch(f"{HOOK_CLASS}.wait_until_job_complete")
def test_execute_xcom_and_logs(
self,
mock_wait_until_job_complete,
mock_await_container_completion,
mock_await_xcom_sidecar_container_start,
mocked_fetch_logs,
mock_create_job,
mock_build_job_request_obj,
mock_get_or_create_pod,
mock_get_pods,
mock_extract_xcom,
get_logs,
do_xcom_push,
parallelism,
):
if parallelism == 2:
mock_pod_1 = mock.MagicMock()
mock_pod_2 = mock.MagicMock()
mock_get_pods.return_value = [mock_pod_1, mock_pod_2]
mock_ti = mock.MagicMock()
op = KubernetesJobOperator(
task_id="test_task_id",
wait_until_job_complete=True,
job_poll_interval=POLL_INTERVAL,
get_logs=get_logs,
do_xcom_push=do_xcom_push,
parallelism=parallelism,
)
if not parallelism:
with pytest.warns(AirflowProviderDeprecationWarning):
op.execute(context=dict(ti=mock_ti))
else:
op.execute(context=dict(ti=mock_ti))
if do_xcom_push and not parallelism:
mock_extract_xcom.assert_called_once()
elif do_xcom_push and parallelism is not None:
assert mock_extract_xcom.call_count == parallelism
else:
mock_extract_xcom.assert_not_called()
if get_logs and not parallelism:
mocked_fetch_logs.assert_called_once()
elif get_logs and parallelism is not None:
assert mocked_fetch_logs.call_count == parallelism
else:
mocked_fetch_logs.assert_not_called()
@pytest.mark.db_test
@pytest.mark.execution_timeout(300)
|
TestKubernetesJobOperator
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/sankey.py
|
{
"start": 514,
"end": 36150
}
|
class ____:
"""
Sankey diagram.
Sankey diagrams are a specific type of flow diagram, in which
the width of the arrows is shown proportionally to the flow
quantity. They are typically used to visualize energy or
material or cost transfers between processes.
`Wikipedia (6/1/2011) <https://en.wikipedia.org/wiki/Sankey_diagram>`_
"""
def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
margin=0.4, tolerance=1e-6, **kwargs):
"""
Create a new Sankey instance.
The optional arguments listed below are applied to all subdiagrams so
that there is consistent alignment and formatting.
In order to draw a complex Sankey diagram, create an instance of
`Sankey` by calling it without any kwargs::
sankey = Sankey()
Then add simple Sankey sub-diagrams::
sankey.add() # 1
sankey.add() # 2
#...
sankey.add() # n
Finally, create the full diagram::
sankey.finish()
Or, instead, simply daisy-chain those calls::
Sankey().add().add... .add().finish()
Other Parameters
----------------
ax : `~matplotlib.axes.Axes`
Axes onto which the data should be plotted. If *ax* isn't
provided, new Axes will be created.
scale : float
Scaling factor for the flows. *scale* sizes the width of the paths
in order to maintain proper layout. The same scale is applied to
all subdiagrams. The value should be chosen such that the product
of the scale and the sum of the inputs is approximately 1.0 (and
the product of the scale and the sum of the outputs is
approximately -1.0).
unit : str
The physical unit associated with the flow quantities. If *unit*
is None, then none of the quantities are labeled.
format : str or callable
A Python number formatting string or callable used to label the
flows with their quantities (i.e., a number times a unit, where the
unit is given). If a format string is given, the label will be
``format % quantity``. If a callable is given, it will be called
with ``quantity`` as an argument.
gap : float
Space between paths that break in/break away to/from the top or
bottom.
radius : float
Inner radius of the vertical paths.
shoulder : float
Size of the shoulders of output arrows.
offset : float
Text offset (from the dip or tip of the arrow).
head_angle : float
Angle, in degrees, of the arrow heads (and negative of the angle of
the tails).
margin : float
Minimum space between Sankey outlines and the edge of the plot
area.
tolerance : float
Acceptable maximum of the magnitude of the sum of flows. The
magnitude of the sum of connected flows cannot be greater than
*tolerance*.
**kwargs
Any additional keyword arguments will be passed to `add`, which
will create the first subdiagram.
See Also
--------
Sankey.add
Sankey.finish
Examples
--------
.. plot:: gallery/specialty_plots/sankey_basics.py
"""
# Check the arguments.
if gap < 0:
raise ValueError(
"'gap' is negative, which is not allowed because it would "
"cause the paths to overlap")
if radius > gap:
raise ValueError(
"'radius' is greater than 'gap', which is not allowed because "
"it would cause the paths to overlap")
if head_angle < 0:
raise ValueError(
"'head_angle' is negative, which is not allowed because it "
"would cause inputs to look like outputs and vice versa")
if tolerance < 0:
raise ValueError(
"'tolerance' is negative, but it must be a magnitude")
# Create Axes if necessary.
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
self.diagrams = []
# Store the inputs.
self.ax = ax
self.unit = unit
self.format = format
self.scale = scale
self.gap = gap
self.radius = radius
self.shoulder = shoulder
self.offset = offset
self.margin = margin
self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
self.tolerance = tolerance
# Initialize the vertices of tight box around the diagram(s).
self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
# If there are any kwargs, create the first subdiagram.
if len(kwargs):
self.add(**kwargs)
def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
"""
Return the codes and vertices for a rotated, scaled, and translated
90 degree arc.
Other Parameters
----------------
quadrant : {0, 1, 2, 3}, default: 0
Uses 0-based indexing (0, 1, 2, or 3).
cw : bool, default: True
If True, the arc vertices are produced clockwise; counter-clockwise
otherwise.
radius : float, default: 1
The radius of the arc.
center : (float, float), default: (0, 0)
(x, y) tuple of the arc's center.
"""
# Note: It would be possible to use matplotlib's transforms to rotate,
# scale, and translate the arc, but since the angles are discrete,
# it's just as easy and maybe more efficient to do it here.
ARC_CODES = [Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4]
# Vertices of a cubic Bezier curve approximating a 90 deg arc
# These can be determined by Path.arc(0, 90).
ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
[1.00000000e+00, 2.65114773e-01],
[8.94571235e-01, 5.19642327e-01],
[7.07106781e-01, 7.07106781e-01],
[5.19642327e-01, 8.94571235e-01],
[2.65114773e-01, 1.00000000e+00],
# Insignificant
# [6.12303177e-17, 1.00000000e+00]])
[0.00000000e+00, 1.00000000e+00]])
if quadrant in (0, 2):
if cw:
vertices = ARC_VERTICES
else:
vertices = ARC_VERTICES[:, ::-1] # Swap x and y.
else: # 1, 3
# Negate x.
if cw:
# Swap x and y.
vertices = np.column_stack((-ARC_VERTICES[:, 1],
ARC_VERTICES[:, 0]))
else:
vertices = np.column_stack((-ARC_VERTICES[:, 0],
ARC_VERTICES[:, 1]))
if quadrant > 1:
radius = -radius # Rotate 180 deg.
return list(zip(ARC_CODES, radius * vertices +
np.tile(center, (ARC_VERTICES.shape[0], 1))))
def _add_input(self, path, angle, flow, length):
"""
Add an input to a path and return its tip and label locations.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
dipdepth = (flow / 2) * self.pitch
if angle == RIGHT:
x -= length
dip = [x + dipdepth, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, dip),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x + self.gap, y + flow])])
label_location = [dip[0] - self.offset, dip[1]]
else: # Vertical
x -= self.gap
if angle == UP:
sign = 1
else:
sign = -1
dip = [x - flow / 2, y - sign * (length - dipdepth)]
if angle == DOWN:
quadrant = 2
else:
quadrant = 1
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x + self.radius,
y - sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y - sign * length]),
(Path.LINETO, dip),
(Path.LINETO, [x - flow, y - sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=flow + self.radius,
center=(x + self.radius,
y - sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [dip[0], dip[1] - sign * self.offset]
return dip, label_location
def _add_output(self, path, angle, flow, length):
"""
Append an output to a path and return its tip and label locations.
.. note:: *flow* is negative for an output.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
tipheight = (self.shoulder - flow / 2) * self.pitch
if angle == RIGHT:
x += length
tip = [x + tipheight, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, [x, y + self.shoulder]),
(Path.LINETO, tip),
(Path.LINETO, [x, y - self.shoulder + flow]),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x - self.gap, y + flow])])
label_location = [tip[0] + self.offset, tip[1]]
else: # Vertical
x += self.gap
if angle == UP:
sign, quadrant = 1, 3
else:
sign, quadrant = -1, 0
tip = [x - flow / 2.0, y + sign * (length + tipheight)]
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x - self.radius,
y + sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y + sign * length]),
(Path.LINETO, [x - self.shoulder,
y + sign * length]),
(Path.LINETO, tip),
(Path.LINETO, [x + self.shoulder - flow,
y + sign * length]),
(Path.LINETO, [x - flow, y + sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=self.radius - flow,
center=(x - self.radius,
y + sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [tip[0], tip[1] + sign * self.offset]
return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
"""
A path is not simply reversible by path[::-1] since the code
specifies an action to take from the **previous** point.
"""
reverse_path = []
next_code = first_action
for code, position in path[::-1]:
reverse_path.append((next_code, position))
next_code = code
return reverse_path
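        # Illustrative worked example (added note, not from the original source):
        # reverting
        #   [(MOVETO, p0), (LINETO, p1), (CURVE4, p2)]
        # with first_action=LINETO yields
        #   [(LINETO, p2), (CURVE4, p1), (LINETO, p0)],
        # i.e. each code is re-attached to the point that precedes it in the
        # original path, which is why a plain path[::-1] is not enough.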
# This might be more efficient, but it fails because 'tuple' object
# doesn't support item assignment:
# path[1] = path[1][-1:0:-1]
# path[1][0] = first_action
# path[2] = path[2][::-1]
# return path
@_docstring.interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
rotation=0, **kwargs):
"""
Add a simple Sankey diagram with flows at the same hierarchical level.
Parameters
----------
patchlabel : str
Label to be placed at the center of the diagram.
Note that *label* (not *patchlabel*) can be passed as keyword
argument to create an entry in the legend.
flows : list of float
Array of flow values. By convention, inputs are positive and
outputs are negative.
Flows are placed along the top of the diagram from the inside out
in order of their index within *flows*. They are placed along the
sides of the diagram from the top down and along the bottom from
the outside in.
If the sum of the inputs and outputs is
nonzero, the discrepancy will appear as a cubic Bézier curve along
the top and bottom edges of the trunk.
orientations : list of {-1, 0, 1}
List of orientations of the flows (or a single orientation to be
used for all flows). Valid values are 0 (inputs from
the left, outputs to the right), 1 (from and to the top) or -1
(from and to the bottom).
labels : list of (str or None)
List of labels for the flows (or a single label to be used for all
flows). Each label may be *None* (no label), or a labeling string.
If an entry is a (possibly empty) string, then the quantity for the
corresponding flow will be shown below the string. However, if
the *unit* of the main diagram is None, then quantities are never
shown, regardless of the value of this argument.
trunklength : float
Length between the bases of the input and output groups (in
data-space units).
pathlengths : list of float
List of lengths of the vertical arrows before break-in or after
break-away. If a single value is given, then it will be applied to
the first (inside) paths on the top and bottom, and the length of
all other arrows will be justified accordingly. The *pathlengths*
are not applied to the horizontal inputs and outputs.
prior : int
Index of the prior diagram to which this diagram should be
connected.
connect : (int, int)
A (prior, this) tuple indexing the flow of the prior diagram and
the flow of this diagram which should be connected. If this is the
first diagram or *prior* is *None*, *connect* will be ignored.
rotation : float
Angle of rotation of the diagram in degrees. The interpretation of
the *orientations* argument will be rotated accordingly (e.g., if
*rotation* == 90, an *orientations* entry of 1 means to/from the
left). *rotation* is ignored if this diagram is connected to an
existing one (using *prior* and *connect*).
Returns
-------
Sankey
The current `.Sankey` instance.
Other Parameters
----------------
**kwargs
Additional keyword arguments set `matplotlib.patches.PathPatch`
properties, listed below. For example, one may want to use
``fill=False`` or ``label="A legend entry"``.
%(Patch:kwdoc)s
See Also
--------
Sankey.finish
"""
# Check and preprocess the arguments.
flows = np.array([1.0, -1.0]) if flows is None else np.array(flows)
n = flows.shape[0] # Number of flows
if rotation is None:
rotation = 0
else:
# In the code below, angles are expressed in deg/90.
rotation /= 90.0
if orientations is None:
orientations = 0
try:
orientations = np.broadcast_to(orientations, n)
except ValueError:
raise ValueError(
f"The shapes of 'flows' {np.shape(flows)} and 'orientations' "
f"{np.shape(orientations)} are incompatible"
) from None
try:
labels = np.broadcast_to(labels, n)
except ValueError:
raise ValueError(
f"The shapes of 'flows' {np.shape(flows)} and 'labels' "
f"{np.shape(labels)} are incompatible"
) from None
if trunklength < 0:
raise ValueError(
"'trunklength' is negative, which is not allowed because it "
"would cause poor layout")
if abs(np.sum(flows)) > self.tolerance:
_log.info("The sum of the flows is nonzero (%f; patchlabel=%r); "
"is the system not at steady state?",
np.sum(flows), patchlabel)
scaled_flows = self.scale * flows
gain = sum(max(flow, 0) for flow in scaled_flows)
loss = sum(min(flow, 0) for flow in scaled_flows)
if prior is not None:
if prior < 0:
raise ValueError("The index of the prior diagram is negative")
if min(connect) < 0:
raise ValueError(
"At least one of the connection indices is negative")
if prior >= len(self.diagrams):
raise ValueError(
f"The index of the prior diagram is {prior}, but there "
f"are only {len(self.diagrams)} other diagrams")
if connect[0] >= len(self.diagrams[prior].flows):
raise ValueError(
"The connection index to the source diagram is {}, but "
"that diagram has only {} flows".format(
connect[0], len(self.diagrams[prior].flows)))
if connect[1] >= n:
raise ValueError(
f"The connection index to this diagram is {connect[1]}, "
f"but this diagram has only {n} flows")
if self.diagrams[prior].angles[connect[0]] is None:
raise ValueError(
f"The connection cannot be made, which may occur if the "
f"magnitude of flow {connect[0]} of diagram {prior} is "
f"less than the specified tolerance")
flow_error = (self.diagrams[prior].flows[connect[0]] +
flows[connect[1]])
if abs(flow_error) >= self.tolerance:
raise ValueError(
f"The scaled sum of the connected flows is {flow_error}, "
f"which is not within the tolerance ({self.tolerance})")
# Determine if the flows are inputs.
are_inputs = [None] * n
for i, flow in enumerate(flows):
if flow >= self.tolerance:
are_inputs[i] = True
elif flow <= -self.tolerance:
are_inputs[i] = False
else:
_log.info(
"The magnitude of flow %d (%f) is below the tolerance "
"(%f).\nIt will not be shown, and it cannot be used in a "
"connection.", i, flow, self.tolerance)
# Determine the angles of the arrows (before rotation).
angles = [None] * n
for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
if orient == 1:
if is_input:
angles[i] = DOWN
elif is_input is False:
# Be specific since is_input can be None.
angles[i] = UP
elif orient == 0:
if is_input is not None:
angles[i] = RIGHT
else:
if orient != -1:
raise ValueError(
f"The value of orientations[{i}] is {orient}, "
f"but it must be -1, 0, or 1")
if is_input:
angles[i] = UP
elif is_input is False:
angles[i] = DOWN
# Justify the lengths of the paths.
if np.iterable(pathlengths):
if len(pathlengths) != n:
raise ValueError(
f"The lengths of 'flows' ({n}) and 'pathlengths' "
f"({len(pathlengths)}) are incompatible")
else: # Make pathlengths into a list.
urlength = pathlengths
ullength = pathlengths
lrlength = pathlengths
lllength = pathlengths
d = dict(RIGHT=pathlengths)
pathlengths = [d.get(angle, 0) for angle in angles]
# Determine the lengths of the top-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
scaled_flows)):
if angle == DOWN and is_input:
pathlengths[i] = ullength
ullength += flow
elif angle == UP and is_input is False:
pathlengths[i] = urlength
urlength -= flow # Flow is negative for outputs.
# Determine the lengths of the bottom-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
angles, are_inputs, scaled_flows)))):
if angle == UP and is_input:
pathlengths[n - i - 1] = lllength
lllength += flow
elif angle == DOWN and is_input is False:
pathlengths[n - i - 1] = lrlength
lrlength -= flow
# Determine the lengths of the left-side arrows
# from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, zip(scaled_flows, pathlengths))))):
if angle == RIGHT:
if is_input:
if has_left_input:
pathlengths[n - i - 1] = 0
else:
has_left_input = True
# Determine the lengths of the right-side arrows
# from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT:
if is_input is False:
if has_right_output:
pathlengths[i] = 0
else:
has_right_output = True
# Begin the subpaths, and smooth the transition if the sum of the flows
# is nonzero.
urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0), # Upper right
gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
gain / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
gain / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap),
-loss / 2.0])]
llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower left
loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
loss / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
loss / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0),
-gain / 2.0])]
lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower right
loss / 2.0])]
ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0, # Upper left
gain / 2.0])]
# Add the subpaths and assign the locations of the tips and labels.
tips = np.zeros((n, 2))
label_locations = np.zeros((n, 2))
# Add the top-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == DOWN and is_input:
tips[i, :], label_locations[i, :] = self._add_input(
ulpath, angle, *spec)
elif angle == UP and is_input is False:
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Add the bottom-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == UP and is_input:
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
elif angle == DOWN and is_input is False:
tip, label_location = self._add_output(lrpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the left-side inputs from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == RIGHT and is_input:
if not has_left_input:
# Make sure the lower path extends
# at least as far as the upper one.
if llpath[-1][1][0] > ulpath[-1][1][0]:
llpath.append((Path.LINETO, [ulpath[-1][1][0],
llpath[-1][1][1]]))
has_left_input = True
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the right-side outputs from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT and is_input is False:
if not has_right_output:
# Make sure the upper path extends
# at least as far as the lower one.
if urpath[-1][1][0] < lrpath[-1][1][0]:
urpath.append((Path.LINETO, [lrpath[-1][1][0],
urpath[-1][1][1]]))
has_right_output = True
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Trim any hanging vertices.
if not has_left_input:
ulpath.pop()
llpath.pop()
if not has_right_output:
lrpath.pop()
urpath.pop()
# Concatenate the subpaths in the correct order (clockwise from top).
path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
[(Path.CLOSEPOLY, urpath[0][1])])
# Create a patch with the Sankey outline.
codes, vertices = zip(*path)
vertices = np.array(vertices)
def _get_angle(a, r):
if a is None:
return None
else:
return a + r
if prior is None:
if rotation != 0: # By default, none of this is needed.
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
tips = rotate(tips)
label_locations = rotate(label_locations)
vertices = rotate(vertices)
text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
else:
rotation = (self.diagrams[prior].angles[connect[0]] -
angles[connect[1]])
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
tips = rotate(tips)
offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
translate = Affine2D().translate(*offset).transform_affine
tips = translate(tips)
label_locations = translate(rotate(label_locations))
vertices = translate(rotate(vertices))
kwds = dict(s=patchlabel, ha='center', va='center')
text = self.ax.text(*offset, **kwds)
if mpl.rcParams['_internal.classic_mode']:
fc = kwargs.pop('fc', kwargs.pop('facecolor', '#bfd1d4'))
lw = kwargs.pop('lw', kwargs.pop('linewidth', 0.5))
else:
fc = kwargs.pop('fc', kwargs.pop('facecolor', None))
lw = kwargs.pop('lw', kwargs.pop('linewidth', None))
if fc is None:
fc = self.ax._get_patches_for_fill.get_next_color()
patch = PathPatch(Path(vertices, codes), fc=fc, lw=lw, **kwargs)
self.ax.add_patch(patch)
# Add the path labels.
texts = []
for number, angle, label, location in zip(flows, angles, labels,
label_locations):
if label is None or angle is None:
label = ''
elif self.unit is not None:
if isinstance(self.format, str):
quantity = self.format % abs(number) + self.unit
elif callable(self.format):
quantity = self.format(number)
else:
raise TypeError(
'format must be callable or a format string')
if label != '':
label += "\n"
label += quantity
texts.append(self.ax.text(x=location[0], y=location[1],
s=label,
ha='center', va='center'))
        # Text objects are placed even if they are empty (as long as the magnitude
# of the corresponding flow is larger than the tolerance) in case the
# user wants to provide labels later.
# Expand the size of the diagram if necessary.
self.extent = (min(np.min(vertices[:, 0]),
np.min(label_locations[:, 0]),
self.extent[0]),
max(np.max(vertices[:, 0]),
np.max(label_locations[:, 0]),
self.extent[1]),
min(np.min(vertices[:, 1]),
np.min(label_locations[:, 1]),
self.extent[2]),
max(np.max(vertices[:, 1]),
np.max(label_locations[:, 1]),
self.extent[3]))
        # Include both vertices _and_ label locations in the extents; there are
        # cases where either could determine the margins (e.g., arrow shoulders).
# Add this diagram as a subdiagram.
self.diagrams.append(
SimpleNamespace(patch=patch, flows=flows, angles=angles, tips=tips,
text=text, texts=texts))
# Allow a daisy-chained call structure (see docstring for the class).
return self
def finish(self):
"""
Adjust the Axes and return a list of information about the Sankey
subdiagram(s).
Returns a list of subdiagrams with the following fields:
======== =============================================================
Field Description
======== =============================================================
*patch* Sankey outline (a `~matplotlib.patches.PathPatch`).
*flows* Flow values (positive for input, negative for output).
*angles* List of angles of the arrows [deg/90].
For example, if the diagram has not been rotated,
an input to the top side has an angle of 3 (DOWN),
and an output from the top side has an angle of 1 (UP).
If a flow has been skipped (because its magnitude is less
than *tolerance*), then its angle will be *None*.
*tips* (N, 2)-array of the (x, y) positions of the tips (or "dips")
of the flow paths.
                 If the magnitude of a flow is less than the *tolerance* of this
`Sankey` instance, the flow is skipped and its tip will be at
the center of the diagram.
*text* `.Text` instance for the diagram label.
*texts* List of `.Text` instances for the flow labels.
======== =============================================================
See Also
--------
Sankey.add
"""
self.ax.axis([self.extent[0] - self.margin,
self.extent[1] + self.margin,
self.extent[2] - self.margin,
self.extent[3] + self.margin])
self.ax.set_aspect('equal', adjustable='datalim')
return self.diagrams
|
Sankey
|
python
|
realpython__materials
|
flask-google-login/user.py
|
{
"start": 58,
"end": 855
}
|
class ____(UserMixin):
def __init__(self, id_, name, email, profile_pic):
self.id = id_
self.name = name
self.email = email
self.profile_pic = profile_pic
@staticmethod
def get(user_id):
db = get_db()
user = db.execute(
"SELECT * FROM user WHERE id = ?", (user_id,)
).fetchone()
if not user:
return None
user = User(
id_=user[0], name=user[1], email=user[2], profile_pic=user[3]
)
return user
@staticmethod
def create(id_, name, email, profile_pic):
db = get_db()
db.execute(
"INSERT INTO user (id, name, email, profile_pic) VALUES (?, ?, ?, ?)",
(id_, name, email, profile_pic),
)
db.commit()
|
User
|
python
|
doocs__leetcode
|
solution/1600-1699/1688.Count of Matches in Tournament/Solution.py
|
{
"start": 0,
"end": 83
}
|
class ____:
def numberOfMatches(self, n: int) -> int:
return n - 1
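        # Reasoning (added note): every match eliminates exactly one team and the
        # tournament stops when a single champion remains, so exactly n - 1 matches
        # are played, e.g. n = 7 -> 6 matches.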
|
Solution
|
python
|
pandas-dev__pandas
|
pandas/io/json/_json.py
|
{
"start": 43337,
"end": 45985
}
|
class ____(Parser):
_default_orient = "columns"
_split_keys = ("columns", "index", "data")
def _parse(self) -> DataFrame:
json = self.json
orient = self.orient
if orient == "split":
decoded = {
str(k): v
for k, v in ujson_loads(json, precise_float=self.precise_float).items()
}
self.check_keys_split(decoded)
orig_names = [
(tuple(col) if isinstance(col, list) else col)
for col in decoded["columns"]
]
decoded["columns"] = dedup_names(
orig_names,
is_potential_multi_index(orig_names, None),
)
return DataFrame(dtype=None, **decoded)
elif orient == "index":
return DataFrame.from_dict(
ujson_loads(json, precise_float=self.precise_float),
dtype=None,
orient="index",
)
elif orient == "table":
return parse_table_schema(json, precise_float=self.precise_float)
else:
# includes orient == "columns"
return DataFrame(
ujson_loads(json, precise_float=self.precise_float), dtype=None
)
def _try_convert_types(self, obj: DataFrame) -> DataFrame:
arrays = []
for col_label, series in obj.items():
result, _ = self._try_convert_data(
col_label,
series,
convert_dates=_should_convert_dates(
self.convert_dates,
keep_default_dates=self.keep_default_dates,
col=col_label,
),
)
arrays.append(result.array)
return DataFrame._from_arrays(
arrays, obj.columns, obj.index, verify_integrity=False
)
def _should_convert_dates(
convert_dates: bool | list[str],
keep_default_dates: bool,
col: Hashable,
) -> bool:
"""
Return bool whether a DataFrame column should be cast to datetime.
"""
if convert_dates is False:
# convert_dates=True means follow keep_default_dates
return False
elif not isinstance(convert_dates, bool) and col in set(convert_dates):
return True
elif not keep_default_dates:
return False
elif not isinstance(col, str):
return False
col_lower = col.lower()
if (
col_lower.endswith(("_at", "_time"))
or col_lower in {"modified", "date", "datetime"}
or col_lower.startswith("timestamp")
):
return True
return False
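# Illustrative behaviour of the name-based heuristic above (example column names
# are hypothetical, not from the original file), assuming keep_default_dates=True:
#   _should_convert_dates(True,  True, "created_at")   -> True   (suffix "_at")
#   _should_convert_dates(True,  True, "timestamp_ms") -> True   (prefix "timestamp")
#   _should_convert_dates(True,  True, "name")         -> False  (no date-like pattern)
#   _should_convert_dates(False, True, "created_at")   -> False  (conversion disabled)
#   _should_convert_dates(["x"], True, "x")            -> True   (explicitly listed)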
|
FrameParser
|
python
|
kamyu104__LeetCode-Solutions
|
Python/shortest-path-in-a-weighted-tree.py
|
{
"start": 33,
"end": 535
}
|
class ____(object): # 0-indexed.
def __init__(self, n):
self.__bit = [0]*(n+1) # Extra one for dummy node.
def add(self, i, val):
i += 1 # Extra one for dummy node.
while i < len(self.__bit):
self.__bit[i] += val
i += (i & -i)
def query(self, i):
i += 1 # Extra one for dummy node.
ret = 0
while i > 0:
ret += self.__bit[i]
i -= (i & -i)
return ret
# iterative dfs, fenwick tree
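# Minimal usage sketch of the Fenwick tree above (illustrative, not part of the
# original solution): point updates via add() and prefix-sum queries via query().
#   bit = BIT(5)
#   bit.add(1, 3)    # a[1] += 3
#   bit.add(3, 2)    # a[3] += 2
#   bit.query(3)     # sum of a[0..3] -> 5
#   bit.query(0)     # sum of a[0..0] -> 0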
|
BIT
|
python
|
fluentpython__example-code
|
14-it-generator/sentence_iter.py
|
{
"start": 244,
"end": 525
}
|
class ____:
def __init__(self, text):
self.text = text
self.words = RE_WORD.findall(text)
def __repr__(self):
return 'Sentence(%s)' % reprlib.repr(self.text)
def __iter__(self): # <1>
return SentenceIterator(self.words) # <2>
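# Illustrative usage (assumes RE_WORD and SentenceIterator are defined elsewhere
# in the same module, as in the original example):
#   s = Sentence('"The time has come," the Walrus said.')
#   for word in s:   # __iter__ hands out a fresh SentenceIterator each time
#       print(word)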
|
Sentence
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_cache_implementation.py
|
{
"start": 1206,
"end": 1857
}
|
class ____(GenericCache):
def new_entry(self, key, value):
return 1
def on_access(self, key, value, score):
return score + 1
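    # Added note: new entries start with a score of 1 and every access adds 1, so
    # (assuming the underlying GenericCache evicts the lowest-scoring entry) the
    # least frequently used keys are the first to be dropped.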
@st.composite
def write_pattern(draw, min_distinct_keys=0):
keys = draw(
st.lists(st.integers(0, 1000), unique=True, min_size=max(min_distinct_keys, 1))
)
values = draw(st.lists(st.integers(), unique=True, min_size=1))
s = st.lists(
st.tuples(st.sampled_from(keys), st.sampled_from(values)),
min_size=min_distinct_keys,
)
if min_distinct_keys > 0:
s = s.filter(lambda ls: len({k for k, _ in ls}) >= min_distinct_keys)
return draw(s)
|
LFUCache
|
python
|
Textualize__textual
|
tests/test_on.py
|
{
"start": 9002,
"end": 10866
}
|
class ____(Widget):
class Parent(Message):
pass
class JustSomeRandomMixin:
pass
class Child(JustSomeRandomMixin, Parent):
pass
def post_parent(self) -> None:
self.post_message(self.Parent())
def post_child(self) -> None:
self.post_message(self.Child())
async def test_fire_on_inherited_message_plus_mixins() -> None:
"""Handlers should fire when descendant messages are posted, without mixins messing things up."""
posted: list[str] = []
class InheritTestApp(App[None]):
def compose(self) -> ComposeResult:
yield MixinMessageSender()
@on(MixinMessageSender.Parent)
def catch_parent(self) -> None:
posted.append("parent")
@on(MixinMessageSender.Child)
def catch_child(self) -> None:
posted.append("child")
def on_mount(self) -> None:
self.query_one(MixinMessageSender).post_parent()
self.query_one(MixinMessageSender).post_child()
async with InheritTestApp().run_test():
pass
assert posted == ["parent", "child", "parent"]
async def test_on_with_enter_and_leave_events():
class EnterLeaveApp(App):
messages = []
def compose(self) -> ComposeResult:
yield Button("OK")
@on(Enter, "Button")
@on(Leave, "Button")
def record(self, event: Enter | Leave) -> None:
self.messages.append(event.__class__.__name__)
app = EnterLeaveApp()
async with app.run_test() as pilot:
expected_messages = []
await pilot.hover(Button)
expected_messages.append("Enter")
assert app.messages == expected_messages
await pilot.hover(Button, offset=(0, 20))
expected_messages.append("Leave")
assert app.messages == expected_messages
|
MixinMessageSender
|
python
|
pytorch__pytorch
|
test/distributed/_composable/fsdp/test_fully_shard_training.py
|
{
"start": 32732,
"end": 34621
}
|
class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(8, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_train_parity_shard_placement_fn_shard_largest_dim(self):
torch.manual_seed(42)
model_args = ModelArgs(n_layers=3, dropout_p=0.0)
model = Transformer(model_args)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
def shard_placement_fn(param: nn.Parameter) -> Optional[Shard]:
return Shard(param.shape.index(max(param.shape)))
for layer in model.layers:
fully_shard(layer, shard_placement_fn=shard_placement_fn)
fully_shard(model, shard_placement_fn=shard_placement_fn)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
for param, ref_param in zip(model.parameters(), ref_model.parameters()):
full_param = param.full_tensor()
self.assertEqual(full_param, ref_param)
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 16), device=device_type.type)
for _ in range(5):
ref_loss = ref_model(inp).sum()
loss = model(inp).sum()
self.assertEqual(ref_loss, loss)
ref_loss.backward()
loss.backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
ref_optim.step()
optim.step()
ref_optim.zero_grad()
optim.zero_grad()
for param, ref_param in zip(model.parameters(), ref_model.parameters()):
full_param = param.full_tensor()
self.assertEqual(full_param, ref_param)
|
TestFullyShardShardPlacementFnMultiProcess
|
python
|
PyCQA__pycodestyle
|
testing/data/python313.py
|
{
"start": 146,
"end": 271
}
|
class ____[T, U: str = str]:
pass
def f[T: (int, str) = str](t: T) -> T:
pass
def f2[T = str](t: T) -> T:
pass
|
C3
|
python
|
PyCQA__pylint
|
pylint/testutils/lint_module_test.py
|
{
"start": 1235,
"end": 13268
}
|
class ____:
maxDiff = None
def __init__(
self, test_file: FunctionalTestFile, config: Config | None = None
) -> None:
_test_reporter = FunctionalTestReporter()
self._linter = PyLinter()
self._linter.config.persistent = 0
checkers.initialize(self._linter)
# See if test has its own .rc file, if so we use that one
rc_file: Path | str = PYLINTRC
try:
rc_file = test_file.option_file
self._linter.disable("suppressed-message")
self._linter.disable("locally-disabled")
self._linter.disable("useless-suppression")
except NoFileError:
pass
self._test_file = test_file
try:
args = [test_file.source]
except NoFileError:
# If we're still raising NoFileError the actual source file doesn't exist
args = [""]
if config and config.getoption("minimal_messages_config"):
with self._open_source_file() as f:
messages_to_enable = {msg[1] for msg in self.get_expected_messages(f)}
# Always enable fatal errors
messages_to_enable.add("astroid-error")
messages_to_enable.add("fatal")
messages_to_enable.add("syntax-error")
args.extend(["--disable=all", f"--enable={','.join(messages_to_enable)}"])
# Add testoptions
self._linter._arg_parser.add_argument(
"--min_pyver", type=parse_python_version, default=(2, 5)
)
self._linter._arg_parser.add_argument(
"--max_pyver", type=parse_python_version, default=(4, 0)
)
self._linter._arg_parser.add_argument(
"--min_pyver_end_position", type=parse_python_version, default=(3, 8)
)
self._linter._arg_parser.add_argument(
"--requires", type=lambda s: [i.strip() for i in s.split(",")], default=[]
)
self._linter._arg_parser.add_argument(
"--except_implementations",
type=lambda s: [i.strip() for i in s.split(",")],
default=[],
)
self._linter._arg_parser.add_argument(
"--exclude_platforms",
type=lambda s: [i.strip() for i in s.split(",")],
default=[],
)
self._linter._arg_parser.add_argument(
"--exclude_from_minimal_messages_config", default=False
)
_config_initialization(
self._linter, args_list=args, config_file=rc_file, reporter=_test_reporter
)
self._check_end_position = (
sys.version_info >= self._linter.config.min_pyver_end_position
)
self._config = config
def setUp(self) -> None:
if self._should_be_skipped_due_to_version():
pytest.skip(
f"Test cannot run with Python {sys.version.split(' ', maxsplit=1)[0]}."
)
missing = []
for requirement in self._linter.config.requires:
try:
__import__(requirement)
except ImportError:
missing.append(requirement)
if missing:
pytest.skip(f"Requires {','.join(missing)} to be present.")
except_implementations = self._linter.config.except_implementations
if except_implementations:
if platform.python_implementation() in except_implementations:
msg = "Test cannot run with Python implementation %r"
pytest.skip(msg % platform.python_implementation())
excluded_platforms = self._linter.config.exclude_platforms
if excluded_platforms:
if sys.platform.lower() in excluded_platforms:
pytest.skip(f"Test cannot run on platform {sys.platform!r}")
if (
self._config
and self._config.getoption("minimal_messages_config")
and self._linter.config.exclude_from_minimal_messages_config
):
pytest.skip("Test excluded from --minimal-messages-config")
def runTest(self) -> None:
self._runTest()
def _should_be_skipped_due_to_version(self) -> bool:
return ( # type: ignore[no-any-return]
sys.version_info < self._linter.config.min_pyver
or sys.version_info > self._linter.config.max_pyver
)
def __str__(self) -> str:
return f"{self._test_file.base} ({self.__class__.__module__}.{self.__class__.__name__})"
@staticmethod
def get_expected_messages(stream: TextIO) -> MessageCounter:
"""Parses a file and get expected messages.
:param stream: File-like input stream.
:type stream: enumerable
:returns: A dict mapping line,msg-symbol tuples to the count on this line.
:rtype: dict
"""
messages: MessageCounter = Counter()
for i, line in enumerate(stream):
match = _EXPECTED_RE.search(line)
if match is None:
continue
line = match.group("line")
if line is None:
lineno = i + 1
elif line.startswith(("+", "-")):
lineno = i + 1 + int(line)
else:
lineno = int(line)
version = match.group("version")
op = match.group("op")
if version:
required = parse_python_version(version)
if not _OPERATORS[op](sys.version_info, required):
continue
for msg_id in match.group("msgs").split(","):
messages[lineno, msg_id.strip()] += 1
return messages
@staticmethod
def multiset_difference(
expected_entries: MessageCounter,
actual_entries: MessageCounter,
) -> tuple[MessageCounter, dict[tuple[int, str], int]]:
"""Takes two multisets and compares them.
A multiset is a dict with the cardinality of the key as the value.
"""
missing = expected_entries.copy()
missing.subtract(actual_entries)
unexpected = {}
for key, value in list(missing.items()):
if value <= 0:
missing.pop(key)
if value < 0:
unexpected[key] = -value
return missing, unexpected
def _open_expected_file(self) -> TextIO:
try:
return open(self._test_file.expected_output, encoding="utf-8")
except FileNotFoundError:
return StringIO("")
def _open_source_file(self) -> TextIO:
if self._test_file.base == "invalid_encoded_data":
return open(self._test_file.source, encoding="utf-8")
if "latin1" in self._test_file.base:
return open(self._test_file.source, encoding="latin1")
return open(self._test_file.source, encoding="utf8")
def _get_expected(self) -> tuple[MessageCounter, list[OutputLine]]:
with self._open_source_file() as f:
expected_msgs = self.get_expected_messages(f)
if not expected_msgs:
expected_msgs = Counter()
with self._open_expected_file() as f:
expected_output_lines = [
OutputLine.from_csv(row, self._check_end_position)
for row in csv.reader(f, "test")
]
return expected_msgs, expected_output_lines
def _get_actual(self) -> tuple[MessageCounter, list[OutputLine]]:
messages: list[Message] = self._linter.reporter.messages
messages.sort(key=lambda m: (m.line, m.symbol, m.msg))
received_msgs: MessageCounter = Counter()
received_output_lines = []
for msg in messages:
assert (
msg.symbol != "fatal"
), f"Pylint analysis failed because of '{msg.msg}'"
received_msgs[msg.line, msg.symbol] += 1
received_output_lines.append(
OutputLine.from_msg(msg, self._check_end_position)
)
return received_msgs, received_output_lines
def _runTest(self) -> None:
__tracebackhide__ = True # pylint: disable=unused-variable
modules_to_check = [self._test_file.source]
self._linter.check(modules_to_check)
expected_messages, expected_output = self._get_expected()
actual_messages, actual_output = self._get_actual()
assert (
expected_messages == actual_messages
), self.error_msg_for_unequal_messages(
actual_messages, expected_messages, actual_output
)
self._check_output_text(expected_messages, expected_output, actual_output)
def error_msg_for_unequal_messages(
self,
actual_messages: MessageCounter,
expected_messages: MessageCounter,
actual_output: list[OutputLine],
) -> str:
msg = [f'Wrong message(s) raised for "{Path(self._test_file.source).name}":']
missing, unexpected = self.multiset_difference(
expected_messages, actual_messages
)
if missing:
msg.append("\nExpected in testdata:")
msg.extend( # pragma: no cover
f" {msg[0]:3}: {msg[1]} (times {times})"
for msg, times in sorted(missing.items())
)
if unexpected:
msg.append("\nUnexpected in testdata:")
msg.extend(
f" {msg[0]:3}: {msg[1]} (times {times})"
for msg, times in sorted(unexpected.items())
)
error_msg = "\n".join(msg)
if self._config and self._config.getoption("verbose") > 0:
error_msg += "\n\nActual pylint output for this file:\n"
error_msg += "\n".join(str(o) for o in actual_output)
return error_msg
def error_msg_for_unequal_output(
self,
expected_lines: list[OutputLine],
received_lines: list[OutputLine],
) -> str:
missing = set(expected_lines) - set(received_lines)
unexpected = set(received_lines) - set(expected_lines)
error_msg = f'Wrong output for "{Path(self._test_file.expected_output).name}":'
sort_by_line_number = operator.attrgetter("lineno")
if missing:
error_msg += "\n- Missing lines:\n"
for line in sorted(missing, key=sort_by_line_number):
error_msg += f"{line}\n"
if unexpected:
error_msg += "\n- Unexpected lines:\n"
for line in sorted(unexpected, key=sort_by_line_number):
error_msg += f"{line}\n"
error_msg += (
"\nYou can update the expected output automatically with:\n'"
f"python tests/test_functional.py {UPDATE_OPTION} -k "
f'"test_functional[{self._test_file.base}]"\'\n\n'
"Here's the update text in case you can't:\n"
)
expected_csv = StringIO()
writer = csv.writer(expected_csv, dialect="test")
for line in sorted(received_lines, key=sort_by_line_number):
self.safe_write_output_line(writer, line)
error_msg += expected_csv.getvalue()
return error_msg
def safe_write_output_line(self, writer: _csv._writer, line: OutputLine) -> None:
"""Write an OutputLine to the CSV writer, handling UnicodeEncodeError."""
try:
writer.writerow(line.to_csv())
except UnicodeEncodeError:
writer.writerow(
[
BaseReporter.reencode_output_after_unicode_error(s)
for s in line.to_csv()
]
)
def _check_output_text(
self,
_: MessageCounter,
expected_output: list[OutputLine],
actual_output: list[OutputLine],
) -> None:
"""This is a function because we want to be able to update the text in
LintModuleOutputUpdate.
"""
assert expected_output == actual_output, self.error_msg_for_unequal_output(
expected_output, actual_output
)
|
LintModuleTest
|
python
|
django__django
|
tests/sitemaps_tests/urls/http.py
|
{
"start": 2094,
"end": 2162
}
|
class ____(SimpleSitemap):
lastmod = date(2013, 3, 13)
|
DateSiteMap
|
python
|
crytic__slither
|
slither/slithir/variables/temporary.py
|
{
"start": 161,
"end": 873
}
|
class ____(Variable):
def __init__(self, node: "Node", index: Optional[int] = None) -> None:
super().__init__()
if index is None:
self._index = node.compilation_unit.counter_slithir_temporary
node.compilation_unit.counter_slithir_temporary += 1
else:
self._index = index
self._node = node
@property
def node(self) -> "Node":
return self._node
@property
def index(self):
return self._index
@index.setter
def index(self, idx):
self._index = idx
@property
def name(self) -> str:
return f"TMP_{self.index}"
def __str__(self) -> str:
return self.name
|
TemporaryVariable
|
python
|
sympy__sympy
|
sympy/matrices/sparse.py
|
{
"start": 570,
"end": 14432
}
|
class ____(RepMatrix):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> from sympy import SparseMatrix, ones
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
A SparseMatrix can be instantiated from a ragged list of lists:
>>> SparseMatrix([[1, 2, 3], [1, 2], [1]])
Matrix([
[1, 2, 3],
[1, 2, 0],
[1, 0, 0]])
For safety, one may include the expected size and then an error
will be raised if the indices of any element are out of range or
(for a flat list) if the total number of elements does not match
the expected shape:
>>> SparseMatrix(2, 2, [1, 2])
Traceback (most recent call last):
...
ValueError: List length (2) != rows*columns (4)
Here, an error is not raised because the list is not flat and no
element is out of range:
>>> SparseMatrix(2, 2, [[1, 2]])
Matrix([
[1, 2],
[0, 0]])
But adding another element to the first (and only) row will cause
an error to be raised:
>>> SparseMatrix(2, 2, [[1, 2, 3]])
Traceback (most recent call last):
...
ValueError: The location (0, 2) is out of designated range: (1, 1)
To autosize the matrix, pass None for rows:
>>> SparseMatrix(None, [[1, 2, 3]])
Matrix([[1, 2, 3]])
>>> SparseMatrix(None, {(1, 1): 1, (3, 3): 3})
Matrix([
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 3]])
Values that are themselves a Matrix are automatically expanded:
>>> SparseMatrix(4, 4, {(1, 1): ones(2)})
Matrix([
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]])
A ValueError is raised if the expanding matrix tries to overwrite
a different element already present:
>>> SparseMatrix(3, 3, {(0, 0): ones(2), (1, 1): 2})
Traceback (most recent call last):
...
ValueError: collision at (1, 1)
See Also
========
DenseMatrix
MutableSparseMatrix
ImmutableSparseMatrix
"""
@classmethod
def _handle_creation_inputs(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], MatrixBase):
rows = args[0].rows
cols = args[0].cols
smat = args[0].todok()
return rows, cols, smat
smat = {}
# autosizing
if len(args) == 2 and args[0] is None:
args = [None, None, args[1]]
if len(args) == 3:
r, c, arg3 = args
if r is c is None:
rows = cols = None
elif None in (r, c):
raise ValueError(
'Pass rows=None and no cols for autosizing.')
else:
rows, cols = as_int(args[0]), as_int(args[1])
if isinstance(arg3, Callable):
op = arg3
if rows is None or cols is None:
raise ValueError(
"{} and {} must be integers for this "
"specification.".format(rows, cols))
row_indices = [cls._sympify(i) for i in range(rows)]
col_indices = [cls._sympify(j) for j in range(cols)]
for i in row_indices:
for j in col_indices:
value = cls._sympify(op(i, j))
if value != cls.zero:
smat[i, j] = value
return rows, cols, smat
elif isinstance(arg3, (dict, Dict)):
def update(i, j, v):
# update smat and make sure there are no collisions
if v:
if (i, j) in smat and v != smat[i, j]:
raise ValueError(
"There is a collision at {} for {} and {}."
.format((i, j), v, smat[i, j])
)
smat[i, j] = v
# manual copy, copy.deepcopy() doesn't work
for (r, c), v in arg3.items(): # type: ignore
if isinstance(v, MatrixBase):
for (i, j), vv in v.todok().items():
update(r + i, c + j, vv)
elif isinstance(v, (list, tuple)):
_, _, smat = cls._handle_creation_inputs(v, **kwargs)
for i, j in smat:
update(r + i, c + j, smat[i, j])
else:
v = cls._sympify(v) # type: ignore
update(r, c, cls._sympify(v))
elif is_sequence(arg3):
flat = not any(is_sequence(i) for i in arg3)
if not flat:
_, _, smat = \
cls._handle_creation_inputs(arg3, **kwargs)
else:
flat_list = arg3
if len(flat_list) != rows * cols: # type: ignore
raise ValueError(
"The length of the flat list ({}) does not "
"match the specified size ({} * {})."
.format(len(flat_list), rows, cols)
)
for i in range(rows): # type: ignore
for j in range(cols): # type: ignore
value = flat_list[i*cols + j] # type: ignore
value = cls._sympify(value)
if value != cls.zero:
smat[i, j] = value
if rows is None: # autosizing
keys = smat.keys()
rows = max(r for r, _ in keys) + 1 if keys else 0
cols = max(c for _, c in keys) + 1 if keys else 0
else:
for i, j in smat.keys():
if i and i >= rows or j and j >= cols:
raise ValueError(
"The location {} is out of the designated range"
"[{}, {}]x[{}, {}]"
.format((i, j), 0, rows - 1, 0, cols - 1) # type: ignore
)
return rows, cols, smat
elif len(args) == 1 and isinstance(args[0], (list, tuple)):
# list of values or lists
v = args[0]
c = 0
for i, row in enumerate(v):
if not isinstance(row, (list, tuple)):
row = [row]
for j, vv in enumerate(row):
if vv != cls.zero:
smat[i, j] = cls._sympify(vv)
c = max(c, len(row))
rows = len(v) if c else 0
cols = c
return rows, cols, smat
else:
# handle full matrix forms with _handle_creation_inputs
rows, cols, mat = super()._handle_creation_inputs(*args)
for i in range(rows):
for j in range(cols):
value = mat[cols*i + j]
if value != cls.zero:
smat[i, j] = value
return rows, cols, smat
@property
def _smat(self):
sympy_deprecation_warning(
"""
The private _smat attribute of SparseMatrix is deprecated. Use the
.todok() method instead.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-private-matrix-attributes"
)
return self.todok()
def _eval_inverse(self, **kwargs):
return self.inv(method=kwargs.get('method', 'LDL'),
iszerofunc=kwargs.get('iszerofunc', _iszero),
try_block_diag=kwargs.get('try_block_diag', False))
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
# XXX: This only applies the function to the nonzero elements of the
# matrix so is inconsistent with DenseMatrix.applyfunc e.g.
# zeros(2, 2).applyfunc(lambda x: x + 1)
dok = {}
for k, v in self.todok().items():
fv = f(v)
if fv != 0:
dok[k] = fv
return self.from_dok(self.rows, self.cols, dok)
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from .immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy import SparseMatrix
>>> a=SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
sympy.matrices.sparse.SparseMatrix.row_list
"""
return [tuple(k + (self[k],)) for k in sorted(self.todok().keys(), key=lambda k: list(reversed(k)))]
def nnz(self):
"""Returns the number of non-zero elements in Matrix."""
return len(self.todok())
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
sympy.matrices.sparse.SparseMatrix.col_list
"""
return [tuple(k + (self[k],)) for k in
sorted(self.todok().keys(), key=list)]
def scalar_multiply(self, scalar):
"Scalar element-wise multiplication"
return scalar * self
def solve_least_squares(self, rhs, method='LDL'):
"""Return the least-square fit to the data.
        By default the LDL routine is used (method='LDL'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy import SparseMatrix, Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = SparseMatrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represent coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
return self.inv(method=method).multiply(rhs)
RL = property(row_list, None, None, "Alternate faster representation")
CL = property(col_list, None, None, "Alternate faster representation")
def liupc(self):
return _liupc(self)
def row_structure_symbolic_cholesky(self):
return _row_structure_symbolic_cholesky(self)
def cholesky(self, hermitian=True):
return _cholesky_sparse(self, hermitian=hermitian)
def LDLdecomposition(self, hermitian=True):
return _LDLdecomposition_sparse(self, hermitian=hermitian)
def lower_triangular_solve(self, rhs):
return _lower_triangular_solve_sparse(self, rhs)
def upper_triangular_solve(self, rhs):
return _upper_triangular_solve_sparse(self, rhs)
liupc.__doc__ = _liupc.__doc__
row_structure_symbolic_cholesky.__doc__ = _row_structure_symbolic_cholesky.__doc__
cholesky.__doc__ = _cholesky_sparse.__doc__
LDLdecomposition.__doc__ = _LDLdecomposition_sparse.__doc__
    lower_triangular_solve.__doc__ = _lower_triangular_solve_sparse.__doc__
    upper_triangular_solve.__doc__ = _upper_triangular_solve_sparse.__doc__
|
SparseRepMatrix
|
python
|
django-guardian__django-guardian
|
guardian/testapp/tests/test_utils.py
|
{
"start": 4604,
"end": 5345
}
|
class ____(TestCase):
def test_for_instance(self):
project = Project(name="Foobar")
self.assertEqual(get_user_obj_perms_model(project), ProjectUserObjectPermission)
def test_for_class(self):
self.assertEqual(get_user_obj_perms_model(Project), ProjectUserObjectPermission)
def test_default(self):
self.assertEqual(get_user_obj_perms_model(ContentType), UserObjectPermission)
def test_user_model(self):
# this test assumes that there were no direct obj perms model to User
# model defined (i.e. while testing guardian app in some custom
# project)
self.assertEqual(get_user_obj_perms_model(User), UserObjectPermission)
@skipUnlessTestApp
|
GetUserObjPermsModelTest
|
python
|
huggingface__transformers
|
tests/models/groupvit/test_modeling_groupvit.py
|
{
"start": 12463,
"end": 15733
}
|
class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
rng = random.Random(0)
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
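            # Make each attention mask a contiguous run of 1s followed by 0s.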
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return GroupViTTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = GroupViTTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
|
GroupViTTextModelTester
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 39749,
"end": 40512
}
|
class ____(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
action: Annotated[
Literal["update"], Field(description="The action to be performed on the entities.", title="Action")
]
entities: Annotated[
list[PoolBody], Field(description="A list of entities to be updated.", title="Entities")
]
update_mask: Annotated[
list[str] | None,
Field(
description="A list of field names to update for each entity.Only these fields will be applied from the request body to the database model.Any extra fields provided will be ignored.",
title="Update Mask",
),
] = None
action_on_non_existence: BulkActionNotOnExistence | None = "fail"
|
BulkUpdateActionPoolBody
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/dag_proxy_operator.py
|
{
"start": 2407,
"end": 3771
}
|
class ____(BaseProxyDAGToDagsterOperator):
"""The default task proxying operator - which opens a blank session and expects the dagster URL to be set in the environment.
The dagster url is expected to be set in the environment as DAGSTER_URL.
This operator should not be instantiated directly - it is instantiated by :py:func:`proxying_to_dagster` if no
override operator is provided.
"""
def get_dagster_session(self, context: Context) -> requests.Session:
return requests.Session()
def get_dagster_url(self, context: Context) -> str:
return os.environ["DAGSTER_URL"]
@classmethod
def build_from_dag(cls, dag: DAG) -> "DefaultProxyDAGToDagsterOperator":
return DefaultProxyDAGToDagsterOperator(
task_id=f"DAGSTER_OVERRIDE_DAG_{dag.dag_id}",
dag=dag,
)
def matched_dag_id(asset_node: Mapping[str, Any], dag_id: str) -> bool:
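    # Check whether the asset node's DAG-mapping metadata references the given dag_id.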
json_metadata_entries = {
entry["label"]: entry["jsonString"]
for entry in asset_node["metadataEntries"]
if entry["__typename"] == "JsonMetadataEntry"
}
mapping_entry = json_metadata_entries.get(DAG_MAPPING_METADATA_KEY)
if mapping_entry:
mappings = json.loads(mapping_entry)
return any(mapping["dag_id"] == dag_id for mapping in mappings)
return False
|
DefaultProxyDAGToDagsterOperator
|
python
|
facelessuser__soupsieve
|
tests/test_level2/test_next_sibling.py
|
{
"start": 59,
"end": 1207
}
|
class ____(util.TestCase):
"""Test next sibling combinators."""
MARKUP = """
<div>
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre>
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
def test_direct_sibling(self):
"""Test direct sibling."""
# Spaces
self.assert_selector(
self.MARKUP,
"span + span",
["5", "6"],
flags=util.HTML
)
def test_direct_sibling_no_spaces(self):
"""Test direct sibling with no spaces."""
# No spaces
self.assert_selector(
self.MARKUP,
"span+span",
["5", "6"],
flags=util.HTML
)
def test_complex_direct_siblings(self):
"""Test direct sibling with no spaces."""
# Complex
self.assert_selector(
self.MARKUP,
"span#\\34 + span#\\35",
["5"],
flags=util.HTML
)
|
TestNextSibling
|
python
|
numba__numba
|
numba/cuda/cudadrv/driver.py
|
{
"start": 2761,
"end": 6008
}
|
class ____(CudaDriverError):
def __init__(self, code, msg):
self.code = code
self.msg = msg
super(CudaAPIError, self).__init__(code, msg)
def __str__(self):
return "[%s] %s" % (self.code, self.msg)
def locate_driver_and_loader():
envpath = config.CUDA_DRIVER
if envpath == '0':
# Force fail
_raise_driver_not_found()
# Determine DLL type
if sys.platform == 'win32':
dlloader = ctypes.WinDLL
dldir = ['\\windows\\system32']
dlnames = ['nvcuda.dll']
elif sys.platform == 'darwin':
dlloader = ctypes.CDLL
dldir = ['/usr/local/cuda/lib']
dlnames = ['libcuda.dylib']
else:
# Assume to be *nix like
dlloader = ctypes.CDLL
dldir = ['/usr/lib', '/usr/lib64']
dlnames = ['libcuda.so', 'libcuda.so.1']
if envpath:
try:
envpath = os.path.abspath(envpath)
except ValueError:
raise ValueError("NUMBA_CUDA_DRIVER %s is not a valid path" %
envpath)
if not os.path.isfile(envpath):
raise ValueError("NUMBA_CUDA_DRIVER %s is not a valid file "
"path. Note it must be a filepath of the .so/"
".dll/.dylib or the driver" % envpath)
candidates = [envpath]
else:
# First search for the name in the default library path.
# If that is not found, try the specific path.
candidates = dlnames + [os.path.join(x, y)
for x, y in product(dldir, dlnames)]
return dlloader, candidates
def load_driver(dlloader, candidates):
# Load the driver; Collect driver error information
path_not_exist = []
driver_load_error = []
for path in candidates:
try:
dll = dlloader(path)
except OSError as e:
# Problem opening the DLL
path_not_exist.append(not os.path.isfile(path))
driver_load_error.append(e)
else:
return dll, path
# Problem loading driver
if all(path_not_exist):
_raise_driver_not_found()
else:
errmsg = '\n'.join(str(e) for e in driver_load_error)
_raise_driver_error(errmsg)
def find_driver():
dlloader, candidates = locate_driver_and_loader()
dll, path = load_driver(dlloader, candidates)
return dll
DRIVER_NOT_FOUND_MSG = """
CUDA driver library cannot be found.
If you are sure that a CUDA driver is installed,
try setting environment variable NUMBA_CUDA_DRIVER
with the file path of the CUDA driver shared library.
"""
DRIVER_LOAD_ERROR_MSG = """
Possible CUDA driver libraries are found but error occurred during load:
%s
"""
def _raise_driver_not_found():
raise CudaSupportError(DRIVER_NOT_FOUND_MSG)
def _raise_driver_error(e):
raise CudaSupportError(DRIVER_LOAD_ERROR_MSG % e)
def _build_reverse_error_map():
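    # Map CUDA error codes to their CUDA_ERROR_* names for readable error messages.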
prefix = 'CUDA_ERROR'
map = utils.UniqueDict()
for name in dir(enums):
if name.startswith(prefix):
code = getattr(enums, name)
map[code] = name
return map
def _getpid():
return os.getpid()
ERROR_MAP = _build_reverse_error_map()
|
CudaAPIError
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/nodes.py
|
{
"start": 18469,
"end": 18857
}
|
class ____(Literal):
"""A constant template string."""
fields = ("data",)
data: str
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> str:
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
|
TemplateData
|
python
|
ray-project__ray
|
python/ray/llm/_internal/serve/core/configs/openai_api_models.py
|
{
"start": 3947,
"end": 4077
}
|
class ____(vLLMTranscriptionStreamResponse):
model_config = ConfigDict(arbitrary_types_allowed=True)
|
TranscriptionStreamResponse
|
python
|
ansible__ansible
|
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py
|
{
"start": 217,
"end": 487
}
|
class ____(ActionBase):
BYPASS_HOST_LOOP = True
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
result['bypass_inventory_hostname'] = task_vars['inventory_hostname']
return result
|
ActionModule
|
python
|
huggingface__transformers
|
src/transformers/models/mixtral/modeling_mixtral.py
|
{
"start": 6511,
"end": 7238
}
|
class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
MixtralRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
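        # Compute the variance in float32 for numerical stability, then rescale and cast back to the input dtype.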
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
MixtralRMSNorm
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/typings/notification_action.py
|
{
"start": 523,
"end": 733
}
|
class ____(Enum):
ALL_MEMBERS = "AllMembers"
ACTIVE_MEMBERS = "ActiveMembers"
NO_ONE = "NoOne"
# Keep existing excluded keys constant
EXCLUDED_ACTION_DATA_KEYS = ["uuid", "id"]
|
FallthroughChoiceType
|
python
|
django__django
|
django/contrib/postgres/indexes.py
|
{
"start": 4030,
"end": 5001
}
|
class ____(PostgresIndex):
suffix = "btree"
def __init__(self, *expressions, fillfactor=None, deduplicate_items=None, **kwargs):
self.fillfactor = fillfactor
self.deduplicate_items = deduplicate_items
super().__init__(*expressions, **kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fillfactor is not None:
kwargs["fillfactor"] = self.fillfactor
if self.deduplicate_items is not None:
kwargs["deduplicate_items"] = self.deduplicate_items
return path, args, kwargs
def get_with_params(self):
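        # Build the PostgreSQL WITH (...) storage parameters for the index.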
with_params = []
if self.fillfactor is not None:
with_params.append("fillfactor = %d" % self.fillfactor)
if self.deduplicate_items is not None:
with_params.append(
"deduplicate_items = %s" % ("on" if self.deduplicate_items else "off")
)
return with_params
|
BTreeIndex
|
python
|
django__django
|
tests/filtered_relation/models.py
|
{
"start": 2679,
"end": 3053
}
|
class ____(models.Model):
rate_date = models.DateField()
from_currency = models.ForeignKey(
Currency,
models.CASCADE,
related_name="rates_from",
)
to_currency = models.ForeignKey(
Currency,
models.CASCADE,
related_name="rates_to",
)
rate = models.DecimalField(max_digits=6, decimal_places=4)
|
ExchangeRate
|
python
|
openai__openai-python
|
src/openai/types/shared_params/compound_filter.py
|
{
"start": 372,
"end": 646
}
|
class ____(TypedDict, total=False):
filters: Required[Iterable[Filter]]
"""Array of filters to combine.
Items can be `ComparisonFilter` or `CompoundFilter`.
"""
type: Required[Literal["and", "or"]]
"""Type of operation: `and` or `or`."""
|
CompoundFilter
|
python
|
pytorch__pytorch
|
test/test_python_dispatch.py
|
{
"start": 99178,
"end": 104319
}
|
class ____(TestCase):
def _test_wrapper_subclass_aliasing(self, op, args, kwargs):
def to_subclass(t: torch.Tensor):
return TwoTensor(t, t.clone())
result_ref = op(*args, **kwargs)
args_subclass = pytree.tree_map_only(torch.Tensor, to_subclass, args)
kwargs_subclass = pytree.tree_map_only(torch.Tensor, to_subclass, kwargs)
result_test = op(*args_subclass, **kwargs_subclass)
args_ref_flat = pytree.arg_tree_leaves(*args, **kwargs)
args_ref_flat_tensors = [
x for x in args_ref_flat if isinstance(x, torch.Tensor)
]
args_test_flat = pytree.tree_leaves((args_subclass, kwargs_subclass))
args_test_flat_tensors = [
x for x in args_test_flat if isinstance(x, torch.Tensor)
]
result_ref_flat = pytree.tree_leaves(result_ref)
result_ref_flat_tensors = [
x for x in result_ref_flat if isinstance(x, torch.Tensor)
]
result_test_flat = pytree.tree_leaves(result_test)
result_test_flat_tensors = [
x for x in result_test_flat if isinstance(x, torch.Tensor)
]
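        # For every (output, input) tensor pair, the subclass run must reproduce the reference aliasing relationship.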
for o_ref, o_test in zip(result_ref_flat_tensors, result_test_flat_tensors):
for a_ref, a_test in zip(args_ref_flat_tensors, args_test_flat_tensors):
out_is_inpt = o_ref is a_ref
if out_is_inpt:
self.assertTrue(o_test is a_test)
out_aliases_inpt = StorageWeakRef(
o_ref.untyped_storage()
) == StorageWeakRef(a_ref.untyped_storage())
if out_aliases_inpt:
self.assertTrue(
StorageWeakRef(o_test.untyped_storage())
== StorageWeakRef(a_test.untyped_storage())
)
else:
self.assertFalse(
StorageWeakRef(o_test.untyped_storage())
== StorageWeakRef(a_test.untyped_storage())
)
# This tests the correctness of `torch.utils._python_dispatch.return_and_correct_aliasing`,
# a util for wrapper subclasses to promise correct aliasing behavior.
# It's probably overkill to test every OpInfo,
# so I picked a sampling of ops with representative schemas.
@ops(
[
op
for op in op_db
if op.name
in [
"mul", # out-of-place
"cat", # out-of-place (TensorList input)
"index", # out-of-place (Optional TensorList input)
"mul_", # inplace
"view", # view
"t_", # inplace-view
"split", # view (multi-return)
"native_batch_norm", # mutable op (returns outputs and mutates some inputs)
]
],
allowed_dtypes=(torch.float,),
)
def test_wrapper_subclass_aliasing(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
sample = first_sample(self, samples)
args = (sample.input, *sample.args)
kwargs = sample.kwargs
self._test_wrapper_subclass_aliasing(op, args, kwargs)
@ops(custom_op_db, allowed_dtypes=(torch.float,))
def test_wrapper_subclass_aliasing_custom(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
sample = first_sample(self, samples)
args = (sample.input, *sample.args)
kwargs = sample.kwargs
self._test_wrapper_subclass_aliasing(op, args, kwargs)
def test_wrapper_subclass_aliasing_conv2d(self, device):
args = (torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4))
kwargs = {}
# conv2d has a default arg 'int[2] strides=0',
# which torchscript expands into 'int[2] strides=[0, 0]'
# Make sure that _return_and_correct_aliasing can handle this case
# (I'm using inference_mode to make sure conv2d doesn't decompose and goes to torch_dispatch)
with torch.inference_mode():
self._test_wrapper_subclass_aliasing(
torch.ops.aten.conv2d.default, args, kwargs
)
def test_wrapper_subclass_aliasing_out_op(self, device):
# Make sure that _return_and_correct_aliasing can handle kwargs w mutable tensors
args = (torch.ones(4), torch.ones(4))
kwargs = {"out": torch.empty(4)}
self._test_wrapper_subclass_aliasing(torch.ops.aten.add.out, args, kwargs)
def test_wrapper_subclass_aliasing_fft_fft2(self, device):
args = (torch.randn(4, 4),)
kwargs = {}
# fft_fft2 has a default arg 'int[1] dim=[-2,-1]',
# Make sure that _return_and_correct_aliasing can handle this case
# (I'm using inference_mode to make sure fft_fft2 doesn't decompose and goes to torch_dispatch)
with torch.inference_mode():
self._test_wrapper_subclass_aliasing(torch.ops.aten.fft_fft2, args, kwargs)
instantiate_device_type_tests(TestWrapperSubclassAliasing, globals())
if __name__ == "__main__":
run_tests()
|
TestWrapperSubclassAliasing
|
python
|
cython__cython
|
tests/run/genexpr_arg_order.py
|
{
"start": 2340,
"end": 4534
}
|
class ____:
@property
def indexer(self):
print("Getting indexer")
return IndexableClass()
@property
def function(self):
print("Getting function")
def func(a, b, c):
print("In func")
return [a, b, c]
return func
def genexp_index_order():
"""
>>> list(genexp_index_order())
Getting indexer
In zero
In five
In one
In indexer
Made generator expression
[0, 5, 1]
"""
obj = NoisyAttributeLookup()
ret = (a for a in obj.indexer[zero():five():one()])
print("Made generator expression")
return ret
@cython.test_assert_path_exists("//InlinedGeneratorExpressionNode")
def list_index_order():
"""
>>> list_index_order()
Getting indexer
In zero
In five
In one
In indexer
[0, 5, 1]
"""
obj = NoisyAttributeLookup()
return list(a for a in obj.indexer[zero():five():one()])
def genexpr_fcall_order():
"""
Note that the order of getting the function and evaluating the
function arguments can end up slightly different in Python and
Cython and so isn't tested.
>>> list(genexpr_fcall_order())
Getting function
In func
Made generator expression
[0, 5, 1]
"""
obj = NoisyAttributeLookup()
ret = (a for a in obj.function(0, 5, 1))
print("Made generator expression")
return ret
@cython.test_assert_path_exists("//InlinedGeneratorExpressionNode")
def list_fcall_order():
"""
Note that the order of getting the function and evaluating the
function arguments can end up slightly different in Python and
Cython and so isn't tested.
>>> list_fcall_order()
Getting function
In func
[0, 5, 1]
"""
obj = NoisyAttributeLookup()
return list(a for a in obj.function(0, 5, 1))
def call1():
print("In call1")
return ["a"]
def call2():
print("In call2")
return ["b"]
def multiple_genexps_to_call_order():
"""
>>> multiple_genexps_to_call_order()
In call1
In call2
"""
def takes_two_genexps(a, b):
pass
return takes_two_genexps((x for x in call1()), (x for x in call2()))
|
NoisyAttributeLookup
|
python
|
ray-project__ray
|
python/ray/serve/_private/deployment_state.py
|
{
"start": 2517,
"end": 2644
}
|
class ____(Enum):
PENDING_ALLOCATION = 1
PENDING_INITIALIZATION = 2
SUCCEEDED = 3
FAILED = 4
|
ReplicaStartupStatus
|
python
|
django__django
|
tests/servers/tests.py
|
{
"start": 878,
"end": 1188
}
|
class ____(LiveServerTestCase):
available_apps = [
"servers",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
]
fixtures = ["testdata.json"]
def urlopen(self, url):
return urlopen(self.live_server_url + url)
|
LiveServerBase
|
python
|
astropy__astropy
|
astropy/cosmology/_src/core.py
|
{
"start": 2658,
"end": 2944
}
|
class ____(Exception):
pass
# TODO: replace with `field(converter=lambda x: None if x is None else str(x))` when
# the `converter` argument is available in `field` (py3.13, maybe?).
# See https://peps.python.org/pep-0712/
@dataclass(frozen=True, slots=True)
|
CosmologyError
|
python
|
pola-rs__polars
|
py-polars/src/polars/interchange/protocol.py
|
{
"start": 6679,
"end": 7076
}
|
class ____(Protocol):
"""Dataframe that supports conversion into an interchange dataframe object."""
def __dataframe__(
self,
nan_as_null: bool = False, # noqa: FBT001
allow_copy: bool = True, # noqa: FBT001
) -> SupportsInterchange:
"""Convert to a dataframe object implementing the dataframe interchange protocol.""" # noqa: W505
|
SupportsInterchange
|
python
|
getsentry__sentry
|
tests/sentry/receivers/test_onboarding.py
|
{
"start": 2655,
"end": 55177
}
|
class ____(TestCase):
@assume_test_silo_mode(SiloMode.CONTROL)
def _create_integration(self, provider: str, external_id: int = 9999):
return self.create_provider_integration(
provider=provider,
name="test",
external_id=external_id,
)
def test_existing_complete_task(self) -> None:
now = timezone.now()
project = self.create_project(first_event=now)
task = OrganizationOnboardingTask.objects.create(
organization=project.organization,
task=OnboardingTask.FIRST_PROJECT,
status=OnboardingTaskStatus.COMPLETE,
)
event = self.store_event(data={}, project_id=project.id)
first_event_received.send(project=project, event=event, sender=None)
task = OrganizationOnboardingTask.objects.get(id=task.id)
assert task.status == OnboardingTaskStatus.COMPLETE
assert not task.project_id
# Tests on the receivers
def test_event_processed(self) -> None:
now = timezone.now()
project = self.create_project(first_event=now)
event = self.store_event(
data={
"event_id": "a" * 32,
"platform": "javascript",
"timestamp": before_now(minutes=1).isoformat(),
"tags": {
"sentry:user": "id:41656",
},
"release": "e1b5d1900526feaf20fe2bc9cad83d392136030a",
"user": {"ip_address": "0.0.0.0", "id": "41656", "email": "test@example.com"},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
},
},
project_id=project.id,
)
event_processed.send(project=project, event=event, sender=None)
task = OrganizationOnboardingTask.objects.get(
organization=project.organization,
task=OnboardingTask.RELEASE_TRACKING,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
task = OrganizationOnboardingTask.objects.get(
organization=project.organization,
task=OnboardingTask.SOURCEMAPS,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
def test_project_created(self) -> None:
now = timezone.now()
project = self.create_project(first_event=now)
project_created.send(project=project, user=self.user, sender=None)
task = OrganizationOnboardingTask.objects.get(
organization=project.organization,
task=OnboardingTask.FIRST_PROJECT,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
second_project = self.create_project(first_event=now)
project_created.send(project=second_project, user=self.user, sender=None)
second_project.delete()
task = OrganizationOnboardingTask.objects.get(
organization=project.organization,
task=OnboardingTask.FIRST_PROJECT,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
def test_project_created__default_workflow(self) -> None:
project = self.create_project(fire_project_created=True)
assert Rule.objects.filter(project=project).exists()
workflow = Workflow.objects.get(organization=project.organization, name=DEFAULT_RULE_LABEL)
assert Detector.objects.filter(project=project, type=ErrorGroupType.slug).count() == 1
assert Detector.objects.filter(project=project, type=IssueStreamGroupType.slug).count() == 1
assert DetectorWorkflow.objects.filter(workflow=workflow).count() == 2
@patch("sentry.analytics.record", wraps=record)
def test_project_created_with_origin(self, record_analytics: MagicMock) -> None:
project = self.create_project()
project_created.send(
project=project, user=self.user, default_rules=False, sender=None, origin="ui"
)
task = OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.FIRST_PROJECT,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
# Verify origin is passed to analytics event
assert_last_analytics_event(
record_analytics,
ProjectCreatedEvent(
user_id=self.user.id,
default_user_id=self.organization.default_owner_id,
organization_id=self.organization.id,
project_id=project.id,
platform=project.platform,
origin="ui",
),
)
@patch("sentry.analytics.record", wraps=record)
def test_first_event_received(self, record_analytics: MagicMock) -> None:
now = timezone.now()
# Create first project and send event
project = self.create_project(first_event=now, platform="javascript")
project_created.send_robust(project=project, user=self.user, sender=None)
event = self.store_event(
data={"platform": "javascript", "message": "javascript error message"},
project_id=project.id,
)
first_event_received.send_robust(project=project, event=event, sender=None)
# Assert first event onboarding task is created and completed
task = OrganizationOnboardingTask.objects.get(
organization=project.organization,
task=OnboardingTask.FIRST_EVENT,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
assert task.project_id == project.id
# Ensure analytics events are called in the right order
assert len(record_analytics.call_args_list) >= 2 # Ensure at least two calls
assert_any_analytics_event(
record_analytics,
FirstEventSentForProjectEvent(
user_id=self.user.id,
organization_id=project.organization_id,
project_id=project.id,
platform=event.platform,
project_platform=project.platform,
url=dict(event.tags).get("url", None),
has_minified_stack_trace=has_event_minified_stack_trace(event),
sdk_name=None,
),
)
assert_any_analytics_event(
record_analytics,
FirstEventSentEvent(
user_id=self.user.id,
organization_id=project.organization_id,
project_id=project.id,
platform=event.platform,
project_platform=project.platform,
),
)
# Create second project and send event
second_project = self.create_project(first_event=now, platform="python")
project_created.send(project=second_project, user=self.user, sender=None)
# Assert second platform onboarding task is completed
second_task = OrganizationOnboardingTask.objects.get(
organization=second_project.organization,
task=OnboardingTask.SECOND_PLATFORM,
status=OnboardingTaskStatus.COMPLETE,
)
assert second_task is not None
# An event is sent for the second project
first_event_received.send_robust(project=second_project, event=event, sender=None)
# Ensure first project's onboarding task remains unchanged
task = OrganizationOnboardingTask.objects.get(
organization=project.organization,
task=OnboardingTask.FIRST_EVENT,
status=OnboardingTaskStatus.COMPLETE,
)
assert task.project_id == project.id
# Ensure "first_event_for_project.sent" was called again for second project
record_analytics.call_args_list[-1].assert_called_with(
FirstEventSentForProjectEvent(
user_id=self.user.id,
organization_id=second_project.organization_id,
project_id=second_project.id,
platform=event.platform,
project_platform=second_project.platform,
url=dict(event.tags).get("url", None),
has_minified_stack_trace=has_event_minified_stack_trace(event),
sdk_name=None,
)
)
# Ensure "first_event.sent" was called exactly once
assert get_event_count(record_analytics, FirstEventSentEvent, exact=True) == 1
def test_first_transaction_received(self) -> None:
project = self.create_project()
event_data = load_data("transaction")
min_ago = before_now(minutes=1).isoformat()
event_data.update({"start_timestamp": min_ago, "timestamp": min_ago})
event = self.store_event(data=event_data, project_id=project.id)
first_event_received.send(project=project, event=event, sender=None)
first_transaction_received.send(project=project, event=event, sender=None)
task = OrganizationOnboardingTask.objects.get(
organization=project.organization,
task=OnboardingTask.FIRST_TRANSACTION,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
def test_member_invited(self) -> None:
user = self.create_user(email="test@example.org")
member = self.create_member(
organization=self.organization, teams=[self.team], email=user.email
)
member_invited.send(member=member, user=user, sender=None)
task = OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.INVITE_MEMBER,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
def test_alert_added(self) -> None:
alert_rule_created.send(
rule_id=Rule(id=1).id,
project=self.project,
user=self.user,
rule_type="issue",
sender=None,
is_api_token=False,
)
task = OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.ALERT_RULE,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
def test_integration_added(self) -> None:
integration_added.send(
integration_id=self._create_integration("slack", 1234).id,
organization_id=self.organization.id,
user_id=self.user.id,
sender=None,
)
task = OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.REAL_TIME_NOTIFICATIONS,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
# Adding a second integration
integration_added.send(
integration_id=self._create_integration("github", 4567).id,
organization_id=self.organization.id,
user_id=self.user.id,
sender=None,
)
task = OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.LINK_SENTRY_TO_SOURCE_CODE,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
@patch("sentry.analytics.record", wraps=record)
def test_first_event_without_minified_stack_trace_received(
self, record_analytics: MagicMock
) -> None:
"""
Test that an analytics event is NOT recorded when
        no event with a minified stack trace is received
"""
now = timezone.now()
project = self.create_project(first_event=now)
project_created.send(project=project, user=self.user, sender=None)
data = load_data("javascript")
self.store_event(
data=data,
project_id=project.id,
)
with pytest.raises(AssertionError):
assert_last_analytics_event(
record_analytics,
FirstEventSentEventWithMinifiedStackTraceForProject(
user_id=self.user.id,
organization_id=project.organization_id,
project_id=project.id,
platform="javascript",
url="http://localhost:3000",
),
)
@patch("sentry.analytics.record", wraps=record)
def test_first_event_with_minified_stack_trace_received(
self, record_analytics: MagicMock
) -> None:
"""
Test that an analytics event is recorded when
        a first event with a minified stack trace is received
"""
now = timezone.now()
project = self.create_project(first_event=now, platform="VueJS")
project_created.send(project=project, user=self.user, sender=None)
url = "http://localhost:3000"
event = load_data("javascript")
event["tags"] = [("url", url)]
event["exception"] = {
"values": [
{
**event["exception"]["values"][0],
"raw_stacktrace": {
"frames": [
{
"function": "o",
"filename": "/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"abs_path": "https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"lineno": 2,
"colno": 37098,
"pre_context": [
"/*! For license information please see vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd. {snip}"
],
"context_line": "{snip} .apply(this,arguments);const i=o.map((e=>c(e,t)));return e.apply(this,i)}catch(e){throw l(),(0,i.$e)((n=>{n.addEventProcessor((e=>(t.mechani {snip}",
"post_context": [
"//# sourceMappingURL=../sourcemaps/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.fe32 {snip}"
],
"in_app": False,
},
],
},
}
]
}
self.store_event(
project_id=project.id,
data=event,
)
assert_last_analytics_event(
record_analytics,
FirstEventSentEventWithMinifiedStackTraceForProject(
user_id=self.user.id,
organization_id=project.organization_id,
project_id=project.id,
platform=event["platform"],
project_platform="VueJS",
url=url,
),
)
@patch("sentry.analytics.record", wraps=record)
def test_analytic_triggered_only_once_if_multiple_events_with_minified_stack_trace_received(
self, record_analytics
):
"""
        Test that an analytics event is triggered only once when
        multiple events with minified stack traces are received
"""
now = timezone.now()
project = self.create_project(first_event=now)
project_created.send(project=project, user=self.user, sender=None)
url = "http://localhost:3000"
event = load_data("javascript")
event["tags"] = [("url", url)]
event["exception"] = {
"values": [
{
**event["exception"]["values"][0],
"raw_stacktrace": {
"frames": [
{
"function": "o",
"filename": "/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"abs_path": "https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"lineno": 2,
"colno": 37098,
"pre_context": [
"/*! For license information please see vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd. {snip}"
],
"context_line": "{snip} .apply(this,arguments);const i=o.map((e=>c(e,t)));return e.apply(this,i)}catch(e){throw l(),(0,i.$e)((n=>{n.addEventProcessor((e=>(t.mechani {snip}",
"post_context": [
"//# sourceMappingURL=../sourcemaps/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.fe32 {snip}"
],
"in_app": False,
},
],
},
}
]
}
# Store first event
self.store_event(
project_id=project.id,
data=event,
)
# Store second event
self.store_event(
project_id=project.id,
data=event,
)
assert (
get_event_count(record_analytics, FirstEventSentEventWithMinifiedStackTraceForProject)
== 1
)
@patch("sentry.analytics.record", wraps=record)
def test_old_project_sending_minified_stack_trace_event(
self, record_analytics: MagicMock
) -> None:
"""
Test that an analytics event is NOT recorded when
the project creation date is older than the date we defined (START_DATE_TRACKING_FIRST_EVENT_WITH_MINIFIED_STACK_TRACE_PER_PROJ).
        This test also checks that the has_minified_stack_trace flag is set to True for old projects.
"""
old_date = datetime(2022, 12, 10, tzinfo=UTC)
project = self.create_project(first_event=old_date, date_added=old_date)
project_created.send(project=project, user=self.user, sender=None)
url = "http://localhost:3000"
event = load_data("javascript")
event["tags"] = [("url", url)]
event["exception"] = {
"values": [
{
**event["exception"]["values"][0],
"raw_stacktrace": {
"frames": [
{
"function": "o",
"filename": "/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"abs_path": "https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
"lineno": 2,
"colno": 37098,
"pre_context": [
"/*! For license information please see vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd. {snip}"
],
"context_line": "{snip} .apply(this,arguments);const i=o.map((e=>c(e,t)));return e.apply(this,i)}catch(e){throw l(),(0,i.$e)((n=>{n.addEventProcessor((e=>(t.mechani {snip}",
"post_context": [
"//# sourceMappingURL=../sourcemaps/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.fe32 {snip}"
],
"in_app": False,
},
],
},
}
]
}
def _project_has_minified_stack_trace(p: Project) -> bool:
return p.flags.has_minified_stack_trace
assert not _project_has_minified_stack_trace(project)
# Store event
self.store_event(
project_id=project.id,
data=event,
)
project.refresh_from_db()
assert _project_has_minified_stack_trace(project)
        # The analytics event "first_event_with_minified_stack_trace_for_project" shall not be sent
assert (
get_event_count(record_analytics, FirstEventSentEventWithMinifiedStackTraceForProject)
== 0
)
@patch("sentry.analytics.record", wraps=record)
def test_first_event_without_sourcemaps_received(self, record_analytics: MagicMock) -> None:
"""
Test that an analytics event is NOT recorded when
no event with sourcemaps is received
"""
now = timezone.now()
project = self.create_project(first_event=now)
project_created.send(project=project, user=self.user, sender=None)
data = load_data("javascript")
data["exception"] = {
"values": [
{
"stacktrace": {"frames": [{"data": {}}]},
"type": "TypeError",
}
]
}
event = self.store_event(
project_id=project.id,
data=data,
)
event_processed.send(project=project, event=event, sender=None)
assert get_event_count(record_analytics, FirstSourcemapsSentEventForProject) == 0
@patch("sentry.analytics.record", wraps=record)
def test_first_event_with_sourcemaps_received(self, record_analytics: MagicMock) -> None:
"""
Test that an analytics event is recorded when
a first event with sourcemaps is received
"""
now = timezone.now()
project = self.create_project(first_event=now, platform="VueJS")
project_created.send(project=project, user=self.user, sender=None)
url = "http://localhost:3000"
data = load_data("javascript")
data["tags"] = [("url", url)]
data["exception"] = {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
}
event = self.store_event(
project_id=project.id,
data=data,
)
event_processed.send(project=project, event=event, sender=None)
assert_last_analytics_event(
record_analytics,
FirstSourcemapsSentEventForProject(
user_id=self.user.id,
organization_id=project.organization_id,
project_id=project.id,
platform=event.platform,
project_platform="VueJS",
url=url,
),
)
@patch("sentry.analytics.record", wraps=record)
def test_analytic_triggered_only_once_if_multiple_events_with_sourcemaps_received(
self, record_analytics
):
"""
        Test that an analytics event is triggered only once when
multiple events with sourcemaps are received
"""
now = timezone.now()
project = self.create_project(first_event=now)
project_created.send(project=project, user=self.user, sender=None)
url = "http://localhost:3000"
data = load_data("javascript")
data["tags"] = [("url", url)]
data["exception"] = {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
}
# Store first event
event_1 = self.store_event(
project_id=project.id,
data=data,
)
event_processed.send(project=project, event=event_1, sender=None)
# Store second event
event_2 = self.store_event(
project_id=project.id,
data=data,
)
event_processed.send(project=project, event=event_2, sender=None)
assert get_event_count(record_analytics, FirstSourcemapsSentEventForProject) == 1
@patch("sentry.analytics.record", wraps=record)
def test_old_project_sending_sourcemap_event(self, record_analytics: MagicMock) -> None:
"""
Test that an analytics event is NOT recorded when
the project creation date is older than the date we defined (START_DATE_TRACKING_FIRST_EVENT_WITH_SOURCEMAPS_PER_PROJ).
        This test also checks that the has_sourcemaps flag is set to True for old projects.
"""
old_date = datetime(2022, 12, 10, tzinfo=UTC)
project = self.create_project(first_event=old_date, date_added=old_date)
project_created.send(project=project, user=self.user, sender=None)
url = "http://localhost:3000"
data = load_data("javascript")
data["tags"] = [("url", url)]
data["exception"] = {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
}
def _project_has_sourcemaps(p: Project) -> bool:
            return p.flags.has_sourcemaps
assert not _project_has_sourcemaps(project)
event = self.store_event(project_id=project.id, data=data)
event_processed.send(project=project, event=event, sender=None)
project.refresh_from_db()
assert _project_has_sourcemaps(project)
        # The analytics event FirstSourcemapsSentEventForProject shall not be sent
assert get_event_count(record_analytics, FirstSourcemapsSentEventForProject) == 0
@patch("sentry.analytics.record", wraps=record)
def test_real_time_notifications_added(self, record_analytics: MagicMock) -> None:
integration_id = self._create_integration("slack", 123).id
integration_added.send(
integration_id=integration_id,
organization_id=self.organization.id,
user_id=self.user.id,
sender=None,
)
task = OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.REAL_TIME_NOTIFICATIONS,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
assert_last_analytics_event(
record_analytics,
IntegrationAddedEvent(
user_id=self.user.id,
default_user_id=self.organization.default_owner_id,
organization_id=self.organization.id,
id=integration_id,
provider="slack",
),
)
@patch("sentry.analytics.record", wraps=record)
def test_source_code_management_added(self, record_analytics: MagicMock) -> None:
integration_id = self._create_integration("github", 123).id
integration_added.send(
integration_id=integration_id,
organization_id=self.organization.id,
user_id=self.user.id,
sender=None,
)
task = OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.LINK_SENTRY_TO_SOURCE_CODE,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
assert_last_analytics_event(
record_analytics,
IntegrationAddedEvent(
user_id=self.user.id,
default_user_id=self.organization.default_owner_id,
organization_id=self.organization.id,
id=integration_id,
provider="github",
),
)
def test_second_platform_complete(self) -> None:
now = timezone.now()
project = self.create_project(first_event=now)
second_project = self.create_project(first_event=now)
project_created.send(project=project, user=self.user, sender=None)
project_created.send(project=second_project, user=self.user, sender=None)
task = OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.SECOND_PLATFORM,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
def test_release_received_through_transaction_event(self) -> None:
project = self.create_project()
event_data = load_data("transaction")
event_data.update({"release": "my-first-release", "tags": []})
event = self.store_event(data=event_data, project_id=project.id)
event_processed.send(project=project, event=event, sender=None)
task = OrganizationOnboardingTask.objects.get(
organization=project.organization,
task=OnboardingTask.RELEASE_TRACKING,
status=OnboardingTaskStatus.COMPLETE,
)
assert task is not None
def test_issue_alert_received_through_project_creation(self) -> None:
now = timezone.now()
first_organization = self.create_organization(owner=self.user, slug="first-org")
first_project = self.create_project(first_event=now, organization=first_organization)
# By default, the project creation will create a default rule
project_created.send(project=first_project, user=self.user, sender=None)
assert OrganizationOnboardingTask.objects.filter(
organization=first_project.organization,
task=OnboardingTask.ALERT_RULE,
status=OnboardingTaskStatus.COMPLETE,
).exists()
second_organization = self.create_organization(owner=self.user, slug="second-org")
second_project = self.create_project(first_event=now, organization=second_organization)
# When creating a project, a user can opt out of creating a default rule
project_created.send(
project=second_project,
user=self.user,
sender=None,
default_rules=False,
)
assert not OrganizationOnboardingTask.objects.filter(
organization=second_project.organization,
task=OnboardingTask.ALERT_RULE,
status=OnboardingTaskStatus.COMPLETE,
).exists()
@patch("sentry.analytics.record", wraps=record)
def test_new_onboarding_complete(self, record_analytics: MagicMock) -> None:
"""
Test the new quick start happy path (without source maps)
"""
# Create first project
project = self.create_project(platform="python")
project_created.send(project=project, user=self.user, default_rules=False, sender=None)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.FIRST_PROJECT,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_last_analytics_event(
record_analytics,
ProjectCreatedEvent(
user_id=self.user.id,
default_user_id=self.organization.default_owner_id,
organization_id=self.organization.id,
project_id=project.id,
platform=project.platform,
origin=None,
),
)
# Set up tracing
transaction_event = load_data("transaction")
transaction_event.update({"user": None})
event = self.store_event(data=transaction_event, project_id=project.id)
transaction_processed.send(project=project, event=event, sender=None)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.FIRST_TRANSACTION,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_last_analytics_event(
record_analytics,
FirstTransactionSentEvent(
default_user_id=self.organization.default_owner_id,
organization_id=self.organization.id,
project_id=project.id,
platform=project.platform,
),
)
# Capture first error
error_event = self.store_event(
data={
"event_id": "c" * 32,
"message": "this is bad.",
"timestamp": timezone.now().isoformat(),
"type": "error",
},
project_id=project.id,
)
event_processed.send(project=project, event=error_event, sender=None)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.FIRST_EVENT,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_last_analytics_event(
record_analytics,
FirstEventSentEvent(
user_id=self.user.id,
organization_id=project.organization_id,
project_id=project.id,
platform=error_event.platform,
project_platform=project.platform,
),
)
# Configure an issue alert
alert_rule_created.send(
rule_id=Rule(id=1).id,
project=project,
user=self.user,
rule_type="issue",
sender=None,
is_api_token=False,
)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.ALERT_RULE,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_last_analytics_event(
record_analytics,
AlertCreatedEvent(
user_id=self.user.id,
default_user_id=self.organization.default_owner_id,
organization_id=self.organization.id,
project_id=project.id,
rule_id=Rule(id=1).id,
rule_type="issue",
is_api_token=False,
referrer=None,
session_id=None,
alert_rule_ui_component=None,
duplicate_rule=None,
wizard_v3=None,
query_type=None,
),
)
# Track releases
transaction_event = load_data("transaction")
transaction_event.update({"release": "my-first-release", "tags": []})
event = self.store_event(data=transaction_event, project_id=project.id)
transaction_processed.send(project=project, event=event, sender=None)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.RELEASE_TRACKING,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_any_analytics_event(
record_analytics,
FirstReleaseTagSentEvent(
user_id=self.user.id,
project_id=project.id,
organization_id=self.organization.id,
),
)
# Link Sentry to source code
github_integration = self._create_integration("github", 1234)
integration_added.send(
integration_id=github_integration.id,
organization_id=self.organization.id,
user_id=self.user.id,
sender=None,
)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.LINK_SENTRY_TO_SOURCE_CODE,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_last_analytics_event(
record_analytics,
IntegrationAddedEvent(
user_id=self.user.id,
default_user_id=self.organization.default_owner_id,
organization_id=self.organization.id,
provider=github_integration.provider,
id=github_integration.id,
),
)
# Invite your team
user = self.create_user(email="test@example.org")
member = self.create_member(
organization=self.organization, teams=[self.team], email=user.email
)
member_invited.send(member=member, user=user, sender=None)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.INVITE_MEMBER,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_last_analytics_event(
record_analytics,
MemberInvitedEvent(
invited_member_id=member.id,
inviter_user_id=user.id,
organization_id=self.organization.id,
referrer=None,
),
)
# Manually update the completionSeen column of existing tasks
OrganizationOnboardingTask.objects.filter(organization=self.organization).update(
completion_seen=timezone.now()
)
onboarding_tasks.try_mark_onboarding_complete(self.organization.id)
        # The first group of tasks is complete, but the "beyond the basics" group is not
assert (
OrganizationOption.objects.filter(
organization=self.organization, key="onboarding:complete"
).count()
== 0
)
# Set up session replay
first_replay_received.send(project=project, sender=None)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.SESSION_REPLAY,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_last_analytics_event(
record_analytics,
FirstReplaySentEvent(
user_id=self.user.id,
organization_id=project.organization_id,
project_id=project.id,
platform=project.platform,
),
        )
        # Get real time notifications
slack_integration = self._create_integration("slack", 4321)
integration_added.send(
integration_id=slack_integration.id,
organization_id=self.organization.id,
user_id=self.user.id,
sender=None,
)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.REAL_TIME_NOTIFICATIONS,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_last_analytics_event(
record_analytics,
IntegrationAddedEvent(
user_id=self.user.id,
default_user_id=self.organization.default_owner_id,
organization_id=self.organization.id,
provider=slack_integration.provider,
id=slack_integration.id,
),
)
        # Add Sentry to other parts of the app
second_project = self.create_project(
first_event=timezone.now(), organization=self.organization
)
project_created.send(
project=second_project,
user=self.user,
sender=None,
default_rules=False,
)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.SECOND_PLATFORM,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
record_analytics.call_args_list[
len(record_analytics.call_args_list) - 2
].assert_called_with(
"second_platform.added",
user_id=self.user.id,
organization_id=self.organization.id,
project_id=second_project.id,
)
# Manually update the completionSeen column of existing tasks
OrganizationOnboardingTask.objects.filter(organization=self.organization).update(
completion_seen=timezone.now()
)
onboarding_tasks.try_mark_onboarding_complete(self.organization.id)
# Onboarding is complete
assert (
OrganizationOption.objects.filter(
organization=self.organization, key="onboarding:complete"
).count()
== 1
)
assert_last_analytics_event(
record_analytics,
OnboardingCompleteEvent(
user_id=self.user.id,
organization_id=self.organization.id,
referrer="onboarding_tasks",
),
)
@patch("sentry.analytics.record", wraps=record)
def test_source_maps_as_required_task(self, record_analytics: MagicMock) -> None:
"""
Test the new quick start happy path (with source maps)
"""
# Create a project that can have source maps + create an issue alert
project = self.create_project(platform="javascript")
project_created.send(project=project, user=self.user, sender=None)
# Capture first transaction + release
transaction_event = load_data("transaction")
transaction_event.update({"release": "my-first-release", "tags": []})
event = self.store_event(data=transaction_event, project_id=project.id)
transaction_processed.send(project=project, event=event, sender=None)
# Capture first error
error_event = self.store_event(
data={
"event_id": "c" * 32,
"message": "this is bad.",
"timestamp": timezone.now().isoformat(),
"type": "error",
"release": "my-first-release",
},
project_id=project.id,
)
event_processed.send(project=project, event=error_event, sender=None)
# Invite your team
user = self.create_user(email="test@example.org")
member = self.create_member(
organization=self.organization, teams=[self.team], email=user.email
)
member_invited.send(member=member, user=user, sender=None)
# Member accepted the invite
member_joined.send(
organization_member_id=member.id,
organization_id=self.organization.id,
user_id=user.id,
sender=None,
)
# Link Sentry to source code
github_integration = self._create_integration("github", 1234)
integration_added.send(
integration_id=github_integration.id,
organization_id=self.organization.id,
user_id=self.user.id,
sender=None,
)
# Set up session replay
first_replay_received.send(project=project, sender=None)
# Get real time notifications
slack_integration = self._create_integration("slack", 4321)
integration_added.send(
integration_id=slack_integration.id,
organization_id=self.organization.id,
user_id=self.user.id,
sender=None,
)
        # Add Sentry to other parts of the app
second_project = self.create_project(
first_event=timezone.now(), organization=self.organization
)
project_created.send(
project=second_project,
user=self.user,
sender=None,
default_rules=False,
)
# Manually update the completionSeen column of existing tasks
OrganizationOnboardingTask.objects.filter(organization=self.organization).update(
completion_seen=timezone.now()
)
onboarding_tasks.try_mark_onboarding_complete(self.organization.id)
# Onboarding is NOT yet complete
assert (
OrganizationOption.objects.filter(
organization=self.organization, key="onboarding:complete"
).count()
== 0
)
# Unminify your code
# Send event with source map
data = load_data("javascript")
data["exception"] = {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
}
event_with_sourcemap = self.store_event(
project_id=project.id,
data=data,
)
event_processed.send(project=project, event=event_with_sourcemap, sender=None)
assert (
OrganizationOnboardingTask.objects.get(
organization=self.organization,
task=OnboardingTask.SOURCEMAPS,
status=OnboardingTaskStatus.COMPLETE,
)
is not None
)
assert_any_analytics_event(
record_analytics,
FirstSourcemapsSentEvent(
user_id=self.user.id,
organization_id=self.organization.id,
project_id=project.id,
platform=event_with_sourcemap.platform,
project_platform=project.platform,
url=dict(event_with_sourcemap.tags).get("url", None),
),
)
assert_last_analytics_event(
record_analytics,
FirstSourcemapsSentEventForProject(
user_id=self.user.id,
organization_id=self.organization.id,
project_id=project.id,
platform=event_with_sourcemap.platform,
project_platform=project.platform,
url=dict(event_with_sourcemap.tags).get("url", None),
),
)
# Manually update the completionSeen column of existing tasks
OrganizationOnboardingTask.objects.filter(organization=self.organization).update(
completion_seen=timezone.now()
)
onboarding_tasks.try_mark_onboarding_complete(self.organization.id)
# Onboarding is NOW complete
assert (
OrganizationOption.objects.filter(
organization=self.organization, key="onboarding:complete"
).count()
== 1
)
@patch("sentry.analytics.record", wraps=record)
def test_tasks_are_transferred_when_project_is_transferred(
self, record_analytics: MagicMock
) -> None:
"""
Test that onboarding tasks are transferred when a project is transferred
"""
project = self.create_project(platform="python")
project_created.send(project=project, user=self.user, default_rules=True, sender=None)
transaction_event = load_data("transaction")
transaction_event.update({"user": None, "release": "my-first-release", "tags": []})
event = self.store_event(data=transaction_event, project_id=project.id)
transaction_processed.send(project=project, event=event, sender=None)
data = load_data("javascript")
data["exception"] = {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
}
event_with_sourcemap = self.store_event(
project_id=project.id,
data=data,
)
event_processed.send(project=project, event=event_with_sourcemap, sender=None)
error_event = self.store_event(
data={
"event_id": "c" * 32,
"message": "this is bad.",
"timestamp": timezone.now().isoformat(),
"type": "error",
},
project_id=project.id,
)
event_processed.send(project=project, event=error_event, sender=None)
first_replay_received.send(project=project, sender=None)
new_organization = self.create_organization(slug="new-org")
project.organization = new_organization
project_transferred.send(
old_org_id=self.organization.id,
project=project,
sender=None,
)
assert_last_analytics_event(
record_analytics,
ProjectTransferredEvent(
old_organization_id=self.organization.id,
new_organization_id=new_organization.id,
project_id=project.id,
platform=project.platform,
),
)
project2 = self.create_project(platform="javascript-react")
project_created.send(project=project2, user=self.user, default_rules=False, sender=None)
project2.organization = new_organization
project_transferred.send(
old_org_id=self.organization.id,
project=project2,
sender=None,
)
assert_last_analytics_event(
record_analytics,
ProjectTransferredEvent(
old_organization_id=self.organization.id,
new_organization_id=new_organization.id,
project_id=project2.id,
platform=project2.platform,
),
)
transferred_tasks = OrganizationOnboardingTask.objects.filter(
organization_id=new_organization.id,
task__in=OrganizationOnboardingTask.TRANSFERABLE_TASKS,
)
self.assertEqual(len(transferred_tasks), len(OrganizationOnboardingTask.TRANSFERABLE_TASKS))
|
OrganizationOnboardingTaskTest
|
python
|
pytorch__pytorch
|
torch/_inductor/pattern_matcher.py
|
{
"start": 36500,
"end": 37588
}
|
class ____:
pattern: PatternExpr
extra_check: Callable[[Match], bool]
def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node) -> None:
raise NotImplementedError
def register(
self,
pass_dicts: Union[_PassDictsType, Sequence[_PassDictsType]],
target: Union[torch.fx.node.Target, None] = None,
prepend: bool = False,
) -> None:
if target is None:
assert hasattr(self.pattern, "fns")
for fn in self.pattern.fns:
self.register(pass_dicts, fn, prepend=prepend)
elif isinstance(pass_dicts, (dict, PatternMatcherPass)):
assert hasattr(self.pattern, "op")
if prepend:
pass_dicts[(self.pattern.op, target)].insert(0, self)
else:
pass_dicts[(self.pattern.op, target)].append(self)
else:
pass_dicts = typing.cast(Sequence[_PassDictsType], pass_dicts)
for x in pass_dicts:
self.register(x, target, prepend=prepend)
@dataclasses.dataclass
|
PatternEntry
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/validation.py
|
{
"start": 4661,
"end": 5162
}
|
class ____(Validator):
"""
Validator that can be switched on/off according to
a filter. (This wraps around another validator.)
"""
def __init__(self, validator: Validator, filter: FilterOrBool) -> None:
self.validator = validator
self.filter = to_filter(filter)
def validate(self, document: Document) -> None:
# Call the validator only if the filter is active.
if self.filter():
self.validator.validate(document)
|
ConditionalValidator
|
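A minimal usage sketch for the validator above, assuming the masked class is restored as ConditionalValidator (matching the target field) and prompt_toolkit is installed; the digit check and the checks_enabled flag are illustrative names, not part of the original snippet.
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.validation import ConditionalValidator, Validator
checks_enabled = True  # illustrative toggle; the filter reads it at call time
digits_only = Validator.from_callable(lambda text: text.isdigit(), error_message="Digits only")
validator = ConditionalValidator(digits_only, Condition(lambda: checks_enabled))
validator.validate(Document("123"))  # filter is active and the text passes
checks_enabled = False
validator.validate(Document("abc"))  # filter is off, so the wrapped validator is skipped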
python
|
pydantic__pydantic
|
pydantic/_internal/_mock_val_ser.py
|
{
"start": 626,
"end": 2284
}
|
class ____(Mapping[str, Any]):
"""Mocker for `pydantic_core.CoreSchema` which optionally attempts to
rebuild the thing it's mocking when one of its methods is accessed and raises an error if that fails.
"""
__slots__ = '_error_message', '_code', '_attempt_rebuild', '_built_memo'
def __init__(
self,
error_message: str,
*,
code: PydanticErrorCodes,
attempt_rebuild: Callable[[], CoreSchema | None] | None = None,
) -> None:
self._error_message = error_message
self._code: PydanticErrorCodes = code
self._attempt_rebuild = attempt_rebuild
self._built_memo: CoreSchema | None = None
def __getitem__(self, key: str) -> Any:
return self._get_built().__getitem__(key)
def __len__(self) -> int:
return self._get_built().__len__()
def __iter__(self) -> Iterator[str]:
return self._get_built().__iter__()
def _get_built(self) -> CoreSchema:
if self._built_memo is not None:
return self._built_memo
if self._attempt_rebuild:
schema = self._attempt_rebuild()
if schema is not None:
self._built_memo = schema
return schema
raise PydanticUserError(self._error_message, code=self._code)
def rebuild(self) -> CoreSchema | None:
self._built_memo = None
if self._attempt_rebuild:
schema = self._attempt_rebuild()
if schema is not None:
return schema
else:
raise PydanticUserError(self._error_message, code=self._code)
return None
|
MockCoreSchema
|
python
|
walkccc__LeetCode
|
solutions/3535. Unit Conversion II/3535.py
|
{
"start": 0,
"end": 1016
}
|
class ____:
def queryConversions(
self,
conversions: list[list[int]],
queries: list[list[int]]
) -> list[int]:
self.MOD = 1_000_000_007
units = self._baseUnitConversions(conversions)
# By Fermat's little theorem.
return [units[v] * self._modPow(units[u], self.MOD - 2) % self.MOD
for u, v in queries]
# Same as 3528. Unit Conversion I
def _baseUnitConversions(self, conversions: list[list[int]]) -> list[int]:
n = len(conversions) + 1
res = [0] * n
res[0] = 1
q = collections.deque([0])
graph = [[] for _ in range(n)]
for u, v, factor in conversions:
graph[u].append((v, factor))
while q:
u = q.popleft()
for v, factor in graph[u]:
res[v] = (res[u] * factor) % self.MOD
q.append(v)
return res
def _modPow(self, x: int, n: int) -> int:
if n == 0:
return 1
if n % 2 == 1:
return x * self._modPow(x, n - 1) % self.MOD
return self._modPow(x * x % self.MOD, n // 2)
|
Solution
|
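A quick check of the solver above, assuming the masked class is Solution (matching the target field) and that collections is imported at module scope as the code expects; the sample conversions are made up for illustration.
import collections  # required by _baseUnitConversions
# unit 0 -> unit 1 with factor 2, unit 1 -> unit 2 with factor 3
conversions = [[0, 1, 2], [1, 2, 3]]
queries = [[1, 2], [0, 2]]
print(Solution().queryConversions(conversions, queries))  # [3, 6]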
python
|
python-openxml__python-docx
|
src/docx/opc/part.py
|
{
"start": 550,
"end": 5711
}
|
class ____:
"""Base class for package parts.
Provides common properties and methods, but intended to be subclassed in client code
to implement specific part behaviors.
"""
def __init__(
self,
partname: PackURI,
content_type: str,
blob: bytes | None = None,
package: Package | None = None,
):
super(Part, self).__init__()
self._partname = partname
self._content_type = content_type
self._blob = blob
self._package = package
def after_unmarshal(self):
"""Entry point for post-unmarshaling processing, for example to parse the part
XML.
May be overridden by subclasses without forwarding call to super.
"""
# don't place any code here, just catch call if not overridden by
# subclass
pass
def before_marshal(self):
"""Entry point for pre-serialization processing, for example to finalize part
naming if necessary.
May be overridden by subclasses without forwarding call to super.
"""
# don't place any code here, just catch call if not overridden by
# subclass
pass
@property
def blob(self) -> bytes:
"""Contents of this package part as a sequence of bytes.
May be text or binary. Intended to be overridden by subclasses. Default behavior
is to return load blob.
"""
return self._blob or b""
@property
def content_type(self):
"""Content type of this part."""
return self._content_type
def drop_rel(self, rId: str):
"""Remove the relationship identified by `rId` if its reference count is less
than 2.
Relationships with a reference count of 0 are implicit relationships.
"""
if self._rel_ref_count(rId) < 2:
del self.rels[rId]
@classmethod
def load(cls, partname: PackURI, content_type: str, blob: bytes, package: Package):
return cls(partname, content_type, blob, package)
def load_rel(self, reltype: str, target: Part | str, rId: str, is_external: bool = False):
"""Return newly added |_Relationship| instance of `reltype`.
The new relationship relates the `target` part to this part with key `rId`.
Target mode is set to ``RTM.EXTERNAL`` if `is_external` is |True|. Intended for
use during load from a serialized package, where the rId is well-known. Other
methods exist for adding a new relationship to a part when manipulating a part.
"""
return self.rels.add_relationship(reltype, target, rId, is_external)
@property
def package(self):
"""|OpcPackage| instance this part belongs to."""
return self._package
@property
def partname(self):
"""|PackURI| instance holding partname of this part, e.g.
'/ppt/slides/slide1.xml'."""
return self._partname
@partname.setter
def partname(self, partname: str):
if not isinstance(partname, PackURI):
tmpl = "partname must be instance of PackURI, got '%s'"
raise TypeError(tmpl % type(partname).__name__)
self._partname = partname
def part_related_by(self, reltype: str) -> Part:
"""Return part to which this part has a relationship of `reltype`.
Raises |KeyError| if no such relationship is found and |ValueError| if more than
one such relationship is found. Provides ability to resolve implicitly related
part, such as Slide -> SlideLayout.
"""
return self.rels.part_with_reltype(reltype)
def relate_to(self, target: Part | str, reltype: str, is_external: bool = False) -> str:
"""Return rId key of relationship of `reltype` to `target`.
The returned `rId` is from an existing relationship if there is one, otherwise a
new relationship is created.
"""
if is_external:
return self.rels.get_or_add_ext_rel(reltype, cast(str, target))
else:
rel = self.rels.get_or_add(reltype, cast(Part, target))
return rel.rId
@property
def related_parts(self):
"""Dictionary mapping related parts by rId, so child objects can resolve
explicit relationships present in the part XML, e.g. sldIdLst to a specific
|Slide| instance."""
return self.rels.related_parts
@lazyproperty
def rels(self):
"""|Relationships| instance holding the relationships for this part."""
# -- prevent breakage in `python-docx-template` by retaining legacy `._rels` attribute --
self._rels = Relationships(self._partname.baseURI)
return self._rels
def target_ref(self, rId: str) -> str:
"""Return URL contained in target ref of relationship identified by `rId`."""
rel = self.rels[rId]
return rel.target_ref
def _rel_ref_count(self, rId: str) -> int:
"""Return the count of references in this part to the relationship identified by `rId`.
Only an XML part can contain references, so this is 0 for `Part`.
"""
return 0
|
Part
|
python
|
bokeh__bokeh
|
src/bokeh/core/property/wrappers.py
|
{
"start": 7856,
"end": 8946
}
|
class ____(PropertyValueContainer, set[T]):
""" A list property value container that supports change notifications on
mutating operations.
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def _saved_copy(self) -> set[T]:
return set(self)
@notify_owner
def add(self, element: T) -> None:
super().add(element)
@notify_owner
def difference_update(self, *s: Iterable[Any]) -> None:
super().difference_update(*s)
@notify_owner
def discard(self, element: T) -> None:
super().discard(element)
@notify_owner
def intersection_update(self, *s: Iterable[Any]) -> None:
super().intersection_update(*s)
@notify_owner
def remove(self, element: T) -> None:
super().remove(element)
@notify_owner
def symmetric_difference_update(self, s: Iterable[T]) -> None:
super().symmetric_difference_update(s)
@notify_owner
def update(self, *s: Iterable[T]) -> None:
super().update(*s)
T_Val = TypeVar("T_Val")
|
PropertyValueSet
|
python
|
PyCQA__pylint
|
doc/data/messages/i/invalid-repr-returned/good.py
|
{
"start": 0,
"end": 107
}
|
class ____:
"""__repr__ returns <type 'str'>"""
def __repr__(self):
return "apples"
|
CustomRepr
|
python
|
django__django
|
django/contrib/contenttypes/models.py
|
{
"start": 183,
"end": 5011
}
|
class ____(models.Manager):
use_in_migrations = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Cache shared by all the get_for_* methods to speed up
# ContentType retrieval.
self._cache = {}
def get_by_natural_key(self, app_label, model):
try:
ct = self._cache[self.db][(app_label, model)]
except KeyError:
ct = self.get(app_label=app_label, model=model)
self._add_to_cache(self.db, ct)
return ct
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
return model._meta
def _get_from_cache(self, opts):
key = (opts.app_label, opts.model_name)
return self._cache[self.db][key]
def get_for_model(self, model, for_concrete_model=True):
"""
Return the ContentType object for a given model, creating the
ContentType if necessary. Lookups are cached so that subsequent lookups
for the same model don't hit the database.
"""
opts = self._get_opts(model, for_concrete_model)
try:
return self._get_from_cache(opts)
except KeyError:
pass
# The ContentType entry was not found in the cache, therefore we
# proceed to load or create it.
try:
# Start with get() and not get_or_create() in order to use
# the db_for_read (see #20401).
ct = self.get(app_label=opts.app_label, model=opts.model_name)
except self.model.DoesNotExist:
# Not found in the database; we proceed to create it. This time
# use get_or_create to take care of any race conditions.
ct, created = self.get_or_create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
return ct
def get_for_models(self, *models, for_concrete_models=True):
"""
Given *models, return a dictionary mapping {model: content_type}.
"""
results = {}
# Models that aren't already in the cache grouped by app labels.
needed_models = defaultdict(set)
# Mapping of opts to the list of models requiring it.
needed_opts = defaultdict(list)
for model in models:
opts = self._get_opts(model, for_concrete_models)
try:
ct = self._get_from_cache(opts)
except KeyError:
needed_models[opts.app_label].add(opts.model_name)
needed_opts[(opts.app_label, opts.model_name)].append(model)
else:
results[model] = ct
if needed_opts:
# Lookup required content types from the DB.
condition = Q(
*(
Q(("app_label", app_label), ("model__in", models))
for app_label, models in needed_models.items()
),
_connector=Q.OR,
)
cts = self.filter(condition)
for ct in cts:
opts_models = needed_opts.pop((ct.app_label, ct.model), [])
for model in opts_models:
results[model] = ct
self._add_to_cache(self.db, ct)
# Create content types that weren't in the cache or DB.
for (app_label, model_name), opts_models in needed_opts.items():
ct = self.create(app_label=app_label, model=model_name)
self._add_to_cache(self.db, ct)
for model in opts_models:
results[model] = ct
return results
def get_for_id(self, id):
"""
Lookup a ContentType by ID. Use the same shared cache as get_for_model
(though ContentTypes are not created on-the-fly by get_by_id).
"""
try:
ct = self._cache[self.db][id]
except KeyError:
# This could raise a DoesNotExist; that's correct behavior and will
# make sure that only correct ctypes get stored in the cache dict.
ct = self.get(pk=id)
self._add_to_cache(self.db, ct)
return ct
def clear_cache(self):
"""
Clear out the content-type cache.
"""
self._cache.clear()
def _add_to_cache(self, using, ct):
"""Insert a ContentType into the cache."""
# Note it's possible for ContentType objects to be stale; model_class()
# will return None. Hence, there is no reliance on
# model._meta.app_label here, just using the model fields instead.
key = (ct.app_label, ct.model)
self._cache.setdefault(using, {})[key] = ct
self._cache.setdefault(using, {})[ct.id] = ct
|
ContentTypeManager
|
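A sketch of the cached lookup that the manager above provides, assuming a configured Django project with django.contrib.auth installed (not runnable standalone); User stands in for any installed model.
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
ct = ContentType.objects.get_for_model(User)        # first call may hit the database
ct_again = ContentType.objects.get_for_model(User)  # served from the per-database cache
assert ct is ct_again
ContentType.objects.clear_cache()  # drop the cache, e.g. between tests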
python
|
geekcomputers__Python
|
Calculator with simple ui.py
|
{
"start": 37,
"end": 1950
}
|
class ____:
def __init__(self):
pass
def add(self, num1, num2):
"""
This function adds two numbers.
Examples:
>>> add(2, 3)
5
>>> add(5, 9)
14
>>> add(-1, 2)
1
"""
return num1 + num2
def subtract(self, num1, num2):
"""
This function subtracts two numbers.
Examples:
>>> subtract(5, 3)
2
>>> subtract(9, 5)
4
>>> subtract(4, 9)
-5
"""
return num1 - num2
def multiply(self, num1, num2):
"""
This function multiplies two numbers.
Examples:
>>> multiply(4, 2)
8
>>> multiply(3, 3)
9
>>> multiply(9, 9)
81
"""
return num1 * num2
def divide(self, num1, num2):
"""
This function divides two numbers.
Examples:
>>> divide(4, 4)
1
>>> divide(6, 3)
2
>>> divide(9, 1)
9
"""
if num2 == 0:
print("Cannot divide by zero")
else:
return num1 / num2
calculator = Calculator()
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
while True:
# Take input from the user
choice = input("Enter choice(1/2/3/4): ")
# Check if choice is one of the four options
if choice in ("1", "2", "3", "4"):
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
if choice == "1":
print(calculator.add(num1, num2))
elif choice == "2":
print(calculator.subtract(num1, num2))
elif choice == "3":
print(calculator.multiply(num1, num2))
elif choice == "4":
print(calculator.divide(num1, num2))
break
else:
print("Invalid Input")
|
Calculator
|
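A non-interactive sketch of the class above, assuming it is named Calculator (matching the target field); it exercises the four methods directly instead of going through the input loop.
calc = Calculator()
print(calc.add(2, 3))        # 5
print(calc.subtract(9, 5))   # 4
print(calc.multiply(3, 3))   # 9
print(calc.divide(9, 1))     # 9.0
print(calc.divide(1, 0))     # prints "Cannot divide by zero", then None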
python
|
lepture__authlib
|
authlib/oidc/core/errors.py
|
{
"start": 1586,
"end": 2015
}
|
class ____(OAuth2Error):
"""The Authorization Server requires End-User consent. This error MAY be
returned when the prompt parameter value in the Authentication Request is
none, but the Authentication Request cannot be completed without
displaying a user interface for End-User consent.
http://openid.net/specs/openid-connect-core-1_0.html#AuthError
"""
error = "consent_required"
|
ConsentRequiredError
|
python
|
walkccc__LeetCode
|
solutions/424. Longest Repeating Character Replacement/424-2.py
|
{
"start": 0,
"end": 394
}
|
class ____:
def characterReplacement(self, s: str, k: int) -> int:
maxCount = 0
count = collections.Counter()
# l and r track the maximum window instead of the valid window.
l = 0
for r, c in enumerate(s):
count[c] += 1
maxCount = max(maxCount, count[c])
while maxCount + k < r - l + 1:
count[s[l]] -= 1
l += 1
return r - l + 1
|
Solution
|
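A small check of the sliding-window solution above, assuming the masked class is Solution and collections is imported; the input is the classic example for this problem.
import collections
# With one replacement the best window has length 4, e.g. "BABB" -> "BBBB".
print(Solution().characterReplacement("AABABBA", 1))  # 4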
python
|
huggingface__transformers
|
src/transformers/models/eomt/modeling_eomt.py
|
{
"start": 41094,
"end": 41584
}
|
class ____(nn.LayerNorm):
def __init__(self, num_channels, eps=1e-6, affine=True):
super().__init__(num_channels, eps=eps, elementwise_affine=affine)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = hidden_state.permute(0, 2, 3, 1)
hidden_state = F.layer_norm(hidden_state, self.normalized_shape, self.weight, self.bias, self.eps)
hidden_state = hidden_state.permute(0, 3, 1, 2)
return hidden_state
|
EomtLayerNorm2d
|
python
|
huggingface__transformers
|
src/transformers/models/paligemma/modeling_paligemma.py
|
{
"start": 3579,
"end": 9756
}
|
class ____(nn.Module):
def __init__(self, config: PaliGemmaConfig):
super().__init__()
self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)
def forward(self, image_features):
hidden_states = self.linear(image_features)
return hidden_states
def token_type_ids_mask_function(
token_type_ids: Optional[torch.Tensor],
image_group_ids: Optional[torch.Tensor],
) -> Optional[Callable]:
"""
This function adds the correct offsets to the `q_idx` and `kv_idx` as the torch API can only accept lengths,
not start and end indices.
"""
# Do not return an additional mask in this case
if token_type_ids is None:
return None
def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
# If it's 1 for both query and key/value, we are in an image block
# NOTE: static cache shape goes beyond input seq length, while token_type_ids.shape[1] == input seq length
# Since vmap doesn't support `if statement` we workaround it with `torch.where`
safe_q_idx = torch.where(q_idx < token_type_ids.shape[1], q_idx, 0)
safe_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0)
token_type_ids_at_q_idx = token_type_ids[batch_idx, safe_q_idx]
token_type_ids_at_q_idx = torch.where(q_idx < token_type_ids.shape[1], token_type_ids_at_q_idx, 0)
token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_kv_idx]
token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0)
image_group_ids_at_q_idx = image_group_ids[batch_idx, safe_q_idx]
image_group_ids_at_q_idx = torch.where(q_idx < image_group_ids.shape[1], image_group_ids_at_q_idx, -1)
image_group_ids_at_kv_idx = image_group_ids[batch_idx, safe_kv_idx]
image_group_ids_at_kv_idx = torch.where(kv_idx < image_group_ids.shape[1], image_group_ids_at_kv_idx, -1)
is_image_block = (token_type_ids_at_q_idx == 1) & (token_type_ids_at_kv_idx == 1)
same_image_block = image_group_ids_at_q_idx == image_group_ids_at_kv_idx
# This is bidirectional attention whenever we are dealing with image tokens
return is_image_block & same_image_block
return inner_mask
def create_causal_mask_mapping(
config: PreTrainedConfig,
input_embeds: torch.Tensor,
attention_mask: Optional[torch.Tensor],
cache_position: torch.Tensor,
past_key_values: Optional[Cache],
position_ids: Optional[torch.Tensor],
token_type_ids: Optional[torch.Tensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
is_training: bool = False,
**kwargs,
) -> dict:
"""
Overwrites the base `create_masks_for_generate` with `token_type_ids` masking to create the causal mask mapping
for all kinds of forward passes. Paligemma uses a bidirectional mask on the prompt tokens.
Uses `pixel_values` as an optional input to disambiguate edge cases.
"""
if is_training and token_type_ids is None:
raise ValueError("`token_type_ids` is required as a model input when training")
mask_kwargs = {
"config": config.get_text_config(),
"input_embeds": input_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# NOTE: this `is_prompt` logic is not flawless, it fails when we're using a cache eagerly initialized
# (e.g. compiled prefill) AND `pixel_values` are not provided (i.e. the image data is provided through other
# means). Determining prefill in that case requires checking data values, which is not compile-compatible.
maybe_is_prompt = past_key_values is None or not past_key_values.is_initialized or pixel_values is not None
if maybe_is_prompt:
if token_type_ids is not None:
# The logic below was originally written for Gemma3, where `token_type_ids` is reversed. Let's reverse
# it to then use exactly the same logic.
token_type_ids = 1 - token_type_ids
else:
logger.warning_once(
"The input may be the prompt, but `token_type_ids` is not provided. We recommend "
"passing `token_type_ids` to the model to prevent bad attention masking."
)
# BC: when NOT training, use bidirectional mask if sequence length > 1. Otherwise, use the default causal
# mask. This is incorrect in some advanced use cases, hence the warning above.
# NOTE: this branch can't be reached when training because `token_type_ids` is required as a model input.
if input_embeds.shape[1] > 1:
token_type_ids = torch.ones_like(input_embeds)[:, :, 0]
# Logic originally copied from Gemma3. It holds up for Paligemma as well because Paligemma assumes up to one image
# per prompt AND we reverse `token_type_ids` above. Gemma3 uses a bidirectional mask for images, tagged through
# `token_type_ids` 1s.
if token_type_ids is not None and maybe_is_prompt:
# We need to pass an additional mask function to account for token type ids, and it needs to be an `or` (to
# undo the causal masking)
# First find where a new image block starts: 1 if image and previous not image
# The images cannot attend to future images, but can attend to all prev images and to itself bidirectionally
is_image = (token_type_ids == 1).to(cache_position.device)
is_previous_image = nn.functional.pad(is_image, (1, 0), value=0)[:, :-1]
new_image_start = is_image & ~is_previous_image
image_group_ids = torch.cumsum(new_image_start.int(), dim=1) - 1
image_group_ids = torch.where(is_image, image_group_ids, torch.full_like(token_type_ids, -1))
mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
token_type_ids.to(cache_position.device), image_group_ids
)
return create_masks_for_generate(**mask_kwargs)
@auto_docstring
|
PaliGemmaMultiModalProjector
|
python
|
getsentry__sentry
|
src/sentry/backup/comparators.py
|
{
"start": 8319,
"end": 9522
}
|
class ____(JSONScrubbingComparator):
"""Comparator that ensures that the specified fields' value on the right input is an ISO-8601
date that is greater than (ie, occurs after) or equal to the specified field's left input."""
def compare(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
findings = []
fields = sorted(self.fields)
for f in fields:
if left["fields"].get(f) is None and right["fields"].get(f) is None:
continue
left_date_updated = left["fields"][f] or UNIX_EPOCH
right_date_updated = right["fields"][f] or UNIX_EPOCH
if parser.parse(left_date_updated) > parser.parse(right_date_updated):
findings.append(
ComparatorFinding(
kind=self.get_kind(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"""the left value ({left_date_updated}) of `{f}` was not less than or equal to the right value ({right_date_updated})""",
)
)
return findings
|
DateUpdatedComparator
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarDefaultClass2.py
|
{
"start": 577,
"end": 805
}
|
class ____(dict[T1, T2]): ...
a1 = ClassA[int]()
reveal_type(a1, expected_text="ClassA[int, int]")
a2 = ClassA()
reveal_type(a2, expected_text="ClassA[str, str]")
# This should generate an error because T2 depends on T1.
|
ClassA
|
python
|
doocs__leetcode
|
solution/3000-3099/3081.Replace Question Marks in String to Minimize Its Value/Solution.py
|
{
"start": 0,
"end": 495
}
|
class ____:
def minimizeStringValue(self, s: str) -> str:
cnt = Counter(s)
pq = [(cnt[c], c) for c in ascii_lowercase]
heapify(pq)
t = []
for _ in range(s.count("?")):
v, c = pq[0]
t.append(c)
heapreplace(pq, (v + 1, c))
t.sort()
cs = list(s)
j = 0
for i, c in enumerate(s):
if c == "?":
cs[i] = t[j]
j += 1
return "".join(cs)
|
Solution
|
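A tiny check of the replacement logic above, assuming the masked class is Solution and the helpers it relies on are imported as shown; the input strings are illustrative.
from collections import Counter
from heapq import heapify, heapreplace
from string import ascii_lowercase
print(Solution().minimizeStringValue("???"))     # "abc": three unused letters, kept sorted
print(Solution().minimizeStringValue("a?b?c?"))  # "adbecf": the cheapest letters d, e, f fill the gaps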
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/backoff_strategies.py
|
{
"start": 230,
"end": 1894
}
|
class ____(BackoffStrategy):
def __init__(self, stream: HttpStream, **kwargs): # type: ignore # noqa
self.stream = stream
super().__init__(**kwargs)
def backoff_time(
self, response_or_exception: Optional[Union[requests.Response, requests.RequestException]], **kwargs: Any
) -> Optional[float]:
# This method is called if we run into the rate limit. GitHub limits requests to 5000 per hour and provides
# `X-RateLimit-Reset` header which contains time when this hour will be finished and limits will be reset so
# we again could have 5000 per another hour.
if isinstance(response_or_exception, requests.Response):
min_backoff_time = 60.0
retry_after = response_or_exception.headers.get("Retry-After")
if retry_after is not None:
backoff_time_in_seconds = max(float(retry_after), min_backoff_time)
return self.get_waiting_time(backoff_time_in_seconds)
reset_time = response_or_exception.headers.get("X-RateLimit-Reset")
if reset_time:
backoff_time_in_seconds = max(float(reset_time) - time.time(), min_backoff_time)
return self.get_waiting_time(backoff_time_in_seconds)
return None
def get_waiting_time(self, backoff_time_in_seconds: Optional[float]) -> Optional[float]:
if backoff_time_in_seconds < 60 * 10: # type: ignore[operator]
return backoff_time_in_seconds
else:
self.stream._http_client._session.auth.update_token() # New token will be used in next request
return 1
|
GithubStreamABCBackoffStrategy
|
python
|
getsentry__sentry
|
src/sentry/deletions/defaults/monitor_environment.py
|
{
"start": 181,
"end": 826
}
|
class ____(ModelDeletionTask[MonitorEnvironment]):
def get_child_relations(self, instance: MonitorEnvironment) -> list[BaseRelation]:
from sentry.monitors import models
return [
ModelRelation(
models.MonitorIncident,
{"monitor_environment_id": instance.id},
),
# Use BulkModelDeletionTask here since MonitorIncidents are already handled above
ModelRelation(
models.MonitorCheckIn,
{"monitor_environment_id": instance.id},
BulkModelDeletionTask,
),
]
|
MonitorEnvironmentDeletionTask
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axes_grid1/axes_rgb.py
|
{
"start": 1707,
"end": 5085
}
|
class ____:
"""
4-panel `~.Axes.imshow` (RGB, R, G, B).
Layout::
┌───────────────┬─────┐
│ │ R │
│ ├─────┤
│ RGB │ G │
│ ├─────┤
│ │ B │
└───────────────┴─────┘
Subclasses can override the ``_defaultAxesClass`` attribute.
By default RGBAxes uses `.mpl_axes.Axes`.
Attributes
----------
RGB : ``_defaultAxesClass``
The Axes object for the three-channel `~.Axes.imshow`.
R : ``_defaultAxesClass``
The Axes object for the red channel `~.Axes.imshow`.
G : ``_defaultAxesClass``
The Axes object for the green channel `~.Axes.imshow`.
B : ``_defaultAxesClass``
The Axes object for the blue channel `~.Axes.imshow`.
"""
_defaultAxesClass = Axes
def __init__(self, *args, pad=0, **kwargs):
"""
Parameters
----------
pad : float, default: 0
Fraction of the Axes height to put as padding.
axes_class : `~matplotlib.axes.Axes`
Axes class to use. If not provided, ``_defaultAxesClass`` is used.
*args
Forwarded to *axes_class* init for the RGB Axes
**kwargs
Forwarded to *axes_class* init for the RGB, R, G, and B Axes
"""
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
self.RGB = ax = axes_class(*args, **kwargs)
ax.get_figure().add_axes(ax)
self.R, self.G, self.B = make_rgb_axes(
ax, pad=pad, axes_class=axes_class, **kwargs)
# Set the line color and ticks for the axes.
for ax1 in [self.RGB, self.R, self.G, self.B]:
if isinstance(ax1.axis, MethodType):
ad = Axes.AxisDict(self)
ad.update(
bottom=SimpleAxisArtist(ax1.xaxis, 1, ax1.spines["bottom"]),
top=SimpleAxisArtist(ax1.xaxis, 2, ax1.spines["top"]),
left=SimpleAxisArtist(ax1.yaxis, 1, ax1.spines["left"]),
right=SimpleAxisArtist(ax1.yaxis, 2, ax1.spines["right"]))
else:
ad = ax1.axis
ad[:].line.set_color("w")
ad[:].major_ticks.set_markeredgecolor("w")
def imshow_rgb(self, r, g, b, **kwargs):
"""
Create the four images {rgb, r, g, b}.
Parameters
----------
r, g, b : array-like
The red, green, and blue arrays.
**kwargs
Forwarded to `~.Axes.imshow` calls for the four images.
Returns
-------
rgb : `~matplotlib.image.AxesImage`
r : `~matplotlib.image.AxesImage`
g : `~matplotlib.image.AxesImage`
b : `~matplotlib.image.AxesImage`
"""
if not (r.shape == g.shape == b.shape):
raise ValueError(
f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')
RGB = np.dstack([r, g, b])
R = np.zeros_like(RGB)
R[:, :, 0] = r
G = np.zeros_like(RGB)
G[:, :, 1] = g
B = np.zeros_like(RGB)
B[:, :, 2] = b
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
|
RGBAxes
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/filters/base.py
|
{
"start": 193,
"end": 2699
}
|
class ____(metaclass=ABCMeta):
"""
Base class for any filter to activate/deactivate a feature, depending on a
condition.
The return value of ``__call__`` will tell if the feature should be active.
"""
def __init__(self) -> None:
self._and_cache: dict[Filter, Filter] = {}
self._or_cache: dict[Filter, Filter] = {}
self._invert_result: Filter | None = None
@abstractmethod
def __call__(self) -> bool:
"""
The actual call to evaluate the filter.
"""
return True
def __and__(self, other: Filter) -> Filter:
"""
Chaining of filters using the & operator.
"""
assert isinstance(other, Filter), f"Expecting filter, got {other!r}"
if isinstance(other, Always):
return self
if isinstance(other, Never):
return other
if other in self._and_cache:
return self._and_cache[other]
result = _AndList.create([self, other])
self._and_cache[other] = result
return result
def __or__(self, other: Filter) -> Filter:
"""
Chaining of filters using the | operator.
"""
assert isinstance(other, Filter), f"Expecting filter, got {other!r}"
if isinstance(other, Always):
return other
if isinstance(other, Never):
return self
if other in self._or_cache:
return self._or_cache[other]
result = _OrList.create([self, other])
self._or_cache[other] = result
return result
def __invert__(self) -> Filter:
"""
Inverting of filters using the ~ operator.
"""
if self._invert_result is None:
self._invert_result = _Invert(self)
return self._invert_result
def __bool__(self) -> None:
"""
By purpose, we don't allow bool(...) operations directly on a filter,
because the meaning is ambiguous.
Executing a filter has to be done always by calling it. Providing
defaults for `None` values should be done through an `is None` check
instead of for instance ``filter1 or Always()``.
"""
raise ValueError(
"The truth value of a Filter is ambiguous. Instead, call it as a function."
)
def _remove_duplicates(filters: list[Filter]) -> list[Filter]:
result = []
for f in filters:
if f not in result:
result.append(f)
return result
|
Filter
|
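A short sketch of the operator chaining defined above, assuming prompt_toolkit's Condition (a concrete Filter) is available; the two flags are illustrative.
from prompt_toolkit.filters import Condition
is_admin = Condition(lambda: True)
debug_mode = Condition(lambda: False)
combined = is_admin & ~debug_mode   # __and__ and __invert__ from the base class
print(combined())                   # True
print((is_admin | debug_mode)())    # True, via __or__
# bool(is_admin) would raise ValueError, since __bool__ forbids implicit truth testing.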
python
|
django__django
|
tests/managers_regress/models.py
|
{
"start": 809,
"end": 1049
}
|
class ____(models.Model):
value = models.IntegerField()
class Meta:
abstract = True
# Custom manager
restricted = Value42()
# No custom manager on this class to make sure the default case doesn't break.
|
AbstractBase2
|
python
|
fluentpython__example-code-2e
|
05-data-classes/cards_enum.py
|
{
"start": 203,
"end": 381
}
|
class ____:
rank: Suit
suit: Rank
def __str__(self):
glyphs = [chr(x) for x in range(0x2660, 0x2664)]
return f'{self.rank} of {glyphs[self.suit-1]}'
|
Card
|
python
|
ray-project__ray
|
rllib/algorithms/dreamerv3/torch/models/actor_network.py
|
{
"start": 358,
"end": 7435
}
|
class ____(nn.Module):
"""The `actor` (policy net) of DreamerV3.
Consists of a simple MLP for Discrete actions and two MLPs for cont. actions (mean
and stddev).
Also contains two scalar variables to keep track of the percentile-5 and
percentile-95 values of the computed value targets within a batch. This is used to
compute the "scaled value targets" for actor learning. These two variables decay
over time exponentially (see [1] for more details).
"""
def __init__(
self,
*,
input_size: int,
model_size: str = "XS",
action_space: gym.Space,
):
"""Initializes an ActorNetwork instance.
Args:
input_size: The input size of the actor network.
model_size: The "Model Size" used according to [1] Appendix B.
Use None for manually setting the different network sizes.
action_space: The action space our environment uses.
"""
super().__init__()
self.input_size = input_size
self.model_size = model_size
self.action_space = action_space
# The EMA decay variables used for the [Percentile(R, 95%) - Percentile(R, 5%)]
# diff to scale value targets for the actor loss.
self.ema_value_target_pct5 = nn.Parameter(
torch.tensor(float("nan")), requires_grad=False
)
self.ema_value_target_pct95 = nn.Parameter(
torch.tensor(float("nan")), requires_grad=False
)
# For discrete actions, use a single MLP that computes logits.
if isinstance(self.action_space, gym.spaces.Discrete):
self.mlp = MLP(
input_size=self.input_size,
model_size=self.model_size,
output_layer_size=self.action_space.n,
)
# For cont. actions, use separate MLPs for Gaussian mean and stddev.
# TODO (sven): In the author's original code repo, this is NOT the case,
# inputs are pushed through a shared MLP, then only the two output linear
# layers are separate for std- and mean logits.
elif isinstance(action_space, gym.spaces.Box):
output_layer_size = np.prod(action_space.shape)
self.mlp = MLP(
input_size=self.input_size,
model_size=self.model_size,
output_layer_size=output_layer_size,
)
self.std_mlp = MLP(
input_size=self.input_size,
model_size=self.model_size,
output_layer_size=output_layer_size,
)
else:
raise ValueError(f"Invalid action space: {action_space}")
def forward(self, h, z, return_distr_params=False):
"""Performs a forward pass through this policy network.
Args:
h: The deterministic hidden state of the sequence model. [B, dim(h)].
z: The stochastic discrete representations of the original
observation input. [B, num_categoricals, num_classes].
return_distr_params: Whether to return (as a second tuple item) the action
distribution parameter tensor created by the policy.
"""
# Flatten last two dims of z.
assert len(z.shape) == 3
z_shape = z.shape
z = z.view(z_shape[0], -1)
assert len(z.shape) == 2
out = torch.cat([h, z], dim=-1)
# Send h-cat-z through MLP.
action_logits = self.mlp(out)
if isinstance(self.action_space, gym.spaces.Discrete):
action_probs = nn.functional.softmax(action_logits, dim=-1)
# Add the unimix weighting (1% uniform) to the probs.
# See [1]: "Unimix categoricals: We parameterize the categorical
# distributions for the world model representations and dynamics, as well as
# for the actor network, as mixtures of 1% uniform and 99% neural network
# output to ensure a minimal amount of probability mass on every class and
# thus keep log probabilities and KL divergences well behaved."
action_probs = 0.99 * action_probs + 0.01 * (1.0 / self.action_space.n)
# Danijar's code does: distr = [Distr class](logits=torch.log(probs)).
# Not sure why we don't directly use the already available probs instead.
action_logits = torch.log(action_probs)
# Distribution parameters are the log(probs) directly.
distr_params = action_logits
distr = self.get_action_dist_object(distr_params)
action = distr.sample().float().detach() + (
action_probs - action_probs.detach()
)
elif isinstance(self.action_space, gym.spaces.Box):
# Send h-cat-z through MLP to compute stddev logits for Normal dist
std_logits = self.std_mlp(out)
# minstd, maxstd taken from [1] from configs.yaml
minstd = 0.1
maxstd = 1.0
# Distribution parameters are the squashed std_logits and the tanh'd
# mean logits.
# squash std_logits from (-inf, inf) to (minstd, maxstd)
std_logits = (maxstd - minstd) * torch.sigmoid(std_logits + 2.0) + minstd
mean_logits = torch.tanh(action_logits)
distr_params = torch.cat([mean_logits, std_logits], dim=-1)
distr = self.get_action_dist_object(distr_params)
action = distr.rsample()
if return_distr_params:
return action, distr_params
return action
def get_action_dist_object(self, action_dist_params_T_B):
"""Helper method to create an action distribution object from (T, B, ..) params.
Args:
action_dist_params_T_B: The time-major action distribution parameters.
This could be simply the logits (discrete) or a to-be-split-in-2
tensor for mean and stddev (continuous).
Returns:
The torch action distribution object, from which one can sample, compute
log probs, entropy, etc..
"""
if isinstance(self.action_space, gym.spaces.Discrete):
# Create the distribution object using the unimix'd logits.
distr = torch.distributions.OneHotCategorical(logits=action_dist_params_T_B)
elif isinstance(self.action_space, gym.spaces.Box):
# Compute Normal distribution from action_logits and std_logits
loc, scale = torch.split(
action_dist_params_T_B,
action_dist_params_T_B.shape[-1] // 2,
dim=-1,
)
distr = torch.distributions.Normal(loc=loc, scale=scale)
# If action_space is a box with multiple dims, make individual dims
# independent.
distr = torch.distributions.Independent(distr, len(self.action_space.shape))
else:
raise ValueError(f"Action space {self.action_space} not supported!")
return distr
|
ActorNetwork
|
python
|
vyperlang__vyper
|
tests/unit/ast/test_source_annotation.py
|
{
"start": 1060,
"end": 6342
}
|
class ____(Exception):
def __init__(self, message='Error Message not found.', item=None):
self.message = message
self.lineno = None
self.col_offset = None
if isinstance(item, tuple): # is a position.
self.lineno, self.col_offset = item
elif item and hasattr(item, 'lineno'):
self.set_err_pos(item.lineno, item.col_offset)
if hasattr(item, 'source_code'):
self.source_code = item.source_code.splitlines()
def set_err_pos(self, lineno, col_offset):
if not self.lineno:
self.lineno = lineno
if not self.col_offset:
self.col_offset = col_offset
def __str__(self):
output = self.message
if self.lineno and hasattr(self, 'source_code'):
output = f'line {self.lineno}: {output}\n{self.source_code[self.lineno -1]}'
if self.col_offset:
col = '-' * self.col_offset + '^'
output += '\n' + col
elif self.lineno is not None and self.col_offset is not None:
output = f'line {self.lineno}:{self.col_offset} {output}'
return output
"""[
1:-1
]
def test_annotate_source_code_marks_positions_in_source_code():
annotation = annotate_source_code(
TEST_SOURCE_CODE, 22, col_offset=16, context_lines=0, line_numbers=False
)
assert (
annotation
== r"""
def __str__(self):
----------------^
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE, 22, col_offset=15, context_lines=1, line_numbers=False
)
assert (
annotation
== r"""
def __str__(self):
---------------^
output = self.message
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE, 22, col_offset=20, context_lines=2, line_numbers=False
)
assert (
annotation
== r"""
self.col_offset = col_offset
def __str__(self):
--------------------^
output = self.message
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE, 1, col_offset=5, context_lines=3, line_numbers=True
)
assert (
annotation
== r"""
---> 1 # Attempts to display the line and column of violating code.
------------^
2 class ParserException(Exception):
3 def __init__(self, message='Error Message not found.', item=None):
4 self.message = message
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE, 36, col_offset=8, context_lines=4, line_numbers=True
)
assert (
annotation
== r"""
32
33 elif self.lineno is not None and self.col_offset is not None:
34 output = f'line {self.lineno}:{self.col_offset} {output}'
35
---> 36 return output
----------------^
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE, 15, col_offset=8, context_lines=11, line_numbers=True
)
assert (
annotation
== r"""
4 self.message = message
5 self.lineno = None
6 self.col_offset = None
7
8 if isinstance(item, tuple): # is a position.
9 self.lineno, self.col_offset = item
10 elif item and hasattr(item, 'lineno'):
11 self.set_err_pos(item.lineno, item.col_offset)
12 if hasattr(item, 'source_code'):
13 self.source_code = item.source_code.splitlines()
14
---> 15 def set_err_pos(self, lineno, col_offset):
----------------^
16 if not self.lineno:
17 self.lineno = lineno
18
19 if not self.col_offset:
20 self.col_offset = col_offset
21
22 def __str__(self):
23 output = self.message
24
25 if self.lineno and hasattr(self, 'source_code'):
26
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE, 15, col_offset=None, context_lines=3, line_numbers=True
)
assert (
annotation
== r"""
12 if hasattr(item, 'source_code'):
13 self.source_code = item.source_code.splitlines()
14
---> 15 def set_err_pos(self, lineno, col_offset):
16 if not self.lineno:
17 self.lineno = lineno
18
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE, 15, col_offset=None, context_lines=2, line_numbers=False
)
assert (
annotation
== r"""
self.source_code = item.source_code.splitlines()
def set_err_pos(self, lineno, col_offset):
if not self.lineno:
self.lineno = lineno
"""[
1:-1
]
)
@pytest.mark.parametrize("bad_lineno", (-100, -1, 0, 45, 1000))
def test_annotate_source_code_raises_value_errors(bad_lineno):
with pytest.raises(ValueError, match="Line number is out of range"):
annotate_source_code(TEST_SOURCE_CODE, bad_lineno)
|
ParserException
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/venv/backends.py
|
{
"start": 469,
"end": 4885
}
|
class ____(abc.ABC):
"""The base class for virtualenv backends"""
def __init__(self, project: Project, python: str | None) -> None:
self.project = project
self.python = python
@abc.abstractmethod
def pip_args(self, with_pip: bool) -> Iterable[str]:
pass
@cached_property
def _resolved_interpreter(self) -> PythonInfo:
if not self.python:
project_python = self.project._python
if project_python:
return project_python
def match_func(py_version: PythonInfo) -> bool:
return bool(self.python) or (
py_version.valid and self.project.python_requires.contains(py_version.version, True)
)
respect_version_file = self.project.config["python.use_python_version"]
for py_version in self.project.iter_interpreters(
self.python, search_venv=False, filter_func=match_func, respect_version_file=respect_version_file
):
return py_version
python = f" {self.python}" if self.python else ""
raise VirtualenvCreateError(f"Can't resolve python interpreter{python}")
@property
def ident(self) -> str:
"""Get the identifier of this virtualenv.
self.python can be one of:
3.8
/usr/bin/python
3.9.0a4
python3.8
"""
return self._resolved_interpreter.identifier
def subprocess_call(self, cmd: list[str], **kwargs: Any) -> None:
self.project.core.ui.echo(
f"Run command: [success]{cmd}[/]",
verbosity=termui.Verbosity.DETAIL,
err=True,
)
try:
subprocess.check_call(
cmd,
stdout=subprocess.DEVNULL if self.project.core.ui.verbosity < termui.Verbosity.DETAIL else None,
)
except subprocess.CalledProcessError as e: # pragma: no cover
raise VirtualenvCreateError(e) from None
def _ensure_clean(self, location: Path, force: bool = False) -> None:
if not location.exists():
return
if location.is_dir() and not any(location.iterdir()):
return
if not force:
raise VirtualenvCreateError(f"The location {location} is not empty, add --force to overwrite it.")
if location.is_file():
self.project.core.ui.info(f"Removing existing file {location}", verbosity=termui.Verbosity.DETAIL)
location.unlink()
else:
self.project.core.ui.info(
f"Cleaning existing target directory {location}", verbosity=termui.Verbosity.DETAIL
)
with os.scandir(location) as entries:
for entry in entries:
if entry.is_dir() and not entry.is_symlink():
shutil.rmtree(entry.path)
else:
os.remove(entry.path)
def get_location(self, name: str | None = None, venv_name: str | None = None) -> Path:
if name and venv_name:
raise PdmUsageError("Cannot specify both name and venv_name")
venv_parent = Path(self.project.config["venv.location"]).expanduser()
if not venv_parent.is_dir():
venv_parent.mkdir(exist_ok=True, parents=True)
if not venv_name:
venv_name = f"{get_venv_prefix(self.project)}{name or self.ident}"
return venv_parent / venv_name
def create(
self,
name: str | None = None,
args: tuple[str, ...] = (),
force: bool = False,
in_project: bool = False,
prompt: str | None = None,
with_pip: bool = False,
venv_name: str | None = None,
) -> Path:
if in_project:
location = self.project.root / ".venv"
else:
location = self.get_location(name, venv_name)
args = (*self.pip_args(with_pip), *args)
if prompt is not None:
prompt = prompt.format(
project_name=self.project.root.name.lower() or "virtualenv",
python_version=self.ident,
)
self._ensure_clean(location, force)
self.perform_create(location, args, prompt=prompt)
return location
@abc.abstractmethod
def perform_create(self, location: Path, args: tuple[str, ...], prompt: str | None = None) -> None:
pass
|
Backend
|
python
|
Pylons__pyramid
|
tests/test_integration.py
|
{
"start": 20490,
"end": 21202
}
|
class ____(IntegrationBase, unittest.TestCase):
package = 'tests.pkgs.securityapp'
def test_public(self):
res = self.testapp.get('/public', status=200)
self.assertEqual(res.body, b'Hello')
def test_private_denied(self):
self.testapp.get('/private', status=403)
def test_private_allowed(self):
self.testapp.extra_environ = {'REMOTE_USER': 'bob'}
res = self.testapp.get('/private', status=200)
self.assertEqual(res.body, b'Secret')
def test_inaccessible(self):
self.testapp.get('/inaccessible', status=403)
self.testapp.extra_environ = {'REMOTE_USER': 'bob'}
self.testapp.get('/inaccessible', status=403)
|
TestSecurityApp
|
python
|
getsentry__sentry
|
tests/sentry/deletions/test_data_source.py
|
{
"start": 305,
"end": 1081
}
|
class ____(BaseWorkflowTest, HybridCloudTestMixin):
def setUp(self) -> None:
self.alert_rule = self.create_alert_rule()
self.snuba_query = self.alert_rule.snuba_query
self.subscription = QuerySubscription.objects.get(snuba_query_id=self.snuba_query.id)
self.data_source = self.create_data_source(
organization=self.organization, source_id=self.subscription.id
)
def test_simple(self) -> None:
self.ScheduledDeletion.schedule(instance=self.data_source, days=0)
with self.tasks():
run_scheduled_deletions()
assert not DataSource.objects.filter(id=self.data_source.id).exists()
assert not QuerySubscription.objects.filter(id=self.subscription.id).exists()
|
DeleteDataSourceTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/lookup_ops.py
|
{
"start": 34148,
"end": 34730
}
|
class ____(collections.namedtuple("HasherSpec", ["hasher", "key"])):
"""A structure for the spec of the hashing function to use for hash buckets.
`hasher` is the name of the hashing function to use (eg. "fasthash",
"stronghash").
`key` is optional and specifies the key to use for the hash function if
supported, currently only used by a strong hash.
Fields:
hasher: The hasher name to use.
key: The key to be used by the hashing function, if required.
"""
__slots__ = ()
FastHashSpec = HasherSpec("fasthash", None) # pylint: disable=invalid-name
|
HasherSpec
|
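For illustration, the namedtuple above with a keyed strong hash; the key values are made up, mirroring the FastHashSpec constant shown in the snippet.
strong_spec = HasherSpec("stronghash", key=[1, 2])
print(strong_spec.hasher, strong_spec.key)  # stronghash [1, 2]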
python
|
pytorch__pytorch
|
torch/_dynamo/source.py
|
{
"start": 19723,
"end": 20551
}
|
class ____(ChainedSource):
is_int: bool
def __post_init__(self) -> None:
assert self.base is not None
def reconstruct(self, codegen: "PyCodegen") -> None:
# Integer casting at reconstruction helps reduce the amount of DynamicInts returned
# to the user, in favor of plain ints.
# For example, a compiled region that only does int arithmetic could return a
# DynamicInt without the casting here.
codegen.add_push_null(lambda: codegen.load_import_from("builtins", "int"))
codegen(self.base)
codegen.extend_output(create_call_function(1, False))
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def name(self) -> str:
return f"int({self.base.name()})"
@dataclasses.dataclass(frozen=True)
|
DynamicScalarSource
|
python
|
Netflix__metaflow
|
metaflow/packaging_sys/tar_backend.py
|
{
"start": 130,
"end": 3010
}
|
class ____(PackagingBackend):
type = "tgz"
@classmethod
def get_extract_commands(cls, archive_name: str, dest_dir: str) -> List[str]:
return [
f"TAR_OPTIONS='--warning=no-timestamp' tar -xzf {archive_name} -C {dest_dir}"
]
def __init__(self):
super().__init__()
self._buf = None
def create(self):
self._buf = BytesIO()
self._archive = tarfile.open(
fileobj=self._buf, mode="w:gz", compresslevel=3, dereference=True
)
return self
def add_file(self, filename: str, arcname: Optional[str] = None):
info = self._archive.gettarinfo(filename, arcname)
# Setting this default to Dec 3, 2019
info.mtime = 1575360000
with open(filename, mode="rb") as f:
self._archive.addfile(info, f)
def add_data(self, data: BytesIO, arcname: str):
info = tarfile.TarInfo(arcname)
data.seek(0)
info.size = len(data.getvalue())
# Setting this default to Dec 3, 2019
info.mtime = 1575360000
self._archive.addfile(info, data)
def close(self):
if self._archive:
self._archive.close()
def get_blob(self) -> Optional[Union[bytes, bytearray]]:
if self._buf:
blob = bytearray(self._buf.getvalue())
blob[4:8] = [0] * 4 # Reset 4 bytes from offset 4 to account for ts
return blob
return None
@classmethod
def cls_open(cls, content: IO[bytes]) -> tarfile.TarFile:
return tarfile.open(fileobj=content, mode="r:gz")
@classmethod
def cls_member_name(cls, member: Union[tarfile.TarInfo, str]) -> str:
"""
Returns the name of the member as a string.
"""
return member.name if isinstance(member, tarfile.TarInfo) else member
@classmethod
def cls_has_member(cls, archive: tarfile.TarFile, name: str) -> bool:
try:
archive.getmember(name)
return True
except KeyError:
return False
@classmethod
def cls_get_member(cls, archive: tarfile.TarFile, name: str) -> Optional[bytes]:
try:
member = archive.getmember(name)
return archive.extractfile(member).read()
except KeyError:
return None
@classmethod
def cls_extract_members(
cls,
archive: tarfile.TarFile,
members: Optional[List[Any]] = None,
dest_dir: str = ".",
) -> None:
archive.extractall(path=dest_dir, members=members)
@classmethod
def cls_list_members(
cls, archive: tarfile.TarFile
) -> Optional[List[tarfile.TarInfo]]:
return archive.getmembers() or None
@classmethod
def cls_list_names(cls, archive: tarfile.TarFile) -> Optional[List[str]]:
return archive.getnames() or None
|
TarPackagingBackend
|
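A sketch of building a small in-memory archive with the backend above, assuming the PackagingBackend base class needs no constructor arguments beyond what the snippet shows.
from io import BytesIO
backend = TarPackagingBackend()
backend.create()
backend.add_data(BytesIO(b"hello world\n"), "greeting.txt")
backend.close()
blob = backend.get_blob()  # gzip bytes with the timestamp field zeroed for reproducibility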
python
|
catalyst-team__catalyst
|
catalyst/contrib/datasets/cifar.py
|
{
"start": 10181,
"end": 12825
}
|
class ____(QueryGalleryDataset):
"""
CIFAR10 for metric learning with query and gallery split.
CIFAR10QGDataset should be used for test stage.
For this dataset we used only test part of the CIFAR10 and only
those images that are labeled as 'dog', 'frog', 'horse', 'ship', 'truck'.
"""
_split = 5
classes = [
"5 - dog",
"6 - frog",
"7 - horse",
"8 - ship",
"9 - truck",
]
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
gallery_fraq: Optional[float] = 0.2,
**kwargs
) -> None:
"""
Args:
root: root directory for storing dataset
transform: transform
gallery_fraq: gallery size
"""
self._cifar = CIFAR10(root, train=False, download=True, transform=transform)
self._filter()
self._gallery_size = int(gallery_fraq * len(self._cifar))
self._query_size = len(self._cifar) - self._gallery_size
self._is_query = torch.zeros(len(self._cifar)).type(torch.bool)
self._is_query[: self._query_size] = True
def _filter(self) -> None:
"""Filter CIFAR10 dataset: select images of 'dog', 'frog',
'horse', 'ship', 'truck' classes."""
mask = np.array(self._cifar.targets) >= self._split
self._cifar.data = self._cifar.data[mask]
self._cifar.targets = np.array(self._cifar.targets)[mask].tolist()
def __getitem__(self, idx: int) -> Dict[str, Any]:
"""
Get item method for dataset
Args:
idx: index of the object
Returns:
Dict with features, targets and is_query flag
"""
image, label = self._cifar[idx]
return {
"features": image,
"targets": label,
"is_query": self._is_query[idx],
}
def __len__(self) -> int:
"""Length"""
return len(self._cifar)
def __repr__(self) -> None:
"""Print info about the dataset"""
return self._cifar.__repr__()
@property
def gallery_size(self) -> int:
"""Query Gallery dataset should have gallery_size property"""
return self._gallery_size
@property
def query_size(self) -> int:
"""Query Gallery dataset should have query_size property"""
return self._query_size
@property
def data(self) -> torch.Tensor:
"""Images from CIFAR10"""
return self._cifar.data
@property
def targets(self) -> torch.Tensor:
"""Labels of digits"""
return self._cifar.targets
|
CifarQGDataset
|
python
|
spack__spack
|
lib/spack/spack/test/installer_tui.py
|
{
"start": 2807,
"end": 6215
}
|
class ____:
"""Test basic state management operations"""
def test_add_build(self):
"""Test that add_build adds builds correctly"""
status, _, _ = create_build_status(total=2)
spec1 = MockSpec("pkg1", "1.0")
spec2 = MockSpec("pkg2", "2.0")
status.add_build(spec1, explicit=True, control_w_conn=MockConnection())
assert len(status.builds) == 1
assert spec1.dag_hash() in status.builds
assert status.builds[spec1.dag_hash()].name == "pkg1"
assert status.builds[spec1.dag_hash()].explicit is True
assert status.dirty is True
status.add_build(spec2, explicit=False, control_w_conn=MockConnection())
assert len(status.builds) == 2
assert spec2.dag_hash() in status.builds
assert status.builds[spec2.dag_hash()].explicit is False
def test_update_state_transitions(self):
"""Test that update_state transitions states properly"""
status, fake_time, _ = create_build_status()
(spec,) = add_mock_builds(status, 1)
build_id = spec.dag_hash()
# Update to 'building' state
status.update_state(build_id, "building")
assert status.builds[build_id].state == "building"
assert status.builds[build_id].progress_percent is None
assert status.completed == 0
# Update to 'finished' state
status.update_state(build_id, "finished")
assert status.builds[build_id].state == "finished"
assert status.completed == 1
assert status.builds[build_id].finished_time == fake_time[0] + inst.CLEANUP_TIMEOUT
def test_update_state_failed(self):
"""Test that failed state increments completed counter"""
status, fake_time, _ = create_build_status()
(spec,) = add_mock_builds(status, 1)
build_id = spec.dag_hash()
status.update_state(build_id, "failed")
assert status.builds[build_id].state == "failed"
assert status.completed == 1
assert status.builds[build_id].finished_time == fake_time[0] + inst.CLEANUP_TIMEOUT
def test_update_progress(self):
"""Test that update_progress updates percentages"""
status, _, _ = create_build_status()
(spec,) = add_mock_builds(status, 1)
build_id = spec.dag_hash()
# Update progress
status.update_progress(build_id, 50, 100)
assert status.builds[build_id].progress_percent == 50
assert status.dirty is True
# Same percentage shouldn't mark dirty again
status.dirty = False
status.update_progress(build_id, 50, 100)
assert status.dirty is False
# Different percentage should mark dirty
status.update_progress(build_id, 75, 100)
assert status.builds[build_id].progress_percent == 75
assert status.dirty is True
def test_completion_counter(self):
"""Test that completion counter increments correctly"""
status, _, _ = create_build_status(total=3)
specs = add_mock_builds(status, 3)
assert status.completed == 0
status.update_state(specs[0].dag_hash(), "finished")
assert status.completed == 1
status.update_state(specs[1].dag_hash(), "failed")
assert status.completed == 2
status.update_state(specs[2].dag_hash(), "finished")
assert status.completed == 3
|
TestBasicStateManagement
|
python
|
getsentry__sentry
|
src/sentry/notifications/notifications/activity/escalating.py
|
{
"start": 236,
"end": 1550
}
|
class ____(GroupActivityNotification):
message_builder = "SlackNotificationsMessageBuilder"
metrics_key = "escalating_activity"
title = "Issue marked as escalating"
def get_notification_title(
self, provider: ExternalProviders, context: Mapping[str, Any] | None = None
) -> str:
return self.title
def get_description(self) -> tuple[str, str | None, Mapping[str, Any]]:
forecast = int(self.activity.data.get("forecast", 0))
expired_snooze = self.activity.data.get("expired_snooze")
if forecast:
return (
"Sentry flagged this issue as escalating because over {forecast} {event} happened in an hour.",
None,
{"forecast": forecast, "event": "event" if forecast == 1 else "events"},
)
if expired_snooze:
return (
"Sentry flagged this issue as escalating because your archive condition has expired.",
None,
{},
)
# Return a default basic message
return ("Sentry flagged this issue as escalating.", None, {})
def get_message_description(self, recipient: Actor, provider: ExternalProviders) -> Any:
return self.get_context()["text_description"]
|
EscalatingActivityNotification
|
python
|
scipy__scipy
|
scipy/stats/_multivariate.py
|
{
"start": 181438,
"end": 185168
}
|
class ____(multi_rv_frozen):
__class_getitem__ = None
def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
Examples
--------
>>> import numpy as np
>>> from scipy.stats import multivariate_t
>>> loc = np.zeros(3)
>>> shape = np.eye(3)
>>> df = 10
>>> dist = multivariate_t(loc, shape, df)
>>> dist.rvs()
array([[ 0.81412036, -1.53612361, 0.42199647]])
>>> dist.pdf([1, 1, 1])
array([0.01237803])
"""
self._dist = multivariate_t_gen(seed)
dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
self.shape_info = _PSD(shape, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
U = self.shape_info.U
log_pdet = self.shape_info.log_pdet
return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim,
self.shape_info.rank)
def cdf(self, x, *, maxpts=None, lower_limit=None, random_state=None):
x = self._dist._process_quantiles(x, self.dim)
return self._dist._cdf(x, self.loc, self.shape, self.df, self.dim,
maxpts, lower_limit, random_state)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(loc=self.loc,
shape=self.shape,
df=self.df,
size=size,
random_state=random_state)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.shape)
multivariate_t = multivariate_t_gen()
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_t_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'cdf', 'entropy']:
method = multivariate_t_gen.__dict__[name]
method_frozen = multivariate_t_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvt_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
_mhg_doc_default_callparams = """\
m : array_like
The number of each type of object in the population.
That is, :math:`m[i]` is the number of objects of
type :math:`i`.
n : array_like
The number of samples taken from the population.
"""
_mhg_doc_callparams_note = """\
`m` must be an array of positive integers. If the quantile
:math:`i` contains values out of the range :math:`[0, m_i]`
where :math:`m_i` is the number of objects of type :math:`i`
in the population or if the parameters are inconsistent with one
another (e.g. ``x.sum() != n``), methods return the appropriate
value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative
values, the result will contain ``nan`` there.
"""
_mhg_doc_frozen_callparams = ""
_mhg_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
mhg_docdict_params = {
'_doc_default_callparams': _mhg_doc_default_callparams,
'_doc_callparams_note': _mhg_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mhg_docdict_noparams = {
'_doc_default_callparams': _mhg_doc_frozen_callparams,
'_doc_callparams_note': _mhg_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
|
multivariate_t_frozen
|
python
|
google__jax
|
jax/_src/interpreters/mlir.py
|
{
"start": 22567,
"end": 24066
}
|
class ____:
# The names of the dimension variables, sorted by name. This is the order in
# which they are passed to the IR functions that need them. This is only
# used for native serialization with polymorphic shapes when
# --jax_dynamic_shapes is off.
# TODO: for multi-platform lowering we prepend to the regular dimension
# variables a fake dimension variable "platform_index_". This is a
# temporary abuse, taking advantage that for platform index we need the
# same lowering strategy as for dimension variables: add it as argument to
# inner functions, and pass the values along at the call sites.
dim_vars: tuple[str, ...]
# Whether the module uses dimension variables, either in its inputs or
# from an inner call to Exported modules that uses dimension variables.
# This includes the case when the called Exported module uses a platform
# index argument.
uses_dim_vars: bool
# If the first dimension variable is a platform index argument
has_platform_index_argument: bool
def __init__(self,
dim_vars: tuple[str, ...],
lowering_platforms: tuple[str, ...] | None):
if lowering_platforms is not None and len(lowering_platforms) > 1:
dim_vars = ("_platform_index",) + tuple(dim_vars)
self.has_platform_index_argument = True
else:
self.has_platform_index_argument = False
self.uses_dim_vars = (len(dim_vars) > 0)
self.dim_vars = dim_vars
@dataclasses.dataclass(frozen=True)
|
ShapePolyLoweringState
|
python
|
chroma-core__chroma
|
chromadb/test/test_api.py
|
{
"start": 85948,
"end": 89709
}
|
class ____:
"""Test Search class dict input support."""
def test_search_with_dict_where(self):
"""Test Search accepts dict for where parameter."""
from chromadb.execution.expression.plan import Search
from chromadb.execution.expression.operator import Where
# Simple equality
search = Search(where={"status": "active"})
assert search._where is not None
assert isinstance(search._where, Where)
# Complex where with operators
search = Search(where={"$and": [{"status": "active"}, {"score": {"$gt": 0.5}}]})
assert search._where is not None
def test_search_with_dict_rank(self):
"""Test Search accepts dict for rank parameter."""
from chromadb.execution.expression.plan import Search
from chromadb.execution.expression.operator import Rank
# KNN ranking
search = Search(rank={"$knn": {"query": [0.1, 0.2]}})
assert search._rank is not None
assert isinstance(search._rank, Rank)
# Val ranking
search = Search(rank={"$val": 0.5})
assert search._rank is not None
def test_search_with_dict_limit(self):
"""Test Search accepts dict and int for limit parameter."""
from chromadb.execution.expression.plan import Search
# Dict limit
search = Search(limit={"limit": 10, "offset": 5})
assert search._limit.limit == 10
assert search._limit.offset == 5
# Int limit (creates Limit with offset=0)
search = Search(limit=10)
assert search._limit.limit == 10
assert search._limit.offset == 0
def test_search_with_dict_select(self):
"""Test Search accepts dict, list, and set for select parameter."""
from chromadb.execution.expression.plan import Search
# Dict select
search = Search(select={"keys": ["#document", "#score"]})
assert search._select is not None
# List select
search = Search(select=["#document", "#metadata"])
assert search._select is not None
# Set select
search = Search(select={"#document", "#embedding"})
assert search._select is not None
def test_search_mixed_inputs(self):
"""Test Search with mixed expression and dict inputs."""
from chromadb.execution.expression.plan import Search
from chromadb.execution.expression.operator import Key
search = Search(
where=Key("status") == "active", # Expression
rank={"$knn": {"query": [0.1, 0.2]}}, # Dict
limit=10, # Int
select=["#document"], # List
)
assert search._where is not None
assert search._rank is not None
assert search._limit.limit == 10
assert search._select is not None
def test_search_builder_methods_with_dicts(self):
"""Test Search builder methods accept dicts."""
from chromadb.execution.expression.plan import Search
search = Search().where({"status": "active"}).rank({"$val": 0.5})
assert search._where is not None
assert search._rank is not None
def test_search_invalid_inputs(self):
"""Test Search rejects invalid input types."""
import pytest
from chromadb.execution.expression.plan import Search
with pytest.raises(TypeError, match="where must be"):
Search(where="invalid")
with pytest.raises(TypeError, match="rank must be"):
Search(rank=0.5) # Primitive numbers not allowed
with pytest.raises(TypeError, match="limit must be"):
Search(limit="10")
with pytest.raises(TypeError, match="select must be"):
Search(select=123)
|
TestSearchDictSupport
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingTypeEquals1.py
|
{
"start": 1034,
"end": 1178
}
|
class ____:
pass
E = C[T] | D
def func5(x: E[T]) -> None:
if type(x) == C:
reveal_type(x, expected_text="C[T@func5]")
@final
|
D
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_type_expressions.py
|
{
"start": 16424,
"end": 17036
}
|
class ____(fixtures.TablesTest, RoundTripTestBase):
@classmethod
def define_tables(cls, metadata):
class MyString(UserDefinedType):
cache_ok = True
def get_col_spec(self, **kw):
return "VARCHAR(50)"
def bind_expression(self, bindvalue):
return func.lower(bindvalue)
def column_expression(self, col):
return func.upper(col)
Table(
"test_table",
metadata,
Column("x", String(50)),
Column("y", MyString()),
)
|
UserDefinedTypeRoundTripTest
|
python
|
python-openxml__python-docx
|
src/docx/opc/phys_pkg.py
|
{
"start": 1075,
"end": 2318
}
|
class ____(PhysPkgReader):
"""Implements |PhysPkgReader| interface for an OPC package extracted into a
directory."""
def __init__(self, path):
"""`path` is the path to a directory containing an expanded package."""
super(_DirPkgReader, self).__init__()
self._path = os.path.abspath(path)
def blob_for(self, pack_uri):
"""Return contents of file corresponding to `pack_uri` in package directory."""
path = os.path.join(self._path, pack_uri.membername)
with open(path, "rb") as f:
blob = f.read()
return blob
def close(self):
"""Provides interface consistency with |ZipFileSystem|, but does nothing, a
directory file system doesn't need closing."""
pass
@property
def content_types_xml(self):
"""Return the `[Content_Types].xml` blob from the package."""
return self.blob_for(CONTENT_TYPES_URI)
def rels_xml_for(self, source_uri):
"""Return rels item XML for source with `source_uri`, or None if the item has no
rels item."""
try:
rels_xml = self.blob_for(source_uri.rels_uri)
except IOError:
rels_xml = None
return rels_xml
|
_DirPkgReader
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-omni/dagster_omni/translation.py
|
{
"start": 2108,
"end": 4344
}
|
class ____:
"""Container class for data required to translate an object in an
Omni workspace into a Dagster definition.
Properties:
obj (Union[OmniDocument, OmniQuery]): The object to translate.
workspace_data (OmniWorkspaceData): Global workspace data.
"""
obj: Union[OmniDocument, OmniQuery]
workspace_data: OmniWorkspaceData
def _resolve_multilayer_translation(context: ResolutionContext, model):
"""The Omni translation schema supports defining global transforms
as well as per-object-type transforms. This resolver composes the
per-object-type transforms with the global transforms.
"""
info = TranslatorResolvingInfo(
asset_attributes=model,
resolution_context=context,
model_key="translation",
)
def _translation_fn(base_asset_spec: AssetSpec, data: OmniTranslatorData):
processed_spec = info.get_asset_spec(
base_asset_spec,
{
"data": data,
"spec": base_asset_spec,
},
)
nested_translation_fns = resolve_fields(
model=model,
resolved_cls=OmniTranslationArgs,
context=context.with_scope(
**{
"data": data,
"spec": processed_spec,
}
),
)
for_document = nested_translation_fns.get("for_document")
for_query = nested_translation_fns.get("for_query")
if isinstance(data.obj, OmniDocument) and for_document:
return for_document(processed_spec, data)
if isinstance(data.obj, OmniQuery) and for_query:
return for_query(processed_spec, data)
return processed_spec
return _translation_fn
OmniTranslationFn: TypeAlias = TranslationFn[OmniTranslatorData]
ResolvedTargetedOmniTranslationFn = Annotated[
OmniTranslationFn,
TranslationFnResolver[OmniTranslatorData](lambda data: {"data": data}),
]
ResolvedTargetedKeyOnlyOmniTranslationFn = Annotated[
OmniTranslationFn,
TranslationFnResolver[OmniTranslatorData](
lambda data: {"data": data}, model_field_type=AssetSpecKeyUpdateKwargs.model()
),
]
@record
|
OmniTranslatorData
|
python
|
python-attrs__attrs
|
typing-examples/mypy.py
|
{
"start": 687,
"end": 736
}
|
class ____:
z: Any = attr.ib()
@attrs.define
|
FF
|
python
|
getsentry__sentry
|
tests/sentry/users/api/endpoints/test_userroles_index.py
|
{
"start": 1168,
"end": 1494
}
|
class ____(UserRolesTest):
def test_simple(self) -> None:
UserRole.objects.create(name="test-role")
resp = self.get_response()
assert resp.status_code == 200
assert len(resp.data) >= 1, resp.data
assert "test-role" in [r["name"] for r in resp.data]
@control_silo_test
|
UserRolesGetTest
|
python
|
apache__airflow
|
providers/common/compat/tests/unit/common/compat/lineage/test_hook.py
|
{
"start": 22637,
"end": 39563
}
|
class ____:
"""Test edge cases and error conditions to ensure collector never fails."""
@pytest.mark.parametrize("uri", ["", None])
def test_invalid_uri_none(self, collector, uri):
"""Test handling of None URI - should not raise."""
mock_context = mock.MagicMock()
# Should not raise exceptions
collector.add_input_asset(mock_context, uri=uri)
collector.add_output_asset(mock_context, uri=uri)
# Collector should handle gracefully and not collect invalid URIs
assert not collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 0
assert len(lineage.outputs) == 0
assert len(lineage.extra) == 0
def test_malformed_uri(self, collector):
"""Test handling of malformed URIs - should not raise."""
mock_context = mock.MagicMock()
# Various malformed URIs should not cause crashes
collector.add_input_asset(mock_context, uri="not-a-valid-uri")
collector.add_input_asset(mock_context, uri="://missing-scheme")
collector.add_input_asset(mock_context, uri="scheme:")
collector.add_output_asset(mock_context, uri="//no-scheme")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 3
assert lineage.inputs[0].asset.uri == "not-a-valid-uri"
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context == mock_context
assert lineage.inputs[1].asset.uri == "://missing-scheme"
assert lineage.inputs[1].count == 1
assert lineage.inputs[1].context == mock_context
assert lineage.inputs[2].asset.uri == "scheme:/"
assert lineage.inputs[2].count == 1
assert lineage.inputs[2].context == mock_context
assert len(lineage.outputs) == 1
assert lineage.outputs[0].asset.uri == "//no-scheme"
assert lineage.outputs[0].count == 1
assert lineage.outputs[0].context == mock_context
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3.0+")
def test_very_long_uri(self, collector):
"""Test handling of very long URIs - 1000 chars OK, 2000 chars raises ValueError."""
mock_context = mock.MagicMock()
# Create very long URI (1000 chars - should work)
long_path = "a" * 1000
long_uri = f"s3://bucket/{long_path}"
# Create too long URI (2000 chars - should raise)
too_long_uri = f"s3://bucket/{long_path * 2}"
collector.add_input_asset(mock_context, uri=long_uri)
# Too long URI should raise ValueError
with pytest.raises(ValueError, match="Asset name cannot exceed"):
collector.add_output_asset(mock_context, uri=too_long_uri)
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.uri == long_uri
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context == mock_context
assert len(lineage.outputs) == 0
assert len(lineage.extra) == 0
def test_none_context(self, collector):
"""Test handling of None context - should not raise."""
# Should not raise exceptions
collector.add_input_asset(None, uri="s3://bucket/input")
collector.add_output_asset(None, uri="s3://bucket/output")
collector.add_extra(None, "key", "value")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.uri == "s3://bucket/input"
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context is None
assert len(lineage.outputs) == 1
assert lineage.outputs[0].asset.uri == "s3://bucket/output"
assert lineage.outputs[0].count == 1
assert lineage.outputs[0].context is None
assert len(lineage.extra) == 1
assert lineage.extra[0].key == "key"
assert lineage.extra[0].value == "value"
assert lineage.extra[0].count == 1
assert lineage.extra[0].context is None
def test_special_characters_in_extra_key(self, collector):
"""Test that extra keys with special characters work."""
mock_context = mock.MagicMock()
collector.add_extra(mock_context, "key-with-dashes", {"data": "value"})
collector.add_extra(mock_context, "key.with.dots", {"data": "value"})
collector.add_extra(mock_context, "key_with_underscores", {"data": "value"})
collector.add_extra(mock_context, "key/with/slashes", {"data": "value"})
collector.add_extra(mock_context, "key:with:colons", {"data": "value"})
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.extra) == 5
assert lineage.extra[0].key == "key-with-dashes"
assert lineage.extra[1].key == "key.with.dots"
assert lineage.extra[2].key == "key_with_underscores"
assert lineage.extra[3].key == "key/with/slashes"
assert lineage.extra[4].key == "key:with:colons"
def test_unicode_in_extra_key_and_value(self, collector):
"""Test that unicode characters in extra work correctly."""
mock_context = mock.MagicMock()
collector.add_extra(mock_context, "clé_française", {"données": "valeur"})
collector.add_extra(mock_context, "中文键", {"中文": "值"})
collector.add_extra(mock_context, "مفتاح", {"بيانات": "قيمة"})
collector.add_extra(mock_context, "emoji_🚀", {"status": "✅"})
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.extra) == 4
assert lineage.extra[0].key == "clé_française"
assert lineage.extra[0].value == {"données": "valeur"}
assert lineage.extra[0].count == 1
assert lineage.extra[0].context == mock_context
assert lineage.extra[1].key == "中文键"
assert lineage.extra[1].value == {"中文": "值"}
assert lineage.extra[1].count == 1
assert lineage.extra[1].context == mock_context
assert lineage.extra[2].key == "مفتاح"
assert lineage.extra[2].value == {"بيانات": "قيمة"}
assert lineage.extra[2].count == 1
assert lineage.extra[2].context == mock_context
assert lineage.extra[3].key == "emoji_🚀"
assert lineage.extra[3].value == {"status": "✅"}
assert lineage.extra[3].count == 1
assert lineage.extra[3].context == mock_context
def test_very_large_extra_value(self, collector):
"""Test that large extra values are handled."""
mock_context = mock.MagicMock()
# Create a large value
large_value = {"data": "x" * 10000, "list": list(range(1000))}
collector.add_extra(mock_context, "large_key", large_value)
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.extra) == 1
assert lineage.extra[0].key == "large_key"
assert lineage.extra[0].value == large_value
assert lineage.extra[0].count == 1
assert lineage.extra[0].context == mock_context
assert len(lineage.inputs) == 0
assert len(lineage.outputs) == 0
def test_deeply_nested_extra_value(self, collector):
"""Test that deeply nested data structures in extra are handled."""
mock_context = mock.MagicMock()
# Create deeply nested structure
nested_value = {"level1": {"level2": {"level3": {"level4": {"level5": {"data": "deep"}}}}}}
collector.add_extra(mock_context, "nested", nested_value)
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.extra) == 1
assert lineage.extra[0].key == "nested"
assert lineage.extra[0].value == nested_value
assert lineage.extra[0].count == 1
assert lineage.extra[0].context == mock_context
assert len(lineage.inputs) == 0
assert len(lineage.outputs) == 0
def test_extra_value_with_various_types(self, collector):
"""Test that extra can handle various data types."""
mock_context = mock.MagicMock()
collector.add_extra(mock_context, "string", "text")
collector.add_extra(mock_context, "integer", 42)
collector.add_extra(mock_context, "float", 3.14)
collector.add_extra(mock_context, "boolean", True)
collector.add_extra(mock_context, "list", [1, 2, 3])
collector.add_extra(mock_context, "dict", {"key": "value"})
collector.add_extra(mock_context, "null", None)
assert collector.has_collected
# None value should not be collected (based on validation)
lineage = collector.collected_assets
assert len(lineage.extra) == 6 # None is filtered out
assert lineage.extra[0].key == "string"
assert lineage.extra[0].value == "text"
assert lineage.extra[0].count == 1
assert lineage.extra[1].key == "integer"
assert lineage.extra[1].value == 42
assert lineage.extra[1].count == 1
assert lineage.extra[2].key == "float"
assert lineage.extra[2].value == 3.14
assert lineage.extra[2].count == 1
assert lineage.extra[3].key == "boolean"
assert lineage.extra[3].value is True
assert lineage.extra[3].count == 1
assert lineage.extra[4].key == "list"
assert lineage.extra[4].value == [1, 2, 3]
assert lineage.extra[4].count == 1
assert lineage.extra[5].key == "dict"
assert lineage.extra[5].value == {"key": "value"}
assert lineage.extra[5].count == 1
assert len(lineage.inputs) == 0
assert len(lineage.outputs) == 0
def test_non_json_serializable_value_in_extra(self, collector):
"""Test that non-JSON-serializable values are handled gracefully."""
mock_context = mock.MagicMock()
# Create a non-serializable object
class CustomObject:
def __str__(self):
return "custom_object"
# Should not raise - collector should handle via str conversion or skip
collector.add_extra(mock_context, "custom_key", CustomObject())
# May or may not be collected depending on implementation
lineage = collector.collected_assets
# Just verify it doesn't crash
assert isinstance(lineage.extra, list)
assert len(lineage.inputs) == 0
assert len(lineage.outputs) == 0
def test_empty_asset_extra(self, collector):
"""Test that empty asset_extra is handled correctly."""
mock_context = mock.MagicMock()
collector.add_input_asset(mock_context, uri="s3://bucket/file", asset_extra={})
collector.add_output_asset(mock_context, uri="s3://bucket/file", asset_extra={})
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.extra == {}
assert len(lineage.outputs) == 1
assert lineage.outputs[0].asset.extra == {}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3.0+")
def test_asset_with_all_optional_parameters(self, collector):
"""Test asset creation with all optional parameters provided."""
mock_context = mock.MagicMock()
collector.add_input_asset(
mock_context,
uri="s3://bucket/file",
name="custom-name",
group="custom-group",
asset_extra={"key": "value"},
)
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.uri == "s3://bucket/file"
assert lineage.inputs[0].asset.name == "custom-name"
assert lineage.inputs[0].asset.group == "custom-group"
assert lineage.inputs[0].asset.extra == {"key": "value"}
def test_rapid_repeated_calls(self, collector):
"""Test that rapid repeated calls don't cause issues."""
mock_context = mock.MagicMock()
# Simulate rapid repeated calls
for _ in range(50):
collector.add_input_asset(mock_context, uri="s3://bucket/file")
collector.add_output_asset(mock_context, uri="s3://bucket/output")
collector.add_extra(mock_context, "key", "value")
assert collector.has_collected
lineage = collector.collected_assets
# Should have counted properly
assert len(lineage.inputs) == 1
assert lineage.inputs[0].count == 50
assert len(lineage.outputs) == 1
assert lineage.outputs[0].count == 50
assert len(lineage.extra) == 1
assert lineage.extra[0].count == 50
def test_mixed_valid_invalid_operations(self, collector):
"""Test mixing valid and invalid operations."""
mock_context = mock.MagicMock()
# Mix valid and invalid calls
collector.add_input_asset(mock_context, uri="s3://bucket/valid")
collector.add_input_asset(mock_context, uri=None) # Invalid - should not be collected
collector.add_input_asset(mock_context, uri="") # Invalid - should not be collected
collector.add_input_asset(mock_context, uri="s3://bucket/another-valid")
collector.add_extra(mock_context, "valid_key", "valid_value")
collector.add_extra(mock_context, "", "invalid_key") # Invalid key - should not be collected
collector.add_extra(mock_context, "another_key", "another_value")
assert collector.has_collected
# Should collect only valid items
lineage = collector.collected_assets
assert len(lineage.inputs) == 2
assert lineage.inputs[0].asset.uri == "s3://bucket/valid"
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context == mock_context
assert lineage.inputs[1].asset.uri == "s3://bucket/another-valid"
assert lineage.inputs[1].count == 1
assert lineage.inputs[1].context == mock_context
assert len(lineage.extra) == 2
assert lineage.extra[0].key == "valid_key"
assert lineage.extra[0].value == "valid_value"
assert lineage.extra[0].count == 1
assert lineage.extra[1].key == "another_key"
assert lineage.extra[1].value == "another_value"
assert lineage.extra[1].count == 1
assert len(lineage.outputs) == 0
def test_collector_collected_assets_called_multiple_times(self, collector):
"""Test that collected_assets property can be called multiple times."""
mock_context = mock.MagicMock()
collector.add_input_asset(mock_context, uri="s3://bucket/file")
assert collector.has_collected
# Call multiple times - should return same data
lineage1 = collector.collected_assets
lineage2 = collector.collected_assets
lineage3 = collector.collected_assets
assert lineage1.inputs == lineage2.inputs == lineage3.inputs
assert len(lineage1.inputs) == 1
assert lineage1.inputs[0].asset.uri == "s3://bucket/file"
assert lineage1.inputs[0].count == 1
assert lineage1.inputs[0].context == mock_context
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3.0+")
def test_empty_name_and_group(self, collector):
"""Test that empty strings for name and group are handled."""
mock_context = mock.MagicMock()
# Empty strings for optional parameters
collector.add_input_asset(mock_context, uri="s3://bucket/file", name="", group="")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.uri == "s3://bucket/file"
assert lineage.inputs[0].asset.name == "s3://bucket/file"
assert lineage.inputs[0].asset.group == "asset"
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context == mock_context
assert len(lineage.outputs) == 0
assert len(lineage.extra) == 0
def test_extremely_long_extra_key(self, collector):
"""Test that extremely long extra keys are handled."""
mock_context = mock.MagicMock()
long_key = "k" * 10000
collector.add_extra(mock_context, long_key, "value")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.extra) == 1
assert lineage.extra[0].key == long_key
assert lineage.extra[0].value == "value"
assert lineage.extra[0].count == 1
assert lineage.extra[0].context == mock_context
assert len(lineage.inputs) == 0
assert len(lineage.outputs) == 0
|
TestEdgeCases
|
python
|
scipy__scipy
|
scipy/stats/tests/test_distributions.py
|
{
"start": 362029,
"end": 366743
}
|
class ____:
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def test_shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
|
TestSubclassingExplicitShapes
|
python
|
nedbat__coveragepy
|
coverage/config.py
|
{
"start": 803,
"end": 5568
}
|
class ____(configparser.ConfigParser):
"""Our specialization of ConfigParser."""
def __init__(self, our_file: bool) -> None:
"""Create the HandyConfigParser.
`our_file` is True if this config file is specifically for coverage,
False if we are examining another config file (tox.ini, setup.cfg)
for possible settings.
"""
super().__init__(interpolation=None)
self.section_prefixes = ["coverage:"]
if our_file:
self.section_prefixes.append("")
def read( # type: ignore[override]
self,
filenames: Iterable[str],
encoding_unused: str | None = None,
) -> list[str]:
"""Read a file name as UTF-8 configuration data."""
return super().read(filenames, encoding="utf-8")
def real_section(self, section: str) -> str | None:
"""Get the actual name of a section."""
for section_prefix in self.section_prefixes:
real_section = section_prefix + section
has = super().has_section(real_section)
if has:
return real_section
return None
def has_option(self, section: str, option: str) -> bool: # type: ignore[override]
real_section = self.real_section(section)
if real_section is not None:
return super().has_option(real_section, option)
return False
def has_section(self, section: str) -> bool: # type: ignore[override]
return bool(self.real_section(section))
def options(self, section: str) -> list[str]: # type: ignore[override]
real_section = self.real_section(section)
if real_section is not None:
return super().options(real_section)
raise ConfigError(f"No section: {section!r}")
def get_section(self, section: str) -> TConfigSectionOut:
"""Get the contents of a section, as a dictionary."""
d: dict[str, TConfigValueOut] = {}
for opt in self.options(section):
d[opt] = self.get(section, opt)
return d
def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore
"""Get a value, replacing environment variables also.
The arguments are the same as `ConfigParser.get`, but in the found
value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
environment variable ``WORD``.
Returns the finished value.
"""
for section_prefix in self.section_prefixes:
real_section = section_prefix + section
if super().has_option(real_section, option):
break
else:
raise ConfigError(f"No option {option!r} in section: {section!r}")
v: str = super().get(real_section, option, *args, **kwargs)
v = substitute_variables(v, os.environ)
return v
def getfile(self, section: str, option: str) -> str:
"""Fix up a file path setting."""
path = self.get(section, option)
return process_file_value(path)
def getlist(self, section: str, option: str) -> list[str]:
"""Read a list of strings.
The value of `section` and `option` is treated as a comma- and newline-
separated list of strings. Each value is stripped of white space.
Returns the list of strings.
"""
value_list = self.get(section, option)
values = []
for value_line in value_list.split("\n"):
for value in value_line.split(","):
value = value.strip()
if value:
values.append(value)
return values
def getregexlist(self, section: str, option: str) -> list[str]:
"""Read a list of full-line regexes.
The value of `section` and `option` is treated as a newline-separated
list of regexes. Each value is stripped of white space.
Returns the list of strings.
"""
line_list = self.get(section, option)
return process_regexlist(section, option, line_list.splitlines())
TConfigParser = HandyConfigParser | TomlConfigParser
# The default line exclusion regexes.
DEFAULT_EXCLUDE = [
r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)",
r"^\s*(((async )?def .*?)?\)(\s*->.*?)?:\s*)?\.\.\.\s*(#|$)",
r"if (typing\.)?TYPE_CHECKING:",
]
# The default partial branch regexes, to be modified by the user.
DEFAULT_PARTIAL = [
r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)",
]
# The default partial branch regexes, based on Python semantics.
# These are any Python branching constructs that can't actually execute all
# their branches.
DEFAULT_PARTIAL_ALWAYS = [
"while (True|1|False|0):",
"if (True|1|False|0):",
]
|
HandyConfigParser
|