Dataset columns:

  language    stringclasses   1 value
  repo        stringclasses   346 values
  path        stringlengths   6 to 201
  class_span  dict
  source      stringlengths   21 to 2.38M
  target      stringlengths   1 to 96
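Each record pairs a class body whose name has been replaced with "____" (the source column) against the original class name (the target column); class_span appears to give character offsets of the class within the file at path. The sample rows below list one record per block of fields. As a minimal sketch of how such records might be consumed, assuming they are exported as JSON Lines with the column names above (the file name "train.jsonl" is an assumption, not part of this listing):

# Minimal sketch, assuming JSON Lines export with the columns listed above;
# "train.jsonl" is a hypothetical file name.
import json

with open("train.jsonl") as f:
    for line in f:
        record = json.loads(line)
        masked_code = record["source"]   # class body with the name replaced by "____"
        class_name = record["target"]    # the masked class name
        span = record["class_span"]      # e.g. {"start": 7619, "end": 9572}
        print(record["repo"], record["path"], class_name, span["end"] - span["start"])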
language: python
repo: tensorflow__tensorflow
path: tensorflow/python/data/ops/load_op.py
class_span: { "start": 7619, "end": 9572 }
source:
class ____(dataset_ops.DatasetSource): """A dataset for listing snapshot chunk files. It supports listing partially written snapshots. When a snapshot is being written, it returns the currently available chunk files. """ def __init__(self, snapshot_path: str): self._snapshot_path = snapshot_path variant_tensor = ged_ops.list_snapshot_chunks_dataset( snapshot_path, **self._flat_structure) super().__init__(variant_tensor) @property def element_spec(self) -> tensor_spec.TensorSpec: return tensor_spec.TensorSpec([], dtypes.string) def _validate_snapshot( path: str, metadata: snapshot_pb2.DistributedSnapshotMetadata, element_spec: Any, compression: str) -> None: """Validates a tf.data distributed snapshot. Args: path: Root path of the distributed snapshot. metadata: The DistributedSnapshotMetadata of the snapshot. element_spec: Dataset element_spec. compression: Compression method used for saving. Raises: ValueError if the snapshot is invalid. """ error_file = _pywrap_snapshot_utils.TF_DATA_SnapshotErrorFilePath(path) if gfile.Exists(error_file): with gfile.GFile(error_file, "r") as f: raise ValueError( f"Failed to load tf.data snapshot at {path}. The save job failed to " f"write it. Status: {f.read()}") snapshot_element_spec = _parse_element_spec(metadata.element_spec) if element_spec and element_spec != snapshot_element_spec: raise ValueError( f"Failed to load tf.data snapshot at {path}. User specified " f"element_spec {element_spec}, but the actual element_spec is " f"{snapshot_element_spec}.") if compression and compression != metadata.compression: raise ValueError( f"Failed to load tf.data snapshot at {path}. User specified " f"compression {compression}, but the actual compression is " f"{metadata.compression}.")
target: _ListSnapshotChunksDataset

language: python
repo: realpython__materials
path: pygame-a-primer/py_tut_with_images.py
class_span: { "start": 1729, "end": 2585 }
source:
class ____(pygame.sprite.Sprite):
    def __init__(self):
        super(Enemy, self).__init__()
        self.surf = pygame.image.load("missile.png").convert()
        self.surf.set_colorkey((255, 255, 255), RLEACCEL)
        # The starting position is randomly generated, as is the speed
        self.rect = self.surf.get_rect(
            center=(
                random.randint(SCREEN_WIDTH + 20, SCREEN_WIDTH + 100),
                random.randint(0, SCREEN_HEIGHT),
            )
        )
        self.speed = random.randint(5, 20)

    # Move the enemy based on speed
    # Remove it when it passes the left edge of the screen
    def update(self):
        self.rect.move_ip(-self.speed, 0)
        if self.rect.right < 0:
            self.kill()


# Define the cloud object extending pygame.sprite.Sprite
# Use an image for a better looking sprite
target: Enemy

language: python
repo: getsentry__sentry
path: src/sentry/auth/services/auth/model.py
class_span: { "start": 678, "end": 956 }
source:
class ____(RpcModel):
    id: int = -1
    organization_id: int = -1
    key: str = Field(repr=False, default="")
    status: int = 0
    allowed_origins: list[str] = Field(default_factory=list)
    label: str = ""
    scope_list: list[str] = Field(default_factory=list)
target: RpcApiKey

language: python
repo: pytorch__pytorch
path: torch/nn/modules/instancenorm.py
class_span: { "start": 4167, "end": 7754 }
source:
class ____(_InstanceNorm): r"""Applies Instance Normalization. This operation applies Instance Normalization over a 2D (unbatched) or 3D (batched) input as described in the paper `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/abs/1607.08022>`__. .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated per-dimension separately for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``. The variance is calculated via the biased estimator, equivalent to `torch.var(input, correction=0)`. By default, this layer uses instance statistics computed from input data in both training and evaluation modes. If :attr:`track_running_stats` is set to ``True``, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default :attr:`momentum` of 0.1. .. note:: This :attr:`momentum` argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the new observed value. .. note:: :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but have some subtle differences. :class:`InstanceNorm1d` is applied on each channel of channeled data like multidimensional time series, but :class:`LayerNorm` is usually applied on entire sample and often in NLP tasks. Additionally, :class:`LayerNorm` applies elementwise affine transform, while :class:`InstanceNorm1d` usually don't apply affine transform. Args: num_features: number of features or channels :math:`C` of the input eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters, initialized the same way as done for batch normalization. Default: ``False``. track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``False`` Shape: - Input: :math:`(N, C, L)` or :math:`(C, L)` - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input) Examples:: >>> # Without Learnable Parameters >>> m = nn.InstanceNorm1d(100) >>> # With Learnable Parameters >>> m = nn.InstanceNorm1d(100, affine=True) >>> input = torch.randn(20, 100, 40) >>> output = m(input) """ def _get_no_batch_dim(self) -> int: return 2 def _check_input_dim(self, input) -> None: if input.dim() not in (2, 3): raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")
target: InstanceNorm1d

language: python
repo: django__django
path: tests/template_tests/filter_tests/test_join.py
class_span: { "start": 2884, "end": 3626 }
source:
class ____(SimpleTestCase):
    def test_list(self):
        self.assertEqual(join([0, 1, 2], "glue"), "0glue1glue2")

    def test_autoescape(self):
        self.assertEqual(
            join(["<a>", "<img>", "</a>"], "<br>"),
            "&lt;a&gt;&lt;br&gt;&lt;img&gt;&lt;br&gt;&lt;/a&gt;",
        )

    def test_autoescape_off(self):
        self.assertEqual(
            join(["<a>", "<img>", "</a>"], "<br>", autoescape=False),
            "<a><br><img><br></a>",
        )

    def test_noniterable_arg(self):
        obj = object()
        self.assertEqual(join(obj, "<br>"), obj)

    def test_noniterable_arg_autoescape_off(self):
        obj = object()
        self.assertEqual(join(obj, "<br>", autoescape=False), obj)
target: FunctionTests

language: python
repo: dagster-io__dagster
path: python_modules/automation/automation_tests/dagster_docs_tests/test_fixtures/test_public_class.py
class_span: { "start": 2123, "end": 2498 }
source:
class ____: """Another public class to test multiple classes.""" @public def another_public_method(self): """Another public method that should be validated.""" return "another_public" def another_non_public_method(self): """Another non-public method that should NOT be validated.""" return "another_non_public"
target: AnotherPublicClass

language: python
repo: airbytehq__airbyte
path: airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/metadata.py
class_span: { "start": 436, "end": 504 }
source:
class ____(Check):
    category = CheckCategory.METADATA
target: MetadataCheck

language: python
repo: eventlet__eventlet
path: eventlet/zipkin/_thrift/zipkinCore/ttypes.py
class_span: { "start": 5852, "end": 8873 }
source:
class ____: """ Attributes: - key - value - annotation_type - host """ thrift_spec = ( None, # 0 (1, TType.STRING, 'key', None, None, ), # 1 (2, TType.STRING, 'value', None, None, ), # 2 (3, TType.I32, 'annotation_type', None, None, ), # 3 (4, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 4 ) def __init__(self, key=None, value=None, annotation_type=None, host=None,): self.key = key self.value = value self.annotation_type = annotation_type self.host = host def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.key = iprot.readString(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.value = iprot.readString(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.annotation_type = iprot.readI32(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.host = Endpoint() self.host.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('BinaryAnnotation') if self.key is not None: oprot.writeFieldBegin('key', TType.STRING, 1) oprot.writeString(self.key) oprot.writeFieldEnd() if self.value is not None: oprot.writeFieldBegin('value', TType.STRING, 2) oprot.writeString(self.value) oprot.writeFieldEnd() if self.annotation_type is not None: oprot.writeFieldBegin('annotation_type', TType.I32, 3) oprot.writeI32(self.annotation_type) oprot.writeFieldEnd() if self.host is not None: oprot.writeFieldBegin('host', TType.STRUCT, 4) self.host.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other)
target: BinaryAnnotation

language: python
repo: Pylons__pyramid
path: tests/test_path.py
class_span: { "start": 5508, "end": 6385 }
source:
class ____(unittest.TestCase):
    def _getTargetClass(self):
        from pyramid.path import Resolver

        return Resolver

    def _makeOne(self, package):
        return self._getTargetClass()(package)

    def test_get_package_caller_package(self):
        from pyramid.path import CALLER_PACKAGE
        import tests

        self.assertEqual(self._makeOne(CALLER_PACKAGE).get_package(), tests)

    def test_get_package_name_caller_package(self):
        from pyramid.path import CALLER_PACKAGE

        self.assertEqual(
            self._makeOne(CALLER_PACKAGE).get_package_name(), 'tests'
        )

    def test_get_package_string(self):
        import tests

        self.assertEqual(self._makeOne('tests').get_package(), tests)

    def test_get_package_name_string(self):
        self.assertEqual(self._makeOne('tests').get_package_name(), 'tests')
target: TestResolver

language: python
repo: pallets__werkzeug
path: examples/coolmagic/utils.py
class_span: { "start": 2217, "end": 2382 }
source:
class ____(BaseResponse):
    """
    The concrete response object for the WSGI application.
    """

    charset = "utf-8"
    default_mimetype = "text/html"
target: Response

language: python
repo: apache__airflow
path: providers/amazon/tests/unit/amazon/aws/operators/test_emr_serverless.py
class_span: { "start": 14085, "end": 45474 }
source:
class ____: def setup_method(self): self.mock_context = mock.MagicMock() @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_job_run_app_started(self, mock_conn, mock_get_waiter): mock_get_waiter().wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } mock_conn.get_job_run.return_value = {"jobRun": {"state": "SUCCESS"}} operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) id = operator.execute(self.mock_context) default_name = operator.name assert operator.wait_for_completion is True mock_conn.get_application.assert_called_once_with(applicationId=application_id) assert id == job_run_id mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, executionRoleArn=execution_role_arn, jobDriver=job_driver, configurationOverrides=configuration_overrides, name=default_name, ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_job_run_job_failed(self, mock_conn, mock_get_waiter): error = WaiterError( name="test_name", reason="Waiter encountered a terminal failure state:", last_response={"jobRun": {"state": "FAILED"}}, ) mock_get_waiter().wait.side_effect = [error] mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) with pytest.raises(AirflowException) as ex_message: operator.execute(self.mock_context) assert "Serverless Job failed:" in str(ex_message.value) default_name = operator.name mock_conn.get_application.assert_called_once_with(applicationId=application_id) mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, executionRoleArn=execution_role_arn, jobDriver=job_driver, configurationOverrides=configuration_overrides, name=default_name, ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_job_run_app_not_started(self, mock_conn, mock_get_waiter): mock_get_waiter().wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "CREATING"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) id = operator.execute(self.mock_context) default_name = operator.name assert operator.wait_for_completion is True mock_conn.get_application.assert_called_once_with(applicationId=application_id) assert mock_get_waiter().wait.call_count == 2 assert id == job_run_id mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, 
executionRoleArn=execution_role_arn, jobDriver=job_driver, configurationOverrides=configuration_overrides, name=default_name, ) @mock.patch("time.sleep", return_value=True) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_job_run_app_not_started_app_failed(self, mock_conn, mock_get_waiter, mock_time): error1 = WaiterError( name="test_name", reason="test-reason", last_response={"application": {"state": "CREATING", "stateDetails": "test-details"}}, ) error2 = WaiterError( name="test_name", reason="Waiter encountered a terminal failure state:", last_response={"application": {"state": "TERMINATED", "stateDetails": "test-details"}}, ) mock_get_waiter().wait.side_effect = [error1, error2] mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) with pytest.raises(AirflowException) as ex_message: operator.execute(self.mock_context) assert "Serverless Application failed to start:" in str(ex_message.value) assert operator.wait_for_completion is True assert mock_get_waiter().wait.call_count == 2 @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_job_run_app_not_started_no_wait_for_completion(self, mock_conn, mock_get_waiter): mock_get_waiter().wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "CREATING"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, wait_for_completion=False, ) id = operator.execute(self.mock_context) default_name = operator.name mock_conn.get_application.assert_called_once_with(applicationId=application_id) mock_get_waiter().wait.assert_called_once() assert id == job_run_id mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, executionRoleArn=execution_role_arn, jobDriver=job_driver, configurationOverrides=configuration_overrides, name=default_name, ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_job_run_app_started_no_wait_for_completion(self, mock_conn, mock_get_waiter): mock_get_waiter().wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, wait_for_completion=False, ) id = operator.execute(self.mock_context) assert id == job_run_id default_name = operator.name mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, executionRoleArn=execution_role_arn, jobDriver=job_driver, configurationOverrides=configuration_overrides, name=default_name, ) assert not 
mock_get_waiter().wait.called @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_failed_start_job_run(self, mock_conn, mock_get_waiter): mock_get_waiter().wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "CREATING"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 404}, } operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) with pytest.raises(AirflowException) as ex_message: operator.execute(self.mock_context) assert "EMR serverless job failed to start:" in str(ex_message.value) default_name = operator.name mock_conn.get_application.assert_called_once_with(applicationId=application_id) mock_get_waiter().wait.assert_called_once() mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, executionRoleArn=execution_role_arn, jobDriver=job_driver, configurationOverrides=configuration_overrides, name=default_name, ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_start_job_run_fail_on_wait_for_completion(self, mock_conn, mock_get_waiter): error = WaiterError( name="mock_waiter_error", reason="Waiter encountered a terminal failure state:", last_response={"jobRun": {"state": "FAILED", "stateDetails": "Test Details"}}, ) mock_get_waiter().wait.side_effect = [error] mock_conn.get_application.return_value = {"application": {"state": "CREATED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) with pytest.raises(AirflowException) as ex_message: operator.execute(self.mock_context) assert "Serverless Job failed:" in str(ex_message.value) default_name = operator.name mock_conn.get_application.call_count == 2 mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, executionRoleArn=execution_role_arn, jobDriver=job_driver, configurationOverrides=configuration_overrides, name=default_name, ) mock_get_waiter().wait.assert_called_once() @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_start_job_default_name(self, mock_conn, mock_get_waiter): mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } mock_get_waiter().wait.return_value = True operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) operator.execute(self.mock_context) default_name = operator.name generated_name_uuid = default_name.split("_")[-1] assert default_name.startswith("emr_serverless_job_airflow") mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, executionRoleArn=execution_role_arn, 
jobDriver=job_driver, configurationOverrides=configuration_overrides, name=f"emr_serverless_job_airflow_{UUID(generated_name_uuid, version=4)}", ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_start_job_custom_name(self, mock_conn, mock_get_waiter): mock_get_waiter().wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} custom_name = "test_name" mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, name=custom_name, ) operator.execute(self.mock_context) mock_conn.start_job_run.assert_called_once_with( clientToken=client_request_token, applicationId=application_id, executionRoleArn=execution_role_arn, jobDriver=job_driver, configurationOverrides=configuration_overrides, name=custom_name, ) @mock.patch.object(EmrServerlessHook, "conn") def test_cancel_job_run(self, mock_conn): mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } mock_conn.get_job_run.return_value = {"jobRun": {"state": "RUNNING"}} operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, wait_for_completion=False, ) id = operator.execute(self.mock_context) operator.on_kill() mock_conn.cancel_job_run.assert_called_once_with( applicationId=application_id, jobRunId=id, ) @pytest.mark.parametrize( ("waiter_delay", "waiter_max_attempts", "expected"), [ (NOTSET, NOTSET, [60, 25]), (30, 10, [30, 10]), ], ) def test_start_job_waiter_params( self, waiter_delay, waiter_max_attempts, expected, ): operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, waiter_delay=waiter_delay, waiter_max_attempts=waiter_max_attempts, ) assert operator.wait_for_completion is True assert operator.waiter_delay == expected[0] assert operator.waiter_max_attempts == expected[1] @mock.patch.object(EmrServerlessHook, "conn") def test_start_job_deferrable(self, mock_conn): mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, deferrable=True, ) with pytest.raises(TaskDeferred): operator.execute(self.mock_context) @mock.patch.object(EmrServerlessHook, "conn") def test_start_job_deferrable_without_wait_for_completion(self, mock_conn): mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, 
job_driver=job_driver, configuration_overrides=configuration_overrides, deferrable=True, wait_for_completion=False, ) result = operator.execute(self.mock_context) assert result == job_run_id @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") def test_start_job_deferrable_app_not_started(self, mock_conn, mock_get_waiter): mock_get_waiter.wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "CREATING"}} mock_conn.start_application.return_value = { "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, deferrable=True, ) with pytest.raises(TaskDeferred): operator.execute(self.mock_context) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessCloudWatchLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessDashboardLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessS3LogsLink.persist") def test_links_start_job_default( self, mock_s3_logs_link, mock_logs_link, mock_dashboard_link, mock_cloudwatch_link, mock_conn, mock_get_waiter, ): mock_get_waiter.wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) operator.execute(self.mock_context) mock_conn.start_job_run.assert_called_once() mock_s3_logs_link.assert_not_called() mock_logs_link.assert_not_called() mock_dashboard_link.assert_not_called() mock_cloudwatch_link.assert_not_called() @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessCloudWatchLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessDashboardLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessS3LogsLink.persist") def test_links_s3_enabled( self, mock_s3_logs_link, mock_logs_link, mock_dashboard_link, mock_cloudwatch_link, mock_conn, mock_get_waiter, ): mock_get_waiter.wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=s3_configuration_overrides, ) operator.execute(self.mock_context) mock_conn.start_job_run.assert_called_once() mock_logs_link.assert_not_called() mock_dashboard_link.assert_not_called() mock_cloudwatch_link.assert_not_called() mock_s3_logs_link.assert_called_once_with( context=mock.ANY, operator=mock.ANY, region_name=mock.ANY, aws_partition=mock.ANY, log_uri=s3_logs_location, application_id=application_id, 
job_run_id=job_run_id, ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessCloudWatchLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessDashboardLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessS3LogsLink.persist") def test_links_cloudwatch_enabled( self, mock_s3_logs_link, mock_logs_link, mock_dashboard_link, mock_cloudwatch_link, mock_conn, mock_get_waiter, ): mock_get_waiter.wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=cloudwatch_configuration_overrides, ) operator.execute(self.mock_context) mock_conn.start_job_run.assert_called_once() mock_logs_link.assert_not_called() mock_dashboard_link.assert_not_called() mock_s3_logs_link.assert_not_called() mock_cloudwatch_link.assert_called_once_with( context=mock.ANY, operator=mock.ANY, region_name=mock.ANY, aws_partition=mock.ANY, awslogs_group=cloudwatch_logs_group_name, stream_prefix=f"{cloudwatch_logs_prefix}/applications/{application_id}/jobs/{job_run_id}", ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessCloudWatchLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessDashboardLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessS3LogsLink.persist") def test_links_applicationui_enabled( self, mock_s3_logs_link, mock_logs_link, mock_dashboard_link, mock_cloudwatch_link, mock_conn, mock_get_waiter, ): mock_get_waiter.wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=cloudwatch_configuration_overrides, enable_application_ui_links=True, ) operator.execute(self.mock_context) mock_conn.start_job_run.assert_called_once() mock_logs_link.assert_not_called() mock_s3_logs_link.assert_not_called() mock_dashboard_link.assert_called_with( context=mock.ANY, operator=mock.ANY, region_name=mock.ANY, aws_partition=mock.ANY, conn_id=mock.ANY, application_id=application_id, job_run_id=job_run_id, ) mock_cloudwatch_link.assert_called_once_with( context=mock.ANY, operator=mock.ANY, region_name=mock.ANY, aws_partition=mock.ANY, awslogs_group=cloudwatch_logs_group_name, stream_prefix=f"{cloudwatch_logs_prefix}/applications/{application_id}/jobs/{job_run_id}", ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessCloudWatchLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessDashboardLink.persist") 
@mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessS3LogsLink.persist") def test_links_applicationui_with_spark_enabled( self, mock_s3_logs_link, mock_logs_link, mock_dashboard_link, mock_cloudwatch_link, mock_conn, mock_get_waiter, ): mock_get_waiter.wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=spark_job_driver, configuration_overrides=s3_configuration_overrides, enable_application_ui_links=True, ) operator.execute(self.mock_context) mock_conn.start_job_run.assert_called_once() mock_logs_link.assert_called_once_with( context=mock.ANY, operator=mock.ANY, region_name=mock.ANY, aws_partition=mock.ANY, conn_id=mock.ANY, application_id=application_id, job_run_id=job_run_id, ) mock_dashboard_link.assert_called_with( context=mock.ANY, operator=mock.ANY, region_name=mock.ANY, aws_partition=mock.ANY, conn_id=mock.ANY, application_id=application_id, job_run_id=job_run_id, ) mock_cloudwatch_link.assert_not_called() mock_s3_logs_link.assert_called_once_with( context=mock.ANY, operator=mock.ANY, region_name=mock.ANY, aws_partition=mock.ANY, log_uri=s3_logs_location, application_id=application_id, job_run_id=job_run_id, ) @mock.patch.object(EmrServerlessHook, "get_waiter") @mock.patch.object(EmrServerlessHook, "conn") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessCloudWatchLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessDashboardLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessLogsLink.persist") @mock.patch("airflow.providers.amazon.aws.links.emr.EmrServerlessS3LogsLink.persist") def test_links_spark_without_applicationui_enabled( self, mock_s3_logs_link, mock_logs_link, mock_dashboard_link, mock_cloudwatch_link, mock_conn, mock_get_waiter, ): mock_get_waiter.wait.return_value = True mock_conn.get_application.return_value = {"application": {"state": "STARTED"}} mock_conn.start_job_run.return_value = { "jobRunId": job_run_id, "ResponseMetadata": {"HTTPStatusCode": 200}, } operator = EmrServerlessStartJobOperator( task_id=task_id, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=spark_job_driver, configuration_overrides=s3_configuration_overrides, enable_application_ui_links=False, ) operator.execute(self.mock_context) mock_conn.start_job_run.assert_called_once() mock_logs_link.assert_not_called() mock_dashboard_link.assert_not_called() mock_cloudwatch_link.assert_not_called() mock_s3_logs_link.assert_called_once_with( context=mock.ANY, operator=mock.ANY, region_name=mock.ANY, aws_partition=mock.ANY, log_uri=s3_logs_location, application_id=application_id, job_run_id=job_run_id, ) def test_template_fields(self): operator = EmrServerlessStartJobOperator( task_id=task_id, client_request_token=client_request_token, application_id=application_id, execution_role_arn=execution_role_arn, job_driver=job_driver, configuration_overrides=configuration_overrides, ) template_fields = list(operator.template_fields) + list(operator.template_fields_renderers.keys()) class_fields = operator.__dict__ missing_fields = [field for field in template_fields if field not in class_fields] assert not 
missing_fields, f"Templated fields are not available {missing_fields}"
target: TestEmrServerlessStartJobOperator

language: python
repo: mlflow__mlflow
path: tests/store/artifact/test_http_artifact_repo.py
class_span: { "start": 1560, "end": 16345 }
source:
class ____: def __init__(self, name, mode): self.name = name self.mode = mode def __eq__(self, other): return self.name == other.name and self.mode == other.mode @pytest.fixture def http_artifact_repo(): artifact_uri = "http://test.com/api/2.0/mlflow-artifacts/artifacts" return HttpArtifactRepository(artifact_uri) @pytest.mark.parametrize( ("filename", "expected_mime_type"), [ ("c.txt", "text/plain"), ("c.pkl", "application/octet-stream"), ("MLmodel", "text/plain"), ], ) @pytest.mark.parametrize("artifact_path", [None, "dir"]) def test_log_artifact( http_artifact_repo, tmp_path, artifact_path, filename, expected_mime_type, monkeypatch, ): file_path = tmp_path.joinpath(filename) file_path.write_text("0") def assert_called_log_artifact(mock_http_request): paths = (artifact_path, file_path.name) if artifact_path else (file_path.name,) mock_http_request.assert_called_once_with( http_artifact_repo._host_creds, posixpath.join("/", *paths), "PUT", data=FileObjectMatcher(str(file_path), "rb"), extra_headers={"Content-Type": expected_mime_type}, ) with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse({}, 200), ) as mock_put: http_artifact_repo.log_artifact(file_path, artifact_path) assert_called_log_artifact(mock_put) with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse({}, 400), ): with pytest.raises(Exception, match="request failed"): http_artifact_repo.log_artifact(file_path, artifact_path) monkeypatch.setenv("MLFLOW_ENABLE_PROXY_MULTIPART_UPLOAD", "true") # assert mpu is triggered when file size is larger than minimum file size file_path.write_text("0" * MLFLOW_MULTIPART_UPLOAD_MINIMUM_FILE_SIZE.get()) with mock.patch.object( http_artifact_repo, "_try_multipart_upload", return_value=200 ) as mock_mpu: http_artifact_repo.log_artifact(file_path, artifact_path) mock_mpu.assert_called_once() # assert reverted to normal upload when mpu is not supported # mock that create_multipart_upload will returns a 400 error with appropriate message with ( mock.patch.object( http_artifact_repo, "create_multipart_upload", side_effect=HTTPError( response=MockResponse( data={ "message": "Multipart upload is not supported for the current " "artifact repository" }, status_code=501, ) ), ), mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse({}, 200), ) as mock_put, ): http_artifact_repo.log_artifact(file_path, artifact_path) assert_called_log_artifact(mock_put) # assert if mpu is triggered but the uploads failed, mpu is aborted and exception is raised with ( mock.patch("requests.put", side_effect=Exception("MPU_UPLOAD_FAILS")), mock.patch.object( http_artifact_repo, "create_multipart_upload", return_value=CreateMultipartUploadResponse( upload_id="upload_id", credentials=[MultipartUploadCredential(url="url", part_number=1, headers={})], ), ), mock.patch.object( http_artifact_repo, "abort_multipart_upload", return_value=None, ) as mock_abort, ): with pytest.raises(Exception, match="MPU_UPLOAD_FAILS"): http_artifact_repo.log_artifact(file_path, artifact_path) mock_abort.assert_called_once() @pytest.mark.parametrize("artifact_path", [None, "dir"]) def test_log_artifacts(http_artifact_repo, tmp_path, artifact_path): tmp_path_a = tmp_path.joinpath("a.txt") d = tmp_path.joinpath("dir") d.mkdir() tmp_path_b = d.joinpath("b.txt") tmp_path_a.write_text("0") tmp_path_b.write_text("1") with mock.patch.object(http_artifact_repo, "log_artifact") as mock_log_artifact: 
http_artifact_repo.log_artifacts(tmp_path, artifact_path) mock_log_artifact.assert_has_calls( [ mock.call(str(tmp_path_a), artifact_path), mock.call( str(tmp_path_b), posixpath.join(artifact_path, "dir") if artifact_path else "dir", ), ], ) with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse({}, 400), ): with pytest.raises(Exception, match="request failed"): http_artifact_repo.log_artifacts(tmp_path, artifact_path) def test_list_artifacts(http_artifact_repo): with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse({}, 200), ) as mock_get: assert http_artifact_repo.list_artifacts() == [] endpoint = "/mlflow-artifacts/artifacts" url, _ = http_artifact_repo.artifact_uri.split(endpoint, maxsplit=1) mock_get.assert_called_once_with( get_default_host_creds(url), endpoint, "GET", params={"path": ""}, ) with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse( { "files": [ {"path": "1.txt", "is_dir": False, "file_size": 1}, {"path": "dir", "is_dir": True}, ] }, 200, ), ): assert [a.path for a in http_artifact_repo.list_artifacts()] == ["1.txt", "dir"] with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse( { "files": [ {"path": "1.txt", "is_dir": False, "file_size": 1}, {"path": "dir", "is_dir": True}, ] }, 200, ), ): assert [a.path for a in http_artifact_repo.list_artifacts(path="path")] == [ "path/1.txt", "path/dir", ] with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse({}, 400), ): with pytest.raises(Exception, match="request failed"): http_artifact_repo.list_artifacts() @pytest.mark.parametrize("path", ["/tmp/path", "../../path", "%2E%2E%2Fpath"]) def test_list_artifacts_malicious_path(http_artifact_repo, path): with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse( { "files": [ {"path": path, "is_dir": False, "file_size": 1}, ] }, 200, ), ): with pytest.raises(MlflowException, match="Invalid path"): http_artifact_repo.list_artifacts() def read_file(path): with open(path) as f: return f.read() @pytest.mark.parametrize("remote_file_path", ["a.txt", "dir/b.xtx"]) def test_download_file(http_artifact_repo, tmp_path, remote_file_path): with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockStreamResponse("data", 200), ) as mock_get: file_path = tmp_path.joinpath(posixpath.basename(remote_file_path)) http_artifact_repo._download_file(remote_file_path, file_path) mock_get.assert_called_once_with( http_artifact_repo._host_creds, posixpath.join("/", remote_file_path), "GET", stream=True, ) assert file_path.read_text() == "data" with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockStreamResponse("data", 400), ): with pytest.raises(Exception, match="request failed"): http_artifact_repo._download_file(remote_file_path, tmp_path) def test_download_artifacts(http_artifact_repo, tmp_path): # This test simulates downloading artifacts in the following structure: # --------- # - a.txt # - dir # - b.txt # --------- def http_request(_host_creds, endpoint, _method, **kwargs): # Responses for list_artifacts if params := kwargs.get("params"): if params.get("path") == "": return MockResponse( { "files": [ {"path": "a.txt", "is_dir": False, "file_size": 1}, {"path": "dir", "is_dir": True}, ] }, 200, ) elif params.get("path") == "dir": return MockResponse( { "files": [ 
{"path": "b.txt", "is_dir": False, "file_size": 1}, ] }, 200, ) else: Exception("Unreachable") # Responses for _download_file if endpoint == "/a.txt": return MockStreamResponse("data_a", 200) elif endpoint == "/dir/b.txt": return MockStreamResponse("data_b", 200) else: raise Exception("Unreachable") with mock.patch("mlflow.store.artifact.http_artifact_repo.http_request", http_request): http_artifact_repo.download_artifacts("", tmp_path) paths = [os.path.join(root, f) for root, _, files in os.walk(tmp_path) for f in files] assert [os.path.relpath(p, tmp_path) for p in paths] == [ "a.txt", os.path.join("dir", "b.txt"), ] assert read_file(paths[0]) == "data_a" assert read_file(paths[1]) == "data_b" def test_default_host_creds(monkeypatch): artifact_uri = "https://test.com" username = "user" password = "pass" token = "token" ignore_tls_verification = False client_cert_path = "client_cert_path" server_cert_path = "server_cert_path" expected_host_creds = MlflowHostCreds( host=artifact_uri, username=username, password=password, token=token, ignore_tls_verification=ignore_tls_verification, client_cert_path=client_cert_path, server_cert_path=server_cert_path, ) repo = HttpArtifactRepository(artifact_uri) monkeypatch.setenv(MLFLOW_TRACKING_USERNAME.name, username) monkeypatch.setenv(MLFLOW_TRACKING_PASSWORD.name, password) monkeypatch.setenv(MLFLOW_TRACKING_TOKEN.name, token) monkeypatch.setenv(MLFLOW_TRACKING_INSECURE_TLS.name, str(ignore_tls_verification)) monkeypatch.setenv(MLFLOW_TRACKING_CLIENT_CERT_PATH.name, client_cert_path) monkeypatch.setenv(MLFLOW_TRACKING_SERVER_CERT_PATH.name, server_cert_path) assert repo._host_creds == expected_host_creds @pytest.mark.parametrize("remote_file_path", ["a.txt", "dir/b.txt", None]) def test_delete_artifacts(http_artifact_repo, remote_file_path): with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockStreamResponse("data", 200), ) as mock_get: http_artifact_repo.delete_artifacts(remote_file_path) mock_get.assert_called_once_with( http_artifact_repo._host_creds, posixpath.join("/", remote_file_path or ""), "DELETE", stream=True, ) def test_create_multipart_upload(http_artifact_repo, monkeypatch): monkeypatch.setenv("MLFLOW_ENABLE_PROXY_MULTIPART_UPLOAD", "true") with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse( { "upload_id": "upload_id", "credentials": [ { "url": "/some/url", "part_number": 1, "headers": {}, } ], }, 200, ), ): response = http_artifact_repo.create_multipart_upload("", 1) assert response.upload_id == "upload_id" assert len(response.credentials) == 1 assert response.credentials[0].url == "/some/url" def test_complete_multipart_upload(http_artifact_repo, monkeypatch): monkeypatch.setenv("MLFLOW_ENABLE_PROXY_MULTIPART_UPLOAD", "true") with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse({}, 200), ) as mock_post: http_artifact_repo.complete_multipart_upload( local_file="local_file", upload_id="upload_id", parts=[ MultipartUploadPart(part_number=1, etag="etag1"), MultipartUploadPart(part_number=2, etag="etag2"), ], artifact_path="artifact/path", ) endpoint = "/mlflow-artifacts" url, _ = http_artifact_repo.artifact_uri.split(endpoint, maxsplit=1) mock_post.assert_called_once_with( get_default_host_creds(url), "/mlflow-artifacts/mpu/complete/artifact/path", "POST", json={ "path": "local_file", "upload_id": "upload_id", "parts": [ {"part_number": 1, "etag": "etag1", "url": None}, {"part_number": 2, "etag": "etag2", 
"url": None}, ], }, ) def test_abort_multipart_upload(http_artifact_repo, monkeypatch): monkeypatch.setenv("MLFLOW_ENABLE_PROXY_MULTIPART_UPLOAD", "true") with mock.patch( "mlflow.store.artifact.http_artifact_repo.http_request", return_value=MockResponse({}, 200), ) as mock_post: http_artifact_repo.abort_multipart_upload( local_file="local_file", upload_id="upload_id", artifact_path="artifact/path", ) endpoint = "/mlflow-artifacts" url, _ = http_artifact_repo.artifact_uri.split(endpoint, maxsplit=1) mock_post.assert_called_once_with( get_default_host_creds(url), "/mlflow-artifacts/mpu/abort/artifact/path", "POST", json={ "path": "local_file", "upload_id": "upload_id", }, )
target: FileObjectMatcher

language: python
repo: rq__rq
path: tests/fixtures.py
class_span: { "start": 5055, "end": 6116 }
source:
class ____:
    pass


def kill_horse(horse_pid_key: str, connection_kwargs: dict, interval: float = 1.5):
    """
    Kill the worker horse process by its PID stored in a Redis key.

    :param horse_pid_key: Redis key where the horse PID is stored
    :param connection_kwargs: Connection parameters for Redis
    :param interval: Time to wait before sending the kill signal
    """
    time.sleep(interval)
    redis = Redis(**connection_kwargs)
    value = redis.get(horse_pid_key)
    if value:
        pid = int(value)
        os.kill(pid, signal.SIGKILL)


def kill_worker(pid: int, double_kill: bool, interval: float = 1.5):
    # wait for the worker to be started over on the main process
    time.sleep(interval)
    os.kill(pid, signal.SIGTERM)
    if double_kill:
        # give the worker time to switch signal handler
        time.sleep(interval)
        os.kill(pid, signal.SIGTERM)


def resume_worker(connection_kwargs: dict, interval: float = 1):
    # Wait and resume RQ
    time.sleep(interval)
    resume(Redis(**connection_kwargs))
target: DummyQueue

language: python
repo: walkccc__LeetCode
path: solutions/1105. Filling Bookcase Shelves/1105.py
class_span: { "start": 0, "end": 586 }
source:
class ____:
    def minHeightShelves(self, books: list[list[int]], shelfWidth: int) -> int:
        # dp[i] := the minimum height to place the first i books
        dp = [0] + [math.inf] * len(books)

        for i in range(len(books)):
            sumThickness = 0
            maxHeight = 0
            # Place books[j..i] on a new shelf.
            for j in range(i, -1, -1):
                thickness, height = books[j]
                sumThickness += thickness
                if sumThickness > shelfWidth:
                    break
                maxHeight = max(maxHeight, height)
                dp[i + 1] = min(dp[i + 1], dp[j] + maxHeight)

        return dp[-1]
target: Solution

language: python
repo: getsentry__sentry
path: tests/sentry/core/endpoints/test_project_users.py
class_span: { "start": 364, "end": 6105 }
source:
class ____(APITestCase, SnubaTestCase): endpoint = "sentry-api-0-project-users" method = "get" def setUp(self) -> None: super().setUp() self.project = self.create_project( organization=self.organization, date_added=(timezone.now() - timedelta(hours=2)) ) timestamp = (timezone.now() - timedelta(hours=1)).isoformat() self.event1 = self.store_event( project_id=self.project.id, data={ "user": { "id": 1, "email": "foo@example.com", "username": "foobar", "ip_address": "127.0.0.1", }, "event_id": "b" * 32, "timestamp": timestamp, }, ) self.euser1 = EventUser.from_event(self.event1) self.event2 = self.store_event( project_id=self.project.id, data={ "user": { "id": 2, "email": "bar@example.com", "username": "baz", "ip_address": "192.168.0.1", }, "event_id": "c" * 32, "timestamp": timestamp, }, ) self.euser2 = EventUser.from_event(self.event2) def _assert_simple_response(self, response, mock_record): assert len(response.data) == 2 if self.euser1.id is None and self.euser2.id is None: assert list(map(lambda x: x["id"], response.data)) == [None, None] else: assert sorted(map(lambda x: x["id"], response.data)) == sorted( [str(self.euser1.id), str(self.euser2.id)] ) assert_any_analytics_event( mock_record, EventUserEndpointRequest( project_id=self.project.id, endpoint="sentry.core.endpoints.project_users.get", ), ) @mock.patch("sentry.analytics.record") def test_simple(self, mock_record: mock.MagicMock) -> None: self.login_as(user=self.user) response = self.get_success_response( self.organization.slug, self.project.slug, status_code=200 ) self._assert_simple_response(response, mock_record) @mock.patch("sentry.analytics.record") def test_superuser_simple(self, mock_record: mock.MagicMock) -> None: superuser = self.create_user(is_superuser=True) self.login_as(user=superuser, superuser=True) response = self.get_success_response( self.organization.slug, self.project.slug, status_code=200 ) self._assert_simple_response(response, mock_record) @mock.patch("sentry.analytics.record") def test_staff_simple(self, mock_record: mock.MagicMock) -> None: staff_user = self.create_user(is_staff=True) self.login_as(user=staff_user, staff=True) response = self.get_success_response( self.organization.slug, self.project.slug, status_code=200 ) self._assert_simple_response(response, mock_record) def test_empty_search_query(self) -> None: self.login_as(user=self.user) response = self.get_success_response( self.organization.slug, self.project.slug, query="foo", status_code=200 ) assert len(response.data) == 0 def test_username_search(self) -> None: self.login_as(user=self.user) response = self.get_success_response( self.organization.slug, self.project.slug, query="username:baz", status_code=200 ) assert len(response.data) == 1 if self.euser2.id is None: assert response.data[0]["id"] is None else: assert response.data[0]["id"] == str(self.euser2.id) response = self.get_success_response( self.organization.slug, self.project.slug, query="username:ba", status_code=200 ) assert len(response.data) == 0 def test_email_search(self) -> None: self.login_as(user=self.user) response = self.get_success_response( self.organization.slug, self.project.slug, query="email:foo@example.com", status_code=200, ) assert len(response.data) == 1 if self.euser1.id is None: assert response.data[0]["id"] is None else: assert response.data[0]["id"] == str(self.euser1.id) response = self.get_success_response( self.organization.slug, self.project.slug, query="email:@example.com", status_code=200 ) assert len(response.data) == 0 def test_id_search(self) -> 
None: self.login_as(user=self.user) response = self.get_success_response( self.organization.slug, self.project.slug, query="id:1", status_code=200 ) assert len(response.data) == 1 if self.euser1.id is None: assert response.data[0]["id"] is None else: assert response.data[0]["id"] == str(self.euser1.id) response = self.get_success_response( self.organization.slug, self.project.slug, query="id:3", status_code=200 ) assert len(response.data) == 0 def test_ip_search(self) -> None: self.login_as(user=self.user) response = self.get_success_response( self.organization.slug, self.project.slug, query="ip:192.168.0.1", status_code=200 ) assert len(response.data) == 1 if self.euser2.id is None: assert response.data[0]["id"] is None else: assert response.data[0]["id"] == str(self.euser2.id)
target: EventUserProjectUsersTest

language: python
repo: miyuchina__mistletoe
path: mistletoe/contrib/scheme.py
class_span: { "start": 1511, "end": 1693 }
source:
class ____:
    def __init__(self, expr_token, body, env):
        self.params = [child.name for child in expr_token.children]
        self.body = body
        self.env = env
target: Procedure

language: python
repo: python__mypy
path: mypyc/ir/class_ir.py
class_span: { "start": 22038, "end": 23800 }
source:
class ____: """Information needed to construct a non-extension class (Python class). Includes the class dictionary, a tuple of base classes, the class annotations dictionary, and the metaclass. """ def __init__(self, dict: Value, bases: Value, anns: Value, metaclass: Value) -> None: self.dict = dict self.bases = bases self.anns = anns self.metaclass = metaclass def serialize_vtable_entry(entry: VTableMethod) -> JsonDict: return { ".class": "VTableMethod", "cls": entry.cls.fullname, "name": entry.name, "method": entry.method.decl.id, "shadow_method": entry.shadow_method.decl.id if entry.shadow_method else None, } def serialize_vtable(vtable: VTableEntries) -> list[JsonDict]: return [serialize_vtable_entry(v) for v in vtable] def deserialize_vtable_entry(data: JsonDict, ctx: DeserMaps) -> VTableMethod: if data[".class"] == "VTableMethod": return VTableMethod( ctx.classes[data["cls"]], data["name"], ctx.functions[data["method"]], ctx.functions[data["shadow_method"]] if data["shadow_method"] else None, ) assert False, "Bogus vtable .class: %s" % data[".class"] def deserialize_vtable(data: list[JsonDict], ctx: DeserMaps) -> VTableEntries: return [deserialize_vtable_entry(x, ctx) for x in data] def all_concrete_classes(class_ir: ClassIR) -> list[ClassIR] | None: """Return all concrete classes among the class itself and its subclasses.""" concrete = class_ir.concrete_subclasses() if concrete is None: return None if not (class_ir.is_abstract or class_ir.is_trait): concrete.append(class_ir) return concrete
target: NonExtClassInfo

language: python
repo: bokeh__bokeh
path: src/bokeh/models/widgets/buttons.py
class_span: { "start": 2116, "end": 2866 }
source:
class ____(HasProps):
    ''' Shared properties for button-like widgets.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    button_type = Enum(ButtonType, help="""
    A style for the button, signifying it's role. Possible values are one of
    the following:

    .. bokeh-plot::
        :source-position: none

        from bokeh.core.enums import ButtonType
        from bokeh.io import show
        from bokeh.layouts import column
        from bokeh.models import Button

        show(column(
            [Button(label=button_type, button_type=button_type) for button_type in ButtonType]
        ))

    """)

@abstract
target: ButtonLike

language: python
repo: ray-project__ray
path: python/ray/air/tests/mocked_wandb_integration.py
class_span: { "start": 258, "end": 637 }
source:
class ____(
    namedtuple(
        "MockTrial",
        [
            "config",
            "trial_id",
            "trial_name",
            "experiment_dir_name",
            "placement_group_factory",
            "local_path",
        ],
    )
):
    def __hash__(self):
        return hash(self.trial_id)

    def __str__(self):
        return self.trial_name


@dataclass
Trial
python
getsentry__sentry
tests/sentry/workflow_engine/endpoints/serializers/test_workflow_group_history_serializer.py
{ "start": 665, "end": 8903 }
class ____(TestCase): def setUp(self) -> None: super().setUp() self.login_as(user=self.user) self.group = self.create_group() self.project = self.group.project self.organization = self.project.organization self.history: list[WorkflowFireHistory] = [] self.workflow = self.create_workflow(organization=self.organization) self.detector_1 = self.create_detector( project_id=self.project.id, type=MetricIssue.slug, ) DetectorGroup.objects.create( detector=self.detector_1, group=self.group, ) for i in range(3): self.history.append( WorkflowFireHistory( detector=self.detector_1, workflow=self.workflow, group=self.group, event_id=uuid4().hex, ) ) self.group_2 = self.create_group() self.detector_2 = self.create_detector( project_id=self.project.id, type=MetricIssue.slug, ) DetectorGroup.objects.create( detector=self.detector_2, group=self.group_2, ) self.history.append( WorkflowFireHistory( detector=self.detector_2, workflow=self.workflow, group=self.group_2, event_id=uuid4().hex, ) ) self.group_3 = self.create_group() self.detector_3 = self.create_detector( project_id=self.project.id, type=MetricIssue.slug, ) DetectorGroup.objects.create( detector=self.detector_3, group=self.group_3, ) for i in range(2): self.history.append( WorkflowFireHistory( detector=self.detector_3, workflow=self.workflow, group=self.group_3, event_id=uuid4().hex, ) ) # this will be ordered after the WFH with self.detector_1 self.detector_4 = self.create_detector( project_id=self.project.id, type=MetricIssue.slug, ) self.workflow_2 = self.create_workflow(organization=self.organization) self.history.append( WorkflowFireHistory( detector=self.detector_4, workflow=self.workflow_2, group=self.group, event_id=uuid4().hex, ) ) histories: list[WorkflowFireHistory] = WorkflowFireHistory.objects.bulk_create(self.history) # manually update date_added for i in range(3): histories[i].update(date_added=before_now(days=i + 1)) histories[3].update(date_added=before_now(days=1)) for i in range(2): histories[i + 4].update(date_added=before_now(days=i + 1)) histories[-1].update(date_added=before_now(days=0)) self.base_triggered_date = before_now(days=1) self.login_as(self.user) def assert_expected_results( self, workflow: Workflow, start: datetime, end: datetime, expected_results: list[WorkflowGroupHistory], cursor: Cursor | None = None, per_page: int = 25, ) -> CursorResult[Group]: result = fetch_workflow_groups_paginated(workflow, start, end, cursor, per_page) assert result.results == expected_results, (result.results, expected_results) return result def test_workflow_groups_paginated__simple(self) -> None: self.assert_expected_results( workflow=self.workflow, start=before_now(days=6), end=before_now(days=0), expected_results=[ WorkflowGroupHistory( self.group, count=3, last_triggered=self.base_triggered_date, event_id=self.history[0].event_id, detector=self.detector_1, ), WorkflowGroupHistory( self.group_3, count=2, last_triggered=self.base_triggered_date, event_id=self.history[4].event_id, detector=self.detector_3, ), WorkflowGroupHistory( self.group_2, count=1, last_triggered=self.base_triggered_date, event_id=self.history[3].event_id, detector=self.detector_2, ), ], ) def test_workflow_groups_paginated__cursor(self) -> None: result = self.assert_expected_results( workflow=self.workflow, start=before_now(days=6), end=before_now(days=0), expected_results=[ WorkflowGroupHistory( self.group, count=3, last_triggered=self.base_triggered_date, event_id=self.history[0].event_id, detector=self.detector_1, ), ], per_page=1, ) # use the cursor to get 
the next page result = self.assert_expected_results( workflow=self.workflow, start=before_now(days=6), end=before_now(days=0), expected_results=[ WorkflowGroupHistory( self.group_3, count=2, last_triggered=self.base_triggered_date, event_id=self.history[4].event_id, detector=self.detector_3, ), ], cursor=result.next, per_page=1, ) # get the next page self.assert_expected_results( workflow=self.workflow, start=before_now(days=6), end=before_now(days=0), expected_results=[ WorkflowGroupHistory( self.group_2, count=1, last_triggered=self.base_triggered_date, event_id=self.history[3].event_id, detector=self.detector_2, ), ], cursor=result.next, per_page=1, ) def test_workflow_groups_paginated__filters_counts(self) -> None: # Test that the count is updated if the date range affects it self.assert_expected_results( workflow=self.workflow, start=before_now(days=1), end=before_now(days=0), expected_results=[ WorkflowGroupHistory( self.group, count=1, last_triggered=self.base_triggered_date, event_id=self.history[0].event_id, detector=self.detector_1, ), WorkflowGroupHistory( self.group_2, count=1, last_triggered=self.base_triggered_date, event_id=self.history[3].event_id, detector=self.detector_2, ), WorkflowGroupHistory( self.group_3, count=1, last_triggered=self.base_triggered_date, event_id=self.history[4].event_id, detector=self.detector_3, ), ], ) def test_workflow_groups_paginated__past_date_range(self) -> None: self.assert_expected_results( workflow=self.workflow, start=before_now(days=3), end=before_now(days=2), expected_results=[ WorkflowGroupHistory( self.group, count=1, last_triggered=self.base_triggered_date - timedelta(days=2), event_id=self.history[2].event_id, detector=self.detector_1, ), ], )
WorkflowGroupsPaginatedTest
python
numpy__numpy
numpy/f2py/tests/test_character.py
{ "start": 20703, "end": 21177 }
class ____(util.F2PyTest):
    sources = [util.getpath("tests", "src", "string", "gh24662.f90")]

    def test_gh24662(self):
        self.module.string_inout_optional()
        a = np.array('hi', dtype='S32')
        self.module.string_inout_optional(a)
        assert "output string" in a.tobytes().decode()
        with pytest.raises(Exception):  # noqa: B017
            aa = "Hi"
            self.module.string_inout_optional(aa)


@pytest.mark.slow
TestStringOptionalInOut
python
getsentry__sentry
tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py
{ "start": 4202, "end": 4589 }
class ____(BaseSafeMigrationTest):
    app = "decimal_to_float_app"
    migrate_from = "0001_initial"
    migrate_to = "0002_type_conversion"

    def test(self) -> None:
        with pytest.raises(
            UnsafeOperationException,
            match="Altering the type of column Value.amount in this way is unsafe",
        ):
            self.run_migration()
ChangeDecimalToFloatTest
python
run-llama__llama_index
llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/agent.py
{ "start": 1055, "end": 1158 }
class ____(Event):
    tool_call_id: str
    tool_name: str
    tool_kwargs: Dict[str, Any]
ToolCallEvent
python
ansible__ansible
lib/ansible/modules/hostname.py
{ "start": 24125, "end": 24242 }
class ____(Hostname):
    platform = 'Linux'
    distribution = 'Kylin'
    strategy_class = FileStrategy
KylinHostname
python
kubernetes-client__python
kubernetes/client/models/v1_service_account_subject.py
{ "start": 383, "end": 4834 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'name': 'str', 'namespace': 'str' } attribute_map = { 'name': 'name', 'namespace': 'namespace' } def __init__(self, name=None, namespace=None, local_vars_configuration=None): # noqa: E501 """V1ServiceAccountSubject - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self._namespace = None self.discriminator = None self.name = name self.namespace = namespace @property def name(self): """Gets the name of this V1ServiceAccountSubject. # noqa: E501 `name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required. # noqa: E501 :return: The name of this V1ServiceAccountSubject. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this V1ServiceAccountSubject. `name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required. # noqa: E501 :param name: The name of this V1ServiceAccountSubject. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501 raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name @property def namespace(self): """Gets the namespace of this V1ServiceAccountSubject. # noqa: E501 `namespace` is the namespace of matching ServiceAccount objects. Required. # noqa: E501 :return: The namespace of this V1ServiceAccountSubject. # noqa: E501 :rtype: str """ return self._namespace @namespace.setter def namespace(self, namespace): """Sets the namespace of this V1ServiceAccountSubject. `namespace` is the namespace of matching ServiceAccount objects. Required. # noqa: E501 :param namespace: The namespace of this V1ServiceAccountSubject. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501 raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501 self._namespace = namespace def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ServiceAccountSubject): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ServiceAccountSubject): return True return self.to_dict() != other.to_dict()
V1ServiceAccountSubject
python
dagster-io__dagster
python_modules/dagster/dagster/_core/remote_representation/external_data.py
{ "start": 23614, "end": 24022 }
class ____:
    """A definition of a directed edge in the logical asset graph.

    An downstream asset that's depended by, and the corresponding input name in the
    upstream asset that it depends on.
    """

    child_asset_key: AssetKey
    input_name: Optional[str] = None
    output_name: Optional[str] = None


@whitelist_for_serdes(storage_name="ExternalResourceConfigEnvVar")
@record
AssetChildEdgeSnap
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/roles.py
{ "start": 2385, "end": 2682 }
class ____(AllowsLambdaRole, UsesInspection, ColumnListRole):
    __slots__ = ()

    _role_name = (
        "Column expression, FROM clause, or other columns clause element"
    )

    @property
    def _select_iterable(self) -> _SelectIterable:
        raise NotImplementedError()
ColumnsClauseRole
python
ray-project__ray
python/ray/_private/gcs_pubsub.py
{ "start": 400, "end": 2445 }
class ____: def __init__(self, worker_id: bytes = None): self._worker_id = worker_id # self._subscriber_id needs to match the binary format of a random # SubscriberID / UniqueID, which is 28 (kUniqueIDSize) random bytes. self._subscriber_id = bytes(bytearray(random.getrandbits(8) for _ in range(28))) self._last_batch_size = 0 self._max_processed_sequence_id = 0 self._publisher_id = b"" # Batch size of the result from last poll. Used to indicate whether the # subscriber can keep up. @property def last_batch_size(self): return self._last_batch_size def _subscribe_request(self, channel): cmd = pubsub_pb2.Command(channel_type=channel, subscribe_message={}) req = gcs_service_pb2.GcsSubscriberCommandBatchRequest( subscriber_id=self._subscriber_id, sender_id=self._worker_id, commands=[cmd] ) return req def _poll_request(self): return gcs_service_pb2.GcsSubscriberPollRequest( subscriber_id=self._subscriber_id, max_processed_sequence_id=self._max_processed_sequence_id, publisher_id=self._publisher_id, ) def _unsubscribe_request(self, channels): req = gcs_service_pb2.GcsSubscriberCommandBatchRequest( subscriber_id=self._subscriber_id, sender_id=self._worker_id, commands=[] ) for channel in channels: req.commands.append( pubsub_pb2.Command(channel_type=channel, unsubscribe_message={}) ) return req @staticmethod def _should_terminate_polling(e: grpc.RpcError) -> None: # Caller only expects polling to be terminated after deadline exceeded. if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: return True # Could be a temporary connection issue. Suppress error. # TODO: reconnect GRPC channel? if e.code() == grpc.StatusCode.UNAVAILABLE: return True return False
_SubscriberBase
python
kamyu104__LeetCode-Solutions
Python/valid-word-square.py
{ "start": 33, "end": 414 }
class ____(object):
    def validWordSquare(self, words):
        """
        :type words: List[str]
        :rtype: bool
        """
        for i in xrange(len(words)):
            for j in xrange(len(words[i])):
                if j >= len(words) or i >= len(words[j]) or \
                   words[j][i] != words[i][j]:
                    return False
        return True
Solution
python
fsspec__filesystem_spec
fsspec/implementations/dask.py
{ "start": 3932, "end": 4466 }
class ____(AbstractBufferedFile):
    def __init__(self, mode="rb", **kwargs):
        if mode != "rb":
            raise ValueError('Remote dask files can only be opened in "rb" mode')
        super().__init__(**kwargs)

    def _upload_chunk(self, final=False):
        pass

    def _initiate_upload(self):
        """Create remote file/upload"""
        pass

    def _fetch_range(self, start, end):
        """Get the specified set of bytes from remote"""
        return self.fs.fetch_range(self.path, self.mode, start, end)
DaskFile
python
pypa__warehouse
tests/unit/admin/views/test_prohibited_project_names.py
{ "start": 9223, "end": 13969 }
class ____: def test_get(self): request = pretend.stub(method="GET") assert views.bulk_add_prohibited_project_names(request) == {} def test_bulk_add(self, db_request): db_request.user = UserFactory.create() db_request.method = "POST" comment = "This is a comment" already_existing_prohibition = ProhibitedProjectFactory.create( name="prohibition-already-exists", prohibited_by=db_request.user, comment=comment, ) already_existing_project = ProjectFactory.create(name="project-already-exists") release = ReleaseFactory.create(project=already_existing_project) FileFactory.create(release=release, filename="who cares") RoleFactory.create(project=already_existing_project, user=db_request.user) project_names = [ already_existing_prohibition.name, already_existing_project.name, "doesnt-already-exist", ] db_request.POST["projects"] = "\n".join(project_names) db_request.POST["comment"] = comment db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.route_path = lambda a: "/admin/prohibited_project_names/bulk" result = views.bulk_add_prohibited_project_names(db_request) assert db_request.session.flash.calls == [ pretend.call( f"Prohibited {len(project_names)!r} projects", queue="success", ) ] assert result.status_code == 303 assert result.headers["Location"] == "/admin/prohibited_project_names/bulk" for project_name in project_names: prohibition = ( db_request.db.query(ProhibitedProjectName) .filter(ProhibitedProjectName.name == project_name) .one() ) assert prohibition.name == project_name assert prohibition.prohibited_by == db_request.user assert prohibition.comment == comment assert ( db_request.db.query(Project) .filter(Project.name == project_name) .count() == 0 ) def test_adds_prohibited_project_name(self, db_request): db_request.user = UserFactory.create() db_request.POST["project"] = "foo" db_request.POST["confirm"] = "foo" db_request.POST["comment"] = "This is a comment" db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.route_path = lambda a: "/admin/prohibited_project_names/" views.add_prohibited_project_names(db_request) assert db_request.session.flash.calls == [ pretend.call("Prohibited Project Name 'foo'", queue="success") ] prohibited_project_name = ( db_request.db.query(ProhibitedProjectName) .filter(ProhibitedProjectName.name == "foo") .one() ) assert prohibited_project_name.name == "foo" assert prohibited_project_name.prohibited_by == db_request.user assert prohibited_project_name.comment == "This is a comment" def test_adds_prohibited_project_name_with_deletes(self, db_request): db_request.user = UserFactory.create() db_request.POST["project"] = "foo" db_request.POST["confirm"] = "foo" db_request.POST["comment"] = "This is a comment" db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.route_path = lambda a: "/admin/prohibited_project_names/" project = ProjectFactory.create(name="foo") release = ReleaseFactory.create(project=project) FileFactory.create(release=release, filename="who cares") RoleFactory.create(project=project, user=db_request.user) views.add_prohibited_project_names(db_request) assert db_request.session.flash.calls == [ pretend.call("Deleted the project 'foo'", queue="success"), pretend.call("Prohibited Project Name 'foo'", queue="success"), ] prohibited_project_name = ( db_request.db.query(ProhibitedProjectName) .filter(ProhibitedProjectName.name == "foo") .one() ) assert prohibited_project_name.name == "foo" assert 
prohibited_project_name.prohibited_by == db_request.user assert prohibited_project_name.comment == "This is a comment" assert not (db_request.db.query(Project).filter(Project.name == "foo").count())
TestBulkAddProhibitedProjectName
python
walkccc__LeetCode
solutions/2110. Number of Smooth Descent Periods of a Stock/2110.py
{ "start": 0, "end": 262 }
class ____:
  def getDescentPeriods(self, prices: list[int]) -> int:
    ans = 1  # prices[0]
    dp = 1

    for i in range(1, len(prices)):
      if prices[i] == prices[i - 1] - 1:
        dp += 1
      else:
        dp = 1
      ans += dp

    return ans
Solution
python
spack__spack
lib/spack/spack/solver/asp.py
{ "start": 167789, "end": 168100 }
class ____(spack.error.UnsatisfiableSpecError):
    """Errors that indicate a bug in Spack."""

    def __init__(self, msg):
        super(spack.error.UnsatisfiableSpecError, self).__init__(msg)
        self.provided = None
        self.required = None
        self.constraint_type = None
InternalConcretizerError
python
pytorch__pytorch
test/lazy/test_extract_compiled_graph.py
{ "start": 5401, "end": 6185 }
class ____(unittest.TestCase):
    test_sub = maketest(ModuleSub)
    # Same as test_sub but force aten::sub to fallback
    # We expect an exception caught because of LTC fallback.
    test_ltc_fallback = maketest(
        ModuleSub,
        exception_msg_pattern="fallback.*aten::sub",
        ctxmgr=force_fallback_ctx_mgr("aten::sub"),
    )
    test_const_scale = maketest(ModuleConstScale)
    test_addcmul = maketest(ModuleAddcmul)
    test_return_multi = maketest(ModuleReturnMulti)
    test_return_dup_tensor = maketest(ModuleReturnDupTensor)
    test_inplace_update = maketest(ModuleInplaceUpdate)


if __name__ == "__main__":
    raise RuntimeError(
        "This test is not currently used and should be "
        "enabled in discover_tests.py if required."
    )
OptimizeTest
python
rapidsai__cudf
python/cudf/cudf/core/dtypes.py
{ "start": 30786, "end": 42421 }
class ____(StructDtype): """ A data type for Interval data. Parameters ---------- subtype: str, np.dtype The dtype of the Interval bounds. closed: {'right', 'left', 'both', 'neither'}, default 'right' Whether the interval is closed on the left-side, right-side, both or neither. See the Notes for more detailed explanation. """ name = "interval" def __init__( self, subtype: None | Dtype = None, closed: Literal["left", "right", "neither", "both", None] = "right", ) -> None: if closed in {"left", "right", "neither", "both"}: self.closed = closed elif closed is None: self.closed = "right" else: raise ValueError(f"{closed=} is not valid") if subtype is None: self._subtype = None dtypes = {} else: self._subtype = cudf.dtype(subtype) if isinstance( self._subtype, cudf.CategoricalDtype ) or cudf.utils.dtypes.is_dtype_obj_string(self._subtype): raise TypeError( "category, object, and string subtypes are not supported " "for IntervalDtype" ) dtypes = {"left": self._subtype, "right": self._subtype} super().__init__(dtypes) @property def subtype(self) -> DtypeObj | None: return self._subtype def __repr__(self) -> str: if self.subtype is None: return "interval" return f"interval[{self.subtype}, {self.closed}]" def __str__(self) -> str: return repr(self) @classmethod def from_arrow(cls, typ: ArrowIntervalType) -> Self: return cls(typ.subtype.to_pandas_dtype(), typ.closed) def to_arrow(self) -> ArrowIntervalType: return ArrowIntervalType( cudf_dtype_to_pa_type(self.subtype), self.closed ) @classmethod def from_pandas(cls, pd_dtype: pd.IntervalDtype) -> Self: warnings.warn( "from_pandas is deprecated and will be removed in a future version. " "Pass the pandas.IntervalDtype subtype and closed to the IntervalDtype constructor instead.", FutureWarning, ) return cls( subtype=pd_dtype.subtype, closed="right" if pd_dtype.closed is None else pd_dtype.closed, ) def to_pandas(self) -> pd.IntervalDtype: if cudf.get_option("mode.pandas_compatible"): return pd.IntervalDtype( subtype=self.subtype.numpy_dtype if is_pandas_nullable_extension_dtype(self.subtype) else self.subtype, closed=self.closed, ) return pd.IntervalDtype(subtype=self.subtype, closed=self.closed) def __eq__(self, other) -> bool: if isinstance(other, str): # This means equality isn't transitive but mimics pandas return other in (self.name, str(self)) elif type(self) is not type(other): # Avoid isinstance because this subclasses StructDtype return False elif other.subtype is None: # Equivalent to the string "interval" return True return self.subtype == other.subtype and self.closed == other.closed def __hash__(self) -> int: return hash((self.subtype, self.closed)) def serialize(self) -> tuple[dict, list]: header = { "fields": ( self.subtype.str if self.subtype is not None else self.subtype, self.closed, ), "frame_count": 0, } return header, [] @classmethod def deserialize(cls, header: dict, frames: list) -> Self: _check_type(cls, header, frames) subtype, closed = header["fields"] return cls(subtype, closed=closed) def _is_categorical_dtype(obj): if obj is None: return False if isinstance( obj, ( pd.CategoricalDtype, cudf.CategoricalDtype, cudf.core.index.CategoricalIndex, cudf.core.column.CategoricalColumn, pd.Categorical, pd.CategoricalIndex, ), ): return True # Note that we cannot directly use `obj in (...)` because that triggers # equality as well as identity checks and pandas extension dtypes won't # allow converting that equality check to a boolean; `__nonzero__` is # disabled because they treat dtypes as "array-like". 
if any( obj is t for t in ( cudf.CategoricalDtype, pd.CategoricalDtype, pd.CategoricalDtype.type, ) ): return True if isinstance(obj, (np.ndarray, np.dtype)): return False if isinstance(obj, str) and obj == "category": return True if isinstance( obj, (cudf.Index, cudf.core.column.ColumnBase, cudf.Series), ): return isinstance(obj.dtype, cudf.CategoricalDtype) if isinstance(obj, (pd.Series, pd.Index)): return isinstance(obj.dtype, pd.CategoricalDtype) if hasattr(obj, "type"): if obj.type is pd.CategoricalDtype.type: return True # TODO: A lot of the above checks are probably redundant and should be # farmed out to this function here instead. with warnings.catch_warnings(): warnings.simplefilter("ignore") return pd_types.is_categorical_dtype(obj) def is_categorical_dtype(obj): """Check whether an array-like or dtype is of the Categorical dtype. .. deprecated:: 24.04 Use isinstance(dtype, cudf.CategoricalDtype) instead Parameters ---------- obj : array-like or dtype The array-like or dtype to check. Returns ------- bool Whether or not the array-like or dtype is of a categorical dtype. """ # Do not remove until pandas 3.0 support is added. assert PANDAS_LT_300, "Need to drop after pandas-3.0 support is added." warnings.warn( "is_categorical_dtype is deprecated and will be removed in a future " "version. Use isinstance(dtype, cudf.CategoricalDtype) instead", DeprecationWarning, ) return _is_categorical_dtype(obj) def is_list_dtype(obj): """Check whether an array-like or dtype is of the list dtype. Parameters ---------- obj : array-like or dtype The array-like or dtype to check. Returns ------- bool Whether or not the array-like or dtype is of the list dtype. """ return ( type(obj) is ListDtype or obj is ListDtype or type(obj) is cudf.core.column.ListColumn or obj is cudf.core.column.ListColumn or (isinstance(obj, str) and obj == ListDtype.name) or (hasattr(obj, "dtype") and isinstance(obj.dtype, ListDtype)) or ( isinstance(obj, pd.ArrowDtype) and pa.types.is_list(obj.pyarrow_dtype) ) ) def is_struct_dtype(obj): """Check whether an array-like or dtype is of the struct dtype. Parameters ---------- obj : array-like or dtype The array-like or dtype to check. Returns ------- bool Whether or not the array-like or dtype is of the struct dtype. """ # TODO: This behavior is currently inconsistent for interval types. the # actual class IntervalDtype will return False, but instances (e.g. # IntervalDtype(int)) will return True. For now this is not being changed # since the interval dtype is being modified as part of the array refactor, # but this behavior should be made consistent afterwards. return ( isinstance(obj, StructDtype) or obj is StructDtype or (isinstance(obj, str) and obj == StructDtype.name) or (hasattr(obj, "dtype") and isinstance(obj.dtype, StructDtype)) or ( isinstance(obj, pd.ArrowDtype) and pa.types.is_struct(obj.pyarrow_dtype) ) ) def is_decimal_dtype(obj): """Check whether an array-like or dtype is of the decimal dtype. Parameters ---------- obj : array-like or dtype The array-like or dtype to check. Returns ------- bool Whether or not the array-like or dtype is of the decimal dtype. 
""" return ( is_decimal32_dtype(obj) or is_decimal64_dtype(obj) or is_decimal128_dtype(obj) ) def _is_interval_dtype(obj): return ( isinstance( obj, ( IntervalDtype, pd.IntervalDtype, ), ) or obj is IntervalDtype or (isinstance(obj, cudf.Index) and obj._is_interval()) or (isinstance(obj, str) and obj == IntervalDtype.name) or ( isinstance( getattr(obj, "dtype", None), (pd.IntervalDtype, IntervalDtype), ) ) or ( isinstance(obj, pd.ArrowDtype) and pa.types.is_interval(obj.pyarrow_dtype) ) ) def is_interval_dtype(obj): """Check whether an array-like or dtype is of the interval dtype. Parameters ---------- obj : array-like or dtype The array-like or dtype to check. Returns ------- bool Whether or not the array-like or dtype is of the interval dtype. """ warnings.warn( "is_interval_dtype is deprecated and will be removed in a " "future version. Use `isinstance(dtype, cudf.IntervalDtype)` instead", DeprecationWarning, ) return _is_interval_dtype(obj) def is_decimal32_dtype(obj): return ( type(obj) is Decimal32Dtype or obj is Decimal32Dtype or (isinstance(obj, str) and obj == Decimal32Dtype.name) or (hasattr(obj, "dtype") and is_decimal32_dtype(obj.dtype)) ) def is_decimal64_dtype(obj): return ( type(obj) is Decimal64Dtype or obj is Decimal64Dtype or (isinstance(obj, str) and obj == Decimal64Dtype.name) or (hasattr(obj, "dtype") and is_decimal64_dtype(obj.dtype)) ) def is_decimal128_dtype(obj): return ( type(obj) is Decimal128Dtype or obj is Decimal128Dtype or (isinstance(obj, str) and obj == Decimal128Dtype.name) or (hasattr(obj, "dtype") and is_decimal128_dtype(obj.dtype)) or ( isinstance(obj, pd.ArrowDtype) and pa.types.is_decimal128(obj.pyarrow_dtype) ) ) def recursively_update_struct_names( dtype: DtypeObj, child_names: Mapping[Any, Any] ) -> DtypeObj: """ Update dtype's field names (namely StructDtype) recursively with child_names. Needed for nested types that come from libcudf which do not carry struct field names. """ if isinstance(dtype, StructDtype): return StructDtype( { new_name: recursively_update_struct_names( child_type, new_child_names ) for (new_name, new_child_names), child_type in zip( child_names.items(), dtype.fields.values(), strict=True ) } ) elif isinstance(dtype, ListDtype): # child_names here should be {"offsets": {}, "<values_key>": {...}} values_key = next(reversed(child_names)) return ListDtype( element_type=recursively_update_struct_names( dtype.element_type, child_names[values_key] ) ) else: return dtype
IntervalDtype
python
keras-team__keras
keras/src/layers/activations/leaky_relu_test.py
{ "start": 118, "end": 1224 }
class ____(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_leaky_relu(self):
        self.run_layer_test(
            leaky_relu.LeakyReLU,
            init_kwargs={
                "negative_slope": 1,
            },
            input_shape=(2, 3, 4),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def test_leaky_relu_correctness(self):
        leaky_relu_layer = leaky_relu.LeakyReLU(negative_slope=0.5)
        input = np.array([-10, -5, 0.0, 5, 10])
        expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
        result = leaky_relu_layer(input)
        self.assertAllClose(result, expected_output)

    def test_invalid_usage(self):
        with self.assertRaisesRegex(
            ValueError,
            "The negative_slope value of a Leaky ReLU layer cannot be None",
        ):
            self.run_layer_test(
                leaky_relu.LeakyReLU,
                init_kwargs={"negative_slope": None},
                input_shape=(2, 3, 4),
                supports_masking=True,
            )
LeakyReLUTest
python
Pylons__pyramid
src/pyramid/interfaces.py
{ "start": 24093, "end": 24448 }
class ____(Interface):
    """Marker interface for storing request extensions (properties and
    methods) which will be added to the request object."""

    descriptors = Attribute(
        """A list of descriptors that will be added to each request."""
    )
    methods = Attribute("""A list of methods to be added to each request.""")
IRequestExtensions
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataplex.py
{ "start": 38092, "end": 39038 }
class ____:
    @mock.patch(HOOK_STR)
    def test_execute(self, hook_mock):
        op = DataplexCatalogDeleteAspectTypeOperator(
            project_id=PROJECT_ID,
            location=REGION,
            aspect_type_id=ASPECT_TYPE_NAME,
            task_id="delete_task",
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        hook_mock.return_value.wait_for_operation.return_value = None
        op.execute(context=mock.MagicMock())
        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        hook_mock.return_value.delete_aspect_type.assert_called_once_with(
            project_id=PROJECT_ID,
            location=REGION,
            aspect_type_id=ASPECT_TYPE_NAME,
            retry=DEFAULT,
            timeout=None,
            metadata=(),
        )
TestDataplexCatalogDeleteAspectTypeOperator
python
ipython__ipython
IPython/extensions/tests/test_deduperreload.py
{ "start": 56557, "end": 64024 }
class ____(ShellFixture): """ Unit tests for autoreload patching logic """ def test_modify_property(self): self.shell.magic_autoreload("2") mod_name, mod_file = self.new_module( """ class Foo: @property def foo(self): return 42 """ ) self.shell.run_code(f"from {mod_name} import Foo") self.shell.run_code("foo = Foo()") self.shell.run_code("assert foo.foo == 42") self.write_file( mod_file, """ class Foo: @property def foo(self): return 43 """, ) self.shell.run_code("pass") self.shell.run_code("assert foo.foo == 43") def test_method_decorator(self): self.shell.magic_autoreload("2") mod_name, mod_file = self.new_module( """ def incremented(f): return lambda *args: f(*args) + 1 class Foo: @classmethod @incremented def foo(cls): return 42 foo = Foo.foo """ ) self.shell.run_code(f"from {mod_name} import foo") self.shell.run_code("assert foo() == 43") self.write_file( mod_file, """ def incremented(f): return lambda *args: f(*args) + 1 class Foo: @classmethod def foo(cls): return 42 foo = Foo.foo """, ) self.shell.run_code("assert foo() == 42") def test_method_modified_decorator(self): self.shell.magic_autoreload("2") mod_name, mod_file = self.new_module( """ def incremented(f): return lambda *args: f(*args) + 1 class Foo: @classmethod @incremented def foo(cls): return 42 foo = Foo.foo """ ) self.shell.run_code(f"from {mod_name} import foo") self.shell.run_code("assert foo() == 43") self.write_file( mod_file, """ def incremented(f): return lambda *args: f(*args) + 0 class Foo: @classmethod @incremented def foo(cls): return 42 foo = Foo.foo """, ) self.shell.run_code("assert foo() == 42") def test_function_decorators(self): self.shell.magic_autoreload("2") mod_name, mod_file = self.new_module( """ def incremented(f): return lambda *args: f(*args) + 1 @incremented def foo(): return 42 """ ) self.shell.run_code("import %s" % mod_name) self.shell.run_code("pass") mod = sys.modules[mod_name] assert mod.foo() == 43 self.write_file( mod_file, """ def incremented(f): return lambda *args: f(*args) + 1 def foo(): return 42 """, ) self.shell.run_code("pass") assert mod.foo() == 42 self.write_file( mod_file, """ def incremented(v): def deco(f): return lambda *args: f(*args) + v return deco @incremented(2) def foo(): return 43 """, ) self.shell.run_code("pass") assert mod.foo() == 45 def test_method_decorators_again(self): self.shell.magic_autoreload("2") mod_name, mod_file = self.new_module( """ class Foo: @classmethod def bar(cls): return 0 @classmethod def foo(cls): return 42 + cls.bar() foo = Foo.foo """ ) self.shell.run_code("import %s" % mod_name) self.shell.run_code("pass") mod = sys.modules[mod_name] assert mod.foo() == 42 self.write_file( mod_file, """ class Foo: @classmethod def bar(cls): return 1 @classmethod def foo(cls): return 42 + cls.bar() foo = Foo.foo """, ) self.shell.run_code("pass") assert mod.Foo.foo() == 43 assert mod.foo() == 43 # This test verifies that the correct globals are used when patching a function # decorated by a function from another module. For this example, we should always # use <mod_name>.__dict__ as the global environment when patching foo, which comes # from <mod_name>, and never <other_mod_name>.__dict__, which is what we would get # if we use foo.__globals__ after it has been decorated. 
def test_function_decorator_from_other_module(self): self.shell.magic_autoreload("2") other_mod_name, _ = self.new_module( """ import functools def incremented(f): @functools.wraps(f) def wrapper(*args, **kwargs): return f(*args, **kwargs) + 1 return wrapper """ ) mod_name, mod_file = self.new_module( f""" from {other_mod_name} import incremented as deco @deco def foo(): return 42 """ ) self.shell.run_code(f"from {mod_name} import foo") self.shell.run_code("assert foo() == 43") self.write_file( mod_file, f""" from {other_mod_name} import incremented as deco @deco def foo(): return 43 """, ) self.shell.run_code("assert foo() == 44") def test_decorators_with_freevars(self): self.shell.magic_autoreload("2") mod_name, mod_file = self.new_module( """ def decorate(func): free_var_x = "x" free_var_y = "y" def wrapper(*args, **kwargs): return func(*args, **kwargs), free_var_x, free_var_y return wrapper @decorate def f(): return "v1" """ ) self.shell.run_code(f"from {mod_name} import f") mod = sys.modules[mod_name] assert mod.f() == ("v1", "x", "y") self.write_file( mod_file, """ def decorate(func): free_var_ax = "ax" free_var_by = "by" def wrapper(*args, **kwargs): return func(*args, **kwargs), free_var_ax, free_var_by return wrapper @decorate def f(): return "v2" """, ) self.shell.run_code("pass") val = mod.f() assert val == ("v2", "ax", "by"), val
DecoratorPatchingSuite
python
doocs__leetcode
lcof/面试题10- II. 青蛙跳台阶问题/Solution.py
{ "start": 0, "end": 159 }
class ____:
    def numWays(self, n: int) -> int:
        a = b = 1
        for _ in range(n):
            a, b = b, (a + b) % 1000000007
        return a
Solution
python
walkccc__LeetCode
solutions/3155. Maximum Number of Upgradable Servers/3155.py
{ "start": 0, "end": 486 }
class ____:
  def maxUpgrades(
      self,
      count: list[int],
      upgrade: list[int],
      sell: list[int],
      money: list[int],
  ) -> list[int]:
    # If there's enough money, upgrade all servers; otherwise, optimize by
    # upgrading x servers. We have x * upgrade <= money + (count - x) * sell.
    # Therefore, x = (money + count * sell) / (sell + upgrade).
    return [min(c, (m + c * s) // (s + u))
            for c, u, s, m in zip(count, upgrade, sell, money)]
Solution
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-visited-cells-in-a-grid.py
{ "start": 2163, "end": 3089 }
class ____(object):
    def minimumVisitedCells(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        m, n = len(grid), len(grid[0])
        sl1 = [SortedList(xrange(n)) for _ in xrange(m)]
        sl2 = [SortedList(xrange(m)) for _ in xrange(n)]
        d, i, j = 1, 0, 0
        q = [(i, j)]
        while q:
            new_q = []
            for i, j in q:
                if (i, j) == (m-1, n-1):
                    return d
                for k in list(sl1[i].irange(j+1, min(j+grid[i][j], n-1))):
                    new_q.append((i, k))
                    sl2[k].remove(i)
                    sl1[i].remove(k)
                for k in list(sl2[j].irange(i+1, min(i+grid[i][j], m-1))):
                    new_q.append((k, j))
                    sl1[k].remove(j)
                    sl2[j].remove(k)
            q = new_q
            d += 1
        return -1
Solution2_TLE
python
apache__airflow
airflow-core/tests/unit/api_fastapi/core_api/routes/ui/test_backfills.py
{ "start": 1691, "end": 2291 }
class ____:
    @provide_session
    def _create_dag_models(self, *, count=3, dag_id_prefix="TEST_DAG", is_paused=False, session=None):
        dags = []
        for num in range(1, count + 1):
            dag_model = DagModel(
                dag_id=f"{dag_id_prefix}_{num}",
                bundle_name="testing",
                fileloc=f"/tmp/dag_{num}.py",
                is_stale=False,
                timetable_summary="0 0 * * *",
                is_paused=is_paused,
            )
            session.add(dag_model)
            dags.append(dag_model)
        return dags
TestBackfillEndpoint
python
explosion__spaCy
spacy/training/corpus.py
{ "start": 3370, "end": 8161 }
class ____: """Iterate Example objects from a file or directory of DocBin (.spacy) formatted data files. path (Path): The directory or filename to read from. gold_preproc (bool): Whether to set up the Example object with gold-standard sentences and tokens for the predictions. Gold preprocessing helps the annotations align to the tokenization, and may result in sequences of more consistent length. However, it may reduce run-time accuracy due to train/test skew. Defaults to False. max_length (int): Maximum document length. Longer documents will be split into sentences, if sentence boundaries are available. Defaults to 0, which indicates no limit. limit (int): Limit corpus to a subset of examples, e.g. for debugging. Defaults to 0, which indicates no limit. augment (Callable[Example, Iterable[Example]]): Optional data augmentation function, to extrapolate additional examples from your annotations. shuffle (bool): Whether to shuffle the examples. DOCS: https://spacy.io/api/corpus """ def __init__( self, path: Union[str, Path], *, limit: int = 0, gold_preproc: bool = False, max_length: int = 0, augmenter: Optional[Callable] = None, shuffle: bool = False, ) -> None: self.path = util.ensure_path(path) self.gold_preproc = gold_preproc self.max_length = max_length self.limit = limit self.augmenter = augmenter if augmenter is not None else dont_augment self.shuffle = shuffle def __call__(self, nlp: "Language") -> Iterator[Example]: """Yield examples from the data. nlp (Language): The current nlp object. YIELDS (Example): The examples. DOCS: https://spacy.io/api/corpus#call """ ref_docs = self.read_docbin(nlp.vocab, walk_corpus(self.path, FILE_TYPE)) if self.shuffle: ref_docs = list(ref_docs) # type: ignore random.shuffle(ref_docs) # type: ignore if self.gold_preproc: examples = self.make_examples_gold_preproc(nlp, ref_docs) else: examples = self.make_examples(nlp, ref_docs) for real_eg in examples: for augmented_eg in self.augmenter(nlp, real_eg): # type: ignore[operator] yield augmented_eg def _make_example( self, nlp: "Language", reference: Doc, gold_preproc: bool ) -> Example: if gold_preproc or reference.has_unknown_spaces: return Example( Doc( nlp.vocab, words=[word.text for word in reference], spaces=[bool(word.whitespace_) for word in reference], ), reference, ) else: return Example(nlp.make_doc(reference.text), reference) def make_examples( self, nlp: "Language", reference_docs: Iterable[Doc] ) -> Iterator[Example]: for reference in reference_docs: if len(reference) == 0: continue elif self.max_length == 0 or len(reference) < self.max_length: yield self._make_example(nlp, reference, False) elif reference.has_annotation("SENT_START"): for ref_sent in reference.sents: if len(ref_sent) == 0: continue elif self.max_length == 0 or len(ref_sent) < self.max_length: yield self._make_example(nlp, ref_sent.as_doc(), False) def make_examples_gold_preproc( self, nlp: "Language", reference_docs: Iterable[Doc] ) -> Iterator[Example]: for reference in reference_docs: if reference.has_annotation("SENT_START"): ref_sents = [sent.as_doc() for sent in reference.sents] else: ref_sents = [reference] for ref_sent in ref_sents: eg = self._make_example(nlp, ref_sent, True) if len(eg.x): yield eg def read_docbin( self, vocab: Vocab, locs: Iterable[Union[str, Path]] ) -> Iterator[Doc]: """Yield training examples as example dicts""" i = 0 for loc in locs: loc = util.ensure_path(loc) if loc.parts[-1].endswith(FILE_TYPE): # type: ignore[union-attr] doc_bin = DocBin().from_disk(loc) docs = doc_bin.get_docs(vocab) for doc 
in docs: if len(doc): yield doc i += 1 if self.limit >= 1 and i >= self.limit: break
Corpus
python
psf__black
tests/data/cases/nested_stub.py
{ "start": 27, "end": 450 }
class ____: class InnerStub: ... outer_attr_after_inner_stub: int class Inner: inner_attr: int outer_attr: int if sys.version_info > (3, 7): if sys.platform == "win32": assignment = 1 def function_definition(self): ... def f1(self) -> str: ... if sys.platform != "win32": def function_definition(self): ... assignment = 1 def f2(self) -> str: ...
Outer
python
pytorch__pytorch
torch/nn/modules/conv.py
{ "start": 69489, "end": 72514 }
class ____(_LazyConvXdMixin, ConvTranspose1d): # type: ignore[misc] r"""A :class:`torch.nn.ConvTranspose1d` module with lazy initialization of the ``in_channels`` argument. The ``in_channels`` argument of the :class:`ConvTranspose1d` that is inferred from the ``input.size(1)``. The attributes that will be lazily initialized are `weight` and `bias`. Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation on lazy modules and their limitations. Args: out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the convolving kernel stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both sides of the input. Default: 0 output_padding (int or tuple, optional): Additional size added to one side of the output shape. Default: 0 groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True`` dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 .. seealso:: :class:`torch.nn.ConvTranspose1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin` """ # super class define this variable as None. "type: ignore[..] is required # since we are redefining the variable. cls_to_become = ConvTranspose1d # type: ignore[assignment] def __init__( self, out_channels: int, kernel_size: _size_1_t, stride: _size_1_t = 1, padding: _size_1_t = 0, output_padding: _size_1_t = 0, groups: int = 1, bias: bool = True, dilation: _size_1_t = 1, padding_mode: Literal["zeros", "reflect", "replicate", "circular"] = "zeros", device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} # pyrefly: ignore [bad-argument-type] super().__init__( 0, 0, kernel_size, stride, padding, output_padding, groups, # bias is hardcoded to False to avoid creating tensor # that will soon be overwritten. False, dilation, padding_mode, **factory_kwargs, ) # pyrefly: ignore [bad-override, bad-argument-type] self.weight = UninitializedParameter(**factory_kwargs) self.out_channels = out_channels if bias: # pyrefly: ignore [bad-override, bad-argument-type] self.bias = UninitializedParameter(**factory_kwargs) def _get_num_spatial_dims(self) -> int: return 1 # LazyConvTranspose2d defines weight as a Tensor but derived class defines it as UninitializeParameter
LazyConvTranspose1d
python
walkccc__LeetCode
solutions/311. Sparse Matrix Multiplication/311-2.py
{ "start": 0, "end": 591 }
class ____:
  def multiply(self, mat1: list[list[int]], mat2: list[list[int]]) -> list[list[int]]:
    m = len(mat1)
    n = len(mat2)
    l = len(mat2[0])
    ans = [[0] * l for _ in range(m)]
    nonZeroColIndicesInMat2 = [
        [j for j, a in enumerate(row) if a]
        for row in mat2
    ]

    for i in range(m):
      for j, a in enumerate(mat1[i]):
        if a == 0:
          continue
        # mat1s j-th column matches mat2's j-th row
        for colIndex in nonZeroColIndicesInMat2[j]:
          ans[i][colIndex] += a * mat2[j][colIndex]

    return ans
Solution
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/postgresql/base.py
{ "start": 107950, "end": 108158 }
class ____(TypedDict):
    """Represents a reflect check constraint of a domain."""

    name: str
    """Name of the constraint."""
    check: str
    """The check constraint text."""
ReflectedDomainConstraint
python
getsentry__sentry
tests/snuba/api/endpoints/test_organization_tagkey_values.py
{ "start": 19441, "end": 23908 }
class ____(OrganizationTagKeyTestCase): def setUp(self) -> None: super().setUp() data = load_data("transaction", timestamp=before_now(minutes=1)) data.update( { "measurements": {"lcp": {"value": 2500}}, "breakdowns": {"span_ops": {"ops.http": {"value": 1500}}}, } ) self.store_event(data, project_id=self.project.id) self.transaction = data.copy() self.transaction.update( { "transaction": "/city_by_code/", "timestamp": before_now(seconds=30).isoformat(), "start_timestamp": before_now(seconds=35).isoformat(), } ) self.transaction["contexts"]["trace"].update( { "status": "unknown_error", "trace": "a" * 32, "span": "abcd1234abcd1234", "parent_span_id": "9000cec7cc0779c1", "op": "bar.server", } ) self.store_event( self.transaction, project_id=self.project.id, ) def run_test(self, key, expected, **kwargs): # all tests here require that we search in transactions so make that the default here qs_params = kwargs.get("qs_params", {}) qs_params["includeTransactions"] = "1" kwargs["qs_params"] = qs_params super().run_test(key, expected, **kwargs) def test_status(self) -> None: self.run_test("transaction.status", expected=[("internal_error", 1), ("ok", 1)]) self.run_test( "transaction.status", qs_params={"query": "o"}, expected=[("internal_error", 1), ("ok", 1)], ) self.run_test("transaction.status", qs_params={"query": "ow"}, expected=[]) self.run_test("transaction.status", qs_params={"query": "does-not-exist"}, expected=[]) def test_op(self) -> None: self.run_test("transaction.op", expected=[("bar.server", 1), ("http.server", 1)]) self.run_test( "transaction.op", qs_params={"query": "server"}, expected=[("bar.server", 1), ("http.server", 1)], ) self.run_test("transaction.op", qs_params={"query": "bar"}, expected=[("bar.server", 1)]) def test_duration(self) -> None: self.run_test("transaction.duration", expected=[("5000", 1), ("3000", 1)]) self.run_test("transaction.duration", qs_params={"query": "5001"}, expected=[("5000", 1)]) self.run_test("transaction.duration", qs_params={"query": "50"}, expected=[]) def test_measurements(self) -> None: self.run_test("measurements.lcp", expected=[("2500.0", 2)]) self.run_test("measurements.lcp", qs_params={"query": "2501"}, expected=[("2500.0", 2)]) self.run_test("measurements.lcp", qs_params={"query": "25"}, expected=[]) self.run_test("measurements.foo", expected=[]) def test_span_ops_breakdowns(self) -> None: self.run_test("spans.http", expected=[("1500.0", 2)]) self.run_test("spans.http", qs_params={"query": "1501"}, expected=[("1500.0", 2)]) self.run_test("spans.http", qs_params={"query": "15"}, expected=[]) self.run_test("spans.bar", expected=[]) def test_transaction_title(self) -> None: self.run_test("transaction", expected=[("/city_by_code/", 1), ("/country_by_code/", 1)]) self.run_test( "transaction", qs_params={"query": "by_code", "includeTransactions": "1"}, expected=[("/city_by_code/", 1), ("/country_by_code/", 1)], ) self.run_test("transaction", qs_params={"query": "city"}, expected=[("/city_by_code/", 1)]) def test_invalid_keys(self) -> None: self.run_test("trace.parent_span", expected=[]) self.run_test("trace.span", expected=[]) self.run_test("trace", expected=[]) self.run_test("event_id", expected=[]) self.run_test("profile_id", expected=[]) self.run_test("replay_id", expected=[]) def test_boolean_fields(self) -> None: self.run_test("error.handled", expected=[("true", None), ("false", None)]) self.run_test("error.unhandled", expected=[("true", None), ("false", None)]) self.run_test("error.main_thread", expected=[("true", None), ("false", None)]) 
self.run_test("stack.in_app", expected=[("true", None), ("false", None)])
TransactionTagKeyValues
python
django__django
tests/contenttypes_tests/test_fields.py
{ "start": 2776, "end": 3171 }
class ____(TestCase):
    def test_value_to_string(self):
        question = Question.objects.create(text="test")
        answer1 = Answer.objects.create(question=question)
        answer2 = Answer.objects.create(question=question)
        result = json.loads(Question.answer_set.field.value_to_string(question))
        self.assertCountEqual(result, [answer1.pk, answer2.pk])
GenericRelationTests
python
ray-project__ray
python/ray/autoscaler/v2/tests/test_subscribers.py
{ "start": 5074, "end": 9687 }
class ____: def test_launch_no_op(self): mock_provider = mock.MagicMock() launcher = CloudInstanceUpdater(mock_provider) launcher.notify( [ InstanceUpdateEvent( new_instance_status=Instance.RAY_RUNNING, launch_request_id="1", instance_type="type-1", ), ] ) mock_provider.launch.assert_not_called() def test_launch_new_instances(self): mock_provider = mock.MagicMock() launcher = CloudInstanceUpdater(mock_provider) launcher.notify( [ InstanceUpdateEvent( new_instance_status=Instance.REQUESTED, launch_request_id="1", instance_type="type-1", ), InstanceUpdateEvent( new_instance_status=Instance.REQUESTED, launch_request_id="1", instance_type="type-1", ), InstanceUpdateEvent( new_instance_status=Instance.REQUESTED, launch_request_id="2", instance_type="type-1", ), InstanceUpdateEvent( new_instance_status=Instance.REQUESTED, launch_request_id="2", instance_type="type-2", ), ] ) def verify(): mock_provider.launch.assert_has_calls( [ mock.call(shape={"type-1": 2}, request_id="1"), mock.call(shape={"type-1": 1, "type-2": 1}, request_id="2"), ] ) return True wait_for_condition(verify) def test_multi_notify(self): mock_provider = mock.MagicMock() launcher = CloudInstanceUpdater(mock_provider) launcher.notify( [ InstanceUpdateEvent( new_instance_status=Instance.REQUESTED, launch_request_id="1", instance_type="type-1", ), ] ) launcher.notify( [ InstanceUpdateEvent( new_instance_status=Instance.REQUESTED, launch_request_id="2", instance_type="type-1", ), ] ) def verify(): assert mock_provider.launch.call_count == 2 mock_provider.launch.assert_has_calls( [ mock.call(shape={"type-1": 1}, request_id="1"), mock.call(shape={"type-1": 1}, request_id="2"), ] ) return True wait_for_condition(verify) def test_terminate_no_op(self): mock_provider = mock.MagicMock() launcher = CloudInstanceUpdater(mock_provider) launcher.notify( [ InstanceUpdateEvent( new_instance_status=Instance.RAY_RUNNING, instance_id="1", cloud_instance_id="c1", ), ] ) def verify(): mock_provider.terminate.assert_not_called() return True wait_for_condition(verify) def test_terminate_instances(self): mock_provider = mock.MagicMock() launcher = CloudInstanceUpdater(mock_provider) launcher.notify( [ InstanceUpdateEvent( new_instance_status=Instance.TERMINATING, instance_id="1", cloud_instance_id="c1", ), InstanceUpdateEvent( new_instance_status=Instance.TERMINATING, instance_id="2", cloud_instance_id="c2", ), InstanceUpdateEvent( new_instance_status=Instance.TERMINATING, instance_id="3", cloud_instance_id="c3", ), ] ) def verify(): mock_provider.terminate.assert_called_once_with( ids=["c1", "c2", "c3"], request_id=mock.ANY ) return True wait_for_condition(verify) if __name__ == "__main__": if os.environ.get("PARALLEL_CI"): sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__])) else: sys.exit(pytest.main(["-sv", __file__]))
TestCloudInstanceUpdater
python
HypothesisWorks__hypothesis
hypothesis-python/tests/ghostwriter/test_expected_output.py
{ "start": 1928, "end": 11349 }
class ____: @classmethod def a_classmethod(cls, arg: int): pass @staticmethod def a_staticmethod(arg: int): pass def add(a: float, b: float) -> float: return a + b def divide(a: int, b: int) -> float: """This is a RST-style docstring for `divide`. :raises ZeroDivisionError: if b == 0 """ return a / b def optional_parameter(a: float, b: float | None) -> float: return optional_union_parameter(a, b) def optional_union_parameter(a: float, b: float | int | None) -> float: return a if b is None else a + b def union_sequence_parameter(items: Sequence[float | int]) -> float: return sum(items) def sequence_from_collections(items: collections.abc.Sequence[int]) -> int: return min(items) def various_numpy_annotations( f: numpy.typing.NDArray[numpy.float64], fc: numpy.typing.NDArray[numpy.float64 | numpy.complex128], union: numpy.typing.NDArray[numpy.float64 | numpy.complex128] | None, ): pass # Note: for some of the `expected` outputs, we replace away some small # parts which vary between minor versions of Python. @pytest.mark.parametrize( "data", [ ("fuzz_sorted", lambda: ghostwriter.fuzz(sorted)), ( "fuzz_sorted_with_annotations", lambda: ghostwriter.fuzz(sorted, annotate=True), ), ("fuzz_with_docstring", lambda: ghostwriter.fuzz(with_docstring)), ("fuzz_classmethod", lambda: ghostwriter.fuzz(A_Class.a_classmethod)), ("fuzz_staticmethod", lambda: ghostwriter.fuzz(A_Class.a_staticmethod)), ("fuzz_ufunc", lambda: ghostwriter.fuzz(numpy.add)), ("magic_gufunc", lambda: ghostwriter.magic(numpy.matmul)), ("optional_parameter", lambda: ghostwriter.magic(optional_parameter)), ( "optional_union_parameter", lambda: ghostwriter.magic(optional_union_parameter), ), ( "union_sequence_parameter", lambda: ghostwriter.magic(union_sequence_parameter), ), ( "sequence_from_collections", lambda: ghostwriter.magic(sequence_from_collections), ), ("add_custom_classes", lambda: ghostwriter.magic(add_custom_classes)), ("merge_dicts", lambda: ghostwriter.magic(merge_dicts)), ("invalid_types", lambda: ghostwriter.magic(invalid_types)), ("magic_base64_roundtrip", lambda: ghostwriter.magic(base64.b64encode)), ( "magic_base64_roundtrip_with_annotations", lambda: ghostwriter.magic(base64.b64encode, annotate=True), ), ("re_compile", lambda: ghostwriter.fuzz(re.compile)), ( "re_compile_except", lambda: ghostwriter.fuzz(re.compile, except_=re.error).replace( "re.PatternError", "re.error" # changed in Python 3.13 ), ), ("re_compile_unittest", lambda: ghostwriter.fuzz(re.compile, style="unittest")), pytest.param( ("base64_magic", lambda: ghostwriter.magic(base64)), marks=pytest.mark.skipif("sys.version_info[:2] >= (3, 10)"), ), ("sorted_idempotent", lambda: ghostwriter.idempotent(sorted)), ("timsort_idempotent", lambda: ghostwriter.idempotent(timsort)), ( "timsort_idempotent_asserts", lambda: ghostwriter.idempotent(timsort, except_=AssertionError), ), pytest.param( ("eval_equivalent", lambda: ghostwriter.equivalent(eval, ast.literal_eval)), marks=[pytest.mark.skipif(sys.version_info[:2] >= (3, 13), reason="kw")], ), ( "sorted_self_equivalent", lambda: ghostwriter.equivalent(sorted, sorted, sorted), ), ( "sorted_self_equivalent_with_annotations", lambda: ghostwriter.equivalent(sorted, sorted, sorted, annotate=True), ), ("addition_op_magic", lambda: ghostwriter.magic(add)), ("multiplication_magic", lambda: ghostwriter.magic(operator.mul)), ("matmul_magic", lambda: ghostwriter.magic(operator.matmul)), ( "addition_op_multimagic", lambda: ghostwriter.magic(add, operator.add, numpy.add), ), ("division_fuzz_error_handler", lambda: 
ghostwriter.fuzz(divide)), ( "division_binop_error_handler", lambda: ghostwriter.binary_operation(divide, identity=1), ), ( "division_roundtrip_error_handler", lambda: ghostwriter.roundtrip(divide, operator.mul), ), ( "division_roundtrip_error_handler_without_annotations", lambda: ghostwriter.roundtrip(divide, operator.mul, annotate=False), ), ( "division_roundtrip_arithmeticerror_handler", lambda: ghostwriter.roundtrip( divide, operator.mul, except_=ArithmeticError ), ), ( "division_roundtrip_typeerror_handler", lambda: ghostwriter.roundtrip(divide, operator.mul, except_=TypeError), ), ( "division_operator", lambda: ghostwriter.binary_operation( operator.truediv, associative=False, commutative=False ), ), ( "division_operator_with_annotations", lambda: ghostwriter.binary_operation( operator.truediv, associative=False, commutative=False, annotate=True ), ), ( "multiplication_operator", lambda: ghostwriter.binary_operation( operator.mul, identity=1, distributes_over=operator.add ), ), ( "multiplication_operator_unittest", lambda: ghostwriter.binary_operation( operator.mul, identity=1, distributes_over=operator.add, style="unittest", ), ), ( "sorted_self_error_equivalent_simple", lambda: ghostwriter.equivalent(sorted, sorted, allow_same_errors=True), ), ( "sorted_self_error_equivalent_threefuncs", lambda: ghostwriter.equivalent( sorted, sorted, sorted, allow_same_errors=True ), ), ( "sorted_self_error_equivalent_1error", lambda: ghostwriter.equivalent( sorted, sorted, allow_same_errors=True, except_=ValueError, ), ), ( "sorted_self_error_equivalent_2error_unittest", lambda: ghostwriter.equivalent( sorted, sorted, allow_same_errors=True, except_=(TypeError, ValueError), style="unittest", ), ), ("magic_class", lambda: ghostwriter.magic(A_Class)), pytest.param( ("magic_builtins", lambda: ghostwriter.magic(builtins)), marks=[ pytest.mark.skipif( sys.version_info[:2] != (3, 10), reason="often small changes", ) ], ), pytest.param( ( "magic_numpy", lambda: ghostwriter.magic(various_numpy_annotations, annotate=False), ), marks=pytest.mark.skipif(various_numpy_annotations is add, reason="<=3.9"), ), ], ids=lambda x: x[0], ) def test_ghostwriter_example_outputs(update_recorded_outputs, data): name, get_actual = data # ghostwriter computations can be expensive, so defer collection-time # computations until test-time actual = get_actual() expected = get_recorded(name, actual * update_recorded_outputs) assert actual == expected # We got the expected source code exec(expected, {}) # and there are no SyntaxError or NameErrors def test_ghostwriter_on_hypothesis(update_recorded_outputs): actual = ( ghostwriter.magic(hypothesis) .replace("Strategy[+Ex]", "Strategy") .replace("hypothesis._settings.settings", "hypothesis.settings") ) # hypothesis._settings.settings wraps the line before replacement, and doesn't # after replacement actual = black.format_str(actual, mode=black.Mode()) expected = get_recorded("hypothesis_module_magic", actual * update_recorded_outputs) if sys.version_info[:2] == (3, 10): assert actual == expected exec(expected, {"not_set": not_set}) def test_ghostwriter_suggests_submodules_for_empty_toplevel( tmp_path, update_recorded_outputs ): foo = tmp_path / "foo" foo.mkdir() (foo / "__init__.py").write_text("from . 
import bar\n", encoding="utf-8") (foo / "bar.py").write_text("def baz(x: int): ...\n", encoding="utf-8") proc = subprocess.run( ["hypothesis", "write", "foo"], check=True, capture_output=True, encoding="utf-8", cwd=tmp_path, ) actual = proc.stdout.replace(re.search(r"from '(.+)foo/", proc.stdout).group(1), "") expected = get_recorded("nothing_found", actual * update_recorded_outputs) assert actual == expected # We got the expected source code exec(expected, {}) # and there are no SyntaxError or NameErrors
A_Class
python
numpy__numpy
numpy/distutils/command/build_scripts.py
{ "start": 249, "end": 1665 }
class ____(old_build_scripts):

    def generate_scripts(self, scripts):
        new_scripts = []
        func_scripts = []
        for script in scripts:
            if is_string(script):
                new_scripts.append(script)
            else:
                func_scripts.append(script)
        if not func_scripts:
            return new_scripts

        build_dir = self.build_dir
        self.mkpath(build_dir)
        for func in func_scripts:
            script = func(build_dir)
            if not script:
                continue
            if is_string(script):
                log.info("  adding '%s' to scripts" % (script,))
                new_scripts.append(script)
            else:
                [log.info("  adding '%s' to scripts" % (s,)) for s in script]
                new_scripts.extend(list(script))
        return new_scripts

    def run (self):
        if not self.scripts:
            return

        self.scripts = self.generate_scripts(self.scripts)
        # Now make sure that the distribution object has this list of scripts.
        # setuptools' develop command requires that this be a list of filenames,
        # not functions.
        self.distribution.scripts = self.scripts

        return old_build_scripts.run(self)

    def get_source_files(self):
        from numpy.distutils.misc_util import get_script_files
        return get_script_files(self.scripts)
build_scripts
python
huggingface__transformers
src/transformers/models/markuplm/modeling_markuplm.py
{ "start": 16178, "end": 17069 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = MarkupLMSelfAttention(config)
        self.output = MarkupLMSelfOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            **kwargs,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.align.modeling_align.AlignTextLayer with AlignText->MarkupLM
MarkupLMAttention
python
coleifer__peewee
peewee.py
{ "start": 37943, "end": 38560 }
class ____(ColumnBase):
    def __init__(self, source, name):
        self.source = source
        self.name = name

    def get_sort_key(self, ctx):
        if ctx.scope == SCOPE_VALUES:
            return (self.name,)
        else:
            return self.source.get_sort_key(ctx) + (self.name,)

    def __hash__(self):
        return hash((self.source, self.name))

    def __sql__(self, ctx):
        if ctx.scope == SCOPE_VALUES:
            return ctx.sql(Entity(self.name))
        else:
            with ctx.scope_column():
                return ctx.sql(self.source).literal('.').sql(Entity(self.name))
Column
python
mlflow__mlflow
mlflow/models/auth_policy.py
{ "start": 784, "end": 1351 }
class ____:
    """
    System Auth Policy, which defines a list of resources required to serve this model
    """

    def __init__(self, resources: list[Resource]):
        self._resources = resources

    @property
    def resources(self) -> list[Resource]:
        return self._resources

    @resources.setter
    def resources(self, value: list[Resource]):
        self._resources = value

    def to_dict(self):
        serialized_resources = _ResourceBuilder.from_resources(self.resources)
        return {"resources": serialized_resources}
SystemAuthPolicy
python
google__pytype
pytype/tests/test_complex_function.py
{ "start": 458, "end": 1104 }
class ____(test_base.BaseTest):
  """Test function with complex cfg."""

  def test_function_not_optimized(self):
    # If we do not analyse generate_tokens with full filtering, some of the
    # return branches will be None and the iterator will raise a type error.
    code = test_utils.test_data_file("tokenize.py")
    with self.DepTree([("foo.py", code)]):
      self.Check("""
        import io
        import foo
        stream = io.StringIO("")
        tokens = foo.generate_tokens(stream.readline)
        for tok_type, tok_str, _, _, _ in tokens:
          pass
      """)


if __name__ == "__main__":
  test_base.main()
TestComplexFunction
python
kubernetes-client__python
kubernetes/client/models/v1_portworx_volume_source.py
{ "start": 383, "end": 5809 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'fs_type': 'str', 'read_only': 'bool', 'volume_id': 'str' } attribute_map = { 'fs_type': 'fsType', 'read_only': 'readOnly', 'volume_id': 'volumeID' } def __init__(self, fs_type=None, read_only=None, volume_id=None, local_vars_configuration=None): # noqa: E501 """V1PortworxVolumeSource - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._fs_type = None self._read_only = None self._volume_id = None self.discriminator = None if fs_type is not None: self.fs_type = fs_type if read_only is not None: self.read_only = read_only self.volume_id = volume_id @property def fs_type(self): """Gets the fs_type of this V1PortworxVolumeSource. # noqa: E501 fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501 :return: The fs_type of this V1PortworxVolumeSource. # noqa: E501 :rtype: str """ return self._fs_type @fs_type.setter def fs_type(self, fs_type): """Sets the fs_type of this V1PortworxVolumeSource. fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501 :param fs_type: The fs_type of this V1PortworxVolumeSource. # noqa: E501 :type: str """ self._fs_type = fs_type @property def read_only(self): """Gets the read_only of this V1PortworxVolumeSource. # noqa: E501 readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501 :return: The read_only of this V1PortworxVolumeSource. # noqa: E501 :rtype: bool """ return self._read_only @read_only.setter def read_only(self, read_only): """Sets the read_only of this V1PortworxVolumeSource. readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501 :param read_only: The read_only of this V1PortworxVolumeSource. # noqa: E501 :type: bool """ self._read_only = read_only @property def volume_id(self): """Gets the volume_id of this V1PortworxVolumeSource. # noqa: E501 volumeID uniquely identifies a Portworx volume # noqa: E501 :return: The volume_id of this V1PortworxVolumeSource. # noqa: E501 :rtype: str """ return self._volume_id @volume_id.setter def volume_id(self, volume_id): """Sets the volume_id of this V1PortworxVolumeSource. volumeID uniquely identifies a Portworx volume # noqa: E501 :param volume_id: The volume_id of this V1PortworxVolumeSource. 
# noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and volume_id is None: # noqa: E501 raise ValueError("Invalid value for `volume_id`, must not be `None`") # noqa: E501 self._volume_id = volume_id def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1PortworxVolumeSource): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1PortworxVolumeSource): return True return self.to_dict() != other.to_dict()
V1PortworxVolumeSource
python
plotly__plotly.py
_plotly_utils/basevalidators.py
{ "start": 77446, "end": 79793 }
class ____(BaseValidator): def __init__(self, plotly_name, parent_name, data_class_str, data_docs, **kwargs): super(CompoundArrayValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, **kwargs ) # Save element class string self.data_class_str = data_class_str self._data_class = None self.data_docs = data_docs self.module_str = CompoundValidator.compute_graph_obj_module_str( self.data_class_str, parent_name ) def description(self): desc = ( """\ The '{plotly_name}' property is a tuple of instances of {class_str} that may be specified as: - A list or tuple of instances of {module_str}.{class_str} - A list or tuple of dicts of string/value properties that will be passed to the {class_str} constructor""" ).format( plotly_name=self.plotly_name, class_str=self.data_class_str, module_str=self.module_str, ) return desc @property def data_class(self): if self._data_class is None: module = import_module(self.module_str) self._data_class = getattr(module, self.data_class_str) return self._data_class def validate_coerce(self, v, skip_invalid=False): if v is None: v = [] elif isinstance(v, (list, tuple)): res = [] invalid_els = [] for v_el in v: if isinstance(v_el, self.data_class): res.append(self.data_class(v_el)) elif isinstance(v_el, dict): res.append(self.data_class(v_el, skip_invalid=skip_invalid)) else: if skip_invalid: res.append(self.data_class()) else: res.append(None) invalid_els.append(v_el) if invalid_els: self.raise_invalid_elements(invalid_els) v = to_scalar_or_list(res) else: if skip_invalid: v = [] else: self.raise_invalid_val(v) return v def present(self, v): # Return compound object as tuple return tuple(v)
CompoundArrayValidator
python
ray-project__ray
doc/source/ray-more-libs/doc_code/dask_on_ray_callbacks.py
{ "start": 207, "end": 1130 }
class ____(RayDaskCallback):
    def _ray_pretask(self, key, object_refs):
        # Executed at the start of the Ray task.
        start_time = timer()
        return start_time

    def _ray_posttask(self, key, result, pre_state):
        # Executed at the end of the Ray task.
        execution_time = timer() - pre_state
        print(f"Execution time for task {key}: {execution_time}s")


with MyTimerCallback():
    # Any .compute() calls within this context will get MyTimerCallback()
    # as a Dask-Ray callback.
    z.compute(scheduler=ray_dask_get)
# __timer_callback_end__
# fmt: on

# fmt: off
# __ray_dask_callback_direct_begin__
def my_presubmit_cb(task, key, deps):
    print(f"About to submit task {key}!")


with RayDaskCallback(ray_presubmit=my_presubmit_cb):
    z.compute(scheduler=ray_dask_get)
# __ray_dask_callback_direct_end__
# fmt: on

# fmt: off
# __ray_dask_callback_subclass_begin__
MyTimerCallback
python
huggingface__transformers
src/transformers/models/blip/modeling_blip.py
{ "start": 12358, "end": 13939 }
class ____(nn.Module):
    def __init__(self, config: BlipTextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]

        max_position_embedding = self.position_embedding.weight.shape[0]

        if seq_length > max_position_embedding:
            raise ValueError(
                f"Sequence length must be less than max_position_embeddings (got `sequence length`: "
                f"{seq_length} and max_position_embeddings: {max_position_embedding}"
            )

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings
BlipTextEmbeddings
python
ray-project__ray
python/ray/data/datasource/datasource.py
{ "start": 7868, "end": 10760 }
class ____(_DatasourceProjectionPushdownMixin, _DatasourcePredicatePushdownMixin): """Interface for defining a custom :class:`~ray.data.Dataset` datasource. To read a datasource into a dataset, use :meth:`~ray.data.read_datasource`. """ # noqa: E501 def __init__(self): """Initialize the datasource and its mixins.""" _DatasourcePredicatePushdownMixin.__init__(self) @Deprecated def create_reader(self, **read_args) -> "Reader": """ Deprecated: Implement :meth:`~ray.data.Datasource.get_read_tasks` and :meth:`~ray.data.Datasource.estimate_inmemory_data_size` instead. """ return _LegacyDatasourceReader(self, **read_args) @Deprecated def prepare_read(self, parallelism: int, **read_args) -> List["ReadTask"]: """ Deprecated: Implement :meth:`~ray.data.Datasource.get_read_tasks` and :meth:`~ray.data.Datasource.estimate_inmemory_data_size` instead. """ raise NotImplementedError def get_name(self) -> str: """Return a human-readable name for this datasource. This will be used as the names of the read tasks. """ name = type(self).__name__ datasource_suffix = "Datasource" if name.endswith(datasource_suffix): name = name[: -len(datasource_suffix)] return name def estimate_inmemory_data_size(self) -> Optional[int]: """Return an estimate of the in-memory data size, or None if unknown. Note that the in-memory data size may be larger than the on-disk data size. """ raise NotImplementedError def get_read_tasks( self, parallelism: int, per_task_row_limit: Optional[int] = None ) -> List["ReadTask"]: """Execute the read and return read tasks. Args: parallelism: The requested read parallelism. The number of read tasks should equal to this value if possible. per_task_row_limit: The per-task row limit for the read tasks. Returns: A list of read tasks that can be executed to read blocks from the datasource in parallel. """ raise NotImplementedError @property def should_create_reader(self) -> bool: has_implemented_get_read_tasks = ( type(self).get_read_tasks is not Datasource.get_read_tasks ) has_implemented_estimate_inmemory_data_size = ( type(self).estimate_inmemory_data_size is not Datasource.estimate_inmemory_data_size ) return ( not has_implemented_get_read_tasks or not has_implemented_estimate_inmemory_data_size ) @property def supports_distributed_reads(self) -> bool: """If ``False``, only launch read tasks on the driver's node.""" return True @Deprecated
Datasource
python
allegroai__clearml
clearml/router/proxy.py
{ "start": 155, "end": 1924 }
class ____:
    DEFAULT_PORT = 9000

    def __init__(
        self,
        port: Optional[int] = None,
        workers: Optional[int] = None,
        default_target: Optional[str] = None,
        log_level: Optional[str] = None,
        access_log: bool = True,
        enable_streaming: bool = True,
    ) -> None:
        # at the moment, only a fastapi proxy is supported
        self.base_proxy = FastAPIProxy(
            port or self.DEFAULT_PORT,
            workers=workers,
            default_target=default_target,
            log_level=log_level,
            access_log=access_log,
            enable_streaming=enable_streaming,
        )
        self.base_proxy.start()
        self.port = port
        self.routes = {}

    def add_route(
        self,
        source: str,
        target: str,
        request_callback: Optional[Callable[[Request], Request]] = None,
        response_callback: Optional[Callable[[Response], Response]] = None,
        endpoint_telemetry: bool = True,
        error_callback: Optional[Callable[[Exception], Response]] = None,
    ) -> Route:
        self.routes[source] = self.base_proxy.add_route(
            source=source,
            target=target,
            request_callback=request_callback,
            response_callback=response_callback,
            endpoint_telemetry=endpoint_telemetry,
            error_callback=error_callback,
        )
        return self.routes[source]

    def remove_route(self, source: str) -> None:
        self.routes.pop(source, None)
        self.base_proxy.remove_route(source)

    def get_routes(self) -> Dict[str, Any]:
        return self.routes

    def start(self) -> None:
        self.base_proxy.start()

    def stop(self) -> None:
        self.base_proxy.stop()
HttpProxy
python
dateutil__dateutil
src/dateutil/parser/_parser.py
{ "start": 13451, "end": 19356 }
class ____(list): def __init__(self, *args, **kwargs): super(self.__class__, self).__init__(*args, **kwargs) self.century_specified = False self.dstridx = None self.mstridx = None self.ystridx = None @property def has_year(self): return self.ystridx is not None @property def has_month(self): return self.mstridx is not None @property def has_day(self): return self.dstridx is not None def could_be_day(self, value): if self.has_day: return False elif not self.has_month: return 1 <= value <= 31 elif not self.has_year: # Be permissive, assume leap year month = self[self.mstridx] return 1 <= value <= monthrange(2000, month)[1] else: month = self[self.mstridx] year = self[self.ystridx] return 1 <= value <= monthrange(year, month)[1] def append(self, val, label=None): if hasattr(val, '__len__'): if val.isdigit() and len(val) > 2: self.century_specified = True if label not in [None, 'Y']: # pragma: no cover raise ValueError(label) label = 'Y' elif val > 100: self.century_specified = True if label not in [None, 'Y']: # pragma: no cover raise ValueError(label) label = 'Y' super(self.__class__, self).append(int(val)) if label == 'M': if self.has_month: raise ValueError('Month is already set') self.mstridx = len(self) - 1 elif label == 'D': if self.has_day: raise ValueError('Day is already set') self.dstridx = len(self) - 1 elif label == 'Y': if self.has_year: raise ValueError('Year is already set') self.ystridx = len(self) - 1 def _resolve_from_stridxs(self, strids): """ Try to resolve the identities of year/month/day elements using ystridx, mstridx, and dstridx, if enough of these are specified. """ if len(self) == 3 and len(strids) == 2: # we can back out the remaining stridx value missing = [x for x in range(3) if x not in strids.values()] key = [x for x in ['y', 'm', 'd'] if x not in strids] assert len(missing) == len(key) == 1 key = key[0] val = missing[0] strids[key] = val assert len(self) == len(strids) # otherwise this should not be called out = {key: self[strids[key]] for key in strids} return (out.get('y'), out.get('m'), out.get('d')) def resolve_ymd(self, yearfirst, dayfirst): len_ymd = len(self) year, month, day = (None, None, None) strids = (('y', self.ystridx), ('m', self.mstridx), ('d', self.dstridx)) strids = {key: val for key, val in strids if val is not None} if (len(self) == len(strids) > 0 or (len(self) == 3 and len(strids) == 2)): return self._resolve_from_stridxs(strids) mstridx = self.mstridx if len_ymd > 3: raise ValueError("More than three YMD values") elif len_ymd == 1 or (mstridx is not None and len_ymd == 2): # One member, or two members with a month string if mstridx is not None: month = self[mstridx] # since mstridx is 0 or 1, self[mstridx-1] always # looks up the other element other = self[mstridx - 1] else: other = self[0] if len_ymd > 1 or mstridx is None: if other > 31: year = other else: day = other elif len_ymd == 2: # Two members with numbers if self[0] > 31: # 99-01 year, month = self elif self[1] > 31: # 01-99 month, year = self elif dayfirst and self[1] <= 12: # 13-01 day, month = self else: # 01-13 month, day = self elif len_ymd == 3: # Three members if mstridx == 0: if self[1] > 31: # Apr-2003-25 month, year, day = self else: month, day, year = self elif mstridx == 1: if self[0] > 31 or (yearfirst and self[2] <= 31): # 99-Jan-01 year, month, day = self else: # 01-Jan-01 # Give precedence to day-first, since # two-digit years is usually hand-written. day, month, year = self elif mstridx == 2: # WTF!? 
if self[1] > 31: # 01-99-Jan day, year, month = self else: # 99-01-Jan year, day, month = self else: if (self[0] > 31 or self.ystridx == 0 or (yearfirst and self[1] <= 12 and self[2] <= 31)): # 99-01-01 if dayfirst and self[2] <= 12: year, day, month = self else: year, month, day = self elif self[0] > 12 or (dayfirst and self[1] <= 12): # 13-01-01 day, month, year = self else: # 01-13-01 month, day, year = self return year, month, day
_ymd
python
django__django
tests/aggregation/models.py
{ "start": 31, "end": 285 }
class ____(models.Model):
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    friends = models.ManyToManyField("self", blank=True)
    rating = models.FloatField(null=True)

    def __str__(self):
        return self.name
Author
python
kamyu104__LeetCode-Solutions
Python/binary-tree-preorder-traversal.py
{ "start": 957, "end": 1512 }
class ____(object):
    def preorderTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        result, stack = [], [(root, False)]
        while stack:
            root, is_visited = stack.pop()
            if root is None:
                continue
            if is_visited:
                result.append(root.val)
            else:
                stack.append((root.right, False))
                stack.append((root.left, False))
                stack.append((root, True))
        return result
Solution2
python
python__mypy
mypyc/irbuild/prebuildvisitor.py
{ "start": 438, "end": 8636 }
class ____(ExtendedTraverserVisitor): """Mypy file AST visitor run before building the IR. This collects various things, including: * Determine relationships between nested functions and functions that contain nested functions * Find non-local variables (free variables) * Find property setters * Find decorators of functions * Find module import groups The main IR build pass uses this information. """ def __init__( self, errors: Errors, current_file: MypyFile, decorators_to_remove: dict[FuncDef, list[int]], types: dict[Expression, Type], ) -> None: super().__init__() # Dict from a function to symbols defined directly in the # function that are used as non-local (free) variables within a # nested function. self.free_variables: dict[FuncItem, set[SymbolNode]] = {} # Intermediate data structure used to find the function where # a SymbolNode is declared. Initially this may point to a # function nested inside the function with the declaration, # but we'll eventually update this to refer to the function # with the declaration. self.symbols_to_funcs: dict[SymbolNode, FuncItem] = {} # Stack representing current function nesting. self.funcs: list[FuncItem] = [] # All property setters encountered so far. self.prop_setters: set[FuncDef] = set() # A map from any function that contains nested functions to # a set of all the functions that are nested within it. self.encapsulating_funcs: dict[FuncItem, list[FuncItem]] = {} # Map nested function to its parent/encapsulating function. self.nested_funcs: dict[FuncItem, FuncItem] = {} # Map function to its non-special decorators. self.funcs_to_decorators: dict[FuncDef, list[Expression]] = {} # Map function to indices of decorators to remove self.decorators_to_remove: dict[FuncDef, list[int]] = decorators_to_remove # A mapping of import groups (a series of Import nodes with # nothing in between) where each group is keyed by its first # import node. self.module_import_groups: dict[Import, list[Import]] = {} self._current_import_group: Import | None = None self.errors: Errors = errors self.current_file: MypyFile = current_file self.missing_types_visitor = MissingTypesVisitor(types) def visit(self, o: Node) -> bool: if not isinstance(o, Import): self._current_import_group = None return True def visit_assignment_stmt(self, stmt: AssignmentStmt) -> None: # These are cases where mypy may not have types for certain expressions, # but mypyc needs some form type to exist. if stmt.is_alias_def: stmt.rvalue.accept(self.missing_types_visitor) return super().visit_assignment_stmt(stmt) def visit_block(self, block: Block) -> None: self._current_import_group = None super().visit_block(block) self._current_import_group = None def visit_decorator(self, dec: Decorator) -> None: if dec.decorators: # Only add the function being decorated if there exist # (ordinary) decorators in the decorator list. Certain # decorators (such as @property, @abstractmethod) are # special cased and removed from this list by # mypy. Functions decorated only by special decorators # (and property setters) are not treated as decorated # functions by the IR builder. if isinstance(dec.decorators[0], MemberExpr) and dec.decorators[0].name == "setter": # Property setters are not treated as decorated methods. 
self.prop_setters.add(dec.func) else: decorators_to_store = dec.decorators.copy() if dec.func in self.decorators_to_remove: to_remove = self.decorators_to_remove[dec.func] for i in reversed(to_remove): del decorators_to_store[i] # if all of the decorators are removed, we shouldn't treat this as a decorated # function because there aren't any decorators to apply if not decorators_to_store: return self.funcs_to_decorators[dec.func] = decorators_to_store super().visit_decorator(dec) def visit_func_def(self, fdef: FuncDef) -> None: # TODO: What about overloaded functions? self.visit_func(fdef) self.visit_symbol_node(fdef) def visit_lambda_expr(self, expr: LambdaExpr) -> None: self.visit_func(expr) def visit_func(self, func: FuncItem) -> None: # If there were already functions or lambda expressions # defined in the function stack, then note the previous # FuncItem as containing a nested function and the current # FuncItem as being a nested function. if self.funcs: # Add the new func to the set of nested funcs within the # func at top of the func stack. self.encapsulating_funcs.setdefault(self.funcs[-1], []).append(func) # Add the func at top of the func stack as the parent of # new func. self.nested_funcs[func] = self.funcs[-1] self.funcs.append(func) super().visit_func(func) self.funcs.pop() def visit_import(self, imp: Import) -> None: if self._current_import_group is not None: self.module_import_groups[self._current_import_group].append(imp) else: self.module_import_groups[imp] = [imp] self._current_import_group = imp super().visit_import(imp) def visit_name_expr(self, expr: NameExpr) -> None: if isinstance(expr.node, (Var, FuncDef)): self.visit_symbol_node(expr.node) def visit_var(self, var: Var) -> None: self.visit_symbol_node(var) def visit_symbol_node(self, symbol: SymbolNode) -> None: if not self.funcs: # We are not inside a function and hence do not need to do # anything regarding free variables. return if symbol in self.symbols_to_funcs: orig_func = self.symbols_to_funcs[symbol] if self.is_parent(self.funcs[-1], orig_func): # The function in which the symbol was previously seen is # nested within the function currently being visited. Thus # the current function is a better candidate to contain the # declaration. self.symbols_to_funcs[symbol] = self.funcs[-1] # TODO: Remove from the orig_func free_variables set? self.free_variables.setdefault(self.funcs[-1], set()).add(symbol) elif self.is_parent(orig_func, self.funcs[-1]): # The SymbolNode instance has already been visited # before in a parent function, thus it's a non-local # symbol. self.add_free_variable(symbol) else: # This is the first time the SymbolNode is being # visited. We map the SymbolNode to the current FuncDef # being visited to note where it was first visited. self.symbols_to_funcs[symbol] = self.funcs[-1] def is_parent(self, fitem: FuncItem, child: FuncItem) -> bool: # Check if child is nested within fdef (possibly indirectly # within multiple nested functions). if child not in self.nested_funcs: return False parent = self.nested_funcs[child] return parent == fitem or self.is_parent(fitem, parent) def add_free_variable(self, symbol: SymbolNode) -> None: # Find the function where the symbol was (likely) first declared, # and mark is as a non-local symbol within that function. func = self.symbols_to_funcs[symbol] self.free_variables.setdefault(func, set()).add(symbol)
PreBuildVisitor
python
kubernetes-client__python
kubernetes/client/models/v1_api_service.py
{ "start": 383, "end": 7196 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'spec': 'V1APIServiceSpec', 'status': 'V1APIServiceStatus' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'spec': 'spec', 'status': 'status' } def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501 """V1APIService - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._spec = None self._status = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if spec is not None: self.spec = spec if status is not None: self.status = status @property def api_version(self): """Gets the api_version of this V1APIService. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1APIService. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1APIService. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1APIService. # noqa: E501 :type: str """ self._api_version = api_version @property def kind(self): """Gets the kind of this V1APIService. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1APIService. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1APIService. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1APIService. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1APIService. # noqa: E501 :return: The metadata of this V1APIService. # noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1APIService. 
:param metadata: The metadata of this V1APIService. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata @property def spec(self): """Gets the spec of this V1APIService. # noqa: E501 :return: The spec of this V1APIService. # noqa: E501 :rtype: V1APIServiceSpec """ return self._spec @spec.setter def spec(self, spec): """Sets the spec of this V1APIService. :param spec: The spec of this V1APIService. # noqa: E501 :type: V1APIServiceSpec """ self._spec = spec @property def status(self): """Gets the status of this V1APIService. # noqa: E501 :return: The status of this V1APIService. # noqa: E501 :rtype: V1APIServiceStatus """ return self._status @status.setter def status(self, status): """Sets the status of this V1APIService. :param status: The status of this V1APIService. # noqa: E501 :type: V1APIServiceStatus """ self._status = status def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1APIService): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1APIService): return True return self.to_dict() != other.to_dict()
V1APIService
python
run-llama__llama_index
llama-index-core/llama_index/core/ingestion/data_sources.py
{ "start": 726, "end": 1421 }
class ____(BasePydanticReader):
    """
    A group of documents, usually separate pages from a single file.
    """

    file_path: str = Field(description="Path to the file containing the documents")
    documents: List[Document] = Field(
        description="Sequential group of documents, usually separate pages from a single file."
    )

    @property
    def file_name(self) -> str:
        return Path(self.file_path).name

    @classmethod
    def class_name(cls) -> str:
        return "DocumentGroup"

    def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]:
        """Load data from the input directory lazily."""
        return self.documents
DocumentGroup
python
openai__openai-python
src/openai/types/chat/parsed_chat_completion.py
{ "start": 1002, "end": 1182 }
class ____(Choice, GenericModel, Generic[ContentType]):
    message: ParsedChatCompletionMessage[ContentType]
    """A chat completion message generated by the model."""
ParsedChoice
python
allegroai__clearml
clearml/storage/helper.py
{ "start": 169054, "end": 169225 }
class ____(_FileStorageDriver):
    def get_direct_access(self, remote_path: str, **_: Any) -> Optional[str]:
        return None
_FileStorageDriverDiskSpaceFileSizeStrategy
python
vyperlang__vyper
vyper/exceptions.py
{ "start": 9845, "end": 9936 }
class ____(VyperException):
    """msg.value in a nonpayable function."""
NonPayableViolation
python
huggingface__transformers
src/transformers/models/sew_d/modeling_sew_d.py
{ "start": 12876, "end": 14698 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
            stride=config.squeeze_factor,
        )

        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = weight_norm(self.conv, name="weight", dim=2)
            if hasattr(self.conv, "parametrizations"):
                weight_g = self.conv.parametrizations.weight.original0
                weight_v = self.conv.parametrizations.weight.original1
            else:
                weight_g = self.conv.weight_g
                weight_v = self.conv.weight_v
            deepspeed.zero.register_external_parameter(self, weight_v)
            deepspeed.zero.register_external_parameter(self, weight_g)
        else:
            self.conv = weight_norm(self.conv, name="weight", dim=2)

        self.padding = SEWDSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        return hidden_states


# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->SEW
SEWDPositionalConvEmbedding
python
davidhalter__parso
parso/grammar.py
{ "start": 891, "end": 8809 }
class ____(Generic[_NodeT]): """ :py:func:`parso.load_grammar` returns instances of this class. Creating custom none-python grammars by calling this is not supported, yet. :param text: A BNF representation of your grammar. """ _start_nonterminal: str _error_normalizer_config: Optional[ErrorFinderConfig] = None _token_namespace: Any = None _default_normalizer_config: NormalizerConfig = pep8.PEP8NormalizerConfig() def __init__(self, text: str, *, tokenizer, parser=BaseParser, diff_parser=None): self._pgen_grammar = generate_grammar( text, token_namespace=self._get_token_namespace() ) self._parser = parser self._tokenizer = tokenizer self._diff_parser = diff_parser self._hashed = hashlib.sha256(text.encode("utf-8")).hexdigest() def parse(self, code: Union[str, bytes] = None, *, error_recovery=True, path: Union[os.PathLike, str] = None, start_symbol: str = None, cache=False, diff_cache=False, cache_path: Union[os.PathLike, str] = None, file_io: FileIO = None) -> _NodeT: """ If you want to parse a Python file you want to start here, most likely. If you need finer grained control over the parsed instance, there will be other ways to access it. :param str code: A unicode or bytes string. When it's not possible to decode bytes to a string, returns a :py:class:`UnicodeDecodeError`. :param bool error_recovery: If enabled, any code will be returned. If it is invalid, it will be returned as an error node. If disabled, you will get a ParseError when encountering syntax errors in your code. :param str start_symbol: The grammar rule (nonterminal) that you want to parse. Only allowed to be used when error_recovery is False. :param str path: The path to the file you want to open. Only needed for caching. :param bool cache: Keeps a copy of the parser tree in RAM and on disk if a path is given. Returns the cached trees if the corresponding files on disk have not changed. Note that this stores pickle files on your file system (e.g. for Linux in ``~/.cache/parso/``). :param bool diff_cache: Diffs the cached python module against the new code and tries to parse only the parts that have changed. Returns the same (changed) module that is found in cache. Using this option requires you to not do anything anymore with the cached modules under that path, because the contents of it might change. This option is still somewhat experimental. If you want stability, please don't use it. :param bool cache_path: If given saves the parso cache in this directory. If not given, defaults to the default cache places on each platform. :return: A subclass of :py:class:`parso.tree.NodeOrLeaf`. Typically a :py:class:`parso.python.tree.Module`. 
""" if code is None and path is None and file_io is None: raise TypeError("Please provide either code or a path.") if isinstance(path, str): path = Path(path) if isinstance(cache_path, str): cache_path = Path(cache_path) if start_symbol is None: start_symbol = self._start_nonterminal if error_recovery and start_symbol != 'file_input': raise NotImplementedError("This is currently not implemented.") if file_io is None: if code is None: file_io = FileIO(path) # type: ignore[arg-type] else: file_io = KnownContentFileIO(path, code) if cache and file_io.path is not None: module_node = load_module(self._hashed, file_io, cache_path=cache_path) if module_node is not None: return module_node # type: ignore[no-any-return] if code is None: code = file_io.read() code = python_bytes_to_unicode(code) lines = split_lines(code, keepends=True) if diff_cache: if self._diff_parser is None: raise TypeError("You have to define a diff parser to be able " "to use this option.") try: module_cache_item = parser_cache[self._hashed][file_io.path] except KeyError: pass else: module_node = module_cache_item.node old_lines = module_cache_item.lines if old_lines == lines: return module_node # type: ignore[no-any-return] new_node = self._diff_parser( self._pgen_grammar, self._tokenizer, module_node ).update( old_lines=old_lines, new_lines=lines ) try_to_save_module(self._hashed, file_io, new_node, lines, # Never pickle in pypy, it's slow as hell. pickling=cache and not is_pypy, cache_path=cache_path) return new_node # type: ignore[no-any-return] tokens = self._tokenizer(lines) p = self._parser( self._pgen_grammar, error_recovery=error_recovery, start_nonterminal=start_symbol ) root_node = p.parse(tokens=tokens) if cache or diff_cache: try_to_save_module(self._hashed, file_io, root_node, lines, # Never pickle in pypy, it's slow as hell. pickling=cache and not is_pypy, cache_path=cache_path) return root_node # type: ignore[no-any-return] def _get_token_namespace(self): ns = self._token_namespace if ns is None: raise ValueError("The token namespace should be set.") return ns def iter_errors(self, node): """ Given a :py:class:`parso.tree.NodeOrLeaf` returns a generator of :py:class:`parso.normalizer.Issue` objects. For Python this is a list of syntax/indentation errors. """ if self._error_normalizer_config is None: raise ValueError("No error normalizer specified for this grammar.") return self._get_normalizer_issues(node, self._error_normalizer_config) def refactor(self, base_node, node_to_str_map): return RefactoringNormalizer(node_to_str_map).walk(base_node) def _get_normalizer(self, normalizer_config): if normalizer_config is None: normalizer_config = self._default_normalizer_config if normalizer_config is None: raise ValueError("You need to specify a normalizer, because " "there's no default normalizer for this tree.") return normalizer_config.create_normalizer(self) def _normalize(self, node, normalizer_config=None): """ TODO this is not public, yet. The returned code will be normalized, e.g. PEP8 for Python. """ normalizer = self._get_normalizer(normalizer_config) return normalizer.walk(node) def _get_normalizer_issues(self, node, normalizer_config=None): normalizer = self._get_normalizer(normalizer_config) normalizer.walk(node) return normalizer.issues def __repr__(self): nonterminals = self._pgen_grammar.nonterminal_to_dfas.keys() txt = ' '.join(list(nonterminals)[:3]) + ' ...' return '<%s:%s>' % (self.__class__.__name__, txt)
Grammar
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1486458, "end": 1493746 }
class ____(sgqlc.types.Type, Node): """A GitHub Sponsors listing.""" __schema__ = github_schema __field_names__ = ( "active_goal", "active_stripe_connect_account", "billing_country_or_region", "contact_email_address", "created_at", "dashboard_resource_path", "dashboard_url", "featured_items", "fiscal_host", "full_description", "full_description_html", "is_public", "name", "next_payout_date", "residence_country_or_region", "resource_path", "short_description", "slug", "sponsorable", "tiers", "url", ) active_goal = sgqlc.types.Field(SponsorsGoal, graphql_name="activeGoal") """The current goal the maintainer is trying to reach with GitHub Sponsors, if any. """ active_stripe_connect_account = sgqlc.types.Field(StripeConnectAccount, graphql_name="activeStripeConnectAccount") """The Stripe Connect account currently in use for payouts for this Sponsors listing, if any. Will only return a value when queried by the maintainer themselves, or by an admin of the sponsorable organization. """ billing_country_or_region = sgqlc.types.Field(String, graphql_name="billingCountryOrRegion") """The name of the country or region with the maintainer's bank account or fiscal host. Will only return a value when queried by the maintainer themselves, or by an admin of the sponsorable organization. """ contact_email_address = sgqlc.types.Field(String, graphql_name="contactEmailAddress") """The email address used by GitHub to contact the sponsorable about their GitHub Sponsors profile. Will only return a value when queried by the maintainer themselves, or by an admin of the sponsorable organization. """ created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" dashboard_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="dashboardResourcePath") """The HTTP path for the Sponsors dashboard for this Sponsors listing. """ dashboard_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="dashboardUrl") """The HTTP URL for the Sponsors dashboard for this Sponsors listing.""" featured_items = sgqlc.types.Field( sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null("SponsorsListingFeaturedItem"))), graphql_name="featuredItems", args=sgqlc.types.ArgDict( ( ( "featureable_types", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(SponsorsListingFeaturedItemFeatureableType)), graphql_name="featureableTypes", default=("REPOSITORY", "USER"), ), ), ) ), ) """The records featured on the GitHub Sponsors profile. Arguments: * `featureable_types` (`[SponsorsListingFeaturedItemFeatureableType!]`): The types of featured items to return. (default: `[REPOSITORY, USER]`) """ fiscal_host = sgqlc.types.Field(Organization, graphql_name="fiscalHost") """The fiscal host used for payments, if any. Will only return a value when queried by the maintainer themselves, or by an admin of the sponsorable organization. 
""" full_description = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="fullDescription") """The full description of the listing.""" full_description_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="fullDescriptionHTML") """The full description of the listing rendered to HTML.""" is_public = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPublic") """Whether this listing is publicly visible.""" name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") """The listing's full name.""" next_payout_date = sgqlc.types.Field(Date, graphql_name="nextPayoutDate") """A future date on which this listing is eligible to receive a payout. """ residence_country_or_region = sgqlc.types.Field(String, graphql_name="residenceCountryOrRegion") """The name of the country or region where the maintainer resides. Will only return a value when queried by the maintainer themselves, or by an admin of the sponsorable organization. """ resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath") """The HTTP path for this Sponsors listing.""" short_description = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="shortDescription") """The short description of the listing.""" slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug") """The short name of the listing.""" sponsorable = sgqlc.types.Field(sgqlc.types.non_null(Sponsorable), graphql_name="sponsorable") """The entity this listing represents who can be sponsored on GitHub Sponsors. """ tiers = sgqlc.types.Field( SponsorsTierConnection, graphql_name="tiers", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "order_by", sgqlc.types.Arg( SponsorsTierOrder, graphql_name="orderBy", default={"field": "MONTHLY_PRICE_IN_CENTS", "direction": "ASC"} ), ), ("include_unpublished", sgqlc.types.Arg(Boolean, graphql_name="includeUnpublished", default=False)), ) ), ) """The tiers for this GitHub Sponsors profile. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. * `order_by` (`SponsorsTierOrder`): Ordering options for Sponsors tiers returned from the connection. (default: `{field: MONTHLY_PRICE_IN_CENTS, direction: ASC}`) * `include_unpublished` (`Boolean`): Whether to include tiers that aren't published. Only admins of the Sponsors listing can see draft tiers. Only admins of the Sponsors listing and viewers who are currently sponsoring on a retired tier can see those retired tiers. Defaults to including only published tiers, which are visible to anyone who can see the GitHub Sponsors profile. (default: `false`) """ url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url") """The HTTP URL for this Sponsors listing."""
SponsorsListing
python
python__mypy
mypy/nodes.py
{ "start": 59025, "end": 59184 }
class ____(Statement):
    __slots__ = ()

    def accept(self, visitor: StatementVisitor[T]) -> T:
        return visitor.visit_continue_stmt(self)
ContinueStmt
python
getsentry__sentry
src/sentry/api/endpoints/event_file_committers.py
{ "start": 476, "end": 1685 }
class ____(ProjectEndpoint):
    owner = ApiOwner.ISSUES
    publish_status = {
        "GET": ApiPublishStatus.PRIVATE,
    }

    def get(self, request: Request, project, event_id) -> Response:
        """
        Retrieve Suspect Commit information for an event
        ```````````````````````````````````````````

        Return suspect commits on an individual event.

        :pparam string project_id_or_slug: the id or slug of the project the event
                                           belongs to.
        :pparam string event_id: the hexadecimal ID of the event to
                                 retrieve (as reported by the raven client).
        :auth: required
        """
        event = eventstore.backend.get_event_by_id(project.id, event_id)
        if event is None:
            raise NotFound(detail="Event not found")
        elif event.group_id is None:
            raise NotFound(detail="Issue not found")

        committers = get_serialized_event_file_committers(project, event)
        if not committers:
            raise NotFound(detail="No committers found")

        data = {
            "committers": committers,
        }

        return Response(data)
EventFileCommittersEndpoint
python
django__django
django/db/backends/postgresql/creation.py
{ "start": 247, "end": 3886 }
class ____(BaseDatabaseCreation): def _quote_name(self, name): return self.connection.ops.quote_name(name) def _get_database_create_suffix(self, encoding=None, template=None): suffix = "" if encoding: suffix += " ENCODING '{}'".format(encoding) if template: suffix += " TEMPLATE {}".format(self._quote_name(template)) return suffix and "WITH" + suffix def sql_table_creation_suffix(self): test_settings = self.connection.settings_dict["TEST"] if test_settings.get("COLLATION") is not None: raise ImproperlyConfigured( "PostgreSQL does not support collation setting at database " "creation time." ) return self._get_database_create_suffix( encoding=test_settings["CHARSET"], template=test_settings.get("TEMPLATE"), ) def _database_exists(self, cursor, database_name): cursor.execute( "SELECT 1 FROM pg_catalog.pg_database WHERE datname = %s", [strip_quotes(database_name)], ) return cursor.fetchone() is not None def _execute_create_test_db(self, cursor, parameters, keepdb=False): try: if keepdb and self._database_exists(cursor, parameters["dbname"]): # If the database should be kept and it already exists, don't # try to create a new one. return super()._execute_create_test_db(cursor, parameters, keepdb) except Exception as e: if not isinstance(e.__cause__, errors.DuplicateDatabase): # All errors except "database already exists" cancel tests. self.log("Got an error creating the test database: %s" % e) sys.exit(2) elif not keepdb: # If the database should be kept, ignore "database already # exists". raise def _clone_test_db(self, suffix, verbosity, keepdb=False): # CREATE DATABASE ... WITH TEMPLATE ... requires closing connections # to the template database. self.connection.close() self.connection.close_pool() source_database_name = self.connection.settings_dict["NAME"] target_database_name = self.get_test_db_clone_settings(suffix)["NAME"] test_db_params = { "dbname": self._quote_name(target_database_name), "suffix": self._get_database_create_suffix(template=source_database_name), } with self._nodb_cursor() as cursor: try: self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception: try: if verbosity >= 1: self.log( "Destroying old test database for alias %s..." % ( self._get_database_display_str( verbosity, target_database_name ), ) ) cursor.execute("DROP DATABASE %(dbname)s" % test_db_params) self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: self.log("Got an error cloning the test database: %s" % e) sys.exit(2) def _destroy_test_db(self, test_database_name, verbosity): self.connection.close_pool() return super()._destroy_test_db(test_database_name, verbosity)
DatabaseCreation
python
ansible__ansible
lib/ansible/cli/arguments/option_helpers.py
{ "start": 5870, "end": 6395 }
class ____(argparse.Action):
    def __init__(self, option_strings, dest, const=True, default=None, required=False, help=None, metavar=None, nargs=0):
        super(UnrecognizedArgument, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, const=const,
                                                   default=default, required=required, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        parser.error('unrecognized arguments: %s' % option_string)
UnrecognizedArgument
python
ray-project__ray
rllib/core/models/configs.py
{ "start": 27462, "end": 35383 }
class ____(ModelConfig): """Configuration for a convolutional (encoder) network. The configured CNN encodes 3D-observations into a latent space. The stack of layers is composed of a sequence of convolutional layers. `input_dims` describes the shape of the input tensor. Beyond that, each layer specified by `filter_specifiers` is followed by an activation function according to `filter_activation`. `output_dims` is reached by either the final convolutional layer's output directly OR by flatten this output. See ModelConfig for usage details. Example: .. testcode:: # Configuration: config = CNNEncoderConfig( input_dims=[84, 84, 3], # must be 3D tensor (image: w x h x C) cnn_filter_specifiers=[ [16, [8, 8], 4], [32, [4, 4], 2], ], cnn_activation="relu", cnn_use_layernorm=False, cnn_use_bias=True, ) model = config.build(framework="torch") # Resulting stack in pseudocode: # Conv2D( # in_channels=3, out_channels=16, # kernel_size=[8, 8], stride=[4, 4], bias=True, # ) # ReLU() # Conv2D( # in_channels=16, out_channels=32, # kernel_size=[4, 4], stride=[2, 2], bias=True, # ) # ReLU() # Conv2D( # in_channels=32, out_channels=1, # kernel_size=[1, 1], stride=[1, 1], bias=True, # ) # Flatten() Attributes: input_dims: The input dimension of the network. These must be given in the form of `(width, height, channels)`. cnn_filter_specifiers: A list in which each element is another (inner) list of either the following forms: `[number of channels/filters, kernel, stride]` OR: `[number of channels/filters, kernel, stride, padding]`, where `padding` can either be "same" or "valid". When using the first format w/o the `padding` specifier, `padding` is "same" by default. Also, `kernel` and `stride` may be provided either as single ints (square) or as a tuple/list of two ints (width- and height dimensions) for non-squared kernel/stride shapes. A good rule of thumb for constructing CNN stacks is: When using padding="same", the input "image" will be reduced in size by the factor `stride`, e.g. input=(84, 84, 3) stride=2 kernel=x padding="same" filters=16 -> output=(42, 42, 16). For example, if you would like to reduce an Atari image from its original (84, 84, 3) dimensions down to (6, 6, F), you can construct the following stack and reduce the w x h dimension of the image by 2 in each layer: [[16, 4, 2], [32, 4, 2], [64, 4, 2], [128, 4, 2]] -> output=(6, 6, 128) cnn_use_bias: Whether to use bias on all Conv2D layers. cnn_activation: The activation function to use after each layer ( except for the output). The default activation for Conv2d layers is "relu". cnn_use_layernorm: Whether to insert a LayerNorm functionality in between each CNN layer's output and its activation. Note that the output layer. cnn_kernel_initializer: The initializer function or class to use for kernel initialization in the CNN layers. If `None` the default initializer of the respective CNN layer of a framework (`"torch"` or `"tf2"`) is used. Note, all initializers defined in the framework `"tf2`) are allowed. For `"torch"` only the in-place initializers, i.e. ending with an underscore "_" are allowed. cnn_kernel_initializer_config: Configuration to pass into the initializer defined in `cnn_kernel_initializer`. cnn_bias_initializer: The initializer function or class to use for bias initialization in the CNN layers. If `None` the default initializer of the respective CNN layer of a framework (`"torch"` or `"tf2"`) is used. For `"torch"` only the in-place initializers, i.e. ending with an underscore "_" are allowed. 
cnn_bias_initializer_config: Configuration to pass into the initializer defined in `cnn_bias_initializer`. flatten_at_end: Whether to flatten the output of the last conv 2D layer into a 1D tensor. By default, this is True. Note that if you set this to False, you might simply stack another CNNEncoder on top of this one (maybe with different activation and bias settings). """ input_dims: Union[List[int], Tuple[int, ...]] = None cnn_filter_specifiers: List[List[Union[int, List[int]]]] = field( default_factory=lambda: [[16, [4, 4], 2], [32, [4, 4], 2], [64, [8, 8], 2]] ) cnn_use_bias: bool = True cnn_activation: str = "relu" cnn_use_layernorm: bool = False cnn_kernel_initializer: Optional[Union[str, Callable]] = None cnn_kernel_initializer_config: Optional[Dict] = None cnn_bias_initializer: Optional[Union[str, Callable]] = None cnn_bias_initializer_config: Optional[Dict] = None flatten_at_end: bool = True @property def output_dims(self): if not self.input_dims: return None # Infer output dims, layer by layer. dims = self.input_dims # Creates a copy (works for tuple/list). for filter_spec in self.cnn_filter_specifiers: # Padding not provided, "same" by default. if len(filter_spec) == 3: num_filters, kernel, stride = filter_spec padding = "same" # Padding option provided, use given value. else: num_filters, kernel, stride, padding = filter_spec # Same padding. if padding == "same": _, dims = same_padding(dims[:2], kernel, stride) # Valid padding. else: dims = valid_padding(dims[:2], kernel, stride) # Add depth (num_filters) to the end (our utility functions for same/valid # only return the image width/height). dims = [dims[0], dims[1], num_filters] # Flatten everything. if self.flatten_at_end: return (int(np.prod(dims)),) return tuple(dims) def _validate(self, framework: str = "torch"): if len(self.input_dims) != 3: raise ValueError( f"`input_dims` ({self.input_dims}) of CNNEncoderConfig must be a 3D " "tensor (image) with the dimensions meaning: width x height x " "channels, e.g. `[64, 64, 3]`!" ) if not self.flatten_at_end and len(self.output_dims) != 3: raise ValueError( f"`output_dims` ({self.output_dims}) of CNNEncoderConfig must be " "3D, e.g. `[4, 4, 128]`, b/c your `flatten_at_end` setting is False! " "`output_dims` is an inferred value, hence other settings might be " "wrong." ) elif self.flatten_at_end and len(self.output_dims) != 1: raise ValueError( f"`output_dims` ({self.output_dims}) of CNNEncoderConfig must be " "1D, e.g. `[32]`, b/c your `flatten_at_end` setting is True! " "`output_dims` is an inferred value, hence other settings might be " "wrong." ) @_framework_implemented() def build(self, framework: str = "torch") -> "Model": self._validate(framework) if framework == "torch": from ray.rllib.core.models.torch.encoder import TorchCNNEncoder return TorchCNNEncoder(self) @ExperimentalAPI @dataclass
CNNEncoderConfig
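The "rule of thumb" in the docstring above (with "same" padding, each conv layer shrinks width and height by the stride factor) can be checked with plain arithmetic, independent of RLlib. A minimal sketch, assuming only that "same" padding yields `ceil(size / stride)` per spatial dimension; this is not RLlib's own `same_padding` utility:

```python
import math

def same_padding_output_dims(input_dims, filter_specifiers):
    # Walk a CNN filter stack and return the output (w, h, channels),
    # assuming "same" padding for every layer as described above.
    # Kernel size does not affect the output size under "same" padding.
    w, h, channels = input_dims
    for num_filters, _kernel, stride in filter_specifiers:
        s = stride if isinstance(stride, int) else stride[0]
        w, h = math.ceil(w / s), math.ceil(h / s)
        channels = num_filters
    return (w, h, channels)

# The Atari example from the docstring: (84, 84, 3) -> (6, 6, 128).
stack = [[16, 4, 2], [32, 4, 2], [64, 4, 2], [128, 4, 2]]
print(same_padding_output_dims((84, 84, 3), stack))  # (6, 6, 128)
```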
python
django__django
django/test/selenium.py
{ "start": 4535, "end": 5000 }
class ____: def __init__(self, width, height, selenium): self.selenium = selenium self.new_size = (width, height) def __enter__(self): self.old_size = self.selenium.get_window_size() self.selenium.set_window_size(*self.new_size) return self def __exit__(self, exc_type, exc_value, traceback): self.selenium.set_window_size(self.old_size["width"], self.old_size["height"]) @tag("selenium")
ChangeWindowSize
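A usage sketch for the context manager above, assuming an already-running Selenium WebDriver instance named `driver` (Django's own tests pass `self.selenium` here); the previous window size is restored on exit:

```python
# Hypothetical usage; `driver` stands in for an existing Selenium WebDriver.
with ChangeWindowSize(1024, 768, driver):
    # Inside the block the window is 1024x768 (exact values may vary
    # slightly by driver/platform).
    print(driver.get_window_size())
# On exit, the original size is restored.
```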
python
huggingface__transformers
src/transformers/models/decision_transformer/configuration_decision_transformer.py
{ "start": 822, "end": 7029 }
class ____(PreTrainedConfig): """ This is the configuration class to store the configuration of a [`DecisionTransformerModel`]. It is used to instantiate a Decision Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the standard DecisionTransformer architecture. Many of the config options are used to instantiate the GPT2 model that is used as part of the architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: state_dim (`int`, *optional*, defaults to 17): The state size for the RL environment act_dim (`int`, *optional*, defaults to 4): The size of the output action space hidden_size (`int`, *optional*, defaults to 128): The size of the hidden layers max_ep_len (`int`, *optional*, defaults to 4096): The maximum length of an episode in the environment action_tanh (`bool`, *optional*, defaults to True): Whether to use a tanh activation on action prediction vocab_size (`int`, *optional*, defaults to 50257): Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DecisionTransformerModel`]. n_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_layer (`int`, *optional*, defaults to 3): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 1): Number of attention heads for each attention layer in the Transformer encoder. n_inner (`int`, *optional*): Dimensionality of the inner feed-forward layers. If unset, will default to 4 times `n_embd`. activation_function (`str`, *optional*, defaults to `"gelu"`): Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`int`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_attn_weights (`bool`, *optional*, defaults to `True`): Scale attention weights by dividing by sqrt(hidden_size).. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`): Whether to additionally scale attention weights by `1 / layer_idx + 1`. reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`): Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention dot-product/softmax to float() when training with mixed precision. 
Example: ```python >>> from transformers import DecisionTransformerConfig, DecisionTransformerModel >>> # Initializing a DecisionTransformer configuration >>> configuration = DecisionTransformerConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = DecisionTransformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "decision_transformer" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ): self.state_dim = state_dim self.act_dim = act_dim self.hidden_size = hidden_size self.max_ep_len = max_ep_len self.action_tanh = action_tanh self.vocab_size = vocab_size self.n_positions = n_positions self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.scale_attn_weights = scale_attn_weights self.use_cache = use_cache self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx self.reorder_and_upcast_attn = reorder_and_upcast_attn self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) __all__ = ["DecisionTransformerConfig"]
DecisionTransformerConfig
python
doocs__leetcode
lcci/16.19.Pond Sizes/Solution.py
{ "start": 0, "end": 514 }
class ____: def pondSizes(self, land: List[List[int]]) -> List[int]: def dfs(i: int, j: int) -> int: res = 1 land[i][j] = 1 for x in range(i - 1, i + 2): for y in range(j - 1, j + 2): if 0 <= x < m and 0 <= y < n and land[x][y] == 0: res += dfs(x, y) return res m, n = len(land), len(land[0]) return sorted(dfs(i, j) for i in range(m) for j in range(n) if land[i][j] == 0)
Solution
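A worked example for the flood-fill above, with the masked class instantiated under its target name `Solution` and the grid from the problem statement; each 0-cell pond is explored in all eight directions and its size collected:

```python
from typing import List  # needed for the annotation in the class above

land = [
    [0, 2, 1, 0],
    [0, 1, 0, 1],
    [1, 1, 0, 1],
    [0, 1, 0, 1],
]
# Three ponds: the two connected cells in the top-left column, the lone
# cell at (3, 0), and the four diagonally-connected cells on the right.
print(Solution().pondSizes(land))  # [1, 2, 4]
```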
python
conda__conda
conda/core/link.py
{ "start": 5405, "end": 5669 }
class ____(NamedTuple): target_prefix: str unlink_precs: tuple[PackageRecord, ...] link_precs: tuple[PackageRecord, ...] remove_specs: tuple[MatchSpec, ...] update_specs: tuple[MatchSpec, ...] neutered_specs: tuple[MatchSpec, ...]
PrefixSetup
python
graphql-python__graphene
graphene/types/scalars.py
{ "start": 301, "end": 1401 }
class ____(UnmountedType, BaseType): """ Scalar Type Definition The leaf values of any request and input values to arguments are Scalars (or Enums) and are defined with a name and a series of functions used to parse input from ast or variables and to ensure validity. """ @classmethod def __init_subclass_with_meta__(cls, **options): _meta = ScalarOptions(cls) super(Scalar, cls).__init_subclass_with_meta__(_meta=_meta, **options) serialize = None parse_value = None parse_literal = None @classmethod def get_type(cls): """ This function is called when the unmounted type (Scalar instance) is mounted (as a Field, InputField or Argument) """ return cls # As per the GraphQL Spec, Integers are only treated as valid when a valid # 32-bit signed integer, providing the broadest support across platforms. # # n.b. JavaScript's integers are safe between -(2^53 - 1) and 2^53 - 1 because # they are internally represented as IEEE 754 doubles. MAX_INT = 2147483647 MIN_INT = -2147483648
Scalar
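The bounds quoted in the comment above are simply the signed 32-bit integer limits; a quick arithmetic check, plus the JavaScript safe-integer bound mentioned in the note:

```python
# Signed 32-bit bounds used by GraphQL Int.
assert 2147483647 == 2**31 - 1     # MAX_INT
assert -2147483648 == -(2**31)     # MIN_INT

# JavaScript numbers are IEEE 754 doubles, so integers are exact ("safe")
# only up to 2**53 - 1 in magnitude.
assert 2**53 - 1 == 9007199254740991
```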
python
huggingface__transformers
tests/models/pixtral/test_image_processing_pixtral.py
{ "start": 4631, "end": 13245 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = PixtralImageProcessor if is_vision_available() else None fast_image_processing_class = PixtralImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = PixtralImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "patch_size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) # The following tests are overridden as PixtralImageProcessor can return images of different sizes # and thus doesn't support returning batched tensors def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs_list = self.image_processor_tester.prepare_image_inputs() for image in image_inputs_list: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs_list = self.image_processor_tester.prepare_image_inputs(numpify=True) for image in image_inputs_list: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched batch_encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(batch_encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs_list = self.image_processor_tester.prepare_image_inputs(torchify=True) for image in image_inputs_list: self.assertIsInstance(image, torch.Tensor) 
# Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched batch_encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(batch_encoded_images.shape), expected_output_image_shape) @require_vision @require_torch def test_slow_fast_equivalence(self): dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg")) if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values[0][0], encoding_fast.pixel_values[0][0]) @require_vision @require_torch def test_slow_fast_equivalence_batched(self): dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, return_tensors="pt") for i in range(len(encoding_slow.pixel_values)): self._assert_slow_fast_tensors_equivalence( encoding_slow.pixel_values[i][0], encoding_fast.pixel_values[i][0] ) @slow @require_torch_gpu @require_vision @pytest.mark.torch_compile_test def test_can_compile_fast_image_processor(self): if self.fast_image_processing_class is None: self.skipTest("Skipping compilation test as fast image processor is not defined") if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") torch.compiler.reset() input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8) image_processor = self.fast_image_processing_class(**self.image_processor_dict) output_eager = image_processor(input_image, device=torch_device, return_tensors="pt") image_processor = torch.compile(image_processor, mode="reduce-overhead") output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt") 
self._assert_slow_fast_tensors_equivalence( output_eager.pixel_values[0][0], output_compiled.pixel_values[0][0], atol=1e-4, rtol=1e-4, mean_atol=1e-5 ) @unittest.skip(reason="PixtralImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") # FIXME Amy def test_call_numpy_4_channels(self): pass
PixtralImageProcessingTest
python
wandb__wandb
wandb/vendor/pygments/styles/rrt.py
{ "start": 350, "end": 852 }
class ____(Style): """ Minimalistic "rrt" theme, based on Zap and Emacs defaults. """ background_color = '#000000' highlight_color = '#0000ff' styles = { Comment: '#00ff00', Name.Function: '#ffff00', Name.Variable: '#eedd82', Name.Constant: '#7fffd4', Keyword: '#ff0000', Comment.Preproc: '#e5e5e5', String: '#87ceeb', Keyword.Type: '#ee82ee', }
RrtStyle
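This is a vendored copy of Pygments' "rrt" style; with a regular `pygments` install (an assumption — the vendored wandb module above is not imported directly), the same style can be exercised by name:

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = "def add(a, b):\n    return a + b  # comment\n"
# 'rrt' is the registered name of this style in upstream Pygments.
html = highlight(code, PythonLexer(), HtmlFormatter(style="rrt"))
print(html[:80])
```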
python
astropy__astropy
astropy/coordinates/angles/formats.py
{ "start": 918, "end": 13777 }
class ____: """ Parses the various angle formats including: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s * 1°2′3″N This class should not be used directly. Use `parse_angle` instead. """ # For safe multi-threaded operation all class (but not instance) # members that carry state should be thread-local. They are stored # in the following class member _thread_local = threading.local() def __init__(self): # TODO: in principle, the parser should be invalidated if we change unit # system (from CDS to FITS, say). Might want to keep a link to the # unit_registry used, and regenerate the parser/lexer if it changes. # Alternatively, perhaps one should not worry at all and just pre- # generate the parser for each release (as done for unit formats). # For some discussion of this problem, see # https://github.com/astropy/astropy/issues/5350#issuecomment-248770151 if "_parser" not in _AngleParser._thread_local.__dict__: ( _AngleParser._thread_local._parser, _AngleParser._thread_local._lexer, ) = self._make_parser() @classmethod def _get_simple_unit_names(cls): simple_units = set(u.radian.find_equivalent_units(include_prefix_units=True)) simple_unit_names = set() # We filter out degree and hourangle, since those are treated # separately. for unit in simple_units: if unit != u.deg and unit != u.hourangle: simple_unit_names.update(unit.names) return sorted(simple_unit_names) @classmethod def _make_parser(cls): # List of token names. tokens = ( "SIGN", "UINT", "UFLOAT", "COLON", "DEGREE", "HOUR", "MINUTE", "SECOND", "SIMPLE_UNIT", "EASTWEST", "NORTHSOUTH", ) # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!! # Regular expression rules for simple tokens def t_UFLOAT(t): r"((\d+\.\d*)|(\.\d+))([eE][+-−]?\d+)?" # The above includes Unicode "MINUS SIGN" \u2212. It is # important to include the hyphen last, or the regex will # treat this as a range. t.value = float(t.value.replace("−", "-")) return t def t_UINT(t): r"\d+" t.value = int(t.value) return t def t_SIGN(t): r"[+−-]" # The above include Unicode "MINUS SIGN" \u2212. It is # important to include the hyphen last, or the regex will # treat this as a range. 
if t.value == "+": t.value = 1.0 else: t.value = -1.0 return t def t_EASTWEST(t): r"[EW]$" t.value = -1.0 if t.value == "W" else 1.0 return t def t_NORTHSOUTH(t): r"[NS]$" # We cannot use lower-case letters otherwise we'll confuse # s[outh] with s[econd] t.value = -1.0 if t.value == "S" else 1.0 return t def t_SIMPLE_UNIT(t): t.value = u.Unit(t.value) return t t_SIMPLE_UNIT.__doc__ = "|".join( f"(?:{x})" for x in cls._get_simple_unit_names() ) def t_MINUTE(t): r"m(in(ute(s)?)?)?|′|\'|ᵐ" t.value = u.arcmin return t def t_SECOND(t): r"s(ec(ond(s)?)?)?|″|\"|ˢ" # codespell:ignore ond t.value = u.arcsec return t t_COLON = ":" t_DEGREE = r"d(eg(ree(s)?)?)?|°" t_HOUR = r"hour(s)?|h(r)?|ʰ" # A string containing ignored characters (spaces) t_ignore = " " # Error handling rule def t_error(t): raise ValueError(f"Invalid character at col {t.lexpos}") lexer = parsing.lex(lextab="angle_lextab", package="astropy/coordinates/angles") def p_angle(p): """ angle : sign hms eastwest | sign dms dir | sign simple dir """ sign = p[1] * p[3] value, unit = p[2] if isinstance(value, tuple): p[0] = ((sign * value[0],) + value[1:], unit) else: p[0] = (sign * value, unit) def p_sign(p): """ sign : SIGN | """ p[0] = p[1] if len(p) == 2 else 1.0 def p_eastwest(p): """ eastwest : EASTWEST | """ p[0] = p[1] if len(p) == 2 else 1.0 def p_dir(p): """ dir : EASTWEST | NORTHSOUTH | """ p[0] = p[1] if len(p) == 2 else 1.0 def p_ufloat(p): """ ufloat : UFLOAT | UINT """ p[0] = p[1] def p_generic(p): """ generic : ufloat | UINT ufloat | UINT COLON ufloat | UINT UINT ufloat | UINT COLON UINT COLON ufloat """ match p[1:]: case [p1]: p[0] = p1 case [p1, p2] | [p1, ":", p2]: p[0] = (p1, p2) case [p1, p2, p3] | [p1, _, p2, _, p3]: p[0] = (p1, p2, p3) def p_hms(p): """ hms : UINT HOUR | UINT HOUR ufloat | UINT HOUR UINT MINUTE | UINT HOUR UFLOAT MINUTE | UINT HOUR UINT MINUTE ufloat | UINT HOUR UINT MINUTE ufloat SECOND | generic HOUR """ if len(p) == 3: p[0] = (p[1], u.hourangle) elif len(p) in (4, 5): p[0] = ((p[1], p[3]), u.hourangle) elif len(p) in (6, 7): p[0] = ((p[1], p[3], p[5]), u.hourangle) def p_dms(p): """ dms : UINT DEGREE | UINT DEGREE ufloat | UINT DEGREE UINT MINUTE | UINT DEGREE UFLOAT MINUTE | UINT DEGREE UINT MINUTE ufloat | UINT DEGREE UINT MINUTE ufloat SECOND | generic DEGREE """ if len(p) == 3: p[0] = (p[1], u.degree) elif len(p) in (4, 5): p[0] = ((p[1], p[3]), u.degree) elif len(p) in (6, 7): p[0] = ((p[1], p[3], p[5]), u.degree) def p_simple(p): """ simple : generic | generic MINUTE | generic SECOND | generic SIMPLE_UNIT """ p[0] = (p[1], None if len(p) == 2 else p[2]) def p_error(p): raise ValueError parser = parsing.yacc( tabmodule="angle_parsetab", package="astropy/coordinates/angles" ) return parser, lexer def parse(self, angle, unit, debug=False): try: found_angle, found_unit = self._thread_local._parser.parse( angle, lexer=self._thread_local._lexer, debug=debug ) except ValueError as e: raise ValueError( f"{str(e) or 'syntax error'} parsing angle {angle!r}" ) from e if unit is None and found_unit is None: raise u.UnitsError("No unit specified") return found_angle, found_unit def _check_hour_range(hrs: float) -> None: """ Checks that the given value is in the range [-24,24]. If the value is equal to -24 or 24, then a warning is raised. """ if not -24.0 < hrs < 24.0: if abs(hrs) != 24.0: raise IllegalHourError(hrs) warn(IllegalHourWarning(hrs, "Treating as 24 hr")) def _check_minute_range(m: float) -> None: """ Checks that the given value is in the range [0,60]. 
If the value is equal to 60, then a warning is raised. """ if not 0.0 <= m < 60.0: if m != 60.0: raise IllegalMinuteError(m) warn(IllegalMinuteWarning(m, "Treating as 0 min, +1 hr/deg")) def _check_second_range(sec: float) -> None: """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if not 0.0 <= sec < 60.0: if sec != 60.0: raise IllegalSecondError(sec) warn(IllegalSecondWarning(sec, "Treating as 0 sec, +1 min")) def parse_angle(angle, unit=None, debug=False): """ Parses an input string value into an angle value. Parameters ---------- angle : str A string representing the angle. May be in one of the following forms: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s unit : `~astropy.units.UnitBase` instance, optional The unit used to interpret the string. If ``unit`` is not provided, the unit must be explicitly represented in the string, either at the end or as number separators. debug : bool, optional If `True`, print debugging information from the parser. Returns ------- value, unit : tuple ``value`` is the value as a floating point number or three-part tuple, and ``unit`` is a `Unit` instance which is either the unit passed in or the one explicitly mentioned in the input string. """ return _AngleParser().parse(angle, unit, debug=debug) def _decimal_to_sexagesimal(a, /): """ Convert a floating-point input to a 3 tuple - if input is in degrees, the result is (degree, arcminute, arcsecond) - if input is in hourangle, the result is (hour, minute, second) """ sign = np.copysign(1.0, a) # assuming a in degree, these are (degree fraction, degree) (df, d) = np.modf(np.fabs(a)) # assuming a in degree, these are (arcminute fraction, arcminute) (mf, m) = np.modf(df * 60.0) s = mf * 60.0 return np.floor(sign * d), sign * np.floor(m), sign * s def _decimal_to_sexagesimal_string( angle, precision=None, pad=False, sep=(":",), fields=3 ): """ Given a floating point angle, convert it to string """ values = _decimal_to_sexagesimal(angle) # Check to see if values[0] is negative, using np.copysign to handle -0 sign = np.copysign(1.0, values[0]) # If the coordinates are negative, we need to take the absolute values. # We use np.abs because abs(-0) is -0 # TODO: Is this true? (MHvK, 2018-02-01: not on my system) values = [np.abs(value) for value in values] if pad: pad = 3 if sign == -1 else 2 else: pad = 0 if not isinstance(sep, tuple): sep = tuple(sep) if fields < 1 or fields > 3: raise ValueError("fields must be 1, 2, or 3") if not sep: # empty string, False, or None, etc. sep = ("", "", "") elif len(sep) == 1: if fields == 3: sep = sep + (sep[0], "") elif fields == 2: sep = sep + ("", "") else: sep = ("", "", "") elif len(sep) == 2: sep = sep + ("",) elif len(sep) != 3: raise ValueError( "Invalid separator specification for converting angle to string." ) # Simplify the expression based on the requested precision. For # example, if the seconds will round up to 60, we should convert # it to 0 and carry upwards. If the field is hidden (by the # fields kwarg) we round up around the middle, 30.0. 
rounding_thresh = 60.0 - (10.0 ** -(8 if precision is None else precision)) if fields == 3 and values[2] >= rounding_thresh: values[2] = 0.0 values[1] += 1.0 elif fields < 3 and values[2] >= 30.0: values[1] += 1.0 if fields >= 2 and values[1] >= 60.0: values[1] = 0.0 values[0] += 1.0 elif fields < 2 and values[1] >= 30.0: values[0] += 1.0 literal = f"{np.copysign(values[0], sign):0{pad}.0f}{sep[0]}" if fields >= 2: literal += f"{int(values[1]):02d}{sep[1]}" if fields == 3: if precision is None: last_value = f"{abs(values[2]):.8f}".rstrip("0").rstrip(".") else: last_value = f"{abs(values[2]):.{precision}f}" if len(last_value) == 1 or last_value[1] == ".": last_value = "0" + last_value literal += f"{last_value}{sep[2]}" return literal
_AngleParser
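The decimal-to-sexagesimal split used by the helpers above is just: integer degrees, then the fractional part scaled by 60 for minutes, then the remainder scaled by 60 again for seconds. A standalone sketch of that arithmetic (positive inputs only; sign handling and the astropy internals are omitted):

```python
import math

def to_sexagesimal(angle_deg):
    # Split a decimal angle into (degrees, arcminutes, arcseconds),
    # mirroring `_decimal_to_sexagesimal` above for positive inputs.
    frac, d = math.modf(angle_deg)
    mfrac, m = math.modf(frac * 60.0)
    return int(d), int(m), mfrac * 60.0

print(to_sexagesimal(10.5))      # (10, 30, 0.0)
print(to_sexagesimal(0.03125))   # (0, 1, 52.5)  -> 1' 52.5" == 112.5/3600 deg
```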
python
etianen__django-reversion
tests/test_app/tests/test_admin.py
{ "start": 10636, "end": 10747 }
class ____(VersionAdmin): inlines = (TestModelInlineAdmin, TestModelGenericInlineAdmin)
TestModelParentAdmin
python
tensorflow__tensorflow
tensorflow/python/debug/lib/debug_graph_reconstruction_test.py
{ "start": 1607, "end": 7552 }
class ____(test_util.TensorFlowTestCase): _OP_TYPE_DENYLIST = ("_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval") def _no_rewrite_session_config(self): rewriter_config = rewriter_config_pb2.RewriterConfig( dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF, pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF, min_graph_nodes=-1) graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config) return config_pb2.ConfigProto(graph_options=graph_options) def setUp(self): super(ReconstructNonDebugGraphTest, self).setUp() self._dump_dir = tempfile.mkdtemp() self._debug_url = "file://" + self._dump_dir ops.reset_default_graph() def tearDown(self): file_io.delete_recursively(self._dump_dir) super(ReconstructNonDebugGraphTest, self).tearDown() def _graphDefWithoutDenylistedNodes(self, graph_def): output_graph_def = graph_pb2.GraphDef() for node in graph_def.node: if node.op not in self._OP_TYPE_DENYLIST: new_node = output_graph_def.node.add() new_node.CopyFrom(node) if new_node.op == "Enter": # The debugger sets parallel_iterations attribute of while-loop Enter # nodes to 1 for debugging. for attr_key in new_node.attr: if attr_key == "parallel_iterations": new_node.attr[attr_key].i = 1 elif new_node.op == "Switch" or new_node.op == "Identity": # We don't check the inputs to Switch or Identity ops as their inputs # may be Send/Recv nodes. del new_node.input[:] return output_graph_def def _compareOriginalAndReconstructedGraphDefs(self, sess, fetches, feed_dict=None, expected_output=None): run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() output = sess.run(fetches, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata) if expected_output is not None: self.assertAllClose(expected_output, output) non_debug_graph_defs = run_metadata.partition_graphs debug_utils.watch_graph( run_options, sess.graph, debug_urls=self._debug_url) run_metadata = config_pb2.RunMetadata() output = sess.run(fetches, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata) if expected_output is not None: self.assertAllClose(expected_output, output) dump = debug_data.DebugDumpDir( self._dump_dir, partition_graphs=run_metadata.partition_graphs, validate=True) reconstructed = dump.reconstructed_non_debug_partition_graphs() self.assertEqual(len(non_debug_graph_defs), len(reconstructed)) for i, non_debug_graph_def in enumerate(non_debug_graph_defs): device_name = debug_graphs._infer_device_name(non_debug_graph_def) test_util.assert_equal_graph_def( self._graphDefWithoutDenylistedNodes(reconstructed[device_name]), self._graphDefWithoutDenylistedNodes(non_debug_graph_def)) # Test debug_graphs.reconstruct_non_debug_graph_def. 
reconstructed_again = ( debug_graphs.reconstruct_non_debug_graph_def( run_metadata.partition_graphs[i])) test_util.assert_equal_graph_def( self._graphDefWithoutDenylistedNodes(reconstructed_again), self._graphDefWithoutDenylistedNodes(non_debug_graph_def)) def testReconstructSimpleGraph(self): with session.Session() as sess: u = variables.Variable([12.0], name="u") v = variables.Variable([30.0], name="v") w = math_ops.add(u, v, name="w") self.evaluate(u.initializer) self.evaluate(v.initializer) self._compareOriginalAndReconstructedGraphDefs( sess, w, expected_output=[42.0]) def testReconstructGraphWithControlEdge(self): with session.Session() as sess: a = variables.Variable(10.0, name="a") with ops.control_dependencies([a]): b = math_ops.add(a, a, name="b") with ops.control_dependencies([a, b]): c = math_ops.multiply(b, b, name="c") self.evaluate(a.initializer) self._compareOriginalAndReconstructedGraphDefs( sess, c, expected_output=400.0) def testReconstructGraphWithCond(self): with session.Session(config=self._no_rewrite_session_config()) as sess: x = variables.Variable(10.0, name="x") y = variables.Variable(20.0, name="y") cond = tf_cond.cond( x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1)) self.evaluate(x.initializer) self.evaluate(y.initializer) self._compareOriginalAndReconstructedGraphDefs( sess, cond, expected_output=21.0) def testReconstructGraphWithWhileLoop(self): with session.Session(config=self._no_rewrite_session_config()) as sess: loop_body = lambda i: math_ops.add(i, 2) loop_cond = lambda i: math_ops.less(i, 16) i = constant_op.constant(10, name="i") loop = while_loop.while_loop(loop_cond, loop_body, [i]) self._compareOriginalAndReconstructedGraphDefs(sess, loop) def testReconstructGraphWithGradients(self): with session.Session(config=self._no_rewrite_session_config()) as sess: u = variables.Variable(12.0, name="u") v = variables.Variable(30.0, name="v") x = constant_op.constant(1.1, name="x") toy_loss = x * (u - v) train_op = gradient_descent.GradientDescentOptimizer( learning_rate=0.1).minimize(toy_loss, name="train_op") self.evaluate(u.initializer) self.evaluate(v.initializer) self._compareOriginalAndReconstructedGraphDefs(sess, train_op) if __name__ == "__main__": test.main()
ReconstructNonDebugGraphTest
python
dask__dask
dask/dataframe/dask_expr/_expr.py
{ "start": 59973, "end": 60224 }
class ____(ExplodeSeries): _parameters = ["frame", "column"] def _simplify_up(self, parent, dependents): if isinstance(parent, Projection): return plain_column_projection(self, parent, dependents, [self.column])
ExplodeFrame
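`ExplodeFrame` appears to be the lazy expression node behind `DataFrame.explode` in dask-expr; the user-visible semantics it projects through (one output row per list element, other columns repeated) are easiest to see with plain pandas, no dask required:

```python
import pandas as pd

df = pd.DataFrame({"id": [1, 2], "vals": [[10, 20], [30]]})
exploded = df.explode("vals")
print(exploded.to_dict("records"))
# [{'id': 1, 'vals': 10}, {'id': 1, 'vals': 20}, {'id': 2, 'vals': 30}]
```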
python
kamyu104__LeetCode-Solutions
Python/subsets.py
{ "start": 950, "end": 1321 }
class ____(object): def subsets(self, nums): """ :type nums: List[int] :rtype: List[List[int]] """ return self.subsetsRecu([], sorted(nums)) def subsetsRecu(self, cur, nums): if not nums: return [cur] return self.subsetsRecu(cur, nums[1:]) + self.subsetsRecu(cur + [nums[0]], nums[1:])
Solution3
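The recursion above branches on excluding or including the first remaining element, so `n` elements yield `2**n` subsets. A quick check, with the masked class instantiated under its target name `Solution3`:

```python
res = Solution3().subsets([1, 2, 3])
print(len(res))                    # 8 == 2**3
print(sorted(map(sorted, res)))
# [[], [1], [1, 2], [1, 2, 3], [1, 3], [2], [2, 3], [3]]
```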
python
django-mptt__django-mptt
tests/myapp/tests.py
{ "start": 58593, "end": 58748 }
class ____(TreeTestCase): def test_save_registered_model(self): g1 = Group.objects.create(name="group 1") g1.save()
RegisteredRemoteModel
python
django__django
django/contrib/gis/geos/collections.py
{ "start": 486, "end": 3472 }
class ____(GEOSGeometry): _typeid = 7 def __init__(self, *args, **kwargs): "Initialize a Geometry Collection from a sequence of Geometry objects." # Checking the arguments if len(args) == 1: # If only one geometry provided or a list of geometries is provided # in the first argument. if isinstance(args[0], (tuple, list)): init_geoms = args[0] else: init_geoms = args else: init_geoms = args # Ensuring that only the permitted geometries are allowed in this # collection this is moved to list mixin super class self._check_allowed(init_geoms) # Creating the geometry pointer array. collection = self._create_collection(len(init_geoms), init_geoms) super().__init__(collection, **kwargs) def __iter__(self): "Iterate over each Geometry in the Collection." for i in range(len(self)): yield self[i] def __len__(self): "Return the number of geometries in this Collection." return self.num_geom # ### Methods for compatibility with ListMixin ### def _create_collection(self, length, items): # Creating the geometry pointer array. geoms = (GEOM_PTR * length)( *[ # this is a little sloppy, but makes life easier # allow GEOSGeometry types (python wrappers) or pointer types capi.geom_clone(getattr(g, "ptr", g)) for g in items ] ) return capi.create_collection(self._typeid, geoms, length) def _get_single_internal(self, index): return capi.get_geomn(self.ptr, index) def _get_single_external(self, index): """ Return the Geometry from this Collection at the given index (0-based). """ # Checking the index and returning the corresponding GEOS geometry. return GEOSGeometry( capi.geom_clone(self._get_single_internal(index)), srid=self.srid ) def _set_list(self, length, items): """ Create a new collection, and destroy the contents of the previous pointer. """ prev_ptr = self.ptr srid = self.srid self.ptr = self._create_collection(length, items) if srid: self.srid = srid capi.destroy_geom(prev_ptr) _set_single = GEOSGeometry._set_single_rebuild _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild @property def kml(self): "Return the KML for this Geometry Collection." return "<MultiGeometry>%s</MultiGeometry>" % "".join(g.kml for g in self) @property def tuple(self): "Return a tuple of all the coordinates in this Geometry Collection" return tuple(g.tuple for g in self) coords = tuple # MultiPoint, MultiLineString, and MultiPolygon class definitions.
GeometryCollection
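With GEOS available, the collection behaves like a sequence of geometries. A brief usage sketch — this assumes a working GeoDjango setup with the GEOS library installed:

```python
from django.contrib.gis.geos import GeometryCollection, LineString, Point

gc = GeometryCollection(Point(0, 0), LineString((0, 0), (1, 1)))
print(len(gc))      # 2 geometries in the collection
print(gc[0].wkt)    # POINT (0 0)
print(gc.tuple)     # ((0.0, 0.0), ((0.0, 0.0), (1.0, 1.0)))
```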
python
django__django
django/db/models/functions/text.py
{ "start": 6122, "end": 6197 }
class ____(Transform): function = "LOWER" lookup_name = "lower"
Lower
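Because the class above sets `lookup_name = "lower"`, it can be used both as an annotation and, once registered, as a transform inside a lookup. A sketch against a hypothetical `Author` model with a `name` CharField (the model and queryset are assumptions; the function and registration API are standard Django):

```python
from django.db.models import CharField
from django.db.models.functions import Lower

# As an annotation on the hypothetical Author model...
authors = Author.objects.annotate(name_lower=Lower("name"))

# ...or, after registering the transform on CharField, inside a lookup:
CharField.register_lookup(Lower)
exact = Author.objects.filter(name__lower__exact="margaret atwood")
```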
python
numba__numba
numba/core/rewrites/static_getitem.py
{ "start": 139, "end": 1788 }
class ____(Rewrite): """ Rewrite IR expressions of the kind `getitem(value=arr, index=$constXX)` where `$constXX` is a known constant as `static_getitem(value=arr, index=<constant value>)`. """ def match(self, func_ir, block, typemap, calltypes): self.getitems = getitems = {} self.block = block # Detect all getitem expressions and find which ones can be # rewritten for expr in block.find_exprs(op='getitem'): if expr.op == 'getitem': try: const = func_ir.infer_constant(expr.index) except errors.ConstantInferenceError: continue getitems[expr] = const return len(getitems) > 0 def apply(self): """ Rewrite all matching getitems as static_getitems. """ new_block = self.block.copy() new_block.clear() for inst in self.block.body: if isinstance(inst, ir.Assign): expr = inst.value if expr in self.getitems: const = self.getitems[expr] new_expr = ir.Expr.static_getitem(value=expr.value, index=const, index_var=expr.index, loc=expr.loc) inst = ir.Assign(value=new_expr, target=inst.target, loc=inst.loc) new_block.append(inst) return new_block @register_rewrite('after-inference')
RewriteConstGetitems
python
pennersr__django-allauth
allauth/headless/account/inputs.py
{ "start": 4624, "end": 4701 }
class ____(ResetPasswordForm, inputs.Input): pass
RequestPasswordResetInput
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_tkcairo.py
{ "start": 771, "end": 845 }
class ____(_BackendTk): FigureCanvas = FigureCanvasTkCairo
_BackendTkCairo
python
pytorch__pytorch
torch/fx/_symbolic_trace.py
{ "start": 41822, "end": 42037 }
class ____(NamedTuple): frame_dict: Any fn_name: str orig_fn: Any new_fn: Any def revert(self): raise NotImplementedError def patch(self): raise NotImplementedError
_PatchedFn
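The NamedTuple above only records what was patched; concrete subclasses supply `patch`/`revert`. A standalone sketch of the pattern for the simplest case — replacing an entry in a namespace dict. This mirrors the idea only and is not torch.fx's actual subclass:

```python
from typing import Any, NamedTuple


class PatchedDictEntry(NamedTuple):
    frame_dict: Any
    fn_name: str
    orig_fn: Any
    new_fn: Any

    def patch(self):
        # Install the replacement into the target namespace.
        self.frame_dict[self.fn_name] = self.new_fn

    def revert(self):
        # Put the original callable back.
        self.frame_dict[self.fn_name] = self.orig_fn


ns = {"square": lambda x: x * x}
entry = PatchedDictEntry(ns, "square", ns["square"], lambda x: -1)
entry.patch()
print(ns["square"](3))   # -1 (patched)
entry.revert()
print(ns["square"](3))   # 9 (original restored)
```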
python
doocs__leetcode
solution/1100-1199/1176.Diet Plan Performance/Solution2.py
{ "start": 0, "end": 467 }
class ____: def dietPlanPerformance( self, calories: List[int], k: int, lower: int, upper: int ) -> int: def check(s): if s < lower: return -1 if s > upper: return 1 return 0 s, n = sum(calories[:k]), len(calories) ans = check(s) for i in range(k, n): s += calories[i] - calories[i - k] ans += check(s) return ans
Solution
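A worked example of the sliding window above, with the masked class instantiated under its target name `Solution`. With `k = 1` each day is its own window: two days fall below `lower`, two exceed `upper`, and one is in range, so the score nets out to 0.

```python
from typing import List  # used by the annotation in the class above

sol = Solution()
print(sol.dietPlanPerformance([1, 2, 3, 4, 5], k=1, lower=3, upper=3))  # 0
print(sol.dietPlanPerformance([3, 2], k=2, lower=0, upper=1))           # 1
```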