column      type           details
language    stringclasses  1 distinct value
repo        stringclasses  346 distinct values
path        stringlengths  length 6 to 201
class_span  dict           { "start": ..., "end": ... }
source      stringlengths  length 21 to 2.38M
target      stringlengths  length 1 to 96
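The six fields above are enough to work with each record directly. As a minimal sketch of loading and inspecting the rows (this assumes they are published as a Hugging Face dataset readable by the `datasets` library; the dataset id below is a placeholder, not the real one):

```python
from datasets import load_dataset  # assumes the `datasets` library is installed

# Hypothetical dataset id; substitute the actual repository name.
ds = load_dataset("org/masked-class-names", split="train")

row = ds[0]
print(row["language"], row["repo"], row["path"])  # e.g. python numpy__numpy benchmarks/...
print(row["class_span"])                          # {"start": ..., "end": ...}
print(row["target"])                              # the class name masked as "____" in `source`
```

The records themselves follow, one field per line in the schema order above.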
python
numpy__numpy
benchmarks/benchmarks/bench_function_base.py
{ "start": 2991, "end": 3571 }
class ____(Benchmark): def setup(self): self.d = np.arange(20000) self.e = self.d.copy() self.cond = [(self.d > 4), (self.d < 2)] self.cond_large = [(self.d > 4), (self.d < 2)] * 10 def time_select(self): np.select(self.cond, [self.d, self.e]) def time_select_larger(self): np.select(self.cond_large, ([self.d, self.e] * 10)) def memoize(f): _memoized = {} def wrapped(*args): if args not in _memoized: _memoized[args] = f(*args) return _memoized[args].copy() return f
Select
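In each record, `source` is the class definition with its name replaced by the `____` placeholder, `target` holds the original name, and `class_span` appears to give the character offsets of the class within the file at `path`. A short sketch of how a record like the one above could be reassembled; the two helpers are illustrative, not part of the dataset:

```python
def reconstruct(row: dict) -> str:
    """Put the masked class name back into the source snippet."""
    # The placeholder appears once, in the class header, so one replacement suffices.
    return row["source"].replace("____", row["target"], 1)

def slice_from_file(file_text: str, row: dict) -> str:
    """Cut the original class out of the full file using the character offsets
    (assuming class_span indexes characters, which the span lengths suggest)."""
    span = row["class_span"]
    return file_text[span["start"]:span["end"]]
```

For the record above, `reconstruct` yields `class Select(Benchmark): ...`, matching the class in numpy's benchmark file.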
python
google__pytype
pytype/abstract/abstract_utils_test.py
{ "start": 195, "end": 2181 }
class ____(test_base.UnitTest): def setUp(self): super().setUp() options = config.Options.create(python_version=self.python_version) self._ctx = test_utils.make_context(options) def test_basic(self): v1 = self._ctx.program.NewVariable( [self._ctx.convert.unsolvable], [], self._ctx.root_node ) v2 = self._ctx.program.NewVariable( [self._ctx.convert.int_type, self._ctx.convert.str_type], [], self._ctx.root_node, ) views = list(abstract_utils.get_views([v1, v2], self._ctx.root_node)) self.assertCountEqual( [ {v1: views[0][v1], v2: views[0][v2]}, {v1: views[1][v1], v2: views[1][v2]}, ], [ {v1: v1.bindings[0], v2: v2.bindings[0]}, {v1: v1.bindings[0], v2: v2.bindings[1]}, ], ) def _test_optimized(self, skip_future_value, expected_num_views): v1 = self._ctx.program.NewVariable( [self._ctx.convert.unsolvable], [], self._ctx.root_node ) v2 = self._ctx.program.NewVariable( [self._ctx.convert.int_type, self._ctx.convert.str_type], [], self._ctx.root_node, ) views = abstract_utils.get_views([v1, v2], self._ctx.root_node) skip_future = None # To count the number of views. Doesn't matter what we put in here, as long # as it's one per view. view_markers = [] while True: try: view = views.send(skip_future) except StopIteration: break # Accesses v1 only, so the v2 bindings should be deduplicated when # `skip_future` is True. view_markers.append(view[v1]) skip_future = skip_future_value self.assertEqual(len(view_markers), expected_num_views) def test_skip(self): self._test_optimized(skip_future_value=True, expected_num_views=1) def test_no_skip(self): self._test_optimized(skip_future_value=False, expected_num_views=2) if __name__ == "__main__": unittest.main()
GetViewsTest
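If the rows are used as a fill-in-the-blank task (predict the masked class name from `source`), scoring reduces to exact match against `target`. A hedged sketch of such a scorer; the whitespace normalization is an assumption, not something the dataset prescribes:

```python
def exact_match(predictions: list[str], references: list[str]) -> float:
    """Fraction of predicted class names that equal the reference `target` exactly."""
    assert len(predictions) == len(references)
    hits = sum(p.strip() == r.strip() for p, r in zip(predictions, references))
    return hits / len(references) if references else 0.0

# e.g. exact_match(["GetViewsTest"], ["GetViewsTest"]) == 1.0
```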
python
tensorflow__tensorflow
tensorflow/python/framework/test_combinations.py
{ "start": 822, "end": 2385 }
class ____(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine(mode=["graph", "eager"], optimizer=[AdamOptimizer(), GradientDescentOptimizer()])) def testOptimizer(self, optimizer): ... f(optimizer)... This will run `testOptimizer` 4 times with the specified optimizers: 2 in Eager and 2 in Graph mode. The test is going to accept the same parameters as the ones used in `combine()`. The parameters need to match by name between the `combine()` call and the test signature. It is necessary to accept all parameters. See `OptionalParameter` for a way to implement optional parameters. `combine()` function is available for creating a cross product of various options. `times()` function exists for creating a product of N `combine()`-ed results. The execution of generated tests can be customized in a number of ways: - The test can be skipped if it is not running in the correct environment. - The arguments that are passed to the test can be additionally transformed. - The test can be run with specific Python context managers. These behaviors can be customized by providing instances of `TestCombination` to `generate()`. """ from collections import OrderedDict import contextlib import re import types import unittest from absl.testing import parameterized from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import tf_export @tf_export("__internal__.test.combinations.TestCombination", v1=[])
AdditionExample
python
scrapy__scrapy
tests/test_commands.py
{ "start": 532, "end": 704 }
class ____(ScrapyCommand): def short_desc(self) -> str: return "" def run(self, args: list[str], opts: argparse.Namespace) -> None: pass
EmptyCommand
python
numba__llvmlite
llvmlite/ir/instructions.py
{ "start": 20695, "end": 21666 }
class ____(Instruction): def __init__(self, parent, typ, name, flags=()): super(PhiInstr, self).__init__(parent, typ, "phi", (), name=name, flags=flags) self.incomings = [] def descr(self, buf): incs = ', '.join('[{0}, {1}]'.format(v.get_reference(), b.get_reference()) for v, b in self.incomings) buf.append("phi {0} {1} {2} {3}\n".format( ' '.join(self.flags), self.type, incs, self._stringify_metadata(leading_comma=True), )) def add_incoming(self, value, block): assert isinstance(block, Block) self.incomings.append((value, block)) def replace_usage(self, old, new): self.incomings = [((new if val is old else val), blk) for (val, blk) in self.incomings]
PhiInstr
python
tornadoweb__tornado
tornado/websocket.py
{ "start": 26055, "end": 27760 }
class ____: def __init__( self, persistent: bool, max_wbits: Optional[int], compression_options: Optional[Dict[str, Any]] = None, ) -> None: if max_wbits is None: max_wbits = zlib.MAX_WBITS # There is no symbolic constant for the minimum wbits value. if not (8 <= max_wbits <= zlib.MAX_WBITS): raise ValueError( "Invalid max_wbits value %r; allowed range 8-%d", max_wbits, zlib.MAX_WBITS, ) self._max_wbits = max_wbits if ( compression_options is None or "compression_level" not in compression_options ): self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL else: self._compression_level = compression_options["compression_level"] if compression_options is None or "mem_level" not in compression_options: self._mem_level = 8 else: self._mem_level = compression_options["mem_level"] if persistent: self._compressor = self._create_compressor() # type: Optional[_Compressor] else: self._compressor = None def _create_compressor(self) -> "_Compressor": return zlib.compressobj( self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level ) def compress(self, data: bytes) -> bytes: compressor = self._compressor or self._create_compressor() data = compressor.compress(data) + compressor.flush(zlib.Z_SYNC_FLUSH) assert data.endswith(b"\x00\x00\xff\xff") return data[:-4]
_PerMessageDeflateCompressor
python
getsentry__sentry
src/sentry/issues/attributes.py
{ "start": 1095, "end": 1215 }
class ____(Enum): CREATED = "created" UPDATED = "updated" DELETED = "deleted" @dataclasses.dataclass
Operation
python
EpistasisLab__tpot
tpot/builtin_modules/genetic_encoders.py
{ "start": 388, "end": 1664 }
class ____(TransformerMixin, BaseEstimator ): """This class contains the function definition for encoding the input features as a Dominant genetic model. The encoding used is AA(0)->1, Aa(1)->1, aa(2)->0. """ def fit(self, X, y=None): """Do nothing and return the estimator unchanged. Dummy function to fit in with the sklearn API and hence work in pipelines. Parameters ---------- X : array-like """ return self def transform(self, X, y=None): """Transform the data by applying the Dominant encoding. Parameters ---------- X : numpy ndarray, {n_samples, n_components} New data, where n_samples is the number of samples (number of individuals) and n_components is the number of components (number of features). y : None Unused Returns ------- X_transformed: numpy ndarray, {n_samples, n_components} The encoded feature set """ X = check_array(X) map = {0: 1, 1: 1, 2: 0} mapping_function = np.vectorize(lambda i: map[i] if i in map else i) X_transformed = mapping_function(X) return X_transformed
DominantEncoder
python
scikit-learn__scikit-learn
sklearn/utils/tests/test_estimator_checks.py
{ "start": 16467, "end": 16750 }
class ____(LogisticRegression): def decision_function(self, X): return super().decision_function(X) + 1 def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.classifier_tags.poor_score = True return tags
PoorScoreLogisticRegression
python
hynek__structlog
tests/test_stdlib.py
{ "start": 12511, "end": 14510 }
class ____: def test_formats_tuple(self): """ Positional arguments as simple types are rendered. """ formatter = PositionalArgumentsFormatter() event_dict = formatter( None, None, {"event": "%d %d %s", "positional_args": (1, 2, "test")}, ) assert "1 2 test" == event_dict["event"] assert "positional_args" not in event_dict def test_formats_dict(self): """ Positional arguments as dict are rendered. """ formatter = PositionalArgumentsFormatter() event_dict = formatter( None, None, {"event": "%(foo)s bar", "positional_args": ({"foo": "bar"},)}, ) assert "bar bar" == event_dict["event"] assert "positional_args" not in event_dict def test_positional_args_retained(self): """ Positional arguments are retained if remove_positional_args argument is set to False. """ formatter = PositionalArgumentsFormatter(remove_positional_args=False) positional_args = (1, 2, "test") event_dict = formatter( None, None, {"event": "%d %d %s", "positional_args": positional_args}, ) assert "positional_args" in event_dict assert positional_args == event_dict["positional_args"] def test_nop_no_args(self): """ If no positional args are passed, nothing happens. """ formatter = PositionalArgumentsFormatter() assert {} == formatter(None, None, {}) def test_args_removed_if_empty(self): """ If remove_positional_args is True and positional_args is (), still remove them. Regression test for https://github.com/hynek/structlog/issues/82. """ formatter = PositionalArgumentsFormatter() assert {} == formatter(None, None, {"positional_args": ()})
TestPositionalArgumentsFormatter
python
getsentry__sentry
tests/sentry/api/bases/test_organization.py
{ "start": 23156, "end": 24278 }
class ____(BaseOrganizationEndpointTest): def setUp(self) -> None: self.project = self.create_project(organization=self.org) self.env_1 = self.create_environment(project=self.project) self.env_2 = self.create_environment(project=self.project) def run_test(self, expected_envs, env_names=None): request_args = {} if env_names: request_args["environment"] = env_names result = self.endpoint.get_environments(self.build_request(**request_args), self.org) assert {e.name for e in expected_envs} == {e.name for e in result} def test_no_params(self) -> None: self.run_test([]) def test_valid_params(self) -> None: self.run_test([self.env_1], [self.env_1.name]) self.run_test([self.env_1, self.env_2], [self.env_1.name, self.env_2.name]) def test_invalid_params(self) -> None: with pytest.raises(ResourceDoesNotExist): self.run_test([], ["fake"]) with pytest.raises(ResourceDoesNotExist): self.run_test([self.env_1, self.env_2], ["fake", self.env_2.name])
GetEnvironmentsTest
python
bokeh__bokeh
src/bokeh/io/webdriver.py
{ "start": 5397, "end": 8059 }
class ____: reuse: bool kind: DriverKind | None current: WebDriver | None _drivers: set[WebDriver] def __init__(self, *, kind: DriverKind | None = None, reuse: bool = True) -> None: self.kind = kind self.reuse = reuse self.current = None self._drivers = set() def terminate(self, driver: WebDriver) -> None: self._drivers.remove(driver) driver.quit() def reset(self) -> None: if self.current is not None: self.terminate(self.current) self.current = None def get(self, scale_factor: float = 1) -> WebDriver: if not self.reuse or self.current is None or not scale_factor_less_than_web_driver_device_pixel_ratio( scale_factor, self.current): self.reset() self.current = self.create(scale_factor=scale_factor) return self.current def create(self, kind: DriverKind | None = None, scale_factor: float = 1) -> WebDriver: driver = self._create(kind, scale_factor=scale_factor) self._drivers.add(driver) return driver def _create(self, kind: DriverKind | None, scale_factor: float = 1) -> WebDriver: driver_kind = kind or self.kind if driver_kind is None: driver = _try_create_chromium_webdriver(scale_factor=scale_factor) if driver is not None: self.kind = "chromium" return driver driver = _try_create_firefox_webdriver(scale_factor=scale_factor) if driver is not None: self.kind = "firefox" return driver raise RuntimeError("Neither firefox and geckodriver nor a variant of chromium browser and " \ "chromedriver are available on system PATH. You can install the former " \ "with 'conda install -c conda-forge firefox geckodriver'.") elif driver_kind == "chromium": return create_chromium_webdriver(scale_factor=scale_factor) elif driver_kind == "firefox": return create_firefox_webdriver(scale_factor=scale_factor) else: raise ValueError(f"'{driver_kind}' is not a recognized webdriver kind") def cleanup(self) -> None: self.reset() for driver in list(self._drivers): self.terminate(driver) #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- webdriver_control = _WebdriverState() atexit.register(lambda: webdriver_control.cleanup())
_WebdriverState
python
PrefectHQ__prefect
src/prefect/server/database/orm_models.py
{ "start": 32557, "end": 32840 }
class ____(Base): tag: Mapped[str] concurrency_limit: Mapped[int] active_slots: Mapped[list[str]] = mapped_column( JSON, server_default="[]", default=list ) __table_args__: Any = (sa.Index("uq_concurrency_limit__tag", "tag", unique=True),)
ConcurrencyLimit
python
astropy__astropy
astropy/utils/masked/tests/test_function_helpers.py
{ "start": 2079, "end": 2362 }
class ____(MaskedArraySetup): def check(self, func, *args, **kwargs): o = func(self.ma, *args, **kwargs) expected = func(self.a, *args, **kwargs) assert_array_equal(o.unmasked, expected) assert_array_equal(o.mask, self.mask_a)
InvariantMaskTestSetup
python
pytorch__pytorch
torch/utils/_filelock.py
{ "start": 157, "end": 1530 }
class ____(base_FileLock): """ This behaves like a normal file lock. However, it adds waitcounters for acquiring and releasing the filelock as well as for the critical region within it. pytorch.filelock.enter - While we're acquiring the filelock. pytorch.filelock.region - While we're holding the filelock and doing work. pytorch.filelock.exit - While we're releasing the filelock. """ def __enter__(self) -> Self: self.region_counter = _WaitCounter("pytorch.filelock.region").guard() with _WaitCounter("pytorch.filelock.enter").guard(): result = super().__enter__() self.region_counter.__enter__() return result def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: self.region_counter.__exit__() with _WaitCounter("pytorch.filelock.exit").guard(): # Returns nothing per # https://github.com/tox-dev/filelock/blob/57f488ff8fdc2193572efe102408fb63cfefe4e4/src/filelock/_api.py#L379 super().__exit__(exc_type, exc_value, traceback) # Returns nothing per # https://github.com/pytorch/pytorch/blob/0f6bfc58a2cfb7a5c052bea618ab62becaf5c912/torch/csrc/monitor/python_init.cpp#L315 return None
FileLock
python
pytorch__pytorch
test/distributed/tensor/test_init.py
{ "start": 464, "end": 1307 }
class ____(DTensorTestBase): def _run_init_op(self, init_op, *args, **kwargs): device_mesh = self.build_device_mesh() shard_spec = [Shard(0)] input_size = (8, 4) input_tensor = torch.randn(*input_size, device=self.device_type) dtensor = DTensor.from_local(input_tensor, device_mesh, shard_spec) local_tensor_clone = torch.clone(input_tensor) torch.manual_seed(self.rank) local_tensor_clone = init_op(local_tensor_clone, *args, **kwargs) torch.manual_seed(self.rank) dtensor = init_op(dtensor, *args, **kwargs) self.assertEqual(local_tensor_clone, dtensor.to_local()) @with_comms def test_init_ops(self): # NOTE: random init tests are moved to test_random_ops.py self._run_init_op(torch.nn.init.constant_, 2.4)
DTensorInitOpsTest
python
automl__auto-sklearn
autosklearn/metalearning/metafeatures/metafeatures.py
{ "start": 26086, "end": 27864 }
class ____(MetaFeature): def _calculate(self, X, y, logger, feat_type): import sklearn.discriminant_analysis if type(y) in ("binary", "multiclass"): kf = sklearn.model_selection.StratifiedKFold(n_splits=5) else: kf = sklearn.model_selection.KFold(n_splits=5) accuracy = 0.0 try: for train, test in kf.split(X, y): lda = sklearn.discriminant_analysis.LinearDiscriminantAnalysis() if len(y.shape) == 1 or y.shape[1] == 1: lda.fit( X.iloc[train] if hasattr(X, "iloc") else X[train], y.iloc[train] if hasattr(y, "iloc") else y[train], ) else: lda = OneVsRestClassifier(lda) lda.fit( X.iloc[train] if hasattr(X, "iloc") else X[train], y.iloc[train] if hasattr(y, "iloc") else y[train], ) predictions = lda.predict( X.iloc[test] if hasattr(X, "iloc") else X[test], ) accuracy += sklearn.metrics.accuracy_score( predictions, y.iloc[test] if hasattr(y, "iloc") else y[test], ) return accuracy / 5 except scipy.linalg.LinAlgError as e: self.logger.warning("LDA failed: %s Returned 0 instead!" % e) return np.NaN except ValueError as e: self.logger.warning("LDA failed: %s Returned 0 instead!" % e) return np.NaN def _calculate_sparse(self, X, y, logger, feat_type): return np.NaN # Naive Bayes @metafeatures.define("LandmarkNaiveBayes")
LandmarkLDA
python
getsentry__sentry
src/sentry/services/filestore/s3.py
{ "start": 9060, "end": 25088 }
class ____(Storage): """ Amazon Simple Storage Service using Boto3 This storage backend supports opening files in read or write mode and supports streaming(buffering) data in chunks to S3 when writing. """ # XXX: note that this file reads entirely into memory before the first # read happens. This means that it should only be used for small # files (eg: see how sentry.models.file works with it through the # ChunkedFileBlobIndexWrapper. connection_class = staticmethod(resource) connection_service_name = "s3" default_content_type = "application/octet-stream" connection_response_error = ClientError file_class = S3Boto3StorageFile # If config provided in init, signature_version and addressing_style settings/args are ignored. config: Config | None = None # used for looking up the access and secret key from env vars access_key_names = ["AWS_S3_ACCESS_KEY_ID", "AWS_ACCESS_KEY_ID"] secret_key_names = ["AWS_S3_SECRET_ACCESS_KEY", "AWS_SECRET_ACCESS_KEY"] access_key: str | None = None secret_key: str | None = None file_overwrite = True object_parameters: dict[str, str] = {} bucket_name: str | None = None auto_create_bucket = False default_acl = "public-read" bucket_acl = default_acl querystring_auth = True querystring_expire = 3600 signature_version: str | None = None reduced_redundancy = False location = "" encryption = False custom_domain: str | None = None addressing_style: str | None = None secure_urls = True file_name_charset = "utf-8" gzip = False preload_metadata = False gzip_content_types = ( "text/css", "text/javascript", "application/javascript", "application/x-javascript", "image/svg+xml", ) url_protocol = "https:" endpoint_url: str | None = None region_name: str | None = None use_ssl = True def __init__(self, acl=None, bucket=None, **settings): # check if some of the settings we've provided as class attributes # need to be overwritten with values passed in here for name, value in settings.items(): if hasattr(self, name): setattr(self, name, value) # For backward-compatibility of old differing parameter names if acl is not None: self.default_acl = acl if bucket is not None: self.bucket_name = bucket self.location = (self.location or "").lstrip("/") # Backward-compatibility: given the anteriority of the SECURE_URL setting # we fall back to https if specified in order to avoid the construction # of unsecure urls. if self.secure_urls: self.url_protocol = "https:" self._entries = {} self._bucket = None self._connection = None if not self.access_key and not self.secret_key: self.access_key, self.secret_key = self._get_access_keys() if not self.config: self.config = Config( s3={"addressing_style": self.addressing_style}, signature_version=self.signature_version, ) @property def connection(self): # TODO: Support host, port like in s3boto # Note that proxies are handled by environment variables that the underlying # urllib/requests libraries read. 
See https://github.com/boto/boto3/issues/338 # and http://docs.python-requests.org/en/latest/user/advanced/#proxies if self._connection is None: # If this is running on an ec2 instance, allow boto to connect using an IAM role # instead of explicitly provided an access key and secret # http://boto3.readthedocs.io/en/latest/guide/configuration.html#iam-role kwargs = {} if self.access_key and self.secret_key: kwargs["aws_access_key_id"] = self.access_key kwargs["aws_secret_access_key"] = self.secret_key self._connection = self.connection_class( self.connection_service_name, region_name=self.region_name, use_ssl=self.use_ssl, endpoint_url=self.endpoint_url, config=self.config, **kwargs, ) return self._connection @property def bucket(self): """ Get the current bucket. If there is no current bucket object create it. """ if self._bucket is None: self._bucket = self._get_or_create_bucket(self.bucket_name) return self._bucket @property def entries(self): """ Get the locally cached files for the bucket. """ if self.preload_metadata and not self._entries: self._entries = { self._decode_name(entry.key): entry for entry in self.bucket.objects.filter(Prefix=self.location) } return self._entries def _get_access_keys(self): """ Gets the access keys to use when accessing S3. If none are provided to the class in the constructor or in the settings then get them from the environment variables. """ def lookup_env(names): for name in names: value = os.environ.get(name) if value: return value access_key = self.access_key or lookup_env(self.access_key_names) secret_key = self.secret_key or lookup_env(self.secret_key_names) return access_key, secret_key def _get_or_create_bucket(self, name): """ Retrieves a bucket if it exists, otherwise creates it. """ bucket = self.connection.Bucket(name) if self.auto_create_bucket: try: # Directly call head_bucket instead of bucket.load() because head_bucket() # fails on wrong region, while bucket.load() does not. bucket.meta.client.head_bucket(Bucket=name) except self.connection_response_error as err: if err.response["ResponseMetadata"]["HTTPStatusCode"] == 301: raise ImproperlyConfigured( "Bucket %s exists, but in a different " "region than we are connecting to. Set " "the region to connect to by setting " "AWS_S3_REGION_NAME to the correct region." % name ) else: raise ImproperlyConfigured( "Bucket %s does not exist. Buckets " "can be automatically created by " "setting AWS_AUTO_CREATE_BUCKET to " "``True``." % name ) return bucket def _clean_name(self, name): """ Cleans the name so that Windows style paths work """ # Normalize Windows style paths clean_name = posixpath.normpath(name).replace("\\", "/") # os.path.normpath() can strip trailing slashes so we implement # a workaround here. if name.endswith("/") and not clean_name.endswith("/"): # Add a trailing slash as it was stripped. return clean_name + "/" else: return clean_name def _normalize_name(self, name): """ Normalizes the name so that paths like /path/to/ignored/../something.txt work. We check to make sure that the path pointed to is not outside the directory specified by the LOCATION setting. """ try: return safe_join(self.location, name) except ValueError: raise SuspiciousOperation("Attempted access to '%s' denied." 
% name) def _encode_name(self, name): return smart_str(name, encoding=self.file_name_charset) def _decode_name(self, name): return force_str(name, encoding=self.file_name_charset) def _compress_content(self, content): """Gzip a given string content.""" zbuf = BytesIO() zfile = GzipFile(mode="wb", compresslevel=6, fileobj=zbuf) try: zfile.write(force_bytes(content.read())) finally: zfile.close() zbuf.seek(0) # Boto 2 returned the InMemoryUploadedFile with the file pointer replaced, # but Boto 3 seems to have issues with that. No need for fp.name in Boto3 # so just returning the BytesIO directly return zbuf def _open(self, name, mode="rb"): name = self._normalize_name(self._clean_name(name)) try: f = self.file_class(name, mode, self) except self.connection_response_error as err: if err.response["ResponseMetadata"]["HTTPStatusCode"] == 404: raise OSError("File does not exist: %s" % name) raise # Let it bubble up if it was some other error return f def _save(self, name, content): with metrics.timer("filestore.save", instance="s3"): cleaned_name = self._clean_name(name) name = self._normalize_name(cleaned_name) parameters = self.object_parameters.copy() content_type = getattr( content, "content_type", mimetypes.guess_type(name)[0] or self.default_content_type ) # setting the content_type in the key object is not enough. parameters.update({"ContentType": content_type}) if self.gzip and content_type in self.gzip_content_types: content = self._compress_content(content) parameters.update({"ContentEncoding": "gzip"}) encoded_name = self._encode_name(name) obj = self.bucket.Object(encoded_name) if self.preload_metadata: self._entries[encoded_name] = obj self._save_content(obj, content, parameters=parameters) # Note: In boto3, after a put, last_modified is automatically reloaded # the next time it is accessed; no need to specifically reload it. 
return cleaned_name def _save_content(self, obj, content, parameters): # only pass backwards incompatible arguments if they vary from the default put_parameters = parameters.copy() if parameters else {} if self.encryption: put_parameters["ServerSideEncryption"] = "AES256" if self.reduced_redundancy: put_parameters["StorageClass"] = "REDUCED_REDUNDANCY" if self.default_acl: put_parameters["ACL"] = self.default_acl content.seek(0, os.SEEK_SET) obj.upload_fileobj(content, ExtraArgs=put_parameters) def delete(self, name): name = self._normalize_name(self._clean_name(name)) self.bucket.Object(self._encode_name(name)).delete() def exists(self, name): if not name: try: self.bucket return True except ImproperlyConfigured: return False name = self._normalize_name(self._clean_name(name)) if self.entries: return name in self.entries obj = self.bucket.Object(self._encode_name(name)) try: obj.load() return True except self.connection_response_error: return False def listdir(self, name): name = self._normalize_name(self._clean_name(name)) # for the bucket.objects.filter and logic below name needs to end in / # But for the root path "" we leave it as an empty string if name and not name.endswith("/"): name += "/" files = [] dirs = set() base_parts = name.split("/")[:-1] for item in self.bucket.objects.filter(Prefix=self._encode_name(name)): parts = item.key.split("/") parts = parts[len(base_parts) :] if len(parts) == 1: # File files.append(parts[0]) elif len(parts) > 1: # Directory dirs.add(parts[0]) return list(dirs), files def size(self, name): name = self._normalize_name(self._clean_name(name)) if self.entries: entry = self.entries.get(name) if entry: return entry.content_length return 0 return self.bucket.Object(self._encode_name(name)).content_length def get_modified_time(self, name): """ Returns an (aware) datetime object containing the last modified time if USE_TZ is True, otherwise returns a naive datetime in the local timezone. """ name = self._normalize_name(self._clean_name(name)) entry = self.entries.get(name) # only call self.bucket.Object() if the key is not found # in the preloaded metadata. if entry is None: entry = self.bucket.Object(self._encode_name(name)) if settings.USE_TZ: # boto3 returns TZ aware timestamps return entry.last_modified else: return localtime(entry.last_modified).replace(tzinfo=None) def modified_time(self, name): """Returns a naive datetime object containing the last modified time.""" return localtime(self.get_modified_time(name)).replace(tzinfo=None) def _strip_signing_parameters(self, url): # Boto3 does not currently support generating URLs that are unsigned. Instead we # take the signed URLs and strip any querystring params related to signing and expiration. # Note that this may end up with URLs that are still invalid, especially if params are # passed in that only work with signed URLs, e.g. response header params. # The code attempts to strip all query parameters that match names of known parameters # from v2 and v4 signatures, regardless of the actual signature version used. 
split_url = urlparse.urlsplit(url) qs = urlparse.parse_qsl(split_url.query, keep_blank_values=True) blacklist = { "x-amz-algorithm", "x-amz-credential", "x-amz-date", "x-amz-expires", "x-amz-signedheaders", "x-amz-signature", "x-amz-security-token", "awsaccesskeyid", "expires", "signature", } filtered_qs = ((key, val) for key, val in qs if key.lower() not in blacklist) # Note: Parameters that did not have a value in the original query string will have # an '=' sign appended to it, e.g ?foo&bar becomes ?foo=&bar= joined_qs = ("=".join(keyval) for keyval in filtered_qs) split_url = split_url._replace(query="&".join(joined_qs)) return split_url.geturl() def url(self, name, parameters=None, expire=None): # Preserve the trailing slash after normalizing the path. # TODO: Handle force_http=not self.secure_urls like in s3boto name = self._normalize_name(self._clean_name(name)) if self.custom_domain: return f"{self.url_protocol}//{self.custom_domain}/{filepath_to_uri(name)}" if expire is None: expire = self.querystring_expire params = parameters.copy() if parameters else {} params["Bucket"] = self.bucket.name params["Key"] = self._encode_name(name) url = self.bucket.meta.client.generate_presigned_url( "get_object", Params=params, ExpiresIn=expire ) if self.querystring_auth: return url return self._strip_signing_parameters(url) def get_available_name(self, name, max_length=None): """Overwrite existing file with the same name.""" if self.file_overwrite: name = self._clean_name(name) return name return super().get_available_name(name, max_length)
S3Boto3Storage
python
pytorch__pytorch
test/dynamo/test_repros.py
{ "start": 18422, "end": 20397 }
class ____(torch.nn.Module): # Highly simplified version of maml.meta.Meta.finetuning def __init__(self) -> None: super().__init__() self.net = FakeMamlInner() self.update_step_test = 10 self.update_lr = 0.4 def forward(self, x_spt, y_spt, x_qry, y_qry): querysz = x_qry.size(0) corrects = [0 for _ in range(self.update_step_test + 1)] # in order to not ruin the state of running_mean/variance and bn_weight/bias # we finetuning on the copied model instead of self.net net = deepcopy(self.net) # 1. run the i-th task and compute loss for k=0 logits = net(x_spt) loss = F.cross_entropy(logits, y_spt) grad = torch.autograd.grad(loss, net.parameters()) fast_weights = [ p[1] - self.update_lr * p[0] for p in zip(grad, net.parameters()) ] # this is the loss and accuracy before first update with torch.no_grad(): # [setsz, nway] logits_q = net(x_qry, net.parameters(), bn_training=True) # [setsz] pred_q = F.softmax(logits_q, dim=1).argmax(dim=1) # scalar correct = torch.eq(pred_q, y_qry).sum().item() corrects[0] = corrects[0] + correct # this is the loss and accuracy after the first update with torch.no_grad(): # [setsz, nway] logits_q = net(x_qry, fast_weights, bn_training=True) # [setsz] pred_q = F.softmax(logits_q, dim=1).argmax(dim=1) # scalar correct = torch.eq(pred_q, y_qry).sum().item() corrects[1] = corrects[1] + correct del net accs = torch.tensor(corrects) / querysz return accs def softmax_backward_data(parent, grad_output, output, dim, self): from torch import _softmax_backward_data return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)
PartialMaml
python
ansible__ansible
lib/ansible/executor/interpreter_discovery.py
{ "start": 483, "end": 3632 }
class ____(Exception): def __init__(self, message, interpreter_name, discovery_mode): super(InterpreterDiscoveryRequiredError, self).__init__(message) self.interpreter_name = interpreter_name self.discovery_mode = discovery_mode def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): """Probe the target host for a Python interpreter from the `INTERPRETER_PYTHON_FALLBACK` list, returning the first found or `/usr/bin/python3` if none.""" host = task_vars.get('inventory_hostname', 'unknown') res = None found_interpreters = [_FALLBACK_INTERPRETER] # fallback value is_silent = discovery_mode.endswith('_silent') try: bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars) display.vvv(msg=f"Attempting {interpreter_name} interpreter discovery.", host=host) # not all command -v impls accept a list of commands, so we have to call it once per python command_list = ["command -v '%s'" % py for py in bootstrap_python_list] shell_bootstrap = "echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list)) # FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do? res = action._low_level_execute_command(shell_bootstrap, sudoable=False) raw_stdout = res.get('stdout', u'') match = foundre.match(raw_stdout) if not match: display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host) raise ValueError('unexpected output from Python interpreter discovery') found_interpreters = [interp.strip() for interp in match.groups()[0].splitlines() if interp.startswith('/')] display.debug(u"found interpreters: {0}".format(found_interpreters), host=host) if not found_interpreters: if not is_silent: display.warning(msg=f'No python interpreters found for host {host!r} (tried {bootstrap_python_list!r}).') # this is lame, but returning None or throwing an exception is uglier return _FALLBACK_INTERPRETER except AnsibleError: raise except Exception as ex: if not is_silent: display.error_as_warning(msg=f'Unhandled error in Python interpreter discovery for host {host!r}.', exception=ex) if res and res.get('stderr'): # the current ssh plugin implementation always has stderr, making coverage of the false case difficult display.vvv(msg=f"Interpreter discovery remote stderr:\n{res.get('stderr')}", host=host) if not is_silent: display.warning( msg=( f"Host {host!r} is using the discovered Python interpreter at {found_interpreters[0]!r}, " "but future installation of another Python interpreter could cause a different interpreter to be discovered." ), help_text=f"See {get_versioned_doclink('reference_appendices/interpreter_discovery.html')} for more information.", ) return found_interpreters[0]
InterpreterDiscoveryRequiredError
python
davidhalter__jedi
jedi/inference/value/instance.py
{ "start": 1194, "end": 1534 }
class ____(ParamName): def __init__(self, instance, function_value, tree_name): super().__init__( function_value, tree_name, arguments=None) self._instance = instance def infer(self): return ValueSet([self._instance]) def matches_signature(self): return True
InstanceExecutedParamName
python
pandas-dev__pandas
asv_bench/benchmarks/algos/isin.py
{ "start": 9062, "end": 9274 }
class ____: def setup(self): t = tuple(range(1000)) self.series = Series([t] * 1000) self.values = [t] def time_isin(self): self.series.isin(self.values)
IsInWithLongTupples
python
Pylons__pyramid
tests/test_testing.py
{ "start": 12026, "end": 14880 }
class ____(unittest.TestCase): def _callFUT(self, **kw): from pyramid.testing import setUp return setUp(**kw) def tearDown(self): from pyramid.threadlocal import manager manager.clear() getSiteManager.reset() def _assertSMHook(self, hook): result = getSiteManager.sethook(None) self.assertEqual(result, hook) def test_it_defaults(self): from pyramid.registry import Registry from pyramid.threadlocal import get_current_registry, manager old = True manager.push(old) config = self._callFUT() current = manager.get() self.assertFalse(current is old) self.assertEqual(config.registry, current['registry']) self.assertEqual(current['registry'].__class__, Registry) self.assertEqual(current['request'], None) self.assertEqual(config.package.__name__, 'tests') self._assertSMHook(get_current_registry) def test_it_with_registry(self): from pyramid.registry import Registry from pyramid.threadlocal import manager registry = Registry() self._callFUT(registry=registry) current = manager.get() self.assertEqual(current['registry'], registry) def test_it_with_request(self): from pyramid.threadlocal import manager request = object() self._callFUT(request=request) current = manager.get() self.assertEqual(current['request'], request) def test_it_with_package(self): config = self._callFUT(package='pyramid') self.assertEqual(config.package.__name__, 'pyramid') def test_it_with_hook_zca_false(self): from pyramid.registry import Registry registry = Registry() self._callFUT(registry=registry, hook_zca=False) sm = getSiteManager() self.assertFalse(sm is registry) def test_it_with_settings_passed_explicit_registry(self): from pyramid.registry import Registry registry = Registry() self._callFUT(registry=registry, hook_zca=False, settings=dict(a=1)) self.assertEqual(registry.settings['a'], 1) def test_it_with_settings_passed_implicit_registry(self): config = self._callFUT(hook_zca=False, settings=dict(a=1)) self.assertEqual(config.registry.settings['a'], 1) def test_it_with_unpatched_registry(self): from zope.interface.registry import Components class DummyRegistry(Components, dict): pass dummy_registry = DummyRegistry() config = self._callFUT( registry=dummy_registry, hook_zca=False, settings=dict(a=1) ) self.assertEqual(config.registry.settings['a'], 1) dummy = DummyEvent() self.assertIs(dummy_registry.queryAdapterOrSelf(dummy, IDummy), dummy)
Test_setUp
python
keras-team__keras
keras/src/ops/image.py
{ "start": 14544, "end": 20209 }
class ____(Operation): def __init__( self, interpolation="bilinear", fill_mode="constant", fill_value=0, data_format=None, *, name=None, ): super().__init__(name=name) self.interpolation = interpolation self.fill_mode = fill_mode self.fill_value = fill_value self.data_format = backend.standardize_data_format(data_format) def call(self, images, transform): return backend.image.affine_transform( images, transform, interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, data_format=self.data_format, ) def compute_output_spec(self, images, transform): if len(images.shape) not in (3, 4): raise ValueError( "Invalid images rank: expected rank 3 (single image) " "or rank 4 (batch of images). Received input with shape: " f"images.shape={images.shape}" ) if len(transform.shape) not in (1, 2): raise ValueError( "Invalid transform rank: expected rank 1 (single transform) " "or rank 2 (batch of transforms). Received input with shape: " f"transform.shape={transform.shape}" ) return KerasTensor(images.shape, dtype=images.dtype) @keras_export("keras.ops.image.affine_transform") def affine_transform( images, transform, interpolation="bilinear", fill_mode="constant", fill_value=0, data_format=None, ): """Applies the given transform(s) to the image(s). Args: images: Input image or batch of images. Must be 3D or 4D. transform: Projective transform matrix/matrices. A vector of length 8 or tensor of size N x 8. If one row of transform is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the output point `(x, y)` to a transformed input point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where `k = c0 x + c1 y + 1`. The transform is inverted compared to the transform mapping input points to output points. Note that gradients are not backpropagated into transformation parameters. Note that `c0` and `c1` are only effective when using TensorFlow backend and will be considered as `0` when using other backends. interpolation: Interpolation method. Available methods are `"nearest"`, and `"bilinear"`. Defaults to `"bilinear"`. fill_mode: Points outside the boundaries of the input are filled according to the given mode. Available methods are `"constant"`, `"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`. - `"reflect"`: `(d c b a | a b c d | d c b a)` The input is extended by reflecting about the edge of the last pixel. - `"constant"`: `(k k k k | a b c d | k k k k)` The input is extended by filling all values beyond the edge with the same constant value k specified by `fill_value`. - `"wrap"`: `(a b c d | a b c d | a b c d)` The input is extended by wrapping around to the opposite edge. - `"nearest"`: `(a a a a | a b c d | d d d d)` The input is extended by the nearest pixel. fill_value: Value used for points outside the boundaries of the input if `fill_mode="constant"`. Defaults to `0`. data_format: A string specifying the data format of the input tensor. It can be either `"channels_last"` or `"channels_first"`. `"channels_last"` corresponds to inputs with shape `(batch, height, width, channels)`, while `"channels_first"` corresponds to inputs with shape `(batch, channels, height, width)`. If not specified, the value will default to `keras.config.image_data_format`. Returns: Applied affine transform image or batch of images. Examples: >>> x = np.random.random((2, 64, 80, 3)) # batch of 2 RGB images >>> transform = np.array( ... [ ... [1.5, 0, -20, 0, 1.5, -16, 0, 0], # zoom ... [1, 0, -20, 0, 1, -16, 0, 0], # translation ... ] ... 
) >>> y = keras.ops.image.affine_transform(x, transform) >>> y.shape (2, 64, 80, 3) >>> x = np.random.random((64, 80, 3)) # single RGB image >>> transform = np.array([1.0, 0.5, -20, 0.5, 1.0, -16, 0, 0]) # shear >>> y = keras.ops.image.affine_transform(x, transform) >>> y.shape (64, 80, 3) >>> x = np.random.random((2, 3, 64, 80)) # batch of 2 RGB images >>> transform = np.array( ... [ ... [1.5, 0, -20, 0, 1.5, -16, 0, 0], # zoom ... [1, 0, -20, 0, 1, -16, 0, 0], # translation ... ] ... ) >>> y = keras.ops.image.affine_transform(x, transform, ... data_format="channels_first") >>> y.shape (2, 3, 64, 80) """ if any_symbolic_tensors((images, transform)): return AffineTransform( interpolation=interpolation, fill_mode=fill_mode, fill_value=fill_value, data_format=data_format, ).symbolic_call(images, transform) return backend.image.affine_transform( images, transform, interpolation=interpolation, fill_mode=fill_mode, fill_value=fill_value, data_format=data_format, )
AffineTransform
python
psf__requests
tests/test_utils.py
{ "start": 10512, "end": 11643 }
class ____: def test_none(self): encodings = get_encodings_from_content("") assert not len(encodings) @pytest.mark.parametrize( "content", ( # HTML5 meta charset attribute '<meta charset="UTF-8">', # HTML4 pragma directive '<meta http-equiv="Content-type" content="text/html;charset=UTF-8">', # XHTML 1.x served with text/html MIME type '<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />', # XHTML 1.x served as XML '<?xml version="1.0" encoding="UTF-8"?>', ), ) def test_pragmas(self, content): encodings = get_encodings_from_content(content) assert len(encodings) == 1 assert encodings[0] == "UTF-8" def test_precedence(self): content = """ <?xml version="1.0" encoding="XML"?> <meta charset="HTML5"> <meta http-equiv="Content-type" content="text/html;charset=HTML4" /> """.strip() assert get_encodings_from_content(content) == ["HTML5", "HTML4", "XML"]
TestContentEncodingDetection
python
scipy__scipy
scipy/optimize/tests/test_optimize.py
{ "start": 39648, "end": 39754 }
class ____(CheckOptimizeParameterized): use_wrapper = False disp = False
TestOptimizeNoWrapperNoDisp
python
pallets__quart
src/quart/cli.py
{ "start": 9487, "end": 13729 }
class ____(click.Group): """This works similar to a regular click :class:`~click.Group` but it changes the behavior of the :meth:`command` decorator so that it automatically wraps the functions in :func:`with_appcontext`. Not to be confused with :class:`QuartGroup`. """ def command(self, *args: Any, **kwargs: Any) -> Callable: # type: ignore """This works exactly like the method of the same name on a regular :class:`click.Group` but it wraps callbacks in :func:`with_appcontext` if it's enabled by passing ``with_appcontext=True``. """ wrap_for_ctx = kwargs.pop("with_appcontext", False) def decorator(f: Callable) -> Callable: if wrap_for_ctx: f = with_appcontext(f) return click.Group.command(self, *args, **kwargs)(f) return decorator def group(self, *args: Any, **kwargs: Any) -> Callable: # type: ignore kwargs.setdefault("cls", AppGroup) return super().group(*args, **kwargs) def get_version(ctx: Any, param: Any, value: Any) -> None: if not value or ctx.resilient_parsing: return quart_version = version("quart") werkzeug_version = version("werkzeug") click.echo( f"Python {platform.python_version()}\n" f"Quart {quart_version}\n" f"Werkzeug {werkzeug_version}", color=ctx.color, ) ctx.exit() version_option = click.Option( ["--version"], help="Show the Quart version", expose_value=False, callback=get_version, is_flag=True, is_eager=True, ) def _set_app(ctx: click.Context, param: click.Option, value: str | None) -> str | None: if value is None: return None info = ctx.ensure_object(ScriptInfo) info.app_import_path = value return value # This option is eager so the app will be available if --help is given. # --help is also eager, so --app must be before it in the param list. # no_args_is_help bypasses eager processing, so this option must be # processed manually in that case to ensure QUART_APP gets picked up. _app_option = click.Option( ["-A", "--app"], metavar="IMPORT", help=( "The QUART application or factory function to load, in the form 'module:name'." " Module can be a dotted import or file path. Name is not required if it is" " 'app', 'application', 'create_app', or 'make_app', and can be 'name(args)' to" " pass arguments." ), is_eager=True, expose_value=False, callback=_set_app, ) def _set_debug(ctx: click.Context, param: click.Option, value: bool) -> bool | None: # If the flag isn't provided, it will default to False. Don't use # that, let debug be set by env in that case. source = ctx.get_parameter_source(param.name) if source is not None and source in ( ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP, ): return None # Set with env var instead of ScriptInfo.load so that it can be # accessed early during a factory function. os.environ["QUART_DEBUG"] = "1" if value else "0" return value _debug_option = click.Option( ["--debug/--no-debug"], help="Set 'app.debug' separately from '--env'.", expose_value=False, callback=_set_debug, ) def _env_file_callback( ctx: click.Context, param: click.Option, value: str | None ) -> str | None: if value is None: return None import importlib try: importlib.import_module("dotenv") except ImportError: raise click.BadParameter( "python-dotenv must be installed to load an env file.", ctx=ctx, param=param, ) from None # Don't check QUART_SKIP_DOTENV, that only disables automatically # loading .env and .quartenv files. load_dotenv(value) return value # This option is eager so env vars are loaded as early as possible to be # used by other options. 
_env_file_option = click.Option( ["-e", "--env-file"], type=click.Path(exists=True, dir_okay=False), help="Load environment variables from this file. python-dotenv must be installed.", is_eager=True, expose_value=False, callback=_env_file_callback, )
AppGroup
python
getsentry__sentry
tests/sentry/integrations/slack/test_unfurl.py
{ "start": 6683, "end": 55425 }
class ____(TestCase): def setUp(self) -> None: super().setUp() # We're redefining project to ensure that the individual tests have unique project ids. # Sharing project ids across tests could result in some race conditions self.project = self.create_project() self._integration = install_slack(self.organization) self.integration = serialize_integration(self._integration) self.request = RequestFactory().get("slack/event") self.frozen_time = freeze_time(datetime.now() - timedelta(days=1)) self.frozen_time.start() def tearDown(self) -> None: self.frozen_time.stop() def test_unfurl_issues(self) -> None: min_ago = before_now(minutes=1).isoformat() event = self.store_event( data={"fingerprint": ["group2"], "timestamp": min_ago}, project_id=self.project.id ) assert event.group is not None group2 = event.group links = [ UnfurlableUrl( url=f"https://sentry.io/organizations/{self.organization.slug}/issues/{self.group.id}/", args={"issue_id": self.group.id, "event_id": None}, ), UnfurlableUrl( url=f"https://sentry.io/organizations/{self.organization.slug}/issues/{group2.id}/{event.event_id}/", args={"issue_id": group2.id, "event_id": event.event_id}, ), ] unfurls = link_handlers[LinkType.ISSUES].fn(self.integration, links) assert unfurls[links[0].url] == SlackIssuesMessageBuilder(self.group).build() assert ( unfurls[links[1].url] == SlackIssuesMessageBuilder( group2, event.for_group(group2), link_to_event=True ).build() ) def test_unfurl_issues_block_kit(self) -> None: min_ago = before_now(minutes=1).isoformat() event = self.store_event( data={"fingerprint": ["group2"], "timestamp": min_ago}, project_id=self.project.id ) assert event.group is not None group2 = event.group links = [ UnfurlableUrl( url=f"https://sentry.io/organizations/{self.organization.slug}/issues/{self.group.id}/", args={"issue_id": self.group.id, "event_id": None}, ), UnfurlableUrl( url=f"https://sentry.io/organizations/{self.organization.slug}/issues/{group2.id}/{event.event_id}/", args={"issue_id": group2.id, "event_id": event.event_id}, ), ] unfurls = link_handlers[LinkType.ISSUES].fn(self.integration, links) assert unfurls[links[0].url] == SlackIssuesMessageBuilder(self.group).build() assert ( unfurls[links[1].url] == SlackIssuesMessageBuilder( group2, event.for_group(group2), link_to_event=True ).build() ) def test_escape_issue(self) -> None: # wraps text in markdown code block escape_text = "<https://example.com/|*Click Here*>" group = self.create_group( project=self.project, data={"type": "error", "metadata": {"value": escape_text}}, ) links = [ UnfurlableUrl( url=f"https://sentry.io/organizations/{self.organization.slug}/issues/{group.id}/", args={"issue_id": group.id, "event_id": None}, ), ] unfurls = link_handlers[LinkType.ISSUES].fn(self.integration, links) assert unfurls[links[0].url]["blocks"][1]["text"]["text"] == "```" + escape_text + "```" def test_unfurl_metric_alert(self) -> None: alert_rule = self.create_alert_rule() incident = self.create_incident( status=2, organization=self.organization, projects=[self.project], alert_rule=alert_rule ) incident.update(identifier=123) trigger = self.create_alert_rule_trigger(alert_rule, CRITICAL_TRIGGER_LABEL, 100) self.create_alert_rule_trigger_action( alert_rule_trigger=trigger, triggered_for_incident=incident ) links = [ UnfurlableUrl( url=f"https://sentry.io/organizations/{self.organization.slug}/alerts/rules/details/{incident.alert_rule.id}/?alert={incident.identifier}", args={ "org_slug": self.organization.slug, "alert_rule_id": incident.alert_rule.id, "incident_id": 
incident.identifier, "period": None, "start": None, "end": None, }, ), ] unfurls = link_handlers[LinkType.METRIC_ALERT].fn(self.integration, links) assert ( links[0].url == f"https://sentry.io/organizations/{self.organization.slug}/alerts/rules/details/{incident.alert_rule.id}/?alert={incident.identifier}" ) assert ( unfurls[links[0].url] == SlackMetricAlertMessageBuilder(incident.alert_rule, incident).build() ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_metric_alerts_chart(self, mock_generate_chart: MagicMock) -> None: alert_rule = self.create_alert_rule() incident = self.create_incident( status=2, organization=self.organization, projects=[self.project], alert_rule=alert_rule, date_started=timezone.now() - timedelta(minutes=2), ) incident.update(identifier=123) trigger = self.create_alert_rule_trigger(alert_rule, CRITICAL_TRIGGER_LABEL, 100) self.create_alert_rule_trigger_action( alert_rule_trigger=trigger, triggered_for_incident=incident ) url = f"https://sentry.io/organizations/{self.organization.slug}/alerts/rules/details/{alert_rule.id}/?alert={incident.identifier}" links = [ UnfurlableUrl( url=url, args={ "org_slug": self.organization.slug, "alert_rule_id": alert_rule.id, "incident_id": incident.identifier, "period": None, "start": None, "end": None, }, ), ] with self.feature( [ "organizations:incidents", "organizations:discover", "organizations:discover-basic", "organizations:metric-alert-chartcuterie", ] ): unfurls = link_handlers[LinkType.METRIC_ALERT].fn(self.integration, links) assert ( unfurls[links[0].url] == SlackMetricAlertMessageBuilder(alert_rule, incident, chart_url="chart-url").build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert chart_data["rule"]["id"] == str(alert_rule.id) assert chart_data["selectedIncident"]["identifier"] == str(incident.identifier) series_data = chart_data["timeseriesData"][0]["data"] assert len(series_data) > 0 # Validate format of timeseries assert type(series_data[0]["name"]) is int assert type(series_data[0]["value"]) is float assert chart_data["incidents"][0]["id"] == str(incident.id) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_metric_alerts_chart_transaction(self, mock_generate_chart: MagicMock) -> None: # Using the transactions dataset alert_rule = self.create_alert_rule(query="p95", dataset=Dataset.Transactions) incident = self.create_incident( status=2, organization=self.organization, projects=[self.project], alert_rule=alert_rule, date_started=timezone.now() - timedelta(minutes=2), ) url = f"https://sentry.io/organizations/{self.organization.slug}/alerts/rules/details/{alert_rule.id}/?alert={incident.identifier}" links = [ UnfurlableUrl( url=url, args={ "org_slug": self.organization.slug, "alert_rule_id": alert_rule.id, "incident_id": incident.identifier, "period": None, "start": None, "end": None, }, ), ] with self.feature( [ "organizations:incidents", "organizations:discover", "organizations:performance-view", "organizations:metric-alert-chartcuterie", ] ): unfurls = link_handlers[LinkType.METRIC_ALERT].fn(self.integration, links) assert ( unfurls[links[0].url] == SlackMetricAlertMessageBuilder(alert_rule, incident, chart_url="chart-url").build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert chart_data["rule"]["id"] == str(alert_rule.id) assert chart_data["selectedIncident"]["identifier"] == str(incident.identifier) series_data 
= chart_data["timeseriesData"][0]["data"] assert len(series_data) > 0 # Validate format of timeseries assert type(series_data[0]["name"]) is int assert type(series_data[0]["value"]) is float assert chart_data["incidents"][0]["id"] == str(incident.id) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_metric_alerts_chart_eap_spans(self, mock_generate_chart: MagicMock) -> None: # Using the EventsAnalyticsPlatform dataset alert_rule = self.create_alert_rule( query="span.op:foo", dataset=Dataset.EventsAnalyticsPlatform ) incident = self.create_incident( status=2, organization=self.organization, projects=[self.project], alert_rule=alert_rule, date_started=timezone.now() - timedelta(minutes=2), ) trigger = self.create_alert_rule_trigger(alert_rule, CRITICAL_TRIGGER_LABEL, 100) self.create_alert_rule_trigger_action( alert_rule_trigger=trigger, triggered_for_incident=incident ) url = f"https://sentry.io/organizations/{self.organization.slug}/alerts/rules/details/{alert_rule.id}/?alert={incident.identifier}" links = [ UnfurlableUrl( url=url, args={ "org_slug": self.organization.slug, "alert_rule_id": alert_rule.id, "incident_id": incident.identifier, "period": None, "start": None, "end": None, }, ), ] with self.feature( [ "organizations:incidents", "organizations:discover", "organizations:performance-view", "organizations:metric-alert-chartcuterie", ] ): unfurls = link_handlers[LinkType.METRIC_ALERT].fn(self.integration, links) assert ( unfurls[links[0].url] == SlackMetricAlertMessageBuilder(alert_rule, incident, chart_url="chart-url").build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert chart_data["rule"]["id"] == str(alert_rule.id) assert chart_data["rule"]["dataset"] == "events_analytics_platform" assert chart_data["selectedIncident"]["identifier"] == str(incident.identifier) series_data = chart_data["timeseriesData"][0]["data"] assert len(series_data) > 0 # Validate format of timeseries assert type(series_data[0]["name"]) is int assert type(series_data[0]["value"]) is float assert chart_data["incidents"][0]["id"] == str(incident.id) @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_metric_alerts_chart_eap_spans_events_stats_call( self, mock_generate_chart, mock_get_event_stats_data ): # Using the EventsAnalyticsPlatform dataset alert_rule = self.create_alert_rule( query="span.op:foo", dataset=Dataset.EventsAnalyticsPlatform ) incident = self.create_incident( status=2, organization=self.organization, projects=[self.project], alert_rule=alert_rule, date_started=timezone.now() - timedelta(minutes=2), ) url = f"https://sentry.io/organizations/{self.organization.slug}/alerts/rules/details/{alert_rule.id}/?alert={incident.identifier}" links = [ UnfurlableUrl( url=url, args={ "org_slug": self.organization.slug, "alert_rule_id": alert_rule.id, "incident_id": incident.identifier, "period": None, "start": None, "end": None, }, ), ] with self.feature( [ "organizations:incidents", "organizations:discover", "organizations:performance-view", "organizations:metric-alert-chartcuterie", ] ): link_handlers[LinkType.METRIC_ALERT].fn(self.integration, links) dataset = mock_get_event_stats_data.mock_calls[0][2]["dataset"] assert dataset == Spans @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", ) 
@patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_metric_alerts_chart_eap_ourlogs_events_stats_call( self, mock_generate_chart, mock_get_event_stats_data ): # Using the EventsAnalyticsPlatform dataset with TRACE_ITEM_LOG event type alert_rule = self.create_alert_rule( query="log.level:error", dataset=Dataset.EventsAnalyticsPlatform, event_types=[SnubaQueryEventType.EventType.TRACE_ITEM_LOG], ) incident = self.create_incident( status=2, organization=self.organization, projects=[self.project], alert_rule=alert_rule, date_started=timezone.now() - timedelta(minutes=2), ) url = f"https://sentry.io/organizations/{self.organization.slug}/alerts/rules/details/{alert_rule.id}/?alert={incident.identifier}" links = [ UnfurlableUrl( url=url, args={ "org_slug": self.organization.slug, "alert_rule_id": alert_rule.id, "incident_id": incident.identifier, "period": None, "start": None, "end": None, }, ), ] with self.feature( [ "organizations:incidents", "organizations:discover", "organizations:performance-view", "organizations:metric-alert-chartcuterie", ] ): link_handlers[LinkType.METRIC_ALERT].fn(self.integration, links) dataset = mock_get_event_stats_data.mock_calls[0][2]["dataset"] assert dataset == OurLogs @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_metric_alerts_chart_crash_free(self, mock_generate_chart: MagicMock) -> None: alert_rule = self.create_alert_rule( query="", aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate", dataset=Dataset.Metrics, time_window=60, resolve_threshold=10, threshold_period=1, ) url = f"https://sentry.io/organizations/{self.organization.slug}/alerts/rules/details/{alert_rule.id}/" links = [ UnfurlableUrl( url=url, args={ "org_slug": self.organization.slug, "alert_rule_id": alert_rule.id, "incident_id": None, "period": None, "start": None, "end": None, }, ), ] with self.feature( [ "organizations:incidents", "organizations:discover", "organizations:discover-basic", "organizations:metric-alert-chartcuterie", ] ): unfurls = link_handlers[LinkType.METRIC_ALERT].fn(self.integration, links) assert ( unfurls[links[0].url] == SlackMetricAlertMessageBuilder(alert_rule, chart_url="chart-url").build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert chart_data["rule"]["id"] == str(alert_rule.id) assert chart_data["selectedIncident"] is None assert len(chart_data["sessionResponse"]["groups"]) >= 1 assert len(chart_data["incidents"]) == 0 @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "start": 1652817000, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_discover(self, mock_generate_chart: MagicMock, _: MagicMock) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?field=title&field=event.type&field=project&field=user.display&field=timestamp&name=All+Events&project={self.project.id}&query=&sort=-timestamp&statsPeriod=24h" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature(["organizations:discover-basic"]): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == 
SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count()" assert len(chart_data["stats"]["data"]) == INTERVALS_PER_DAY @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "data": [ (i * INTERVAL_COUNT, [{"count": 0}]) for i in range(int(INTERVALS_PER_DAY / 6)) ], "end": 1652903400, "isMetricsData": False, "start": 1652817000, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_discover_previous_period( self, mock_generate_chart: MagicMock, _: MagicMock ) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?display=previous&field=title&field=event.type&field=project&field=user.display&field=timestamp&name=All+Events&project={self.project.id}&query=&sort=-timestamp&statsPeriod=24h" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature(["organizations:discover-basic"]): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert mock_generate_chart.call_args[0][0] == ChartType.SLACK_DISCOVER_PREVIOUS_PERIOD chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count()" assert len(chart_data["stats"]["data"]) == 48 @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "count()": { "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "order": 1, "start": 1652817000, }, "count_unique(user)": { "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "order": 1, "start": 1652817000, }, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_discover_multi_y_axis( self, mock_generate_chart: MagicMock, _: MagicMock ) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?field=title&field=event.type&field=project&field=user.display&field=timestamp&name=All+Events&project={self.project.id}&query=&sort=-timestamp&statsPeriod=24h&yAxis=count_unique%28user%29&yAxis=count%28%29" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature(["organizations:discover-basic"]): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert len(chart_data["stats"]["count()"]["data"]) == INTERVALS_PER_DAY assert len(chart_data["stats"]["count_unique(user)"]["data"]) == INTERVALS_PER_DAY @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "start": 
1652817000, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_discover_html_escaped( self, mock_generate_chart: MagicMock, _: MagicMock ) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?field=title&amp;field=event.type&amp;field=project&amp;field=user.display&amp;field=timestamp&amp;name=All+Events&amp;project={self.project.id}&amp;query=&amp;sort=-timestamp&amp;statsPeriod=24h" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature(["organizations:discover-basic"]): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count()" assert len(chart_data["stats"]["data"]) == INTERVALS_PER_DAY @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "default,first,capable-hagfish,None": { "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "order": 1, "start": 1652817000, }, "default,second,capable-hagfish,None": { "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "order": 1, "start": 1652817000, }, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_discover_short_url(self, mock_generate_chart: MagicMock, _: MagicMock) -> None: query = { "fields": ["message", "event.type", "project", "user.display", "count_unique(user)"], "query": "message:[first,second]", "yAxis": "count_unique(user)", "display": "top5", "topEvents": 2, } saved_query = DiscoverSavedQuery.objects.create( organization=self.organization, created_by_id=self.user.id, name="Test query", query=query, version=2, ) saved_query.set_projects([self.project.id]) url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?id={saved_query.id}&statsPeriod=24h&project={self.project.id}" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 # Line chart expected since yAxis is count_unique(user) assert mock_generate_chart.call_args[0][0] == ChartType.SLACK_DISCOVER_TOP5_PERIOD_LINE chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count_unique(user)" # 2 + 1 cause of Other assert len(chart_data["stats"].keys()) == 2 first_key = list(chart_data["stats"].keys())[0] assert len(chart_data["stats"][first_key]["data"]) == INTERVALS_PER_DAY @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "start": 1652817000, }, ) @patch("sentry.charts.backend.generate_chart", 
return_value="chart-url") def test_unfurl_correct_y_axis_for_saved_query( self, mock_generate_chart: MagicMock, _: MagicMock ) -> None: query = { "fields": [ "message", "event.type", "project", "user.display", "p50(transaction.duration)", ], } saved_query = DiscoverSavedQuery.objects.create( organization=self.organization, created_by_id=self.user.id, name="Test query", query=query, version=2, ) saved_query.set_projects([self.project.id]) url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?id={saved_query.id}&statsPeriod=24h&project={self.project.id}" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert mock_generate_chart.call_args[0][0] == ChartType.SLACK_DISCOVER_TOTAL_PERIOD chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "p50(transaction.duration)" assert len(chart_data["stats"]["data"]) == INTERVALS_PER_DAY @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "default,first": { "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "order": 1, "start": 1652817000, }, "default,second": { "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "order": 1, "start": 1652817000, }, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_top_events_url_param(self, mock_generate_chart: MagicMock, _: MagicMock) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?field=message&field=event.type&field=count()&name=All+Events&query=message:[first,second]&sort=-count&statsPeriod=24h&display=top5&topEvents=2" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert mock_generate_chart.call_args[0][0] == ChartType.SLACK_DISCOVER_TOP5_PERIOD chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count()" assert len(chart_data["stats"].keys()) == 2 first_key = list(chart_data["stats"].keys())[0] assert len(chart_data["stats"][first_key]["data"]) == INTERVALS_PER_DAY # patched return value determined by reading events stats output @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "default,second": { "data": [(1212121, [{"count": 15}]), (1652659200, [{"count": 12}])], "order": 0, "isMetricsData": False, "start": 1652572800, "end": 1652659201, }, "default,first": { "data": [(1652572800, [{"count": 15}]), (1652659200, [{"count": 11}])], "order": 1, "isMetricsData": False, "start": 1652572800, "end": 1652659201, }, }, ) 
@patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_top_daily_events_renders_bar_chart( self, mock_generate_chart: MagicMock, _: MagicMock ) -> None: url = ( f"https://sentry.io/organizations/{self.organization.slug}/discover/results/" "?field=message" "&field=event.type" "&field=count()" "&name=All+Events" "&query=message:[first,second]" "&sort=-count" "&statsPeriod=24h" "&display=dailytop5" "&topEvents=2" ) link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert mock_generate_chart.call_args[0][0] == ChartType.SLACK_DISCOVER_TOP5_DAILY chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count()" assert len(chart_data["stats"].keys()) == 2 first_key = list(chart_data["stats"].keys())[0] # Two buckets assert len(chart_data["stats"][first_key]["data"]) == 2 @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "start": 1652817000, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_discover_short_url_without_project_ids( self, mock_generate_chart: MagicMock, _: MagicMock ) -> None: query = { "fields": ["title", "event.type", "project", "user.display", "timestamp"], "query": "", "yAxis": "count_unique(users)", } saved_query = DiscoverSavedQuery.objects.create( organization=self.organization, created_by_id=self.user.id, name="Test query", query=query, version=2, ) saved_query.set_projects([self.project.id]) url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?id={saved_query.id}&statsPeriod=24h" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert mock_generate_chart.call_args[0][0] == ChartType.SLACK_DISCOVER_TOTAL_PERIOD chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count_unique(users)" assert len(chart_data["stats"]["data"]) == INTERVALS_PER_DAY @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "start": 1652817000, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_discover_without_project_ids( self, mock_generate_chart, mock_get_event_stats_data ): url = 
f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?dataset=errors&field=title&field=event.type&field=project&field=user.display&field=timestamp&name=All+Events&query=&sort=-timestamp&statsPeriod=24h" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count()" assert len(chart_data["stats"]["data"]) == INTERVALS_PER_DAY assert len(mock_get_event_stats_data.mock_calls) == 1 dataset = mock_get_event_stats_data.mock_calls[0][2]["dataset"] assert dataset == errors # patched return value determined by reading events stats output @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "default,second": { "data": [(1212121, [{"count": 15}]), (1652659200, [{"count": 12}])], "order": 0, "isMetricsData": False, "start": 1652572800, "end": 1652659201, }, "default,first": { "data": [(1652572800, [{"count": 15}]), (1652659200, [{"count": 11}])], "order": 1, "isMetricsData": False, "start": 1652572800, "end": 1652659201, }, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_bar_chart_display_renders_bar_chart( self, mock_generate_chart: MagicMock, _: MagicMock ) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?display=bar&field=title&event.type%3Aerror&sort=-count&statsPeriod=24h&yAxis=count%28%29" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert mock_generate_chart.call_args[0][0] == ChartType.SLACK_DISCOVER_TOTAL_DAILY @patch("sentry.integrations.slack.unfurl.discover.client.get") @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_bar_chart_interval_with_absolute_date( self, mock_generate_chart: MagicMock, api_mock: MagicMock ) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?display=bar&end=2022-09-16T23%3A59%3A59&field=title&field=event.type&field=project&field=user.display&field=timestamp&name=All+Events&query=&sort=-timestamp&start=2022-09-09T00%3A00%3A00&utc=true&yAxis=count%28%29" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert len(api_mock.mock_calls) == 1 assert "interval" in 
api_mock.call_args[1]["params"] assert api_mock.call_args[1]["params"]["interval"] == "1h" @patch("sentry.integrations.slack.unfurl.discover.client.get") @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_bar_chart_interval_with_periodic_date( self, mock_generate_chart: MagicMock, api_mock: MagicMock ) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?display=bar&field=title&field=event.type&field=project&field=user.display&field=timestamp&name=All+Events&query=&sort=-timestamp&statsPeriod=90d&utc=true&yAxis=count%28%29" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert len(api_mock.mock_calls) == 1 assert "interval" in api_mock.call_args[1]["params"] assert api_mock.call_args[1]["params"]["interval"] == "1d" @patch("sentry.integrations.slack.unfurl.discover.client.get") @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_saved_query_with_interval( self, mock_generate_chart: MagicMock, api_mock: MagicMock ) -> None: query = { "fields": ["title", "event.type", "project", "user.display", "timestamp"], "query": "", "yAxis": "count()", "interval": "10m", "statsPeriod": "24h", } saved_query = DiscoverSavedQuery.objects.create( organization=self.organization, created_by_id=self.user.id, name="Test query", query=query, version=2, ) saved_query.set_projects([self.project.id]) api_mock.return_value.data = query url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?id={saved_query.id}&statsPeriod=24h" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert len(api_mock.mock_calls) == 2 assert "interval" in api_mock.call_args[1]["params"] assert api_mock.call_args[1]["params"]["interval"] == "10m" @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_saved_query_with_dataset( self, mock_generate_chart: MagicMock, mock_get_event_stats_data: MagicMock ) -> None: query = { "fields": ["title", "event.type", "project", "user.display", "timestamp"], "query": "", "yAxis": "count()", "interval": "10m", "statsPeriod": "24h", } saved_query = DiscoverSavedQuery.objects.create( organization=self.organization, created_by_id=self.user.id, name="Test query", query=query, version=2, dataset=DiscoverSavedQueryTypes.TRANSACTION_LIKE, ) saved_query.set_projects([self.project.id]) url = f"https://sentry.io/organizations/{self.organization.slug}/discover/results/?id={saved_query.id}&statsPeriod=24h" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing 
link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature( [ "organizations:discover", "organizations:discover-basic", ] ): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 assert len(mock_get_event_stats_data.mock_calls) == 1 dataset = mock_get_event_stats_data.mock_calls[0][2]["dataset"] assert dataset == transactions @patch( "sentry.api.bases.organization_events.OrganizationEventsV2EndpointBase.get_event_stats_data", return_value={ "data": [(i * INTERVAL_COUNT, [{"count": 0}]) for i in range(INTERVALS_PER_DAY)], "end": 1652903400, "isMetricsData": False, "start": 1652817000, }, ) @patch("sentry.charts.backend.generate_chart", return_value="chart-url") def test_unfurl_discover_homepage( self, mock_generate_chart: MagicMock, mock_get_event_stats_data: MagicMock ) -> None: url = f"https://sentry.io/organizations/{self.organization.slug}/discover/homepage/?field=title&field=event.type&field=project&field=user.display&field=timestamp&name=All+Events&project={self.project.id}&query=&sort=-timestamp&statsPeriod=24h" link_type, args = match_link(url) if not args or not link_type: raise AssertionError("Missing link_type/args") links = [ UnfurlableUrl(url=url, args=args), ] with self.feature(["organizations:discover-basic"]): unfurls = link_handlers[link_type].fn(self.integration, links, self.user) assert ( unfurls[url] == SlackDiscoverMessageBuilder( title=args["query"].get("name"), chart_url="chart-url" ).build() ) assert len(mock_generate_chart.mock_calls) == 1 chart_data = mock_generate_chart.call_args[0][1] assert chart_data["seriesName"] == "count()" assert len(chart_data["stats"]["data"]) == INTERVALS_PER_DAY assert len(mock_get_event_stats_data.mock_calls) == 1 dataset = mock_get_event_stats_data.mock_calls[0][2]["dataset"] assert dataset == discover
UnfurlTest
python
PrefectHQ__prefect
src/prefect/logging/loggers.py
{ "start": 11268, "end": 13427 }
class ____(logging.Handler):
    """A context manager that collects logs for the duration of the context

    Example:

        ```python
        import logging
        from prefect.logging import LogEavesdropper

        with LogEavesdropper("my_logger") as eavesdropper:
            logging.getLogger("my_logger").info("Hello, world!")
            logging.getLogger("my_logger.child_module").info("Another one!")

        print(eavesdropper.text())

        # Outputs: "Hello, world!\nAnother one!"
    """

    _target_logger: Optional[logging.Logger]
    _lines: List[str]

    def __init__(self, eavesdrop_on: str, level: int = logging.NOTSET):
        """
        Args:
            eavesdrop_on (str): the name of the logger to eavesdrop on
            level (int): the minimum log level to eavesdrop on; if omitted, all
                levels are captured
        """
        super().__init__(level=level)
        self.eavesdrop_on = eavesdrop_on
        self._target_logger = None

        # It's important that we use a very minimalistic formatter for use cases where
        # we may present these logs back to the user. We shouldn't leak filenames,
        # versions, or other environmental information.
        self.formatter: logging.Formatter | None = logging.Formatter(
            "[%(levelname)s]: %(message)s"
        )

    def __enter__(self) -> Self:
        self._target_logger = logging.getLogger(self.eavesdrop_on)
        self._original_level = self._target_logger.level
        self._target_logger.level = self.level
        self._target_logger.addHandler(self)
        self._lines = []
        return self

    def __exit__(self, *_: Any) -> None:
        if self._target_logger:
            self._target_logger.removeHandler(self)
            self._target_logger.level = self._original_level

    def emit(self, record: LogRecord) -> None:
        """The logging.Handler implementation, not intended to be called directly."""
        self._lines.append(self.format(record))

    def text(self) -> str:
        """Return the collected logs as a single newline-delimited string"""
        return "\n".join(self._lines)
LogEavesdropper
python
great-expectations__great_expectations
tests/actions/test_core_actions.py
{ "start": 23535, "end": 32243 }
class ____:
    @pytest.mark.unit
    def test_equality(self):
        """I kow, this one seems silly. But this was a bug."""
        a = SlackNotificationAction(name="my_action", slack_webhook="test", notify_on="all")
        b = SlackNotificationAction(name="my_action", slack_webhook="test", notify_on="all")

        assert a == b

    @pytest.mark.unit
    def test_run(self, checkpoint_result: CheckpointResult):
        action = SlackNotificationAction(name="my_action", slack_webhook="test", notify_on="all")

        with mock.patch.object(Session, "post") as mock_post:
            output = action.run(checkpoint_result=checkpoint_result)

        mock_post.assert_called_once_with(
            url="test",
            headers=None,
            json={
                "blocks": [
                    {"text": {"text": mock.ANY, "type": "plain_text"}, "type": "header"},
                    {
                        "type": "section",
                        "text": {"type": "plain_text", "text": "Runtime: 2024/04/01 08:51 PM"},
                    },
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": (
                                "\n*Asset*: `__no_data_asset_name__` "
                                f"\n*Expectation Suite*: `{SUITE_A}`"
                                "\n*Summary*: *3* of *3* Expectations were met"
                            ),
                        },
                    },
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": (
                                "\n*Asset*: `__no_data_asset_name__` "
                                f"\n*Expectation Suite*: `{SUITE_B}`"
                                "\n*Summary*: *2* of *2* Expectations were met"
                            ),
                        },
                    },
                    {"type": "divider"},
                ],
            },
        )
        assert output == {"slack_notification_result": "Slack notification succeeded."}

    @pytest.mark.unit
    def test_run_with_assets(self, checkpoint_result_with_assets: CheckpointResult):
        action = SlackNotificationAction(name="my_action", slack_webhook="test", notify_on="all")

        with mock.patch.object(Session, "post") as mock_post:
            output = action.run(checkpoint_result=checkpoint_result_with_assets)

        mock_post.assert_called_once_with(
            url="test",
            headers=None,
            json={
                "blocks": [
                    {"text": {"text": mock.ANY, "type": "plain_text"}, "type": "header"},
                    {
                        "type": "section",
                        "text": {"type": "plain_text", "text": "Runtime: 2024/04/01 08:51 PM"},
                    },
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": (
                                f"\n*Asset*: `asset_1` \n*Expectation Suite*: {SUITE_A} "
                                "<www.testing?slack=true|View Results>"
                                "\n*Summary*: *3* of *3* Expectations were met"
                            ),
                        },
                    },
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": (
                                "\n*Asset*: `asset_2_two_wow_whoa_vroom` "
                                f"\n*Expectation Suite*: `{SUITE_B}`"
                                "\n*Summary*: *2* of *2* Expectations were met"
                            ),
                        },
                    },
                    {"type": "divider"},
                ],
            },
        )
        assert output == {"slack_notification_result": "Slack notification succeeded."}

    @pytest.mark.unit
    def test_grabs_data_docs_pages(self, checkpoint_result_with_assets: CheckpointResult):
        action = SlackNotificationAction(name="my_action", slack_webhook="test", notify_on="all")
        site_path = "file:///var/folders/vm/wkw13lnd5vsdh3hjmcv9tym00000gn/T/tmpctw4x7yu/validations/my_suite/__none__/20240910T175850.906745Z/foo-bar.html"
        action_context = ActionContext()
        action_context.update(
            action=UpdateDataDocsAction(name="docs_action"),
            action_result={
                ValidationResultIdentifier(
                    expectation_suite_identifier=ExpectationSuiteIdentifier(name="my_suite"),
                    run_id=RunIdentifier(run_name="prod_20240401"),
                    batch_identifier="my_datasource-my_first_asset",
                ): {
                    "local_site": site_path,
                }
            },
        )

        with mock.patch.object(Session, "post") as mock_post:
            output = action.run(
                checkpoint_result=checkpoint_result_with_assets, action_context=action_context
            )

        mock_post.assert_called_once_with(
            url="test",
            headers=None,
            json={
                "blocks": [
                    {"text": {"text": mock.ANY, "type": "plain_text"}, "type": "header"},
                    {
                        "type": "section",
                        "text": {"type": "plain_text", "text": "Runtime: 2024/04/01 08:51 PM"},
                    },
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": (
                                f"\n*Asset*: `asset_1` \n*Expectation Suite*: {SUITE_A} "
                                "<www.testing?slack=true|View Results>"
                                "\n*Summary*: *3* of *3* Expectations were met"
                            ),
                        },
                    },
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": mock.ANY,
                        },
                    },
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": (
                                "\n*Asset*: `asset_2_two_wow_whoa_vroom` "
                                f"\n*Expectation Suite*: `{SUITE_B}`"
                                "\n*Summary*: *2* of *2* Expectations were met"
                            ),
                        },
                    },
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": mock.ANY,
                        },
                    },
                    {"type": "divider"},
                ],
            },
        )

        docs_block_1 = mock_post.call_args.kwargs["json"]["blocks"][3]["text"]["text"]
        docs_block_2 = mock_post.call_args.kwargs["json"]["blocks"][5]["text"]["text"]
        assert "*DataDocs*" in docs_block_1
        assert site_path in docs_block_1
        assert "*DataDocs*" in docs_block_2
        assert site_path in docs_block_2
        assert output == {"slack_notification_result": "Slack notification succeeded."}

    @pytest.mark.unit
    def test_variable_substitution_webhook(self, mock_context, checkpoint_result):
        action = SlackNotificationAction(name="my_action", slack_webhook="${SLACK_WEBHOOK}")

        with mock.patch.object(Session, "post"):
            action.run(checkpoint_result)

        mock_context.config_provider.substitute_config.assert_called_once_with("${SLACK_WEBHOOK}")

    @pytest.mark.unit
    def test_variable_substitution_token_and_channel(self, mock_context, checkpoint_result):
        action = SlackNotificationAction(
            name="my_action", slack_token="${SLACK_TOKEN}", slack_channel="${SLACK_CHANNEL}"
        )

        with mock.patch.object(Session, "post"):
            action.run(checkpoint_result)

        assert mock_context.config_provider.substitute_config.call_count == 2
        mock_context.config_provider.substitute_config.assert_any_call("${SLACK_CHANNEL}")
        mock_context.config_provider.substitute_config.assert_any_call("${SLACK_TOKEN}")
TestSlackNotificationAction
python
pytorch__pytorch
test/mobile/model_test/quantization_ops.py
{ "start": 37, "end": 1805 }
class ____(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.embedding = torch.ao.nn.quantized.Embedding(
            num_embeddings=10, embedding_dim=12
        )
        self.embedding_input = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8])
        self.func = torch.ao.nn.quantized.QFunctional()
        self.conv1 = torch.ao.nn.quantized.ConvTranspose1d(16, 33, 3, stride=2)
        self.conv2 = torch.ao.nn.quantized.ConvTranspose2d(16, 33, 3, stride=2)
        self.conv3 = torch.ao.nn.quantized.ConvTranspose3d(16, 33, 3, stride=2)

    def forward(self):
        a = torch.quantize_per_tensor(torch.tensor([3.0]), 1.0, 0, torch.qint32)
        b = torch.quantize_per_tensor(torch.tensor(4.0), 1.0, 0, torch.qint32)
        c = torch.quantize_per_tensor(
            torch.tensor([3.0]), torch.tensor(1.0), torch.tensor(0), torch.qint32
        )
        input1 = torch.randn(1, 16, 4)
        input2 = torch.randn(1, 16, 4, 4)

        return len(
            self.func.add(a, b),
            self.func.cat((a, a), 0),
            self.func.mul(a, b),
            self.func.add_relu(a, b),
            self.func.add_scalar(a, b),
            self.func.mul_scalar(a, b),
            self.embedding(self.embedding_input),
            self.conv1(
                torch.quantize_per_tensor(
                    input1, scale=1.0, zero_point=0, dtype=torch.quint8
                )
            ),
            self.conv2(
                torch.quantize_per_tensor(
                    input2, scale=1.0, zero_point=0, dtype=torch.quint8
                )
            ),
            c,
            # self.conv3(torch.quantize_per_tensor(input3, scale=1.0, zero_point=0, dtype=torch.quint8)),  # failed on iOS
        )
GeneralQuantModule
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py
{ "start": 4095, "end": 5941 }
class ____(Benchmark):

    r"""
    Penalty 1 objective function.

    This class defines the Penalty 1 [1]_ global optimization problem. This
    is a imultimodal minimization problem defined as follows:

    .. math::

        f_{\text{Penalty01}}(x) = \frac{\pi}{30} \left\{10 \sin^2(\pi y_1)
        + \sum_{i=1}^{n-1} (y_i - 1)^2
        \left[1 + 10 \sin^2(\pi y_{i+1}) \right]
        + (y_n - 1)^2 \right \} + \sum_{i=1}^n u(x_i, 10, 100, 4)

    Where, in this exercise:

    .. math::

        y_i = 1 + \frac{1}{4}(x_i + 1)

    And:

    .. math::

        u(x_i, a, k, m) =
        \begin{cases}
        k(x_i - a)^m & \textrm{if} \hspace{5pt} x_i > a \\
        0 & \textrm{if} \hspace{5pt} -a \leq x_i \leq a \\
        k(-x_i - a)^m & \textrm{if} \hspace{5pt} x_i < -a
        \end{cases}

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-50, 50]` for :math:`i= 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = -1` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    change_dimensionality = True

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-50.0] * self.N, [50.0] * self.N))
        self.custom_bounds = ([-5.0, 5.0], [-5.0, 5.0])

        self.global_optimum = [[-1.0 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        a, b, c = 10.0, 100.0, 4.0
        xx = abs(x)
        u = where(xx > a, b * (xx - a) ** c, 0.0)

        y = 1.0 + (x + 1.0) / 4.0
        return (sum(u) + (pi / 30.0)
                * (10.0 * sin(pi * y[0]) ** 2.0
                   + sum((y[: -1] - 1.0) ** 2.0
                         * (1.0 + 10.0 * sin(pi * y[1:]) ** 2.0))
                   + (y[-1] - 1) ** 2.0))
Penalty01
python
jazzband__django-model-utils
tests/test_inheritance_iterable.py
{ "start": 194, "end": 583 }
class ____(TestCase):
    def test_prefetch(self) -> None:
        qs = InheritanceManagerTestChild1.objects.all().prefetch_related(
            Prefetch(
                'normal_field',
                queryset=InheritanceManagerTestParent.objects.all(),
                to_attr='normal_field_prefetched'
            )
        )
        self.assertEqual(qs.count(), 0)
InheritanceIterableTest
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/transfers/sheets_to_gcs.py
{ "start": 1223, "end": 5393 }
class ____(BaseOperator):
    """
    Writes Google Sheet data into Google Cloud Storage.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleSheetsToGCSOperator`

    :param spreadsheet_id: The Google Sheet ID to interact with.
    :param sheet_filter: Default to None, if provided, Should be an array of the sheet
        titles to pull from.
    :param destination_bucket: The destination Google cloud storage bucket where the
        report should be written to. (templated)
    :param destination_path: The Google cloud storage URI array for the object created by the
        operator. For example: ``path/to/my/files``.
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        "spreadsheet_id",
        "destination_bucket",
        "destination_path",
        "sheet_filter",
        "impersonation_chain",
    )

    def __init__(
        self,
        *,
        spreadsheet_id: str,
        destination_bucket: str,
        sheet_filter: list[str] | None = None,
        destination_path: str | None = None,
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.gcp_conn_id = gcp_conn_id
        self.spreadsheet_id = spreadsheet_id
        self.sheet_filter = sheet_filter
        self.destination_bucket = destination_bucket
        self.destination_path = destination_path
        self.impersonation_chain = impersonation_chain

    def _upload_data(
        self,
        gcs_hook: GCSHook,
        hook: GSheetsHook,
        sheet_range: str,
        sheet_values: list[Any],
    ) -> str:
        # Construct destination file path
        sheet = hook.get_spreadsheet(self.spreadsheet_id)
        file_name = f"{sheet['properties']['title']}_{sheet_range}.csv".replace(" ", "_")
        dest_file_name = (
            f"{self.destination_path.strip('/')}/{file_name}" if self.destination_path else file_name
        )

        with NamedTemporaryFile("w+") as temp_file:
            # Write data
            writer = csv.writer(temp_file)
            writer.writerows(sheet_values)
            temp_file.flush()

            # Upload to GCS
            gcs_hook.upload(
                bucket_name=self.destination_bucket,
                object_name=dest_file_name,
                filename=temp_file.name,
            )
        return dest_file_name

    def execute(self, context: Context):
        sheet_hook = GSheetsHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )

        # Pull data and upload
        destination_array: list[str] = []
        sheet_titles = sheet_hook.get_sheet_titles(
            spreadsheet_id=self.spreadsheet_id, sheet_filter=self.sheet_filter
        )
        for sheet_range in sheet_titles:
            data = sheet_hook.get_values(spreadsheet_id=self.spreadsheet_id, range_=sheet_range)
            gcs_path_to_file = self._upload_data(gcs_hook, sheet_hook, sheet_range, data)
            destination_array.append(gcs_path_to_file)

        context["ti"].xcom_push(key="destination_objects", value=destination_array)
        return destination_array
GoogleSheetsToGCSOperator
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_image50.py
{ "start": 315, "end": 907 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("image50.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.insert_image("E9", self.image_dir + "red.png")
        worksheet.insert_image("E13", self.image_dir + "red.png")

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/shortcuts/progress_bar/base.py
{ "start": 9954, "end": 14402 }
class ____(Generic[_CounterItem]):
    """
    An individual counter (A progress bar can have multiple counters).
    """

    def __init__(
        self,
        progress_bar: ProgressBar,
        data: Iterable[_CounterItem] | None = None,
        label: AnyFormattedText = "",
        remove_when_done: bool = False,
        total: int | None = None,
    ) -> None:
        self.start_time = datetime.datetime.now()
        self.stop_time: datetime.datetime | None = None
        self.progress_bar = progress_bar
        self.data = data
        self.items_completed = 0
        self.label = label
        self.remove_when_done = remove_when_done
        self._done = False
        self.total: int | None

        if total is None:
            try:
                self.total = len(cast(Sized, data))
            except TypeError:
                self.total = None  # We don't know the total length.
        else:
            self.total = total

    def __iter__(self) -> Iterator[_CounterItem]:
        if self.data is not None:
            try:
                for item in self.data:
                    yield item
                    self.item_completed()

                # Only done if we iterate to the very end.
                self.done = True
            finally:
                # Ensure counter has stopped even if we did not iterate to the
                # end (e.g. break or exceptions).
                self.stopped = True
        else:
            raise NotImplementedError("No data defined to iterate over.")

    def item_completed(self) -> None:
        """
        Start handling the next item.

        (Can be called manually in case we don't have a collection to loop through.)
        """
        self.items_completed += 1
        self.progress_bar.invalidate()

    @property
    def done(self) -> bool:
        """Whether a counter has been completed.

        Done counter have been stopped (see stopped) and removed depending on
        remove_when_done value.

        Contrast this with stopped. A stopped counter may be terminated before
        100% completion. A done counter has reached its 100% completion.
        """
        return self._done

    @done.setter
    def done(self, value: bool) -> None:
        self._done = value
        self.stopped = value

        if value and self.remove_when_done:
            self.progress_bar.counters.remove(self)

    @property
    def stopped(self) -> bool:
        """Whether a counter has been stopped.

        Stopped counters no longer have increasing time_elapsed. This distinction is
        also used to prevent the Bar formatter with unknown totals from continuing to run.

        A stopped counter (but not done) can be used to signal that a given counter has
        encountered an error but allows other counters to continue
        (e.g. download X of Y failed). Given how only done counters are removed
        (see remove_when_done) this can help aggregate failures from a large number of
        successes.

        Contrast this with done. A done counter has reached its 100% completion.
        A stopped counter may be terminated before 100% completion.
        """
        return self.stop_time is not None

    @stopped.setter
    def stopped(self, value: bool) -> None:
        if value:
            # This counter has not already been stopped.
            if not self.stop_time:
                self.stop_time = datetime.datetime.now()
        else:
            # Clearing any previously set stop_time.
            self.stop_time = None

    @property
    def percentage(self) -> float:
        if self.total is None:
            return 0
        else:
            return self.items_completed * 100 / max(self.total, 1)

    @property
    def time_elapsed(self) -> datetime.timedelta:
        """
        Return how much time has been elapsed since the start.
        """
        if self.stop_time is None:
            return datetime.datetime.now() - self.start_time
        else:
            return self.stop_time - self.start_time

    @property
    def time_left(self) -> datetime.timedelta | None:
        """
        Timedelta representing the time left.
        """
        if self.total is None or not self.percentage:
            return None
        elif self.done or self.stopped:
            return datetime.timedelta(0)
        else:
            return self.time_elapsed * (100 - self.percentage) / self.percentage
ProgressBarCounter
python
PrefectHQ__prefect
src/integrations/prefect-ray/tests/test_task_runners.py
{ "start": 5045, "end": 18158 }
class ____: @pytest.fixture(params=task_runner_setups) def task_runner(self, request): fixture_name = request.param._fixture_function.__name__ yield request.getfixturevalue(fixture_name) @pytest.fixture def tmp_file(self, tmp_path): file_path = tmp_path / "canary.txt" file_path.touch() return file_path async def test_duplicate(self, task_runner): new = task_runner.duplicate() assert new == task_runner assert new is not task_runner async def test_successful_flow_run(self, task_runner): @task def task_a(): return "a" @task def task_b(): return "b" @task def task_c(b): return b + "c" @flow(version="test", task_runner=task_runner) def test_flow(): a = task_a.submit() b = task_b.submit() c = task_c.submit(b) return a, b, c a, b, c = test_flow() assert await a.result() == "a" assert await b.result() == "b" assert await c.result() == "bc" async def test_failing_flow_run(self, task_runner): @task def task_a(): raise RuntimeError("This task fails!") @task def task_b(): raise ValueError("This task fails and passes data downstream!") @task def task_c(b): # This task attempts to use the upstream data and should fail too return b + "c" @flow(version="test", task_runner=task_runner) def test_flow(): a = task_a.submit() b = task_b.submit() c = task_c.submit(b) d = task_c.submit(c) return a, b, c, d state = test_flow(return_state=True) assert state.is_failed() result = await state.result(raise_on_failure=False) a, b, c, d = result with pytest.raises(RuntimeError, match="This task fails!"): await a.result() with pytest.raises( ValueError, match="This task fails and passes data downstream" ): await b.result() assert c.is_pending() assert c.name == "NotReady" assert ( f"Upstream task run '{b.state_details.task_run_id}' did not reach a" " 'COMPLETED' state" in c.message ) assert d.is_pending() assert d.name == "NotReady" assert ( f"Upstream task run '{c.state_details.task_run_id}' did not reach a" " 'COMPLETED' state" in d.message ) async def test_async_tasks(self, task_runner): @task async def task_a(): return "a" @task async def task_b(): return "b" @task async def task_c(b): return b + "c" @flow(version="test", task_runner=task_runner) async def test_flow(): a = task_a.submit() b = task_b.submit() c = task_c.submit(b) return a, b, c a, b, c = await test_flow() assert await a.result() == "a" assert await b.result() == "b" assert await c.result() == "bc" async def test_submit_and_wait(self, task_runner): @task async def task_a(): return "a" async def fake_orchestrate_task_run(example_kwarg): return State( type=StateType.COMPLETED, data=example_kwarg, ) with task_runner: future = task_runner.submit(task_a, parameters={}, wait_for=[]) future.wait() state = future.state assert await state.result() == "a" async def test_async_task_timeout(self, task_runner): @task(timeout_seconds=0.1) async def my_timeout_task(): await asyncio.sleep(2) return 42 @task async def my_dependent_task(task_res): return 1764 @task async def my_independent_task(): return 74088 @flow(version="test", task_runner=task_runner) async def test_flow(): a = my_timeout_task.submit() b = my_dependent_task.submit(a) c = my_independent_task.submit() return a, b, c state = await test_flow(return_state=True) assert state.is_failed() ax, bx, cx = await state.result(raise_on_failure=False) assert ax.type == StateType.FAILED assert bx.type == StateType.PENDING assert cx.type == StateType.COMPLETED async def test_sync_task_timeout(self, task_runner): @task(timeout_seconds=1) def my_timeout_task(): time.sleep(2) return 42 @task def 
my_dependent_task(task_res): return 1764 @task def my_independent_task(): return 74088 @flow(version="test", task_runner=task_runner) def test_flow(): a = my_timeout_task.submit() b = my_dependent_task.submit(a) c = my_independent_task.submit() return a, b, c state = test_flow(return_state=True) assert state.is_failed() ax, bx, cx = await state.result(raise_on_failure=False) assert ax.type == StateType.FAILED assert bx.type == StateType.PENDING assert cx.type == StateType.COMPLETED def test_as_completed_yields_correct_order(self, task_runner): @task def task_a(seconds): time.sleep(seconds) return seconds timings = [1, 5, 10] @flow(version="test", task_runner=task_runner) def test_flow(): done_futures = [] futures = [task_a.submit(seconds) for seconds in reversed(timings)] for future in as_completed(futures=futures): done_futures.append(future.result()) assert done_futures[-1] == timings[-1] test_flow() def get_sleep_time(self) -> float: """ Return an amount of time to sleep for concurrency tests. The RayTaskRunner is prone to flaking on concurrency tests. """ return 5.0 async def test_wait_captures_exceptions_as_crashed_state(self, task_runner): """ Ray wraps the exception, interrupts will result in "Cancelled" tasks or "Killed" workers while normal errors will result in a "RayTaskError". We care more about the crash detection and lack of re-raise here than the equality of the exception. """ @task async def task_a(): raise KeyboardInterrupt() with task_runner: future = task_runner.submit( task=task_a, parameters={}, wait_for=[], ) future.wait() state = future.state assert state is not None, "wait timed out" assert isinstance(state, State), "wait should return a state" assert state.name == "Crashed" def test_flow_and_subflow_both_with_task_runner(self, task_runner, tmp_file): @task def some_task(text): tmp_file.write_text(text) @flow(task_runner=RayTaskRunner()) def subflow(): a = some_task.submit("a") b = some_task.submit("b") c = some_task.submit("c") return a, b, c @flow(task_runner=task_runner) def base_flow(): subflow() time.sleep(self.get_sleep_time()) d = some_task.submit("d") return d base_flow() assert tmp_file.read_text() == "d" def test_ray_options(self): @task def process(x): return x + 1 @flow(task_runner=RayTaskRunner()) def my_flow(): # equivalent to setting @ray.remote(max_calls=1) with remote_options(max_calls=1): process.submit(42) my_flow() def test_dependencies(self): @task def a(): time.sleep(self.get_sleep_time()) b = c = d = e = a @flow(task_runner=RayTaskRunner()) def flow_with_dependent_tasks(): for _ in range(3): a_future = a.submit(wait_for=[]) b_future = b.submit(wait_for=[a_future]) c.submit(wait_for=[b_future]) d.submit(wait_for=[b_future]) e.submit(wait_for=[b_future]) flow_with_dependent_tasks() def test_can_run_many_tasks_without_crashing(self, task_runner): """ Regression test for https://github.com/PrefectHQ/prefect/issues/15539 """ @task def random_integer(range_from: int = 0, range_to: int = 100) -> int: """Task that returns a random integer.""" random_int = random.randint(range_from, range_to) return random_int @flow(task_runner=task_runner) def add_random_integers(number_tasks: int = 50) -> int: """Flow that submits some random_integer tasks and returns the sum of the results.""" futures = [] for _ in range(number_tasks): futures.append(random_integer.submit()) sum = 0 for future in futures: sum += future.result() return sum assert add_random_integers() > 0 async def test_assets_with_task_runner(self, task_runner): upstream = 
Asset(key="s3://data/dask_raw") downstream = Asset(key="s3://data/dask_processed") @materialize(upstream) async def extract(): return {"rows": 50} @materialize(downstream) async def load(d): return {"rows": d["rows"] * 2} @flow(version="test", task_runner=task_runner) async def pipeline(): run_context = get_run_context() raw_data = extract.submit() processed = load.submit(raw_data) processed.wait() return run_context.flow_run.id flow_run_id = await pipeline() async with get_client() as client: for i in range(5): response = await client._client.post( "/events/filter", json={ "filter": { "event": {"prefix": ["prefect.asset."]}, "related": {"id": [f"prefect.flow-run.{flow_run_id}"]}, }, }, ) response.raise_for_status() data = response.json() asset_events = data.get("events", []) if len(asset_events) >= 3: break # give a little more time for # server to process events await asyncio.sleep(2) else: raise RuntimeError("Unable to get any events from server!") assert len(asset_events) == 3 upstream_events = [ e for e in asset_events if e.get("resource", {}).get("prefect.resource.id") == upstream.key ] downstream_events = [ e for e in asset_events if e.get("resource", {}).get("prefect.resource.id") == downstream.key ] # Should have 2 events for upstream (1 materialization, 1 reference) assert len(upstream_events) == 2 assert len(downstream_events) == 1 # Separate upstream events by type upstream_mat_events = [ e for e in upstream_events if e["event"] == "prefect.asset.materialization.succeeded" ] upstream_ref_events = [ e for e in upstream_events if e["event"] == "prefect.asset.referenced" ] assert len(upstream_mat_events) == 1 assert len(upstream_ref_events) == 1 upstream_mat_event = upstream_mat_events[0] upstream_ref_event = upstream_ref_events[0] downstream_event = downstream_events[0] # confirm upstream materialization event assert upstream_mat_event["event"] == "prefect.asset.materialization.succeeded" assert upstream_mat_event["resource"]["prefect.resource.id"] == upstream.key # confirm upstream reference event assert upstream_ref_event["event"] == "prefect.asset.referenced" assert upstream_ref_event["resource"]["prefect.resource.id"] == upstream.key # confirm downstream events assert downstream_event["event"] == "prefect.asset.materialization.succeeded" assert downstream_event["resource"]["prefect.resource.id"] == downstream.key related_assets = [ r for r in downstream_event["related"] if r.get("prefect.resource.role") == "asset" ] assert len(related_assets) == 1 assert related_assets[0]["prefect.resource.id"] == upstream.key
TestRayTaskRunner
python
django__django
django/views/decorators/csrf.py
{ "start": 537, "end": 1030 }
class ____(CsrfViewMiddleware):
    # Behave like CsrfViewMiddleware but don't reject requests or log warnings.
    def _reject(self, request, reason):
        return None


requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = "requires_csrf_token"
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
_EnsureCsrfToken
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/concepts/types/types.py
{ "start": 818, "end": 1112 }
class ____:
    pass


@op
def my_op() -> MyClass:
    return MyClass()


# end_auto_type

# start_test_dagster_type
from dagster import Any, Dict, check_dagster_type


def test_dagster_type():
    assert check_dagster_type(Dict[Any, Any], {"foo": "bar"}).success


# end_test_dagster_type
MyClass
python
pandas-dev__pandas
pandas/tests/indexes/test_indexing.py
{ "start": 6980, "end": 8478 }
class ____:
    def test_get_indexer_base(self, index):
        if index._index_as_unique:
            expected = np.arange(index.size, dtype=np.intp)
            actual = index.get_indexer(index)
            tm.assert_numpy_array_equal(expected, actual)
        else:
            msg = "Reindexing only valid with uniquely valued Index objects"
            with pytest.raises(InvalidIndexError, match=msg):
                index.get_indexer(index)

        with pytest.raises(ValueError, match="Invalid fill method"):
            index.get_indexer(index, method="invalid")

    def test_get_indexer_consistency(self, index):
        # See GH#16819
        if index._index_as_unique:
            indexer = index.get_indexer(index[0:2])
            assert isinstance(indexer, np.ndarray)
            assert indexer.dtype == np.intp
        else:
            msg = "Reindexing only valid with uniquely valued Index objects"
            with pytest.raises(InvalidIndexError, match=msg):
                index.get_indexer(index[0:2])

            indexer, _ = index.get_indexer_non_unique(index[0:2])
            assert isinstance(indexer, np.ndarray)
            assert indexer.dtype == np.intp

    def test_get_indexer_masked_duplicated_na(self):
        # GH#48411
        idx = Index([1, 2, NA, NA], dtype="Int64")

        result = idx.get_indexer_for(Index([1, NA], dtype="Int64"))
        expected = np.array([0, 2, 3], dtype=result.dtype)
        tm.assert_numpy_array_equal(result, expected)
TestGetIndexer
python
mlflow__mlflow
mlflow/pyfunc/dbconnect_artifact_cache.py
{ "start": 240, "end": 5769 }
class ____: """ Manages Databricks Connect artifacts cache. Note it doesn't support OSS Spark Connect. This class can be used in the following environment: - Databricks shared cluster python notebook REPL - Databricks Serverless python notebook REPL - Databricks connect client python REPL that connects to remote Databricks Serverless - Databricks connect client python REPL that connects to remote Databricks shared cluster .. code-block:: python :caption: Example # client side code db_artifact_cache = DBConnectArtifactCache.get_or_create() db_artifact_cache.add_artifact_archive("archive1", "/tmp/archive1.tar.gz") @pandas_udf(...) def my_udf(x): # we can get the unpacked archive files in `archive1_unpacked_dir` archive1_unpacked_dir = db_artifact_cache.get("archive1") """ _global_cache = None @staticmethod def get_or_create(spark): if ( DBConnectArtifactCache._global_cache is None or spark is not DBConnectArtifactCache._global_cache._spark ): DBConnectArtifactCache._global_cache = DBConnectArtifactCache(spark) cache_file = os.path.join(get_or_create_tmp_dir(), _CACHE_MAP_FILE_NAME) if is_in_databricks_runtime() and os.path.exists(cache_file): # In databricks runtime (shared cluster or Serverless), when you restart the # notebook REPL by %restart_python or dbutils.library.restartPython(), the # DBConnect session is still preserved. So in this case, we can reuse the cached # artifact files. # So that when adding artifact, the cache map is serialized to local disk file # `db_connect_artifact_cache.json` and after REPL restarts, # `DBConnectArtifactCache` restores the cache map by loading data from the file. with open(cache_file) as f: DBConnectArtifactCache._global_cache._cache = json.load(f) return DBConnectArtifactCache._global_cache def __init__(self, spark): self._spark = spark self._cache = {} def __getstate__(self): """ The `DBConnectArtifactCache` instance is created in Databricks Connect client side, and it will be pickled to Databricks Connect UDF sandbox (see `get_unpacked_artifact_dir` method), but Spark Connect client object is not pickle-able, we need to skip this field. """ state = self.__dict__.copy() # Don't pickle `_spark` del state["_spark"] return state def __setstate__(self, state): self.__dict__.update(state) self._spark = None def has_cache_key(self, cache_key): return cache_key in self._cache def add_artifact_archive(self, cache_key, artifact_archive_path): """ Add an artifact archive file to Databricks connect cache. The archive file must be 'tar.gz' format. You can only call this method in Databricks Connect client side. """ if not artifact_archive_path.endswith(".tar.gz"): raise RuntimeError( "'add_artifact_archive' only supports archive file in 'tar.gz' format." ) archive_file_name = os.path.basename(artifact_archive_path) if cache_key not in self._cache: self._spark.addArtifact(artifact_archive_path, archive=True) self._cache[cache_key] = archive_file_name if is_in_databricks_runtime(): with open(os.path.join(get_or_create_tmp_dir(), _CACHE_MAP_FILE_NAME), "w") as f: json.dump(self._cache, f) def get_unpacked_artifact_dir(self, cache_key): """ Get unpacked artifact directory path, you can only call this method inside Databricks Connect spark UDF sandbox. 
""" if cache_key not in self._cache: raise RuntimeError(f"The artifact '{cache_key}' does not exist.") archive_file_name = self._cache[cache_key] if session_id := os.environ.get("DB_SESSION_UUID"): return ( f"/local_disk0/.ephemeral_nfs/artifacts/{session_id}/archives/{archive_file_name}" ) # If 'DB_SESSION_UUID' environment variable does not exist, it means it is running # in a dedicated mode Spark cluster. return os.path.join(os.getcwd(), archive_file_name) def archive_directory(input_dir, archive_file_path): """ Archive the `input_dir` directory, save the archive file to `archive_file_path`, the generated archive file is 'tar.gz' format. Note: all symlink files in the input directory are kept as it is in the archive file. """ archive_file_path = os.path.abspath(archive_file_path) # Note: `shutil.make_archive` doesn't work because it replaces symlink files with # the file symlink pointing to, which is not the expected behavior in our usage. # We need to pack the python and virtualenv environment, which contains a bunch of # symlink files. subprocess.check_call( ["tar", "-czf", archive_file_path, *os.listdir(input_dir)], cwd=input_dir, ) return archive_file_path def extract_archive_to_dir(archive_path, dest_dir): os.makedirs(dest_dir, exist_ok=True) with tarfile.open(archive_path, "r") as tar: tar.extractall(path=dest_dir) return dest_dir
DBConnectArtifactCache
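A hypothetical round trip through the module-level helpers in that snippet; the directory and archive paths are placeholders, and the tar CLI must be available.

from mlflow.pyfunc.dbconnect_artifact_cache import archive_directory, extract_archive_to_dir

# Pack a directory (symlinks preserved by tar), then unpack it elsewhere.
archive = archive_directory("/tmp/my_env", "/tmp/my_env.tar.gz")
unpacked = extract_archive_to_dir(archive, "/tmp/my_env_unpacked")
print(unpacked)  # /tmp/my_env_unpacked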
python
django__django
tests/generic_relations_regress/models.py
{ "start": 1247, "end": 1414 }
class ____(models.Model):
    account = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=128)
    addresses = GenericRelation(Address)
Person
python
euske__pdfminer
pdfminer/layout.py
{ "start": 12617, "end": 20735 }
class ____(LTContainer): def __init__(self, bbox): LTContainer.__init__(self, bbox) self.groups = None return # group_objects: group text object to textlines. def group_objects(self, laparams, objs): obj0 = None line = None for obj1 in objs: if obj0 is not None: # halign: obj0 and obj1 is horizontally aligned. # # +------+ - - - # | obj0 | - - +------+ - # | | | obj1 | | (line_overlap) # +------+ - - | | - # - - - +------+ # # |<--->| # (char_margin) halign = (obj0.is_compatible(obj1) and obj0.is_voverlap(obj1) and (min(obj0.height, obj1.height) * laparams.line_overlap < obj0.voverlap(obj1)) and (obj0.hdistance(obj1) < max(obj0.width, obj1.width) * laparams.char_margin)) # valign: obj0 and obj1 is vertically aligned. # # +------+ # | obj0 | # | | # +------+ - - - # | | | (char_margin) # +------+ - - # | obj1 | # | | # +------+ # # |<-->| # (line_overlap) valign = (laparams.detect_vertical and obj0.is_compatible(obj1) and obj0.is_hoverlap(obj1) and (min(obj0.width, obj1.width) * laparams.line_overlap < obj0.hoverlap(obj1)) and (obj0.vdistance(obj1) < max(obj0.height, obj1.height) * laparams.char_margin)) if ((halign and isinstance(line, LTTextLineHorizontal)) or (valign and isinstance(line, LTTextLineVertical))): line.add(obj1) elif line is not None: yield line line = None else: if valign and not halign: line = LTTextLineVertical(laparams.word_margin) line.add(obj0) line.add(obj1) elif halign and not valign: line = LTTextLineHorizontal(laparams.word_margin) line.add(obj0) line.add(obj1) else: line = LTTextLineHorizontal(laparams.word_margin) line.add(obj0) yield line line = None obj0 = obj1 if line is None: line = LTTextLineHorizontal(laparams.word_margin) line.add(obj0) yield line return # group_textlines: group neighboring lines to textboxes. def group_textlines(self, laparams, lines): plane = Plane(self.bbox) plane.extend(lines) boxes = {} for line in lines: neighbors = line.find_neighbors(plane, laparams.line_margin) if line not in neighbors: continue members = [] for obj1 in neighbors: members.append(obj1) if obj1 in boxes: members.extend(boxes.pop(obj1)) if isinstance(line, LTTextLineHorizontal): box = LTTextBoxHorizontal() else: box = LTTextBoxVertical() for obj in uniq(members): box.add(obj) boxes[obj] = box done = set() for line in lines: if line not in boxes: continue box = boxes[line] if box in done: continue done.add(box) if not box.is_empty(): yield box return # group_textboxes: group textboxes hierarchically. def group_textboxes(self, laparams, boxes): assert boxes def dist(obj1, obj2): """A distance function between two TextBoxes. Consider the bounding rectangle for obj1 and obj2. Return its area less the areas of obj1 and obj2, shown as 'www' below. This value may be negative. +------+..........+ (x1, y1) | obj1 |wwwwwwwwww: +------+www+------+ :wwwwwwwwww| obj2 | (x0, y0) +..........+------+ """ x0 = min(obj1.x0, obj2.x0) y0 = min(obj1.y0, obj2.y0) x1 = max(obj1.x1, obj2.x1) y1 = max(obj1.y1, obj2.y1) return ((x1-x0)*(y1-y0) - obj1.width*obj1.height - obj2.width*obj2.height) def isany(obj1, obj2): """Check if there's any other object between obj1 and obj2. 
""" x0 = min(obj1.x0, obj2.x0) y0 = min(obj1.y0, obj2.y0) x1 = max(obj1.x1, obj2.x1) y1 = max(obj1.y1, obj2.y1) objs = set(plane.find((x0, y0, x1, y1))) return objs.difference((obj1, obj2)) def key_obj(t): (c,d,_,_) = t return (c,d) # XXX this still takes O(n^2) :( dists = [] for i in range(len(boxes)): obj1 = boxes[i] for j in range(i+1, len(boxes)): obj2 = boxes[j] dists.append((0, dist(obj1, obj2), obj1, obj2)) # We could use dists.sort(), but it would randomize the test result. dists = csort(dists, key=key_obj) plane = Plane(self.bbox) plane.extend(boxes) while dists: (c, d, obj1, obj2) = dists.pop(0) if c == 0 and isany(obj1, obj2): dists.append((1, d, obj1, obj2)) continue if (isinstance(obj1, (LTTextBoxVertical, LTTextGroupTBRL)) or isinstance(obj2, (LTTextBoxVertical, LTTextGroupTBRL))): group = LTTextGroupTBRL([obj1, obj2]) else: group = LTTextGroupLRTB([obj1, obj2]) plane.remove(obj1) plane.remove(obj2) dists = [ (c,d,obj1,obj2) for (c,d,obj1,obj2) in dists if (obj1 in plane and obj2 in plane) ] for other in plane: dists.append((0, dist(group, other), group, other)) dists = csort(dists, key=key_obj) plane.add(group) assert len(plane) == 1 return list(plane) def analyze(self, laparams): # textobjs is a list of LTChar objects, i.e. # it has all the individual characters in the page. (textobjs, otherobjs) = fsplit(lambda obj: isinstance(obj, LTChar), self) for obj in otherobjs: obj.analyze(laparams) if not textobjs: return textlines = list(self.group_objects(laparams, textobjs)) (empties, textlines) = fsplit(lambda obj: obj.is_empty(), textlines) for obj in empties: obj.analyze(laparams) textboxes = list(self.group_textlines(laparams, textlines)) if -1 <= laparams.boxes_flow and laparams.boxes_flow <= +1 and textboxes: self.groups = self.group_textboxes(laparams, textboxes) assigner = IndexAssigner() for group in self.groups: group.analyze(laparams) assigner.run(group) textboxes.sort(key=lambda box: box.index) else: def getkey(box): if isinstance(box, LTTextBoxVertical): return (0, -box.x1, box.y0) else: return (1, box.y0, box.x0) textboxes.sort(key=getkey) self._objs = textboxes + otherobjs + empties return ## LTFigure ##
LTLayoutContainer
python
doocs__leetcode
solution/1800-1899/1868.Product of Two Run-Length Encoded Arrays/Solution.py
{ "start": 0, "end": 587 }
class ____:
    def findRLEArray(
        self, encoded1: List[List[int]], encoded2: List[List[int]]
    ) -> List[List[int]]:
        ans = []
        j = 0
        for vi, fi in encoded1:
            while fi:
                f = min(fi, encoded2[j][1])
                v = vi * encoded2[j][0]
                if ans and ans[-1][0] == v:
                    ans[-1][1] += f
                else:
                    ans.append([v, f])
                fi -= f
                encoded2[j][1] -= f
                if encoded2[j][1] == 0:
                    j += 1
        return ans
Solution
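A small worked example of the run-length product above, assuming the class is importable under its labelled name Solution; the input values are chosen for illustration.

# encoded1 = [[1, 3], [2, 3]] expands to [1, 1, 1, 2, 2, 2]
# encoded2 = [[6, 3], [3, 3]] expands to [6, 6, 6, 3, 3, 3]
# element-wise products are [6, 6, 6, 6, 6, 6], which re-encodes to [[6, 6]]
print(Solution().findRLEArray([[1, 3], [2, 3]], [[6, 3], [3, 3]]))  # [[6, 6]]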
python
weaviate__weaviate-python-client
weaviate/collections/queries/hybrid/query/executor.py
{ "start": 949, "end": 19315 }
class ____( Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType] ): @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Literal[None] = None, ) -> executor.Result[QueryReturn[Properties, References]]: ... @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: REFERENCES, ) -> executor.Result[QueryReturn[Properties, CrossReferences]]: ... @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Type[TReferences], ) -> executor.Result[QueryReturn[Properties, TReferences]]: ... @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Literal[None] = None, ) -> executor.Result[QueryReturn[TProperties, References]]: ... 
@overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: REFERENCES, ) -> executor.Result[QueryReturn[TProperties, CrossReferences]]: ... @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Type[TReferences], ) -> executor.Result[QueryReturn[TProperties, TReferences]]: ... ##### GROUP BY ##### @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Literal[None] = None, ) -> executor.Result[GroupByReturn[Properties, References]]: ... @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: REFERENCES, ) -> executor.Result[GroupByReturn[Properties, CrossReferences]]: ... 
@overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Type[TReferences], ) -> executor.Result[GroupByReturn[Properties, TReferences]]: ... @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Literal[None] = None, ) -> executor.Result[GroupByReturn[TProperties, References]]: ... @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: REFERENCES, ) -> executor.Result[GroupByReturn[TProperties, CrossReferences]]: ... @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Type[TReferences], ) -> executor.Result[GroupByReturn[TProperties, TReferences]]: ... 
### DEFAULT ### @overload def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Optional[GroupBy] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Optional[ReturnProperties[TProperties]] = None, return_references: Optional[ReturnReferences[TReferences]] = None, ) -> executor.Result[ QuerySearchReturnType[Properties, References, TProperties, TReferences] ]: ... def hybrid( self, query: Optional[str], *, alpha: NUMBER = 0.7, vector: Optional[HybridVectorType] = None, query_properties: Optional[List[str]] = None, fusion_type: Optional[HybridFusion] = None, max_vector_distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, bm25_operator: Optional[BM25OperatorOptions] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Optional[GroupBy] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Optional[ReturnProperties[TProperties]] = None, return_references: Optional[ReturnReferences[TReferences]] = None, ) -> executor.Result[QuerySearchReturnType[Properties, References, TProperties, TReferences]]: """Search for objects in this collection using the hybrid algorithm blending keyword-based BM25 and vector-based similarity. See the [docs](https://weaviate.io/developers/weaviate/search/hybrid) for a more detailed explanation. Args: query: The keyword-based query to search for, REQUIRED. If query and vector are both None, a normal search will be performed. alpha: The weight of the BM25 score. If not specified, the default weight specified by the server is used. vector: The specific vector to search for. If not specified, the query is vectorized and used in the similarity search. query_properties: The properties to search in. If not specified, all properties are searched. fusion_type: The type of fusion to apply. If not specified, the default fusion type specified by the server is used. limit: The maximum number of results to return. If not specified, the default limit specified by the server is returned. offset: The offset to start from. If not specified, the retrieval begins from the first object in the server. bm25_operator: The BM25 operator to use. If not specified, the default operator specified by the server is used. auto_limit: The maximum number of [autocut](https://weaviate.io/developers/weaviate/api/graphql/additional-operators#autocut) results to return. If not specified, no limit is applied. filters: The filters to apply to the search. group_by: How the results should be grouped by a specific property. rerank: How the results should be reranked. NOTE: A `rerank-*` module must be enabled for this functionality to work. target_vector: The name of the vector space to search in for named vector configurations. Required if multiple spaces are configured. include_vector: Whether to include the vector in the results. If not specified, this is set to False. 
return_metadata: The metadata to return for each object, defaults to `None`. return_properties: The properties to return for each object. return_references: The references to return for each object. NOTE: - If `return_properties` is not provided then all properties are returned except for blob properties. - If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata. - If `return_references` is not provided then no references are provided. Returns: A `QueryReturn` or `GroupByReturn` object that includes the searched objects. If `group_by` is provided then a `GroupByReturn` object is returned, otherwise a `QueryReturn` object is returned. Raises: weaviate.exceptions.WeaviateQueryError: If the network connection to Weaviate fails. weaviate.exceptions.WeaviateNotImplementedError: If a group by is provided and the Weaviate server version is lower than 1.25.0. """ def resp( res: search_get_pb2.SearchReply, ) -> QuerySearchReturnType[Properties, References, TProperties, TReferences]: return cast( Any, self._result_to_query_or_groupby_return( res, _QueryOptions.from_input( return_metadata, return_properties, include_vector, self._references, return_references, rerank, group_by, ), ), ) request = self._query.hybrid( query=query, alpha=alpha, vector=vector, properties=query_properties, fusion_type=fusion_type, limit=limit, offset=offset, bm25_operator=bm25_operator, distance=max_vector_distance, autocut=auto_limit, filters=filters, group_by=_GroupBy.from_input(group_by), rerank=rerank, target_vector=target_vector, return_metadata=self._parse_return_metadata(return_metadata, include_vector), return_properties=self._parse_return_properties(return_properties), return_references=self._parse_return_references(return_references), ) return executor.execute( response_callback=resp, method=self._connection.grpc_search, request=request, )
_HybridQueryExecutor
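A hedged sketch of reaching this executor through the public v4 client API; the collection name, query text, and a locally running Weaviate instance are assumptions.

import weaviate

with weaviate.connect_to_local() as client:
    articles = client.collections.get("Article")
    res = articles.query.hybrid(query="solar power", alpha=0.5, limit=3)
    for obj in res.objects:
        print(obj.properties)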
python
apache__airflow
airflow-core/tests/unit/dag_processing/test_collection.py
{ "start": 10334, "end": 13275 }
class ____: @staticmethod def clean_db(): clear_db_dags() clear_db_assets() clear_db_triggers() @pytest.fixture(autouse=True) def per_test(self) -> Generator: self.clean_db() yield self.clean_db() def test_add_asset_activate(self, dag_maker, session): asset = Asset("myasset", "file://myasset/", group="old_group") with dag_maker(schedule=[asset]) as dag: EmptyOperator(task_id="mytask") asset_op = AssetModelOperation.collect({dag.dag_id: LazyDeserializedDAG.from_dag(dag)}) orm_assets = asset_op.sync_assets(session=session) session.flush() assert len(orm_assets) == 1 asset_op.activate_assets_if_possible(orm_assets.values(), session=session) session.flush() assert orm_assets["myasset", "file://myasset/"].active is not None def test_add_asset_activate_already_exists(self, dag_maker, session): asset = Asset("myasset", "file://myasset/", group="old_group") session.add(AssetModel.from_public(asset)) session.flush() session.add(AssetActive.for_asset(asset)) session.flush() with dag_maker(schedule=[asset]) as dag: EmptyOperator(task_id="mytask") asset_op = AssetModelOperation.collect({dag.dag_id: LazyDeserializedDAG.from_dag(dag)}) orm_assets = asset_op.sync_assets(session=session) session.flush() assert len(orm_assets) == 1 asset_op.activate_assets_if_possible(orm_assets.values(), session=session) session.flush() assert orm_assets["myasset", "file://myasset/"].active is not None, "should pick up existing active" @pytest.mark.parametrize( "existing_assets", [ pytest.param([Asset("myasset", uri="file://different/asset")], id="name"), pytest.param([Asset("another", uri="file://myasset/")], id="uri"), ], ) def test_add_asset_activate_conflict(self, dag_maker, session, existing_assets): session.add_all(AssetModel.from_public(a) for a in existing_assets) session.flush() session.add_all(AssetActive.for_asset(a) for a in existing_assets) session.flush() asset = Asset(name="myasset", uri="file://myasset/", group="old_group") with dag_maker(schedule=[asset]) as dag: EmptyOperator(task_id="mytask") asset_op = AssetModelOperation.collect({dag.dag_id: LazyDeserializedDAG.from_dag(dag)}) orm_assets = asset_op.sync_assets(session=session) session.flush() assert len(orm_assets) == 1 asset_op.activate_assets_if_possible(orm_assets.values(), session=session) session.flush() assert orm_assets["myasset", "file://myasset/"].active is None, "should not activate due to conflict" @pytest.mark.need_serialized_dag @pytest.mark.db_test
TestAssetModelOperationSyncAssetActive
python
django__django
tests/test_runner_apps/sample/pattern_tests.py
{ "start": 32, "end": 112 }
class ____(TestCase):
    def test_sample(self):
        self.assertEqual(1, 1)
Test
python
tornadoweb__tornado
tornado/test/websocket_test.py
{ "start": 30742, "end": 31348 }
class ____(unittest.TestCase):
    def test_ping_sleep_time(self):
        from tornado.websocket import WebSocketProtocol13

        now = datetime.datetime(2025, 1, 1, 12, 0, 0, tzinfo=datetime.timezone.utc)
        interval = 10  # seconds
        last_ping_time = datetime.datetime(
            2025, 1, 1, 11, 59, 54, tzinfo=datetime.timezone.utc
        )

        sleep_time = WebSocketProtocol13.ping_sleep_time(
            last_ping_time=last_ping_time.timestamp(),
            interval=interval,
            now=now.timestamp(),
        )
        self.assertEqual(sleep_time, 4)
PingCalculationTest
python
qdrant__qdrant-client
tools/async_client_generator/transformers/import_transformer.py
{ "start": 41, "end": 533 }
class ____(ast.NodeTransformer):
    def __init__(self, import_replace_map: Optional[dict[str, str]] = None):
        self.import_replace_map = import_replace_map if import_replace_map is not None else {}

    def visit_Import(self, node: ast.Import) -> ast.AST:
        for old_value, new_value in self.import_replace_map.items():
            for alias in node.names:
                alias.name = alias.name.replace(old_value, new_value)
        return self.generic_visit(node)
ImportTransformer
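A quick sketch of the rewrite this transformer performs, using the class defined above; the replace map and parsed import are made up, and ast.unparse requires Python 3.9+.

import ast

tree = ast.parse("import qdrant_client.grpc as grpc")
tree = ImportTransformer({"qdrant_client": "qdrant_client_async"}).visit(tree)
print(ast.unparse(tree))  # import qdrant_client_async.grpc as grpc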
python
getsentry__sentry
src/sentry/cache/redis.py
{ "start": 1557, "end": 2034 }
class ____(CommonRedisCache):
    def __init__(self, **options: object) -> None:
        cluster, options = get_cluster_from_options("SENTRY_CACHE_OPTIONS", options)
        client = get_cluster_routing_client(cluster, False)
        # XXX: rb does not have a "raw" client -- use the default client
        super().__init__(client=client, raw_client=client, **options)


# Confusing legacy name for RbCache. We don't actually have a pure redis cache
RedisCache = RbCache
RbCache
python
numpy__numpy
numpy/lib/tests/test_function_base.py
{ "start": 55422, "end": 72785 }
class ____: def test_simple(self): def addsubtract(a, b): if a > b: return a - b else: return a + b f = vectorize(addsubtract) r = f([0, 3, 6, 9], [1, 3, 5, 7]) assert_array_equal(r, [1, 6, 1, 2]) def test_scalar(self): def addsubtract(a, b): if a > b: return a - b else: return a + b f = vectorize(addsubtract) r = f([0, 3, 6, 9], 5) assert_array_equal(r, [5, 8, 1, 4]) def test_large(self): x = np.linspace(-3, 2, 10000) f = vectorize(lambda x: x) y = f(x) assert_array_equal(y, x) def test_ufunc(self): f = vectorize(math.cos) args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) r1 = f(args) r2 = np.cos(args) assert_array_almost_equal(r1, r2) def test_keywords(self): def foo(a, b=1): return a + b f = vectorize(foo) args = np.array([1, 2, 3]) r1 = f(args) r2 = np.array([2, 3, 4]) assert_array_equal(r1, r2) r1 = f(args, 2) r2 = np.array([3, 4, 5]) assert_array_equal(r1, r2) def test_keywords_with_otypes_order1(self): # gh-1620: The second call of f would crash with # `ValueError: invalid number of arguments`. f = vectorize(_foo1, otypes=[float]) # We're testing the caching of ufuncs by vectorize, so the order # of these function calls is an important part of the test. r1 = f(np.arange(3.0), 1.0) r2 = f(np.arange(3.0)) assert_array_equal(r1, r2) def test_keywords_with_otypes_order2(self): # gh-1620: The second call of f would crash with # `ValueError: non-broadcastable output operand with shape () # doesn't match the broadcast shape (3,)`. f = vectorize(_foo1, otypes=[float]) # We're testing the caching of ufuncs by vectorize, so the order # of these function calls is an important part of the test. r1 = f(np.arange(3.0)) r2 = f(np.arange(3.0), 1.0) assert_array_equal(r1, r2) def test_keywords_with_otypes_order3(self): # gh-1620: The third call of f would crash with # `ValueError: invalid number of arguments`. f = vectorize(_foo1, otypes=[float]) # We're testing the caching of ufuncs by vectorize, so the order # of these function calls is an important part of the test. r1 = f(np.arange(3.0)) r2 = f(np.arange(3.0), y=1.0) r3 = f(np.arange(3.0)) assert_array_equal(r1, r2) assert_array_equal(r1, r3) def test_keywords_with_otypes_several_kwd_args1(self): # gh-1620 Make sure different uses of keyword arguments # don't break the vectorized function. f = vectorize(_foo2, otypes=[float]) # We're testing the caching of ufuncs by vectorize, so the order # of these function calls is an important part of the test. r1 = f(10.4, z=100) r2 = f(10.4, y=-1) r3 = f(10.4) assert_equal(r1, _foo2(10.4, z=100)) assert_equal(r2, _foo2(10.4, y=-1)) assert_equal(r3, _foo2(10.4)) def test_keywords_with_otypes_several_kwd_args2(self): # gh-1620 Make sure different uses of keyword arguments # don't break the vectorized function. f = vectorize(_foo2, otypes=[float]) # We're testing the caching of ufuncs by vectorize, so the order # of these function calls is an important part of the test. r1 = f(z=100, x=10.4, y=-1) r2 = f(1, 2, 3) assert_equal(r1, _foo2(z=100, x=10.4, y=-1)) assert_equal(r2, _foo2(1, 2, 3)) def test_keywords_no_func_code(self): # This needs to test a function that has keywords but # no func_code attribute, since otherwise vectorize will # inspect the func_code. 
import random try: vectorize(random.randrange) # Should succeed except Exception: raise AssertionError def test_keywords2_ticket_2100(self): # Test kwarg support: enhancement ticket 2100 def foo(a, b=1): return a + b f = vectorize(foo) args = np.array([1, 2, 3]) r1 = f(a=args) r2 = np.array([2, 3, 4]) assert_array_equal(r1, r2) r1 = f(b=1, a=args) assert_array_equal(r1, r2) r1 = f(args, b=2) r2 = np.array([3, 4, 5]) assert_array_equal(r1, r2) def test_keywords3_ticket_2100(self): # Test excluded with mixed positional and kwargs: ticket 2100 def mypolyval(x, p): _p = list(p) res = _p.pop(0) while _p: res = res * x + _p.pop(0) return res vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) ans = [3, 6] assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) def test_keywords4_ticket_2100(self): # Test vectorizing function with no positional args. @vectorize def f(**kw): res = 1.0 for _k in kw: res *= kw[_k] return res assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) def test_keywords5_ticket_2100(self): # Test vectorizing function with no kwargs args. @vectorize def f(*v): return np.prod(v) assert_array_equal(f([1, 2], [3, 4]), [3, 8]) def test_coverage1_ticket_2100(self): def foo(): return 1 f = vectorize(foo) assert_array_equal(f(), 1) def test_assigning_docstring(self): def foo(x): """Original documentation""" return x f = vectorize(foo) assert_equal(f.__doc__, foo.__doc__) doc = "Provided documentation" f = vectorize(foo, doc=doc) assert_equal(f.__doc__, doc) def test_UnboundMethod_ticket_1156(self): # Regression test for issue 1156 class Foo: b = 2 def bar(self, a): return a ** self.b assert_array_equal(vectorize(Foo().bar)(np.arange(9)), np.arange(9) ** 2) assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), np.arange(9) ** 2) def test_execution_order_ticket_1487(self): # Regression test for dependence on execution order: issue 1487 f1 = vectorize(lambda x: x) res1a = f1(np.arange(3)) res1b = f1(np.arange(0.1, 3)) f2 = vectorize(lambda x: x) res2b = f2(np.arange(0.1, 3)) res2a = f2(np.arange(3)) assert_equal(res1a, res2a) assert_equal(res1b, res2b) def test_string_ticket_1892(self): # Test vectorization over strings: issue 1892. f = np.vectorize(lambda x: x) s = '0123456789' * 10 assert_equal(s, f(s)) def test_dtype_promotion_gh_29189(self): # dtype should not be silently promoted (int32 -> int64) dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] for dtype in dtypes: x = np.asarray([1, 2, 3], dtype=dtype) y = np.vectorize(lambda x: x + x)(x) assert x.dtype == y.dtype def test_cache(self): # Ensure that vectorized func called exactly once per argument. 
_calls = [0] @vectorize def f(x): _calls[0] += 1 return x ** 2 f.cache = True x = np.arange(5) assert_array_equal(f(x), x * x) assert_equal(_calls[0], len(x)) def test_otypes(self): f = np.vectorize(lambda x: x) f.otypes = 'i' x = np.arange(5) assert_array_equal(f(x), x) def test_otypes_object_28624(self): # with object otype, the vectorized function should return y # wrapped into an object array y = np.arange(3) f = vectorize(lambda x: y, otypes=[object]) assert f(None).item() is y assert f([None]).item() is y y = [1, 2, 3] f = vectorize(lambda x: y, otypes=[object]) assert f(None).item() is y assert f([None]).item() is y def test_parse_gufunc_signature(self): assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), ([('x', 'y')], [()])) assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), ([('x',), ('y',)], [()])) assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), ([('x',)], [('y',)])) assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), ([('x',)], [('y',), ()])) assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) # Tests to check if whitespaces are ignored assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()])) assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'), ([('x', 'y')], [()])) assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'), ([('x',), ('y',)], [()])) assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '), ([('x',)], [('y',)])) assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'), ([('x',)], [('y',), ()])) assert_equal(nfb._parse_gufunc_signature( '( ), ( a, b,c ) ,( d) -> (d , e)'), ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) with assert_raises(ValueError): nfb._parse_gufunc_signature('(x)(y)->()') with assert_raises(ValueError): nfb._parse_gufunc_signature('(x),(y)->') with assert_raises(ValueError): nfb._parse_gufunc_signature('((x))->(x)') def test_signature_simple(self): def addsubtract(a, b): if a > b: return a - b else: return a + b f = vectorize(addsubtract, signature='(),()->()') r = f([0, 3, 6, 9], [1, 3, 5, 7]) assert_array_equal(r, [1, 6, 1, 2]) def test_signature_mean_last(self): def mean(a): return a.mean() f = vectorize(mean, signature='(n)->()') r = f([[1, 3], [2, 4]]) assert_array_equal(r, [2, 3]) def test_signature_center(self): def center(a): return a - a.mean() f = vectorize(center, signature='(n)->(n)') r = f([[1, 3], [2, 4]]) assert_array_equal(r, [[-1, 1], [-1, 1]]) def test_signature_two_outputs(self): f = vectorize(lambda x: (x, x), signature='()->(),()') r = f([1, 2, 3]) assert_(isinstance(r, tuple) and len(r) == 2) assert_array_equal(r[0], [1, 2, 3]) assert_array_equal(r[1], [1, 2, 3]) def test_signature_outer(self): f = vectorize(np.outer, signature='(a),(b)->(a,b)') r = f([1, 2], [1, 2, 3]) assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) r = f([[[1, 2]]], [1, 2, 3]) assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) r = f([[1, 0], [2, 0]], [1, 2, 3]) assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], [[2, 4, 6], [0, 0, 0]]]) r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], [[0, 0, 0], [0, 0, 0]]]) def test_signature_computed_size(self): f = vectorize(lambda x: x[:-1], signature='(n)->(m)') r = f([1, 2, 3]) assert_array_equal(r, [1, 2]) r = f([[1, 2, 3], [2, 3, 4]]) assert_array_equal(r, [[1, 2], [2, 3]]) def test_signature_excluded(self): def foo(a, b=1): return a + b f = vectorize(foo, signature='()->()', excluded={'b'}) 
assert_array_equal(f([1, 2, 3]), [2, 3, 4]) assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) def test_signature_otypes(self): f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) r = f([1, 2, 3]) assert_equal(r.dtype, np.dtype('float64')) assert_array_equal(r, [1, 2, 3]) def test_signature_invalid_inputs(self): f = vectorize(operator.add, signature='(n),(n)->(n)') with assert_raises_regex(TypeError, 'wrong number of positional'): f([1, 2]) with assert_raises_regex( ValueError, 'does not have enough dimensions'): f(1, 2) with assert_raises_regex( ValueError, 'inconsistent size for core dimension'): f([1, 2], [1, 2, 3]) f = vectorize(operator.add, signature='()->()') with assert_raises_regex(TypeError, 'wrong number of positional'): f(1, 2) def test_signature_invalid_outputs(self): f = vectorize(lambda x: x[:-1], signature='(n)->(n)') with assert_raises_regex( ValueError, 'inconsistent size for core dimension'): f([1, 2, 3]) f = vectorize(lambda x: x, signature='()->(),()') with assert_raises_regex(ValueError, 'wrong number of outputs'): f(1) f = vectorize(lambda x: (x, x), signature='()->()') with assert_raises_regex(ValueError, 'wrong number of outputs'): f([1, 2]) def test_size_zero_output(self): # see issue 5868 f = np.vectorize(lambda x: x) x = np.zeros([0, 5], dtype=int) with assert_raises_regex(ValueError, 'otypes'): f(x) f.otypes = 'i' assert_array_equal(f(x), x) f = np.vectorize(lambda x: x, signature='()->()') with assert_raises_regex(ValueError, 'otypes'): f(x) f = np.vectorize(lambda x: x, signature='()->()', otypes='i') assert_array_equal(f(x), x) f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') assert_array_equal(f(x), x) f = np.vectorize(lambda x: x, signature='(n)->(n)') assert_array_equal(f(x.T), x.T) f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') with assert_raises_regex(ValueError, 'new output dimensions'): f(x) def test_subclasses(self): class subclass(np.ndarray): pass m = np.array([[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]).view(subclass) v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass) # generalized (gufunc) matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)') r = matvec(m, v) assert_equal(type(r), subclass) assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]]) # element-wise (ufunc) mult = np.vectorize(lambda x, y: x * y) r = mult(m, v) assert_equal(type(r), subclass) assert_equal(r, m * v) def test_name(self): # gh-23021 @np.vectorize def f2(a, b): return a + b assert f2.__name__ == 'f2' def test_decorator(self): @vectorize def addsubtract(a, b): if a > b: return a - b else: return a + b r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7]) assert_array_equal(r, [1, 6, 1, 2]) def test_docstring(self): @vectorize def f(x): """Docstring""" return x if sys.flags.optimize < 2: assert f.__doc__ == "Docstring" def test_partial(self): def foo(x, y): return x + y bar = partial(foo, 3) vbar = np.vectorize(bar) assert vbar(1) == 4 def test_signature_otypes_decorator(self): @vectorize(signature='(n)->(n)', otypes=['float64']) def f(x): return x r = f([1, 2, 3]) assert_equal(r.dtype, np.dtype('float64')) assert_array_equal(r, [1, 2, 3]) assert f.__name__ == 'f' def test_bad_input(self): with assert_raises(TypeError): A = np.vectorize(pyfunc=3) def test_no_keywords(self): with assert_raises(TypeError): @np.vectorize("string") def foo(): return "bar" def test_positional_regression_9477(self): # This supplies the first keyword argument as a positional, # to ensure that they are still properly forwarded 
after the # enhancement for #9477 f = vectorize((lambda x: x), ['float64']) r = f([2]) assert_equal(r.dtype, np.dtype('float64')) def test_datetime_conversion(self): otype = "datetime64[ns]" arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], dtype='datetime64[ns]') assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", otypes=[otype])(arr), arr)
TestVectorize
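As a compact illustration of the gufunc-style signature feature these tests exercise, a vectorized mean over the core dimension (n):

import numpy as np

row_mean = np.vectorize(lambda a: a.mean(), signature='(n)->()')
print(row_mean([[1, 3], [2, 4]]))  # [2. 3.]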
python
django__django
django/contrib/postgres/search.py
{ "start": 2026, "end": 2150 }
class ____(CheckPostgresInstalledMixin, Field):
    def db_type(self, connection):
        return "tsvector"
SearchVectorField
python
sympy__sympy
sympy/physics/mechanics/tests/test_actuator.py
{ "start": 9741, "end": 12685 }
class ____: @pytest.fixture(autouse=True) def _linear_damper_fixture(self): self.damping = Symbol('c') self.l = Symbol('l') self.pA = Point('pA') self.pB = Point('pB') self.pathway = LinearPathway(self.pA, self.pB) self.q = dynamicsymbols('q') self.dq = dynamicsymbols('q', 1) self.u = dynamicsymbols('u') self.N = ReferenceFrame('N') def test_is_force_actuator_subclass(self): assert issubclass(LinearDamper, ForceActuator) def test_is_actuator_base_subclass(self): assert issubclass(LinearDamper, ActuatorBase) def test_valid_constructor(self): self.pB.set_pos(self.pA, self.q*self.N.x) damper = LinearDamper(self.damping, self.pathway) assert isinstance(damper, LinearDamper) assert hasattr(damper, 'damping') assert isinstance(damper.damping, ExprType) assert damper.damping == self.damping assert hasattr(damper, 'pathway') assert isinstance(damper.pathway, LinearPathway) assert damper.pathway == self.pathway def test_valid_constructor_force(self): self.pB.set_pos(self.pA, self.q*self.N.x) damper = LinearDamper(self.damping, self.pathway) expected_force = -self.damping*sqrt(self.q**2)*self.dq/self.q assert hasattr(damper, 'force') assert isinstance(damper.force, ExprType) assert damper.force == expected_force @pytest.mark.parametrize('damping', [None, 'c']) def test_invalid_constructor_damping_not_sympifyable(self, damping): with pytest.raises(SympifyError): _ = LinearDamper(damping, self.pathway) def test_invalid_constructor_pathway_not_pathway_base(self): with pytest.raises(TypeError): _ = LinearDamper(self.damping, None) @pytest.mark.parametrize( 'property_name, fixture_attr_name', [ ('damping', 'damping'), ('pathway', 'pathway'), ] ) def test_properties_are_immutable(self, property_name, fixture_attr_name): damper = LinearDamper(self.damping, self.pathway) value = getattr(self, fixture_attr_name) with pytest.raises(AttributeError): setattr(damper, property_name, value) def test_repr(self): self.pB.set_pos(self.pA, self.q*self.N.x) damper = LinearDamper(self.damping, self.pathway) expected = 'LinearDamper(c, LinearPathway(pA, pB))' assert repr(damper) == expected def test_to_loads(self): self.pB.set_pos(self.pA, self.q*self.N.x) damper = LinearDamper(self.damping, self.pathway) direction = self.q**2/self.q**2*self.N.x pA_force = self.damping*self.dq*direction pB_force = -self.damping*self.dq*direction expected = [Force(self.pA, pA_force), Force(self.pB, pB_force)] assert damper.to_loads() == expected
TestLinearDamper
python
django__django
tests/postgres_tests/test_indexes.py
{ "start": 578, "end": 1848 }
class ____: def test_name_auto_generation(self): index = self.index_class(fields=["field"]) index.set_name_with_model(CharFieldModel) self.assertRegex( index.name, r"postgres_te_field_[0-9a-f]{6}_%s" % self.index_class.suffix ) def test_deconstruction_no_customization(self): index = self.index_class( fields=["title"], name="test_title_%s" % self.index_class.suffix ) path, args, kwargs = index.deconstruct() self.assertEqual( path, "django.contrib.postgres.indexes.%s" % self.index_class.__name__ ) self.assertEqual(args, ()) self.assertEqual( kwargs, {"fields": ["title"], "name": "test_title_%s" % self.index_class.suffix}, ) def test_deconstruction_with_expressions_no_customization(self): name = f"test_title_{self.index_class.suffix}" index = self.index_class(Lower("title"), name=name) path, args, kwargs = index.deconstruct() self.assertEqual( path, f"django.contrib.postgres.indexes.{self.index_class.__name__}", ) self.assertEqual(args, (Lower("title"),)) self.assertEqual(kwargs, {"name": name})
IndexTestMixin
python
conda__conda
conda/common/io.py
{ "start": 12306, "end": 13103 }
class ____(Executor): def __init__(self): self._shutdown = False self._shutdownLock = Lock() def submit(self, fn, *args, **kwargs): with self._shutdownLock: if self._shutdown: raise RuntimeError("cannot schedule new futures after shutdown") f = Future() try: result = fn(*args, **kwargs) except BaseException as e: f.set_exception(e) else: f.set_result(result) return f def map(self, func, *iterables): for iterable in iterables: for thing in iterable: yield func(thing) def shutdown(self, wait=True): with self._shutdownLock: self._shutdown = True
DummyExecutor
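A brief usage sketch of the synchronous executor above: submit() runs the callable immediately and hands back an already-resolved Future.

from conda.common.io import DummyExecutor

ex = DummyExecutor()
assert ex.submit(pow, 2, 10).result() == 1024
assert list(ex.map(str.upper, ["a", "b"])) == ["A", "B"]
ex.shutdown()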
python
getsentry__sentry
src/sentry/explore/endpoints/explore_saved_query_detail.py
{ "start": 1266, "end": 1930 }
class ____(OrganizationEndpoint):
    owner = ApiOwner.EXPLORE
    permission_classes = (ExploreSavedQueryPermission,)

    def convert_args(self, request: Request, organization_id_or_slug, id, *args, **kwargs):
        args, kwargs = super().convert_args(request, organization_id_or_slug, *args, **kwargs)

        try:
            kwargs["query"] = ExploreSavedQuery.objects.get(
                id=id,
                organization=kwargs["organization"],
            )
        except ExploreSavedQuery.DoesNotExist:
            raise ResourceDoesNotExist

        return (args, kwargs)


@extend_schema(tags=["Discover"])
@region_silo_endpoint
ExploreSavedQueryBase
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_eth_address.py
{ "start": 679, "end": 1680 }
class ____(ColumnMapMetricProvider):
    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_eth_address"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        return column.apply(lambda x: is_valid_eth_address(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError


# This class defines the Expectation itself
ColumnValuesToBeValidEthAddress
python
tensorflow__tensorflow
tensorflow/python/data/ops/dataset_ops.py
{ "start": 193554, "end": 195794 }
class ____(tracking_base.Trackable): """Iterator over a dataset with elements converted to numpy.""" __slots__ = ["_iterator"] def __init__(self, dataset): self._iterator = iter(dataset) self._dataset = dataset def __repr__(self): return f"NumpyIterator(iterator={self._iterator})" def __iter__(self): return self def __next__(self): def to_numpy(x): if hasattr(x, "_numpy"): numpy = x._numpy() # pylint: disable=protected-access elif x is not None: numpy = x.numpy() else: return None if isinstance(numpy, np.ndarray): # `numpy` shares the same underlying buffer as the `x` Tensor. # Tensors are expected to be immutable, so we disable writes. numpy.setflags(write=False) return numpy return nest.map_structure(to_numpy, next(self._iterator)) def next(self): return self.__next__() # override def _serialize_to_tensors(self): # pylint: disable=protected-access return self._iterator._serialize_to_tensors() # override def _restore_from_tensors(self, restored_tensors): # pylint: disable=protected-access return self._iterator._restore_from_tensors(restored_tensors) # override def _copy_trackable_to_cpu(self, object_map): if self not in object_map: # If self is not populated in object_map yet, instantiate the copy object_map[self] = NumpyIterator(self._dataset) # Copy values from `self` to copy of `self` serialized = self._serialize_to_tensors() object_map[self]._restore_from_tensors(serialized) # pylint: disable=protected-access # TODO(b/284309865): Remove once `_save` is no longer used anywhere. def _save(self): # pylint: disable=protected-access return self.save() def save(self): # pylint: disable=protected-access return self._iterator._save() # TODO(b/284309865): Remove once `_restore` is no longer used anywhere. def _restore(self, state): return self.restore(state) def restore(self, state): # pylint: disable=protected-access return self._iterator._restore(state) # TODO(b/284309865): Remove once `_NumpyIterator` is no longer used anywhere. _NumpyIterator = NumpyIterator
NumpyIterator
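For context, this is the iterator type handed back by the public as_numpy_iterator() API; a tiny self-contained example:

import tensorflow as tf

ds = tf.data.Dataset.range(3)
print(list(ds.as_numpy_iterator()))  # [0, 1, 2]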
python
airbytehq__airbyte
airbyte-integrations/connectors/source-shopify/source_shopify/transform.py
{ "start": 156, "end": 4685 }
class ____: """ Transform class was implemented according to issue #4841 Shopify API returns price fields as a string and it should be converted to the number Some records fields contain objects and arrays, which contain price fields. Those price fields should be transformed too. This solution designed to convert string into number, but in future can be modified for general purpose Correct types placed in schemes Transformer iterates over records, compare values type with schema type and transform if it's needed Methods ------- _transform_array(self, array: List[Any], item_properties: Mapping[str, Any]) Some fields type is array. Items inside array contain price fields, which should be transformed This method iterate over items in array, compare schema types and convert if necessary transform(self, field: Any, schema: Mapping[str, Any] = None) Accepts field of Any type and schema, compere type of field and type in schema, convert if necessary """ def __init__(self, schema: Mapping[str, Any], **kwargs): super().__init__(**kwargs) self._schema = schema @staticmethod def _get_json_types(value_type: Any) -> List[str]: json_types = { str: ["string"], int: ["integer", "number"], float: ["number"], dict: ["object"], list: ["array"], bool: ["boolean"], type(None): [ "null", ], # overflow, when we need to read nested entity from the parent record, # that has been already transformed. Decimal: ["number"], } return json_types.get(value_type) @staticmethod def _types_from_schema(properties: Mapping[str, Any]) -> str: schema_types = properties.get("type", []) if not isinstance(schema_types, list): schema_types = [ schema_types, ] return schema_types @staticmethod def _first_non_null_type(schema_types: List[str]) -> str: not_null_types = schema_types.copy() if "null" in not_null_types: not_null_types.remove("null") return not_null_types[0] @staticmethod def _transform_number(value: Any): return float(Decimal(value)) @staticmethod def _transform_string(value: Any): return str(value) def _transform_array(self, array: List[Any], item_properties: Mapping[str, Any]): # iterate over items in array, compare schema types and convert if necessary. for index, record in enumerate(array): array[index] = self.transform(record, item_properties) return array def _transform_object(self, record: MutableMapping[str, Any], properties: Mapping[str, Any]): # compare schema types and convert if necessary. 
for object_property, value in record.items(): if value is None: continue if object_property in properties: object_properties = properties.get(object_property) or {} record[object_property] = self.transform(value, object_properties) return record def transform(self, field: Any, schema: Mapping[str, Any] = None) -> Iterable[MutableMapping]: schema = schema if schema is not None else self._schema # get available types from schema schema_types = self._types_from_schema(schema) if schema_types and field is not None: # if types presented in schema and field is not None, get available JSON Schema types for field # and not null types from schema, check if field JSON Schema types presented in schema field_json_types = self._get_json_types(type(field)) schema_type = self._first_non_null_type(schema_types) if not any(field_json_type in schema_types for field_json_type in field_json_types): if schema_type == "number": return self._transform_number(field) if schema_type == "string": return self._transform_string(field) if schema_type == "object": properties = schema.get("properties", {}) return self._transform_object(field, properties) if schema_type == "array": properties = schema.get("items", {}) return self._transform_array(field, properties) return field
DataTypeEnforcer
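A minimal sketch of the coercion performed above, assuming a top-level field whose schema type is a nullable number:

from source_shopify.transform import DataTypeEnforcer

enforcer = DataTypeEnforcer({"type": ["null", "number"]})
print(enforcer.transform("19.99"))  # 19.99, coerced from str to float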
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/model_query_transitive_extends.py
{ "start": 228, "end": 318 }
class ____:
    attribute = ...

    def __init__(self):
        self.instance = ...
Test1_C
python
scrapy__scrapy
tests/test_exporters.py
{ "start": 12452, "end": 12591 }
class ____(TestCsvItemExporter):
    item_class = MyDataClass
    custom_field_item_class = CustomFieldDataclass
TestCsvItemExporterDataclass
python
joke2k__faker
faker/providers/person/tw_GH/__init__.py
{ "start": 44, "end": 11396 }
class ____(PersonProvider): formats = ( "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}-{{last_name_male}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}-{{last_name_female}}", "{{prefix_male}} {{first_name_male}} {{last_name_male}}", "{{prefix_female}} {{first_name_female}} {{last_name_female}}", "{{prefix_male}} {{first_name_male}} {{last_name_male}}", "{{prefix_female}} {{first_name_female}} {{last_name_female}}", ) # names from https://en.wikipedia.org/wiki/Ghanaian_name, # https://buzzghana.com/ghanaian-names/, # https://en.wikipedia.org/wiki/Akan_names, first_names_male = ( "Aaron", "Abeiku", "Adam", "Adrian", "Akwesi", "Albert", "Alex", "Alexander", "Andrew", "Anthony", "Antony", "Arthur", "Ben", "Benjamin", "Bernard", "Bruce", "Carl", "Charles", "Christian", "Clifford", "Colins", "Daniel", "Danny", "David", "Denis", "Dennis", "Derrick", "Dominic", "Donald", "Douglas", "Duncan", "Edward", "Ekow", "Elliot", "Elliott", "Eric", "Fiifi", "Francis", "Frank", "Frederick", "George", "Gerald", "Gordon", "Graham", "Gregory", "Harry", "Henry", "Howard", "Isaac", "Akwasi", "Jack", "Jacob", "Jake", "James", "Jason", "Jeffrey", "Jeremy", "Joe", "Joel", "John", "Jonathan", "Joojo", "Joseph", "Josh", "Joshua", "Josiah", "Julian", "Justin", "Karl", "Kenneth", "Kevin", "Kofi", "Kojo", "Kujoe", "Kwabena", "Kwadwo", "Kwaku", "Kwame", "Kwamena", "Kwasi", "Kweku", "Kwesi", "Kyle", "Lawrence", "Leslie", "Louis", "Luke", "Malcolm", "Marcus", "Mark", "Martin", "Mathew", "Matthew", "Max", "Michael", "Nathan", "Nicholas", "Nigel", "Oliver", "Patrick", "Paul", "Peter", "Philip", "Phillip", "Raymond", "Richard", "Robert", "Roger", "Ronald", "Russell", "Sam", "Samuel", "Shaun", "Simon", "Stanley", "Stephen", "Steven", "Terence", "Thomas", "Timothy", "Tom", "Tony", "Victor", "Vincent", "William", "Yaw", ) first_names_female = ( "Aba", "Abena", "Abigail", "Adwoa", "Afia", "Afua", "Akos", "Akosua", "Akua", "Akumaa", "Alice", "Ama", "Amanda", "Amber", "Amelia", "Angela", "Ann", "Annette", "Awesi", "Baaba", "Barbara", "Beatrice", "COmfort", "Caroline", "Catherine", "Charlotte", "Christina", "Comfort", "Constance", "Danielle", "Deborah", "Debra", "Denise", "Dora", "Dorcas", "Dorothy", "Eliabeth", "Elizabeth", "Emily", "Emma", "Ernestina", "Esi", "Eunice", "Felicia", "Francesca", "Gemma", "Georgia", "Georgina", "Gifty", "Grace", "Grace", "Hannabel", "Hannah", "Harriet", "Helen", "Irene", "Janet", "Janet", "Janice", "Jasmine", "Jennifer", "Jessica", "Jill", "Joanna", "Josephine", "Joyce", "Joyce", "Judith", "Julia", "Juliana", "Julie", "Karen", "Kate", "Katherine", "Katy", "Lawrencia", "Linda", "Lisa", "Lorraine", "Lucy", "Lucy", "Lydia", "Lydia", "Mandy", "Margaret", "Margaret", "Maria", "Marian", "Marilyn", "Mary", "Mary", "Maureen", "Michelle", "Millicent", "Nana Ama", "Naomi", "Natalie", "Natasha", "Nicola", "Nimakoah", "Olivia", "Pamela", "Patricia", "Paula", "Priscilla", "Rachael", "Rachel", "Rebecca", "Rebecca", "Regina", "Rita", "Roselyn", "Rosemary", "Rosemary", "Ruth", "Salomey", "Samantha", "Sandra", "Sarah", "Sarah", "Sarah", "Sharon", "Sheila", "Shirley", "Stephanie", "Susan", "Susan", "Sylvia", "Teresa", "Tina", "Tracy", "Vanessa", "Veronica", "Victoria", "Vida", 
"Wendy", "Yaa", "Yvonne", ) first_names = first_names_male + first_names_female last_names_male = ( "Acheampong", "Adomah", "Adomako", "Adu", "Adusei", "Adutwum", "Afirifa", "Afoakwa", "Agyapong", "Agyapong", "Agyare", "Agyei", "Agyemang", "Ahortor", "Akoto", "Akowua", "Akyeamfuɔ", "Akyeampong", "Akyena", "Akyerεko", "Amo", "Amoa", "Amoako", "Amoasi", "Ampadu", "Ampofo", "Amponsah", "Andorful", "Ankra", "Anokye", "Ansa", "Antwi", "Antwi", "Appia", "Appiah", "Asamoa", "Asamoah", "Asante", "Asare", "Asenso", "Asiama", "Asiedu", "Ata", "Awuah", "Baa", "Baafi", "Baah", "Baawia", "Badu", "Boadi", "Boadu", "Boahen", "Boakye", "Boaten", "Boateng", "Bona", "Bonsra", "Bonsu", "Daako", "Danso", "Darko", "Donkor", "Duah", "Dwamena", "Fofie", "Fosu", "Gyamfi", "Gyasi", "Karikari", "Koomson", "Kumi", "Kusi", "Kwaakye", "Kwarteng", "Kyei", "Mensa", "Mensah", "Nkansa", "Nkansah", "Nkrumah", "Nsia", "Nti", "Ntiamoa", "Ntim", "Nyaako", "Nyame", "Nyantakyi", "Obeng", "Ofori", "Ofosu", "Okyere", "Omani", "Opoku", "Oppong", "Opuku", "Osei", "Oti", "Otiwa", "Otuo", "Owusu", "Prempeh", "Quartey", "Safo", "Sarpong", "Takyi", "Tawia", "Tutu", "Tweneboa", "Twumasi", "Wiafe", "Yaamoa", "Yawson", "Yeboa", "Yeboah", "Yirenkyi", ) last_names_female = ( "Aboraa", "Abrafi", "Acheampong", "Adoma", "Adomah", "Adomako", "Adu", "Adusei", "Adutwum", "Adutwumwaa", "Adwubi", "Afirifa", "Afoakwa", "Afrakomaa", "Agyapomaa", "Agyapong", "Agyapong", "Agyare", "Agyei", "Agyeiwaa", "Agyemang", "Ahortor", "Akoaa", "Akoto", "Akowua", "Akyaa", "Akyeamfuɔ", "Akyeampomaa", "Akyeampong", "Akyena", "Akyerε", "Akyerεko", "Akɔmaa", "Amo", "Amoa", "Amoako", "Amoakowaa", "Amoanimaa", "Amoasi", "Ampadu", "Ampofo", "Ampofowaa", "Ampoma", "Amponsa", "Amponsa", "Andorful", "Anima", "Ankra", "Anokye", "Ansa", "Ansomaa", "Ansomah", "Antwi", "Antwi", "Antwiwaa", "Appia", "Appiah", "Asamoa", "Asamoah", "Asante", "Asantewaa", "Asare", "Asenso", "Asiama", "Asiedu", "Asieduwaa", "Ata", "Ataa", "Awuah", "Baa", "Baafi", "Baah", "Baawia", "Badu", "Boadi", "Boadu", "Boahen", "Boakye", "Boakye", "Boakyewaa", "Boatemaa", "Boatemaah", "Boaten", "Boateng", "Bona", "Bonsra", "Bonsu", "Daako", "Daakoaa", "Danso", "Darko", "Donkor", "Duah", "Dufie", "Dwamena", "Fofie", "Foriwaa", "Fosu", "Fosua", "Frema", "Frimpomaa", "Gyamfi", "Gyamfi", "Gyamfiaa", "Gyasi", "Gyasiwaa", "Karikari", "Koomson", "Kumi", "Kusi", "Kusiwaa", "Kwaakye", "Kwaakyewaa", "Kwartemaa", "Kwarteng", "Kyei", "Kyeiwaa", "Kyerewaa", "Mansa", "Mensa", "Mensah", "Nkansa", "Nkansah", "Nkrumah", "Nsia", "Nti", "Ntiamoa", "Ntim", "Nyaako", "Nyaakoaa", "Nyame", "Nyantakyi", "Obeng", "Ofori", "Ofosu", "Okyere", "Okyere", "Omani", "Opoku", "Oppong", "Opuku", "Osei", "Oti", "Otiwa", "Otuo", "Owusu", "Owusuwaa", "Pokuaa", "Pomaa", "Prempeh", "Quartey", "Safo", "Safo", "Safoaa", "Sarpong", "Serwaa", "Takyi", "Tawia", "Tiwaa", "Tutu", "Tweneboa", "Twumasi", "Wiafe", "Yaamoa", "Yawson", "Yeboa", "Yeboah", "Yirenkyi", ) last_names = last_names_male + last_names_female prefixes_female = ( "Mrs.", "Ms.", "Miss", "Dr.", "Mama", "Maame", "Awura", "Sista", "Osofo Maame", ) prefixes_male = ("Mr.", "Dr.", "Agya", "Owura", "Osofo")
Provider
python
apache__airflow
providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/powerbi.py
{ "start": 1514, "end": 1648 }
class ____(AirflowException):
    """An exception that indicates a dataset refresh failed to complete."""
PowerBIDatasetRefreshException
python
facebookresearch__faiss
faiss/gpu/test/test_gpu_basics.py
{ "start": 18131, "end": 18260 }
class ____(unittest.TestCase):
    def test_gpu_flag(self):
        assert "GPU" in faiss.get_compile_options().split()
TestGpuFlags
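A minimal sketch of what this test exercises, assuming a GPU-enabled faiss build is installed; `get_compile_options()` is the faiss helper used above, which returns a space-separated string of build flags.

import faiss

# The flag string varies by build; a GPU build includes the "GPU" token.
options = faiss.get_compile_options().split()
print("GPU support compiled in:", "GPU" in options)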
python
getsentry__sentry
src/sentry/feedback/usecases/label_generation.py
{ "start": 269, "end": 2119 }
class ____(TypedDict):
    """Corresponds to GenerateFeedbackLabelsRequest in Seer."""

    organization_id: int
    feedback_message: str


AI_LABEL_TAG_PREFIX = "ai_categorization"

# If Seer generates more labels, we truncate it to this many labels
MAX_AI_LABELS = 15

# Max length of the serialized list of labels, which matches the max length of a tag value, from https://docs.sentry.io/platforms/javascript/enriching-events/tags/
MAX_AI_LABELS_JSON_LENGTH = 200

SEER_LABEL_GENERATION_ENDPOINT_PATH = "/v1/automation/summarize/feedback/labels"
SEER_TIMEOUT_S = 15
SEER_RETRIES = 0  # Do not retry since this is called in ingest.


@metrics.wraps("feedback.generate_labels")
def generate_labels(feedback_message: str, organization_id: int) -> list[str]:
    """
    Generate labels for a feedback message.

    Raises exception if anything goes wrong during the API call or response processing.
    """
    request = LabelRequest(
        feedback_message=feedback_message,
        organization_id=organization_id,
    )

    try:
        response = make_signed_seer_api_request(
            connection_pool=seer_summarization_connection_pool,
            path=SEER_LABEL_GENERATION_ENDPOINT_PATH,
            body=json.dumps(request).encode("utf-8"),
            timeout=SEER_TIMEOUT_S,
            retries=SEER_RETRIES,
        )
    except Exception:
        logger.exception("Seer failed to generate user feedback labels")
        raise

    if response.status < 200 or response.status >= 300:
        logger.error(
            "Seer failed to generate user feedback labels",
            extra={"status_code": response.status, "response_data": response.data},
        )
        raise Exception("Seer returned non-200 response")

    # Guaranteed to be a list of strings (validated in Seer)
    return response.json()["data"]["labels"]
LabelRequest
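A hedged call-site sketch for the helper above, assuming the Sentry module is importable in your environment; the feedback text and organization id are hypothetical, while the import path and the MAX_AI_LABELS constant come from the record itself.

from sentry.feedback.usecases.label_generation import MAX_AI_LABELS, generate_labels

# Hypothetical inputs, for illustration only.
feedback = "The checkout button does nothing when I click it."
labels = generate_labels(feedback, organization_id=42)

# Callers respect the module's own truncation limit before tagging.
labels = labels[:MAX_AI_LABELS]
print(labels)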
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/decorator3.py
{ "start": 190, "end": 632 }
class ____:
    # This should generate an error if version < 3.9.
    @my_decorators[0]
    def my_static_method():
        return 3

    # This should generate an error if version < 3.9.
    @my_decorators[1]
    def my_class_method(cls):
        return 3

    # This should generate an error if version < 3.9.
    @my_decorators[2]
    def my_property(self):
        return 3


Foo.my_static_method()
Foo.my_class_method()
Foo().my_property
Foo
python
huggingface__transformers
src/transformers/models/audioflamingo3/modeling_audioflamingo3.py
{ "start": 11412, "end": 11906 }
class ____(PreTrainedModel):
    config: AudioFlamingo3Config
    base_model_prefix = "model"
    input_modalities = ("audio", "text")
    supports_gradient_checkpointing = True
    _no_split_modules = ["AudioFlamingo3Attention"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True


@auto_docstring(
    custom_intro="""
    The audio model from AudioFlamingo3 without any head or projection on top.
    """
)
AudioFlamingo3PreTrainedModel
python
pytorch__pytorch
torch/distributed/fsdp/_fsdp_extensions.py
{ "start": 447, "end": 5033 }
class ____(ABC): """ This enables some customizable hooks to enable composability with tensor parallelism. To activate these hooks, use :func:`_set_fsdp_extensions` to set a custom :class:`FSDPExtensions` that implements the hooks. """ @abstractmethod def pre_flatten_transform( self, tensor: torch.Tensor, ) -> tuple[torch.Tensor, Optional[Any]]: """E.g. converting ``DistributedTensor`` to local tensor.""" ... @abstractmethod def post_unflatten_transform( self, tensor: torch.Tensor, param_extension: Any, ) -> torch.Tensor: """E.g. converting local tensor to ``DistributedTensor``.""" ... @abstractmethod def chunk_tensor( self, tensor: torch.Tensor, rank: int, world_size: int, num_devices_per_node: int, pg: dist.ProcessGroup, device: Optional[torch.device] = None, ) -> torch.Tensor: """Shards a tensor to chunks and returns the local chunk.""" ... @abstractmethod def chunk_dtensor( self, tensor: torch.Tensor, rank: int, device_mesh: DeviceMesh, ) -> torch.Tensor: """Shards a tensor/DTensor to DTensor and returns the local DTensor.""" ... @abstractmethod def pre_load_state_dict_transform( self, tensor: torch.Tensor, ) -> tuple[torch.Tensor, list[Shard]]: """ This is to be called before loading a *sharded* model state dict and should return the tensor and list of shards from which to load data. """ ... @abstractmethod def all_gather_dtensor( self, tensor: DTensor, parent_mesh: Optional[DeviceMesh], ) -> torch.Tensor: """ This is to be called before loading a *sharded* DTensor state dict. This gathers tensor in FSDP dimension and returns local tensor of TP DTensor. """ ... _extensions: Optional[FSDPExtensions] = None def _set_fsdp_extensions(flattener: FSDPExtensions) -> None: global _extensions _extensions = flattener def _ext_pre_flatten_transform( tensor: torch.Tensor, fsdp_extension: Optional[FSDPExtensions] = None, ) -> tuple[torch.Tensor, Optional[Any]]: if fsdp_extension is not None: new_tensor, param_extension = fsdp_extension.pre_flatten_transform(tensor) if param_extension is not None: return new_tensor, param_extension return tensor, None def _ext_post_unflatten_transform( tensor: torch.Tensor, param_extension: Any, fsdp_extension: Optional[FSDPExtensions] = None, ) -> torch.Tensor: if fsdp_extension is not None and param_extension is not None: return fsdp_extension.post_unflatten_transform(tensor, param_extension) return tensor def _ext_chunk_tensor( tensor: torch.Tensor, rank: int, world_size: int, num_devices_per_node: int, pg: dist.ProcessGroup, fsdp_extension: Optional[FSDPExtensions] = None, ) -> torch.Tensor: chunk_tensor_fn = ( fsdp_extension.chunk_tensor if fsdp_extension is not None else _create_chunk_sharded_tensor ) return chunk_tensor_fn( tensor, rank, world_size, num_devices_per_node, pg, ) def _ext_chunk_dtensor( tensor: torch.Tensor, rank: int, device_mesh: DeviceMesh, fsdp_extension: Optional[FSDPExtensions] = None, ) -> torch.Tensor: chunk_dtensor_fn = ( fsdp_extension.chunk_dtensor if fsdp_extension is not None else _create_chunk_dtensor ) return chunk_dtensor_fn( tensor, rank, device_mesh, ) def _ext_pre_load_state_dict_transform( tensor: torch.Tensor, fsdp_extension: Optional[FSDPExtensions] = None, ) -> tuple[torch.Tensor, list[Shard]]: if fsdp_extension is not None: return fsdp_extension.pre_load_state_dict_transform(tensor) if type(tensor) is not ShardedTensor: raise AssertionError(f"Expected ShardedTensor, got {type(tensor)}") shards = tensor.local_shards() return (tensor, shards) def _ext_all_gather_dtensor( tensor: DTensor, parent_mesh: Optional[DeviceMesh], 
fsdp_extension: Optional[FSDPExtensions] = None, ) -> torch.Tensor: all_gather_dtensor_fn = ( fsdp_extension.all_gather_dtensor if fsdp_extension is not None else _all_gather_dtensor ) return all_gather_dtensor_fn(tensor, parent_mesh)
FSDPExtensions
python
PyCQA__pylint
tests/functional/ext/overlapping_exceptions/overlapping_exceptions.py
{ "start": 96, "end": 1112 }
class ____(SomeException):
    pass


AliasException = SomeException

try:
    pass
except (SomeException, SomeException):  # [overlapping-except]
    pass

try:
    pass
except (SomeException, SubclassException):  # [overlapping-except]
    pass

try:
    pass
except (SomeException, AliasException):  # [overlapping-except]
    pass

try:
    pass
except (AliasException, SubclassException):  # [overlapping-except]
    pass

try:
    pass
# +1:[overlapping-except, overlapping-except, overlapping-except]
except (SomeException, AliasException, SubclassException):
    pass

try:
    pass
except (ArithmeticError, FloatingPointError):  # [overlapping-except]
    pass

try:
    pass
except (ValueError, UnicodeDecodeError):  # [overlapping-except]
    pass

try:
    pass
except (IOError, OSError):  # [overlapping-except]
    pass

try:
    pass
except (socket.error, OSError):  # [overlapping-except]
    pass

try:
    pass
except (ConnectionError, socket.error):  # [overlapping-except]
    pass
SubclassException
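For context, a hedged sketch of enabling the checker these functional-test cases exercise; `pylint.extensions.overlapping_exceptions` is the real optional plugin, while the linted file path is hypothetical and the `exit` keyword name has varied across pylint versions.

from pylint.lint import Run

# Programmatic equivalent of:
#   pylint --load-plugins=pylint.extensions.overlapping_exceptions overlapping_exceptions.py
Run(
    [
        "--load-plugins=pylint.extensions.overlapping_exceptions",
        "overlapping_exceptions.py",  # hypothetical path to the module being linted
    ],
    exit=False,
)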
python
PyCQA__pylint
tests/functional/i/inner_classes.py
{ "start": 361, "end": 407 }
class ____(Aaa):
    """docstring"""

    pass
Bbb
python
scrapy__scrapy
tests/test_spidermiddleware_output_chain.py
{ "start": 5396, "end": 5894 }
class ____(_BaseSpiderMiddleware):
    def process_spider_output(self, response, result):
        for r in result:
            r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
            yield r

    def process_spider_exception(self, response, exception):
        method = f"{self.__class__.__name__}.process_spider_exception"
        self.crawler.spider.logger.info(
            "%s: %s caught", method, exception.__class__.__name__
        )
_GeneratorDoNothingMiddleware
python
getsentry__sentry
src/sentry/seer/autofix/utils.py
{ "start": 2035, "end": 2130 }
class ____(StrEnum):
    ROOT_CAUSE = "root_cause"
    SOLUTION = "solution"
AutofixTriggerSource
python
python-pillow__Pillow
docs/example/DdsImagePlugin.py
{ "start": 7932, "end": 8627 }
class ____(ImageFile.PyDecoder):
    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        assert self.fd is not None
        try:
            self.set_as_raw(_dxt5(self.fd, self.state.xsize, self.state.ysize))
        except struct.error as e:
            msg = "Truncated DDS file"
            raise OSError(msg) from e
        return -1, 0


Image.register_decoder("DXT1", DXT1Decoder)
Image.register_decoder("DXT5", DXT5Decoder)


def _accept(prefix: bytes) -> bool:
    return prefix.startswith(b"DDS ")


Image.register_open(DdsImageFile.format, DdsImageFile, _accept)
Image.register_extension(DdsImageFile.format, ".dds")
DXT5Decoder
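A small usage sketch, assuming the example plugin module above is importable on your path so its `Image.register_*` calls run at import time; the texture file name is hypothetical.

from PIL import Image

import DdsImagePlugin  # noqa: F401  (importing runs the register_* calls shown above)

# Opening a DDS texture now routes through DdsImageFile and the DXT decoders.
with Image.open("texture.dds") as im:  # hypothetical file
    print(im.format, im.size, im.mode)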
python
kamyu104__LeetCode-Solutions
Python/maximum-number-of-tasks-you-can-assign.py
{ "start": 3985, "end": 5232 }
class ____(object):
    def maxTaskAssign(self, tasks, workers, pills, strength):
        """
        :type tasks: List[int]
        :type workers: List[int]
        :type pills: int
        :type strength: int
        :rtype: int
        """
        def check(tasks, workers, pills, strength, x):
            w = workers[-x:]
            # enumerate from the hardest task to the easiest task, greedily assign it to the weakest worker whom it can be done by
            for task in tasks[-x:]:
                i = bisect.bisect_left(w, task)
                if i != len(w):
                    w.pop(i)
                    continue
                if pills:
                    i = bisect.bisect_left(w, task-strength)
                    if i != len(w):
                        w.pop(i)
                        pills -= 1
                        continue
                return False
            return True

        tasks.sort(reverse=True)
        workers.sort()
        left, right = 1, min(len(workers), len(tasks))
        while left <= right:
            mid = left + (right-left)//2
            if not check(tasks, workers, pills, strength, mid):
                right = mid-1
            else:
                left = mid+1
        return right
Solution4
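A worked check of the binary-search-plus-greedy idea in the Solution4 snippet, re-implemented as a self-contained Python 3 sketch (the original is Python 2 and uses `xrange`); the input is the standard example where the expected answer is 3.

import bisect

def can_assign(tasks, workers, pills, strength, x):
    # Try to complete the x easiest tasks with the x strongest workers,
    # matching each hardest remaining task to the weakest worker that can do it.
    w = sorted(workers)[-x:]
    for task in sorted(tasks, reverse=True)[-x:]:
        i = bisect.bisect_left(w, task)
        if i != len(w):            # some worker is strong enough as-is
            w.pop(i)
        elif pills:                # otherwise spend a pill on the weakest viable worker
            i = bisect.bisect_left(w, task - strength)
            if i == len(w):
                return False
            w.pop(i)
            pills -= 1
        else:
            return False
    return True

# tasks=[3,2,1], workers=[0,3,3], pills=1, strength=1 -> all 3 tasks are assignable.
best = max(x for x in range(0, 4) if x == 0 or can_assign([3, 2, 1], [0, 3, 3], 1, 1, x))
print(best)  # 3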
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/postgresql/base.py
{ "start": 106106, "end": 107691 }
class ____(compiler.IdentifierPreparer):
    reserved_words = RESERVED_WORDS

    def _unquote_identifier(self, value):
        if value[0] == self.initial_quote:
            value = value[1:-1].replace(
                self.escape_to_quote, self.escape_quote
            )
        return value

    def format_type(self, type_, use_schema=True):
        if not type_.name:
            raise exc.CompileError(
                f"PostgreSQL {type_.__class__.__name__} type requires a name."
            )

        name = self.quote(type_.name)
        effective_schema = self.schema_for_object(type_)

        # a built-in type with the same name will obscure this type, so raise
        # for that case. this applies really to any visible type with the same
        # name in any other visible schema that would not be appropriate for
        # us to check against, so this is not a robust check, but
        # at least do something for an obvious built-in name conflict
        if (
            effective_schema is None
            and type_.name in self.dialect.ischema_names
        ):
            raise exc.CompileError(
                f"{type_!r} has name "
                f"'{type_.name}' that matches an existing type, and "
                "requires an explicit schema name in order to be rendered "
                "in DDL."
            )

        if (
            not self.omit_schema
            and use_schema
            and effective_schema is not None
        ):
            name = f"{self.quote_schema(effective_schema)}.{name}"
        return name
PGIdentifierPreparer
python
apache__airflow
airflow-core/src/airflow/utils/sqlalchemy.py
{ "start": 4414, "end": 8455 }
class ____(TypeDecorator): """ A version of the JSON column that uses the Airflow extended JSON serialization. See airflow.serialization. """ impl = Text cache_ok = True should_evaluate_none = True def load_dialect_impl(self, dialect) -> TypeEngine: if dialect.name == "postgresql": return dialect.type_descriptor(JSONB) return dialect.type_descriptor(JSON) def process_bind_param(self, value, dialect): from airflow.serialization.serialized_objects import BaseSerialization if value is None: return None return BaseSerialization.serialize(value) def process_result_value(self, value, dialect): from airflow.serialization.serialized_objects import BaseSerialization if value is None: return None return BaseSerialization.deserialize(value) def sanitize_for_serialization(obj: V1Pod): """ Convert pod to dict.... but *safely*. When pod objects created with one k8s version are unpickled in a python env with a more recent k8s version (in which the object attrs may have changed) the unpickled obj may throw an error because the attr expected on new obj may not be there on the unpickled obj. This function still converts the pod to a dict; the only difference is it populates missing attrs with None. You may compare with https://github.com/kubernetes-client/python/blob/5a96bbcbe21a552cc1f9cda13e0522fafb0dbac8/kubernetes/client/api_client.py#L202 If obj is None, return None. If obj is str, int, long, float, bool, return directly. If obj is datetime.datetime, datetime.date convert to string in iso8601 format. If obj is list, sanitize each element in the list. If obj is dict, return the dict. If obj is OpenAPI model, return the properties dict. :param obj: The data to serialize. :return: The serialized form of data. :meta private: """ if obj is None: return None if isinstance(obj, (float, bool, bytes, str, int)): return obj if isinstance(obj, list): return [sanitize_for_serialization(sub_obj) for sub_obj in obj] if isinstance(obj, tuple): return tuple(sanitize_for_serialization(sub_obj) for sub_obj in obj) if isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: obj_dict = { obj.attribute_map[attr]: getattr(obj, attr) for attr, _ in obj.openapi_types.items() # below is the only line we change, and we just add default=None for getattr if getattr(obj, attr, None) is not None } return {key: sanitize_for_serialization(val) for key, val in obj_dict.items()} def ensure_pod_is_valid_after_unpickling(pod: V1Pod) -> V1Pod | None: """ Convert pod to json and back so that pod is safe. The pod_override in executor_config is a V1Pod object. Such objects created with one k8s version, when unpickled in an env with upgraded k8s version, may blow up when `to_dict` is called, because openapi client code gen calls getattr on all attrs in openapi_types for each object, and when new attrs are added to that list, getattr will fail. Here we re-serialize it to ensure it is not going to blow up. :meta private: """ try: # if to_dict works, the pod is fine pod.to_dict() return pod except AttributeError: pass try: from kubernetes.client.models.v1_pod import V1Pod except ImportError: return None if not isinstance(pod, V1Pod): return None try: from airflow.providers.cncf.kubernetes.pod_generator import PodGenerator # now we actually reserialize / deserialize the pod pod_dict = sanitize_for_serialization(pod) return PodGenerator.deserialize_model_dict(pod_dict) except Exception: return None
ExtendedJSON
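A hedged sketch of how a column type like the one above is declared in a SQLAlchemy model; the table, model, and column names are hypothetical, and the import assumes the Airflow module shown in this record is available.

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

from airflow.utils.sqlalchemy import ExtendedJSON  # module shown above

Base = declarative_base()


class NoteEvent(Base):  # hypothetical model, for illustration only
    __tablename__ = "note_event"

    id = Column(Integer, primary_key=True)
    title = Column(String(200))
    # Values are run through Airflow's extended serializer on write and
    # deserialized back into rich Python objects on read.
    payload = Column(ExtendedJSON)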
python
langchain-ai__langchain
libs/langchain_v1/langchain/agents/middleware/tool_selection.py
{ "start": 2512, "end": 11797 }
class ____(AgentMiddleware): """Uses an LLM to select relevant tools before calling the main model. When an agent has many tools available, this middleware filters them down to only the most relevant ones for the user's query. This reduces token usage and helps the main model focus on the right tools. Examples: !!! example "Limit to 3 tools" ```python from langchain.agents.middleware import LLMToolSelectorMiddleware middleware = LLMToolSelectorMiddleware(max_tools=3) agent = create_agent( model="openai:gpt-4o", tools=[tool1, tool2, tool3, tool4, tool5], middleware=[middleware], ) ``` !!! example "Use a smaller model for selection" ```python middleware = LLMToolSelectorMiddleware(model="openai:gpt-4o-mini", max_tools=2) ``` """ def __init__( self, *, model: str | BaseChatModel | None = None, system_prompt: str = DEFAULT_SYSTEM_PROMPT, max_tools: int | None = None, always_include: list[str] | None = None, ) -> None: """Initialize the tool selector. Args: model: Model to use for selection. If not provided, uses the agent's main model. Can be a model identifier string or `BaseChatModel` instance. system_prompt: Instructions for the selection model. max_tools: Maximum number of tools to select. If the model selects more, only the first `max_tools` will be used. If not specified, there is no limit. always_include: Tool names to always include regardless of selection. These do not count against the `max_tools` limit. """ super().__init__() self.system_prompt = system_prompt self.max_tools = max_tools self.always_include = always_include or [] if isinstance(model, (BaseChatModel, type(None))): self.model: BaseChatModel | None = model else: self.model = init_chat_model(model) def _prepare_selection_request(self, request: ModelRequest) -> _SelectionRequest | None: """Prepare inputs for tool selection. Returns: `SelectionRequest` with prepared inputs, or `None` if no selection is needed. """ # If no tools available, return None if not request.tools or len(request.tools) == 0: return None # Filter to only BaseTool instances (exclude provider-specific tool dicts) base_tools = [tool for tool in request.tools if not isinstance(tool, dict)] # Validate that always_include tools exist if self.always_include: available_tool_names = {tool.name for tool in base_tools} missing_tools = [ name for name in self.always_include if name not in available_tool_names ] if missing_tools: msg = ( f"Tools in always_include not found in request: {missing_tools}. " f"Available tools: {sorted(available_tool_names)}" ) raise ValueError(msg) # Separate tools that are always included from those available for selection available_tools = [tool for tool in base_tools if tool.name not in self.always_include] # If no tools available for selection, return None if not available_tools: return None system_message = self.system_prompt # If there's a max_tools limit, append instructions to the system prompt if self.max_tools is not None: system_message += ( f"\nIMPORTANT: List the tool names in order of relevance, " f"with the most relevant first. " f"If you exceed the maximum number of tools, " f"only the first {self.max_tools} will be used." 
) # Get the last user message from the conversation history last_user_message: HumanMessage for message in reversed(request.messages): if isinstance(message, HumanMessage): last_user_message = message break else: msg = "No user message found in request messages" raise AssertionError(msg) model = self.model or request.model valid_tool_names = [tool.name for tool in available_tools] return _SelectionRequest( available_tools=available_tools, system_message=system_message, last_user_message=last_user_message, model=model, valid_tool_names=valid_tool_names, ) def _process_selection_response( self, response: dict, available_tools: list[BaseTool], valid_tool_names: list[str], request: ModelRequest, ) -> ModelRequest: """Process the selection response and return filtered `ModelRequest`.""" selected_tool_names: list[str] = [] invalid_tool_selections = [] for tool_name in response["tools"]: if tool_name not in valid_tool_names: invalid_tool_selections.append(tool_name) continue # Only add if not already selected and within max_tools limit if tool_name not in selected_tool_names and ( self.max_tools is None or len(selected_tool_names) < self.max_tools ): selected_tool_names.append(tool_name) if invalid_tool_selections: msg = f"Model selected invalid tools: {invalid_tool_selections}" raise ValueError(msg) # Filter tools based on selection and append always-included tools selected_tools: list[BaseTool] = [ tool for tool in available_tools if tool.name in selected_tool_names ] always_included_tools: list[BaseTool] = [ tool for tool in request.tools if not isinstance(tool, dict) and tool.name in self.always_include ] selected_tools.extend(always_included_tools) # Also preserve any provider-specific tool dicts from the original request provider_tools = [tool for tool in request.tools if isinstance(tool, dict)] return request.override(tools=[*selected_tools, *provider_tools]) def wrap_model_call( self, request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse], ) -> ModelCallResult: """Filter tools based on LLM selection before invoking the model via handler.""" selection_request = self._prepare_selection_request(request) if selection_request is None: return handler(request) # Create dynamic response model with Literal enum of available tool names type_adapter = _create_tool_selection_response(selection_request.available_tools) schema = type_adapter.json_schema() structured_model = selection_request.model.with_structured_output(schema) response = structured_model.invoke( [ {"role": "system", "content": selection_request.system_message}, selection_request.last_user_message, ] ) # Response should be a dict since we're passing a schema (not a Pydantic model class) if not isinstance(response, dict): msg = f"Expected dict response, got {type(response)}" raise AssertionError(msg) modified_request = self._process_selection_response( response, selection_request.available_tools, selection_request.valid_tool_names, request ) return handler(modified_request) async def awrap_model_call( self, request: ModelRequest, handler: Callable[[ModelRequest], Awaitable[ModelResponse]], ) -> ModelCallResult: """Filter tools based on LLM selection before invoking the model via handler.""" selection_request = self._prepare_selection_request(request) if selection_request is None: return await handler(request) # Create dynamic response model with Literal enum of available tool names type_adapter = _create_tool_selection_response(selection_request.available_tools) schema = type_adapter.json_schema() structured_model = 
selection_request.model.with_structured_output(schema) response = await structured_model.ainvoke( [ {"role": "system", "content": selection_request.system_message}, selection_request.last_user_message, ] ) # Response should be a dict since we're passing a schema (not a Pydantic model class) if not isinstance(response, dict): msg = f"Expected dict response, got {type(response)}" raise AssertionError(msg) modified_request = self._process_selection_response( response, selection_request.available_tools, selection_request.valid_tool_names, request ) return await handler(modified_request)
LLMToolSelectorMiddleware
python
pytorch__pytorch
tools/test/test_vulkan_codegen.py
{ "start": 2419, "end": 3749 }
class ____(unittest.TestCase):
    def setUp(self) -> None:
        self.tmpdir = tempfile.TemporaryDirectory()

        with open(f"{self.tmpdir.name}/test_shader.glsl,", "w") as f:
            f.write(test_shader)

        with open(f"{self.tmpdir.name}/test_params.yaml", "w") as f:
            f.write(test_params_yaml)

        self.tmpoutdir = tempfile.TemporaryDirectory()

        self.generator = SPVGenerator(
            src_dir_paths=self.tmpdir.name, env=DEFAULT_ENV, glslc_path=None
        )

    def cleanUp(self) -> None:
        self.tmpdir.cleanup()
        self.tmpoutdir.cleanup()

    def testOutputMap(self) -> None:
        # Each shader variant will produce variants generated based on all possible combinations
        # of the DTYPE and INPLACE parameters. test_shader_3 has fewer generated variants due to
        # a custom specified generate_variant_forall field.
        expected_output_shaders = {
            "test_shader_1_float",
            "test_shader_1_inplace_float",
            "test_shader_1_inplace_int8",
            "test_shader_1_int8",
            "test_shader_3_float",
            "test_shader_3_int",
        }

        actual_output_shaders = set(self.generator.output_shader_map.keys())

        self.assertEqual(expected_output_shaders, actual_output_shaders)
TestVulkanSPVCodegen
python
tensorflow__tensorflow
tensorflow/python/ops/ragged/ragged_rank_op_test.py
{ "start": 1025, "end": 2610 }
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):

  @parameterized.parameters([
      # Rank 0
      dict(
          test_input=1,
          expected_rank=0,
      ),
      # Rank 1
      dict(
          test_input=[1],
          expected_rank=1,
      ),
      dict(
          test_input=[1, 2, 3, 4],
          expected_rank=1,
      ),
      # Rank 2
      dict(
          test_input=[[1], [2], [3]],
          expected_rank=2,
      ),
      # Rank 3
      dict(
          test_input=[[[1], [2, 3]], [[4], [5, 6, 7]]],
          expected_rank=3,
      ),
      # Rank 3, ragged_rank=2
      dict(
          test_input=[[[1], [2, 3], [10, 20]], [[4], [5, 6, 7]]],
          expected_rank=3,
          ragged_rank=2,
      ),
      # Rank 4, ragged_rank=3 with dimensions: {2, (1, 2), (2), (1, 2)}
      dict(
          test_input=[[[[1], [2]]], [[[3, 4], [5, 6]], [[7, 8], [9, 10]]]],
          expected_rank=4,
      ),
      # Rank 4, ragged_rank=2 with dimensions: {2, (1, 2), (1, 2), 2}
      dict(
          test_input=[
              [[[1, 2]]], [[[5, 6], [7, 8]], [[9, 10], [11, 12]]]],
          expected_rank=4,
          ragged_rank=2,
      ),
  ])
  def testRaggedRank(self, test_input, expected_rank, ragged_rank=None):
    test_input = ragged_factory_ops.constant(
        test_input, ragged_rank=ragged_rank)
    self.assertAllEqual(ragged_array_ops.rank(
        test_input), expected_rank)


if __name__ == '__main__':
  googletest.main()
RaggedRankOpTest
python
aimacode__aima-python
agents4e.py
{ "start": 2011, "end": 8951 }
class ____(Thing): """An Agent is a subclass of Thing with one required slot, .program, which should hold a function that takes one argument, the percept, and returns an action. (What counts as a percept or action will depend on the specific environment in which the agent exists.) Note that 'program' is a slot, not a method. If it were a method, then the program could 'cheat' and look at aspects of the agent. It's not supposed to do that: the program can only look at the percepts. An agent program that needs a model of the world (and of the agent itself) will have to build and maintain its own model. There is an optional slot, .performance, which is a number giving the performance measure of the agent in its environment.""" def __init__(self, program=None): self.alive = True self.bump = False self.holding = [] self.performance = 0 if program is None or not isinstance(program, collections.abc.Callable): print("Can't find a valid program for {}, falling back to default.".format(self.__class__.__name__)) def program(percept): return eval(input('Percept={}; action? '.format(percept))) self.program = program def can_grab(self, thing): """Return True if this agent can grab this thing. Override for appropriate subclasses of Agent and Thing.""" return False def TraceAgent(agent): """Wrap the agent's program to print its input and output. This will let you see what the agent is doing in the environment.""" old_program = agent.program def new_program(percept): action = old_program(percept) print('{} perceives {} and does {}'.format(agent, percept, action)) return action agent.program = new_program return agent # ______________________________________________________________________________ def TableDrivenAgentProgram(table): """ [Figure 2.7] This agent selects an action based on the percept sequence. It is practical only for tiny domains. To customize it, provide as table a dictionary of all {percept_sequence:action} pairs. """ percepts = [] def program(percept): percepts.append(percept) action = table.get(tuple(percepts)) return action return program def RandomAgentProgram(actions): """An agent that chooses an action at random, ignoring all percepts. >>> list = ['Right', 'Left', 'Suck', 'NoOp'] >>> program = RandomAgentProgram(list) >>> agent = Agent(program) >>> environment = TrivialVacuumEnvironment() >>> environment.add_thing(agent) >>> environment.run() >>> environment.status == {(1, 0): 'Clean' , (0, 0): 'Clean'} True """ return lambda percept: random.choice(actions) # ______________________________________________________________________________ def SimpleReflexAgentProgram(rules, interpret_input): """ [Figure 2.10] This agent takes action based solely on the percept. """ def program(percept): state = interpret_input(percept) rule = rule_match(state, rules) action = rule.action return action return program def ModelBasedReflexAgentProgram(rules, update_state, transition_model, sensor_model): """ [Figure 2.12] This agent takes action based on the percept and state. 
""" def program(percept): program.state = update_state(program.state, program.action, percept, transition_model, sensor_model) rule = rule_match(program.state, rules) action = rule.action return action program.state = program.action = None return program def rule_match(state, rules): """Find the first rule that matches state.""" for rule in rules: if rule.matches(state): return rule # ______________________________________________________________________________ loc_A, loc_B = (0, 0), (1, 0) # The two locations for the Vacuum world def RandomVacuumAgent(): """Randomly choose one of the actions from the vacuum environment. >>> agent = RandomVacuumAgent() >>> environment = TrivialVacuumEnvironment() >>> environment.add_thing(agent) >>> environment.run() >>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'} True """ return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp'])) def TableDrivenVacuumAgent(): """Tabular approach towards vacuum world as mentioned in [Figure 2.3] >>> agent = TableDrivenVacuumAgent() >>> environment = TrivialVacuumEnvironment() >>> environment.add_thing(agent) >>> environment.run() >>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'} True """ table = {((loc_A, 'Clean'),): 'Right', ((loc_A, 'Dirty'),): 'Suck', ((loc_B, 'Clean'),): 'Left', ((loc_B, 'Dirty'),): 'Suck', ((loc_A, 'Dirty'), (loc_A, 'Clean')): 'Right', ((loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck', ((loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck', ((loc_B, 'Dirty'), (loc_B, 'Clean')): 'Left', ((loc_A, 'Dirty'), (loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck', ((loc_B, 'Dirty'), (loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck'} return Agent(TableDrivenAgentProgram(table)) def ReflexVacuumAgent(): """ [Figure 2.8] A reflex agent for the two-state vacuum environment. >>> agent = ReflexVacuumAgent() >>> environment = TrivialVacuumEnvironment() >>> environment.add_thing(agent) >>> environment.run() >>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'} True """ def program(percept): location, status = percept if status == 'Dirty': return 'Suck' elif location == loc_A: return 'Right' elif location == loc_B: return 'Left' return Agent(program) def ModelBasedVacuumAgent(): """An agent that keeps track of what locations are clean or dirty. >>> agent = ModelBasedVacuumAgent() >>> environment = TrivialVacuumEnvironment() >>> environment.add_thing(agent) >>> environment.run() >>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'} True """ model = {loc_A: None, loc_B: None} def program(percept): """Same as ReflexVacuumAgent, except if everything is clean, do NoOp.""" location, status = percept model[location] = status # Update the model here if model[loc_A] == model[loc_B] == 'Clean': return 'NoOp' elif status == 'Dirty': return 'Suck' elif location == loc_A: return 'Right' elif location == loc_B: return 'Left' return Agent(program) # ______________________________________________________________________________
Agent
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 368979, "end": 369310 }
class ____(sgqlc.types.Type):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __field_names__ = ("cursor", "node")
    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    node = sgqlc.types.Field("MarketplaceListing", graphql_name="node")
MarketplaceListingEdge
python
boto__boto3
tests/unit/dynamodb/test_types.py
{ "start": 1733, "end": 5017 }
class ____(unittest.TestCase):
    def setUp(self):
        self.serializer = TypeSerializer()

    def test_serialize_unsupported_type(self):
        with pytest.raises(TypeError, match=r'Unsupported type'):
            self.serializer.serialize(object())

    def test_serialize_null(self):
        assert self.serializer.serialize(None) == {'NULL': True}

    def test_serialize_boolean(self):
        assert self.serializer.serialize(False) == {'BOOL': False}

    def test_serialize_integer(self):
        assert self.serializer.serialize(1) == {'N': '1'}

    def test_serialize_decimal(self):
        assert self.serializer.serialize(Decimal('1.25')) == {'N': '1.25'}

    def test_serialize_float_error(self):
        error_msg = r'Float types are not supported. Use Decimal types instead'
        with pytest.raises(TypeError, match=error_msg):
            self.serializer.serialize(1.25)

    def test_serialize_NaN_error(self):
        with pytest.raises(TypeError, match=r'Infinity and NaN not supported'):
            self.serializer.serialize(Decimal('NaN'))

    def test_serialize_string(self):
        assert self.serializer.serialize('foo') == {'S': 'foo'}

    def test_serialize_binary(self):
        assert self.serializer.serialize(Binary(b'\x01')) == {'B': b'\x01'}

    def test_serialize_bytearray(self):
        assert self.serializer.serialize(bytearray([1])) == {'B': b'\x01'}

    def test_serialize_bytes(self):
        assert self.serializer.serialize(b'\x01') == {'B': b'\x01'}

    def test_serialize_number_set(self):
        serialized_value = self.serializer.serialize({1, 2, 3})
        assert len(serialized_value) == 1
        assert 'NS' in serialized_value
        self.assertCountEqual(serialized_value['NS'], ['1', '2', '3'])

    def test_serialize_string_set(self):
        serialized_value = self.serializer.serialize({'foo', 'bar'})
        assert len(serialized_value) == 1
        assert 'SS' in serialized_value
        self.assertCountEqual(serialized_value['SS'], ['foo', 'bar'])

    def test_serialize_binary_set(self):
        serialized_value = self.serializer.serialize(
            {Binary(b'\x01'), Binary(b'\x02')}
        )
        assert len(serialized_value) == 1
        assert 'BS' in serialized_value
        self.assertCountEqual(serialized_value['BS'], [b'\x01', b'\x02'])

    def test_serialize_list(self):
        serialized_value = self.serializer.serialize(['foo', 1, [1]])
        assert len(serialized_value) == 1
        assert 'L' in serialized_value
        self.assertCountEqual(
            serialized_value['L'],
            [{'S': 'foo'}, {'N': '1'}, {'L': [{'N': '1'}]}],
        )

    def test_serialize_tuple(self):
        serialized_value = self.serializer.serialize(('foo', 1, (1,)))
        self.assertEqual(len(serialized_value), 1)
        self.assertIn('L', serialized_value)
        self.assertCountEqual(
            serialized_value['L'],
            [{'S': 'foo'}, {'N': '1'}, {'L': [{'N': '1'}]}],
        )

    def test_serialize_map(self):
        serialized_value = self.serializer.serialize(
            {'foo': 'bar', 'baz': {'biz': 1}}
        )
        assert serialized_value == {
            'M': {'foo': {'S': 'bar'}, 'baz': {'M': {'biz': {'N': '1'}}}}
        }
TestSerializer
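A round-trip sketch with the public boto3 helpers this test suite exercises; the item contents are hypothetical.

from decimal import Decimal

from boto3.dynamodb.types import TypeDeserializer, TypeSerializer

serializer = TypeSerializer()
deserializer = TypeDeserializer()

item = {"pk": "user#1", "active": True, "score": Decimal("1.25"), "tags": {"a", "b"}}

# Python types -> DynamoDB attribute values, e.g. {"S": "user#1"}, {"BOOL": True}, ...
wire = {key: serializer.serialize(value) for key, value in item.items()}

# ...and back again.
assert {key: deserializer.deserialize(value) for key, value in wire.items()} == item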
python
kamyu104__LeetCode-Solutions
Python/find-polygon-with-the-largest-perimeter.py
{ "start": 60, "end": 411 }
class ____(object):
    def largestPerimeter(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        nums.sort()
        prefix = sum(nums)
        for i in reversed(xrange(2, len(nums))):
            prefix -= nums[i]
            if prefix > nums[i]:
                return prefix+nums[i]
        return -1
Solution
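A quick worked example of the prefix-sum scan above, rewritten as a standalone Python 3 function (the original uses Python 2's `xrange`); the inputs are illustrative.

def largest_perimeter(nums):
    # Same idea as above: after sorting, the longest side nums[i] can close a
    # polygon iff the sum of all shorter sides is strictly larger than it.
    nums = sorted(nums)
    prefix = sum(nums)
    for i in reversed(range(2, len(nums))):
        prefix -= nums[i]
        if prefix > nums[i]:
            return prefix + nums[i]
    return -1

print(largest_perimeter([5, 5, 5]))                # 15
print(largest_perimeter([1, 12, 1, 2, 5, 50, 3]))  # 12
print(largest_perimeter([5, 5, 50]))               # -1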
python
pola-rs__polars
py-polars/src/polars/datatypes/classes.py
{ "start": 8832, "end": 8911 }
class ____(NumericType):
    """Base class for integer data types."""
IntegerType
python
realpython__materials
django-vue-graphql/source_code_final/back_end/blog/admin.py
{ "start": 103, "end": 184 }
class ____(admin.ModelAdmin):
    model = Profile


@admin.register(Tag)
ProfileAdmin
python
jazzband__django-model-utils
model_utils/models.py
{ "start": 1303, "end": 1635 }
class ____(models.Model):
    """
    An abstract base class model that provides ``start``
    and ``end`` fields to record a timeframe.
    """

    start = models.DateTimeField(_('start'), null=True, blank=True)
    end = models.DateTimeField(_('end'), null=True, blank=True)

    class Meta:
        abstract = True
TimeFramedModel
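A hedged usage sketch; `Sprint` and its field are hypothetical, while `TimeFramedModel` is the real django-model-utils base class shown above.

from django.db import models
from model_utils.models import TimeFramedModel


class Sprint(TimeFramedModel):  # hypothetical model, for illustration only
    """Inherits nullable ``start`` and ``end`` DateTimeFields from the base."""

    name = models.CharField(max_length=100)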
python
lxml__lxml
doc/s5/ep2008/atom.py
{ "start": 13435, "end": 13579 }
class ____(_EntryElement):
    """
    For elements that contain a date in their text content.
    """
    date = _date_text_property()
DateElement
python
great-expectations__great_expectations
tasks.py
{ "start": 26469, "end": 41447 }
class ____(NamedTuple): requirement_files: tuple[str, ...] services: tuple[str, ...] = tuple() extra_pytest_args: tuple[ # TODO: remove this once remove the custom flagging system str, ... ] = tuple() MARKER_DEPENDENCY_MAP: Final[Mapping[str, TestDependencies]] = { "athena": TestDependencies(("reqs/requirements-dev-athena.txt",)), "aws_deps": TestDependencies(("reqs/requirements-dev-lite.txt",)), "bigquery": TestDependencies(("reqs/requirements-dev-bigquery.txt",)), "clickhouse": TestDependencies(("reqs/requirements-dev-clickhouse.txt",)), "cloud": TestDependencies( ( "reqs/requirements-dev-cloud.txt", "reqs/requirements-dev-snowflake.txt", "reqs/requirements-dev-spark.txt", ), services=( "mercury", "spark", ), extra_pytest_args=("--cloud",), ), "databricks": TestDependencies( requirement_files=("reqs/requirements-dev-databricks.txt",), ), "docs-basic": TestDependencies( # these installs are handled by the CI requirement_files=( "reqs/requirements-dev-test.txt", "reqs/requirements-dev-mysql.txt", "reqs/requirements-dev-postgresql.txt", # "Deprecated API features detected" warning/error for test_docs[split_data_on_whole_table_bigquery] when pandas>=2.0 # noqa: E501 "reqs/requirements-dev-trino.txt", ), services=("postgresql", "mysql", "trino"), extra_pytest_args=( "--mysql", "--postgresql", "--trino", "--docs-tests", ), ), "docs-creds-needed": TestDependencies( # these installs are handled by the CI requirement_files=( "reqs/requirements-dev-test.txt", "reqs/requirements-dev-azure.txt", "reqs/requirements-dev-bigquery.txt", "reqs/requirements-dev-cloud.txt", "reqs/requirements-dev-redshift.txt", "reqs/requirements-dev-snowflake.txt", # "Deprecated API features detected" warning/error for test_docs[split_data_on_whole_table_bigquery] when pandas>=2.0 # noqa: E501 ), services=("mercury",), extra_pytest_args=( "--aws", "--azure", "--bigquery", "--redshift", "--snowflake", "--cloud", "--docs-tests", ), ), "docs-spark": TestDependencies( requirement_files=( "reqs/requirements-dev-test.txt", "reqs/requirements-dev-spark.txt", ), services=("spark",), extra_pytest_args=("--spark", "--docs-tests"), ), "gx-redshift": TestDependencies( requirement_files=("reqs/requirements-dev-gx-redshift.txt",), ), "mssql": TestDependencies( ("reqs/requirements-dev-mssql.txt",), services=("mssql",), extra_pytest_args=("--mssql",), ), "mysql": TestDependencies( ("reqs/requirements-dev-mysql.txt",), services=("mysql",), extra_pytest_args=("--mysql",), ), "pyarrow": TestDependencies(("reqs/requirements-dev-arrow.txt",)), "postgresql": TestDependencies( ("reqs/requirements-dev-postgresql.txt",), services=("postgresql",), extra_pytest_args=("--postgresql",), ), "redshift": TestDependencies( requirement_files=("reqs/requirements-dev-redshift.txt",), ), "snowflake": TestDependencies( requirement_files=("reqs/requirements-dev-snowflake.txt",), ), "spark": TestDependencies( requirement_files=("reqs/requirements-dev-spark.txt",), services=("spark",), extra_pytest_args=("--spark",), ), "spark_connect": TestDependencies( requirement_files=( "reqs/requirements-dev-spark.txt", "reqs/requirements-dev-spark-connect.txt", ), services=("spark",), extra_pytest_args=("--spark_connect",), ), "trino": TestDependencies( ("reqs/requirements-dev-trino.txt",), services=("trino",), extra_pytest_args=("--trino",), ), } def _marker_statement(marker: str) -> str: # Perhaps we should move this configuration to the MARKER_DEPENDENCY_MAP instead of # doing the mapping here. 
if marker in [ "postgresql", "mssql", "mysql", "spark", "trino", ]: return f"'all_backends or {marker}'" elif marker == "gx-redshift": return "'redshift'" else: return f"'{marker}'" def _tokenize_marker_string(marker_string: str) -> Generator[str, None, None]: """_summary_ Args: marker_string (str): _description_ Yields: Generator[str, None, None]: _description_ """ tokens = marker_string.split() if len(tokens) == 1: yield tokens[0] elif marker_string == "openpyxl or pyarrow or project or sqlite or aws_creds": yield "aws_creds" yield "openpyxl" yield "pyarrow" yield "project" yield "sqlite" else: raise ValueError(f"Unable to tokenize marker string: {marker_string}") # noqa: TRY003 def _get_marker_dependencies(markers: str | Sequence[str]) -> list[TestDependencies]: if isinstance(markers, str): markers = [markers] dependencies: list[TestDependencies] = [] for marker_string in markers: for marker_token in _tokenize_marker_string(marker_string): if marker_depedencies := MARKER_DEPENDENCY_MAP.get(marker_token): LOGGER.debug(f"'{marker_token}' has dependencies") dependencies.append(marker_depedencies) return dependencies @invoke.task( iterable=["markers", "requirements_dev"], help={ "markers": "Optional marker to install dependencies for. Can be specified multiple times.", "requirements_dev": "Short name of `requirements-dev-*.txt` file to install, e.g. test, spark, cloud, etc. Can be specified multiple times.", # noqa: E501 "constraints": "Optional flag to install dependencies with constraints, default True", "gx_install": "Install the local version of Great Expectations.", "editable_install": "Install an editable local version of Great Expectations.", "force_reinstall": "Force re-installation of dependencies.", }, ) def deps( # noqa: C901 - too complex ctx: Context, markers: list[str], requirements_dev: list[str], constraints: bool = True, gx_install: bool = False, editable_install: bool = False, force_reinstall: bool = False, ): """ Install dependencies for development and testing. Specific requirement files needed for a specific test marker can be registered in `MARKER_DEPENDENCY_MAP`, `invoke deps` will always check for and use these when installing dependencies. If no `markers` or `requirements-dev` are specified, the dev-contrib and core requirements are installed. Example usage: Installing the needed dependencies for running the `external_sqldialect` tests and the 'requirements-dev-cloud.txt' dependencies. 
$ invoke deps -m external_sqldialect -r cloud """ # noqa: E501 cmds = ["pip", "install"] if editable_install: cmds.append("-e .") elif gx_install: cmds.append(".") if force_reinstall: cmds.append("--force-reinstall") req_files: list[str] = ["requirements.txt"] for test_deps in _get_marker_dependencies(markers): req_files.extend(test_deps.requirement_files) for name in requirements_dev: req_path: pathlib.Path = REQS_DIR / f"requirements-dev-{name}.txt" assert req_path.exists(), f"Requirement file {req_path} does not exist" req_files.append(str(req_path)) if not markers and not requirements_dev: req_files.append("reqs/requirements-dev-contrib.txt") for req_file in req_files: cmds.append(f"-r {req_file}") if constraints: cmds.append("-c constraints-dev.txt") ctx.run(" ".join(cmds), echo=True, pty=True) @invoke.task(iterable=["service_names", "up_services", "verbose"]) def docs_snippet_tests( ctx: Context, marker: str, up_services: bool = False, verbose: bool = False, reports: bool = False, ): pytest_cmds = [ "pytest", "-rEf", ] if reports: pytest_cmds.extend(["--cov=great_expectations", "--cov-report=xml"]) if verbose: pytest_cmds.append("-vv") for test_deps in _get_marker_dependencies(marker): if up_services: service(ctx, names=test_deps.services, markers=test_deps.services) for extra_pytest_arg in test_deps.extra_pytest_args: pytest_cmds.append(extra_pytest_arg) pytest_cmds.append("tests/integration/test_script_runner.py") ctx.run(" ".join(pytest_cmds), echo=True, pty=True) @invoke.task( help={ "pty": _PTY_HELP_DESC, "reports": "Generate coverage & result reports to be uploaded to codecov", "W": "Warnings control", }, iterable=["service_names", "up_services", "verbose"], ) def ci_tests( # noqa: C901 - too complex (9) ctx: Context, marker: str, up_services: bool = False, restart_services: bool = False, verbose: bool = False, reports: bool = False, slowest: int = 5, timeout: float = 0.0, # 0 indicates no timeout xdist: bool = False, W: str | None = None, pty: bool = True, ): """ Run tests in CI. This method looks up the pytest marker provided and runs the tests for that marker, as well as looking up any required services, testing dependencies and extra CLI flags that are need and starting them if `up_services` is True. `up_services` is False by default to avoid starting services which may already be up when running tests locally. `restart_services` is False by default to avoid always restarting the services. Defined this as a new invoke task to avoid some of the baggage of our old test setup. 
""" pytest_options = [f"--durations={slowest}", "-rEf"] if xdist: pytest_options.append("-n 4") if timeout != 0: pytest_options.append(f"--timeout={timeout}") if reports: pytest_options.extend( ["--cov=great_expectations", "--cov-report=xml", "--junitxml=junit.xml"] ) if verbose: pytest_options.append("-vv") if W: # https://docs.python.org/3/library/warnings.html#describing-warning-filters pytest_options.append(f"-W={W}") for test_deps in _get_marker_dependencies(marker): if restart_services or up_services: service( ctx, names=test_deps.services, markers=test_deps.services, restart_services=restart_services, pty=pty, ) for extra_pytest_arg in test_deps.extra_pytest_args: pytest_options.append(extra_pytest_arg) pytest_cmd = ["pytest", "-m", _marker_statement(marker)] + pytest_options ctx.run(" ".join(pytest_cmd), echo=True, pty=pty) @invoke.task( aliases=("services",), help={"pty": _PTY_HELP_DESC}, iterable=["names", "markers"], ) def service( ctx: Context, names: Sequence[str], markers: Sequence[str], restart_services: bool = False, pty: bool = True, ): """ Startup a service, by referencing its name directly or by looking up a pytest marker. If a marker is specified, the services listed in `MARKER_DEPENDENCY_MAP` will be used. If restart_services was passed, the containers will be stopped and re-built. Note: The main reason this is a separate task is to make it easy to start services when running tests locally. """ service_names = set(names) if markers: for test_deps in _get_marker_dependencies(markers): service_names.update(test_deps.services) if service_names: print(f" Starting services for {', '.join(service_names)} ...") for service_name in service_names: cmds = [] if service_name == "mercury" and os.environ.get("CI") != "true": cmds.extend( [ "FORCE_NO_ALIAS=true", "assume", "dev", "--exec", "'aws ecr get-login-password --region us-east-1'", "|", "docker", "login", "--username", "AWS", "--password-stdin", "258143015559.dkr.ecr.us-east-1.amazonaws.com", "&&", ] ) if restart_services: print(f" Removing existing containers and building latest for {service_name} ...") cmds.extend( [ "docker", "compose", "-f", f"assets/docker/{service_name}/docker-compose.yml", "rm", "-fsv", "&&", "docker", "compose", "-f", f"assets/docker/{service_name}/docker-compose.yml", "build", "--pull", "&&", ] ) cmds.extend( [ "docker", "compose", "--progress", "quiet", "-f", f"assets/docker/{service_name}/docker-compose.yml", "up", "-d", "--wait", "--wait-timeout 120", ] ) ctx.run(" ".join(cmds), echo=True, pty=pty) # TODO: Add healthchecks to services that require this sleep and then remove it. # This is a temporary hack to give services enough time to come up before moving on. ctx.run("sleep 15") else: print(" No matching services to start") @invoke.task() def print_public_api(ctx: Context): """Prints to STDOUT all of our public api.""" # Walk the GX package to make sure we import all submodules to ensure we # retrieve all things decorated with our public api decorator. import great_expectations for module_info in pkgutil.walk_packages(["great_expectations"], prefix="great_expectations."): importlib.import_module(module_info.name) print(great_expectations._docs_decorators.public_api_introspector)
TestDependencies
python
kubernetes-client__python
kubernetes/client/models/v1_pod_spec.py
{ "start": 383, "end": 58308 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'active_deadline_seconds': 'int', 'affinity': 'V1Affinity', 'automount_service_account_token': 'bool', 'containers': 'list[V1Container]', 'dns_config': 'V1PodDNSConfig', 'dns_policy': 'str', 'enable_service_links': 'bool', 'ephemeral_containers': 'list[V1EphemeralContainer]', 'host_aliases': 'list[V1HostAlias]', 'host_ipc': 'bool', 'host_network': 'bool', 'host_pid': 'bool', 'host_users': 'bool', 'hostname': 'str', 'hostname_override': 'str', 'image_pull_secrets': 'list[V1LocalObjectReference]', 'init_containers': 'list[V1Container]', 'node_name': 'str', 'node_selector': 'dict(str, str)', 'os': 'V1PodOS', 'overhead': 'dict(str, str)', 'preemption_policy': 'str', 'priority': 'int', 'priority_class_name': 'str', 'readiness_gates': 'list[V1PodReadinessGate]', 'resource_claims': 'list[V1PodResourceClaim]', 'resources': 'V1ResourceRequirements', 'restart_policy': 'str', 'runtime_class_name': 'str', 'scheduler_name': 'str', 'scheduling_gates': 'list[V1PodSchedulingGate]', 'security_context': 'V1PodSecurityContext', 'service_account': 'str', 'service_account_name': 'str', 'set_hostname_as_fqdn': 'bool', 'share_process_namespace': 'bool', 'subdomain': 'str', 'termination_grace_period_seconds': 'int', 'tolerations': 'list[V1Toleration]', 'topology_spread_constraints': 'list[V1TopologySpreadConstraint]', 'volumes': 'list[V1Volume]' } attribute_map = { 'active_deadline_seconds': 'activeDeadlineSeconds', 'affinity': 'affinity', 'automount_service_account_token': 'automountServiceAccountToken', 'containers': 'containers', 'dns_config': 'dnsConfig', 'dns_policy': 'dnsPolicy', 'enable_service_links': 'enableServiceLinks', 'ephemeral_containers': 'ephemeralContainers', 'host_aliases': 'hostAliases', 'host_ipc': 'hostIPC', 'host_network': 'hostNetwork', 'host_pid': 'hostPID', 'host_users': 'hostUsers', 'hostname': 'hostname', 'hostname_override': 'hostnameOverride', 'image_pull_secrets': 'imagePullSecrets', 'init_containers': 'initContainers', 'node_name': 'nodeName', 'node_selector': 'nodeSelector', 'os': 'os', 'overhead': 'overhead', 'preemption_policy': 'preemptionPolicy', 'priority': 'priority', 'priority_class_name': 'priorityClassName', 'readiness_gates': 'readinessGates', 'resource_claims': 'resourceClaims', 'resources': 'resources', 'restart_policy': 'restartPolicy', 'runtime_class_name': 'runtimeClassName', 'scheduler_name': 'schedulerName', 'scheduling_gates': 'schedulingGates', 'security_context': 'securityContext', 'service_account': 'serviceAccount', 'service_account_name': 'serviceAccountName', 'set_hostname_as_fqdn': 'setHostnameAsFQDN', 'share_process_namespace': 'shareProcessNamespace', 'subdomain': 'subdomain', 'termination_grace_period_seconds': 'terminationGracePeriodSeconds', 'tolerations': 'tolerations', 'topology_spread_constraints': 'topologySpreadConstraints', 'volumes': 'volumes' } def __init__(self, active_deadline_seconds=None, affinity=None, automount_service_account_token=None, containers=None, dns_config=None, dns_policy=None, enable_service_links=None, ephemeral_containers=None, host_aliases=None, host_ipc=None, host_network=None, host_pid=None, host_users=None, hostname=None, hostname_override=None, 
image_pull_secrets=None, init_containers=None, node_name=None, node_selector=None, os=None, overhead=None, preemption_policy=None, priority=None, priority_class_name=None, readiness_gates=None, resource_claims=None, resources=None, restart_policy=None, runtime_class_name=None, scheduler_name=None, scheduling_gates=None, security_context=None, service_account=None, service_account_name=None, set_hostname_as_fqdn=None, share_process_namespace=None, subdomain=None, termination_grace_period_seconds=None, tolerations=None, topology_spread_constraints=None, volumes=None, local_vars_configuration=None): # noqa: E501 """V1PodSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._active_deadline_seconds = None self._affinity = None self._automount_service_account_token = None self._containers = None self._dns_config = None self._dns_policy = None self._enable_service_links = None self._ephemeral_containers = None self._host_aliases = None self._host_ipc = None self._host_network = None self._host_pid = None self._host_users = None self._hostname = None self._hostname_override = None self._image_pull_secrets = None self._init_containers = None self._node_name = None self._node_selector = None self._os = None self._overhead = None self._preemption_policy = None self._priority = None self._priority_class_name = None self._readiness_gates = None self._resource_claims = None self._resources = None self._restart_policy = None self._runtime_class_name = None self._scheduler_name = None self._scheduling_gates = None self._security_context = None self._service_account = None self._service_account_name = None self._set_hostname_as_fqdn = None self._share_process_namespace = None self._subdomain = None self._termination_grace_period_seconds = None self._tolerations = None self._topology_spread_constraints = None self._volumes = None self.discriminator = None if active_deadline_seconds is not None: self.active_deadline_seconds = active_deadline_seconds if affinity is not None: self.affinity = affinity if automount_service_account_token is not None: self.automount_service_account_token = automount_service_account_token self.containers = containers if dns_config is not None: self.dns_config = dns_config if dns_policy is not None: self.dns_policy = dns_policy if enable_service_links is not None: self.enable_service_links = enable_service_links if ephemeral_containers is not None: self.ephemeral_containers = ephemeral_containers if host_aliases is not None: self.host_aliases = host_aliases if host_ipc is not None: self.host_ipc = host_ipc if host_network is not None: self.host_network = host_network if host_pid is not None: self.host_pid = host_pid if host_users is not None: self.host_users = host_users if hostname is not None: self.hostname = hostname if hostname_override is not None: self.hostname_override = hostname_override if image_pull_secrets is not None: self.image_pull_secrets = image_pull_secrets if init_containers is not None: self.init_containers = init_containers if node_name is not None: self.node_name = node_name if node_selector is not None: self.node_selector = node_selector if os is not None: self.os = os if overhead is not None: self.overhead = overhead if preemption_policy is not None: self.preemption_policy = preemption_policy if priority is not None: self.priority = priority if priority_class_name is not None: self.priority_class_name = priority_class_name 
if readiness_gates is not None: self.readiness_gates = readiness_gates if resource_claims is not None: self.resource_claims = resource_claims if resources is not None: self.resources = resources if restart_policy is not None: self.restart_policy = restart_policy if runtime_class_name is not None: self.runtime_class_name = runtime_class_name if scheduler_name is not None: self.scheduler_name = scheduler_name if scheduling_gates is not None: self.scheduling_gates = scheduling_gates if security_context is not None: self.security_context = security_context if service_account is not None: self.service_account = service_account if service_account_name is not None: self.service_account_name = service_account_name if set_hostname_as_fqdn is not None: self.set_hostname_as_fqdn = set_hostname_as_fqdn if share_process_namespace is not None: self.share_process_namespace = share_process_namespace if subdomain is not None: self.subdomain = subdomain if termination_grace_period_seconds is not None: self.termination_grace_period_seconds = termination_grace_period_seconds if tolerations is not None: self.tolerations = tolerations if topology_spread_constraints is not None: self.topology_spread_constraints = topology_spread_constraints if volumes is not None: self.volumes = volumes @property def active_deadline_seconds(self): """Gets the active_deadline_seconds of this V1PodSpec. # noqa: E501 Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. # noqa: E501 :return: The active_deadline_seconds of this V1PodSpec. # noqa: E501 :rtype: int """ return self._active_deadline_seconds @active_deadline_seconds.setter def active_deadline_seconds(self, active_deadline_seconds): """Sets the active_deadline_seconds of this V1PodSpec. Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. # noqa: E501 :param active_deadline_seconds: The active_deadline_seconds of this V1PodSpec. # noqa: E501 :type: int """ self._active_deadline_seconds = active_deadline_seconds @property def affinity(self): """Gets the affinity of this V1PodSpec. # noqa: E501 :return: The affinity of this V1PodSpec. # noqa: E501 :rtype: V1Affinity """ return self._affinity @affinity.setter def affinity(self, affinity): """Sets the affinity of this V1PodSpec. :param affinity: The affinity of this V1PodSpec. # noqa: E501 :type: V1Affinity """ self._affinity = affinity @property def automount_service_account_token(self): """Gets the automount_service_account_token of this V1PodSpec. # noqa: E501 AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. # noqa: E501 :return: The automount_service_account_token of this V1PodSpec. # noqa: E501 :rtype: bool """ return self._automount_service_account_token @automount_service_account_token.setter def automount_service_account_token(self, automount_service_account_token): """Sets the automount_service_account_token of this V1PodSpec. AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. # noqa: E501 :param automount_service_account_token: The automount_service_account_token of this V1PodSpec. 
# noqa: E501 :type: bool """ self._automount_service_account_token = automount_service_account_token @property def containers(self): """Gets the containers of this V1PodSpec. # noqa: E501 List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. # noqa: E501 :return: The containers of this V1PodSpec. # noqa: E501 :rtype: list[V1Container] """ return self._containers @containers.setter def containers(self, containers): """Sets the containers of this V1PodSpec. List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. # noqa: E501 :param containers: The containers of this V1PodSpec. # noqa: E501 :type: list[V1Container] """ if self.local_vars_configuration.client_side_validation and containers is None: # noqa: E501 raise ValueError("Invalid value for `containers`, must not be `None`") # noqa: E501 self._containers = containers @property def dns_config(self): """Gets the dns_config of this V1PodSpec. # noqa: E501 :return: The dns_config of this V1PodSpec. # noqa: E501 :rtype: V1PodDNSConfig """ return self._dns_config @dns_config.setter def dns_config(self, dns_config): """Sets the dns_config of this V1PodSpec. :param dns_config: The dns_config of this V1PodSpec. # noqa: E501 :type: V1PodDNSConfig """ self._dns_config = dns_config @property def dns_policy(self): """Gets the dns_policy of this V1PodSpec. # noqa: E501 Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. # noqa: E501 :return: The dns_policy of this V1PodSpec. # noqa: E501 :rtype: str """ return self._dns_policy @dns_policy.setter def dns_policy(self, dns_policy): """Sets the dns_policy of this V1PodSpec. Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. # noqa: E501 :param dns_policy: The dns_policy of this V1PodSpec. # noqa: E501 :type: str """ self._dns_policy = dns_policy @property def enable_service_links(self): """Gets the enable_service_links of this V1PodSpec. # noqa: E501 EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501 :return: The enable_service_links of this V1PodSpec. # noqa: E501 :rtype: bool """ return self._enable_service_links @enable_service_links.setter def enable_service_links(self, enable_service_links): """Sets the enable_service_links of this V1PodSpec. EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501 :param enable_service_links: The enable_service_links of this V1PodSpec. # noqa: E501 :type: bool """ self._enable_service_links = enable_service_links @property def ephemeral_containers(self): """Gets the ephemeral_containers of this V1PodSpec. 
# noqa: E501 List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. # noqa: E501 :return: The ephemeral_containers of this V1PodSpec. # noqa: E501 :rtype: list[V1EphemeralContainer] """ return self._ephemeral_containers @ephemeral_containers.setter def ephemeral_containers(self, ephemeral_containers): """Sets the ephemeral_containers of this V1PodSpec. List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. # noqa: E501 :param ephemeral_containers: The ephemeral_containers of this V1PodSpec. # noqa: E501 :type: list[V1EphemeralContainer] """ self._ephemeral_containers = ephemeral_containers @property def host_aliases(self): """Gets the host_aliases of this V1PodSpec. # noqa: E501 HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. # noqa: E501 :return: The host_aliases of this V1PodSpec. # noqa: E501 :rtype: list[V1HostAlias] """ return self._host_aliases @host_aliases.setter def host_aliases(self, host_aliases): """Sets the host_aliases of this V1PodSpec. HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. # noqa: E501 :param host_aliases: The host_aliases of this V1PodSpec. # noqa: E501 :type: list[V1HostAlias] """ self._host_aliases = host_aliases @property def host_ipc(self): """Gets the host_ipc of this V1PodSpec. # noqa: E501 Use the host's ipc namespace. Optional: Default to false. # noqa: E501 :return: The host_ipc of this V1PodSpec. # noqa: E501 :rtype: bool """ return self._host_ipc @host_ipc.setter def host_ipc(self, host_ipc): """Sets the host_ipc of this V1PodSpec. Use the host's ipc namespace. Optional: Default to false. # noqa: E501 :param host_ipc: The host_ipc of this V1PodSpec. # noqa: E501 :type: bool """ self._host_ipc = host_ipc @property def host_network(self): """Gets the host_network of this V1PodSpec. # noqa: E501 Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false. # noqa: E501 :return: The host_network of this V1PodSpec. # noqa: E501 :rtype: bool """ return self._host_network @host_network.setter def host_network(self, host_network): """Sets the host_network of this V1PodSpec. Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false. # noqa: E501 :param host_network: The host_network of this V1PodSpec. 
# noqa: E501 :type: bool """ self._host_network = host_network @property def host_pid(self): """Gets the host_pid of this V1PodSpec. # noqa: E501 Use the host's pid namespace. Optional: Default to false. # noqa: E501 :return: The host_pid of this V1PodSpec. # noqa: E501 :rtype: bool """ return self._host_pid @host_pid.setter def host_pid(self, host_pid): """Sets the host_pid of this V1PodSpec. Use the host's pid namespace. Optional: Default to false. # noqa: E501 :param host_pid: The host_pid of this V1PodSpec. # noqa: E501 :type: bool """ self._host_pid = host_pid @property def host_users(self): """Gets the host_users of this V1PodSpec. # noqa: E501 Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. # noqa: E501 :return: The host_users of this V1PodSpec. # noqa: E501 :rtype: bool """ return self._host_users @host_users.setter def host_users(self, host_users): """Sets the host_users of this V1PodSpec. Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. # noqa: E501 :param host_users: The host_users of this V1PodSpec. # noqa: E501 :type: bool """ self._host_users = host_users @property def hostname(self): """Gets the hostname of this V1PodSpec. # noqa: E501 Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. # noqa: E501 :return: The hostname of this V1PodSpec. # noqa: E501 :rtype: str """ return self._hostname @hostname.setter def hostname(self, hostname): """Sets the hostname of this V1PodSpec. Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. # noqa: E501 :param hostname: The hostname of this V1PodSpec. # noqa: E501 :type: str """ self._hostname = hostname @property def hostname_override(self): """Gets the hostname_override of this V1PodSpec. # noqa: E501 HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false. This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled. 
# noqa: E501 :return: The hostname_override of this V1PodSpec. # noqa: E501 :rtype: str """ return self._hostname_override @hostname_override.setter def hostname_override(self, hostname_override): """Sets the hostname_override of this V1PodSpec. HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false. This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled. # noqa: E501 :param hostname_override: The hostname_override of this V1PodSpec. # noqa: E501 :type: str """ self._hostname_override = hostname_override @property def image_pull_secrets(self): """Gets the image_pull_secrets of this V1PodSpec. # noqa: E501 ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod # noqa: E501 :return: The image_pull_secrets of this V1PodSpec. # noqa: E501 :rtype: list[V1LocalObjectReference] """ return self._image_pull_secrets @image_pull_secrets.setter def image_pull_secrets(self, image_pull_secrets): """Sets the image_pull_secrets of this V1PodSpec. ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod # noqa: E501 :param image_pull_secrets: The image_pull_secrets of this V1PodSpec. # noqa: E501 :type: list[V1LocalObjectReference] """ self._image_pull_secrets = image_pull_secrets @property def init_containers(self): """Gets the init_containers of this V1PodSpec. # noqa: E501 List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ # noqa: E501 :return: The init_containers of this V1PodSpec. # noqa: E501 :rtype: list[V1Container] """ return self._init_containers @init_containers.setter def init_containers(self, init_containers): """Sets the init_containers of this V1PodSpec. List of initialization containers belonging to the pod. 
Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ # noqa: E501 :param init_containers: The init_containers of this V1PodSpec. # noqa: E501 :type: list[V1Container] """ self._init_containers = init_containers @property def node_name(self): """Gets the node_name of this V1PodSpec. # noqa: E501 NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename # noqa: E501 :return: The node_name of this V1PodSpec. # noqa: E501 :rtype: str """ return self._node_name @node_name.setter def node_name(self, node_name): """Sets the node_name of this V1PodSpec. NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename # noqa: E501 :param node_name: The node_name of this V1PodSpec. # noqa: E501 :type: str """ self._node_name = node_name @property def node_selector(self): """Gets the node_selector of this V1PodSpec. # noqa: E501 NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501 :return: The node_selector of this V1PodSpec. # noqa: E501 :rtype: dict(str, str) """ return self._node_selector @node_selector.setter def node_selector(self, node_selector): """Sets the node_selector of this V1PodSpec. NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501 :param node_selector: The node_selector of this V1PodSpec. # noqa: E501 :type: dict(str, str) """ self._node_selector = node_selector @property def os(self): """Gets the os of this V1PodSpec. # noqa: E501 :return: The os of this V1PodSpec. # noqa: E501 :rtype: V1PodOS """ return self._os @os.setter def os(self, os): """Sets the os of this V1PodSpec. :param os: The os of this V1PodSpec. # noqa: E501 :type: V1PodOS """ self._os = os @property def overhead(self): """Gets the overhead of this V1PodSpec. 
# noqa: E501 Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md # noqa: E501 :return: The overhead of this V1PodSpec. # noqa: E501 :rtype: dict(str, str) """ return self._overhead @overhead.setter def overhead(self, overhead): """Sets the overhead of this V1PodSpec. Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md # noqa: E501 :param overhead: The overhead of this V1PodSpec. # noqa: E501 :type: dict(str, str) """ self._overhead = overhead @property def preemption_policy(self): """Gets the preemption_policy of this V1PodSpec. # noqa: E501 PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501 :return: The preemption_policy of this V1PodSpec. # noqa: E501 :rtype: str """ return self._preemption_policy @preemption_policy.setter def preemption_policy(self, preemption_policy): """Sets the preemption_policy of this V1PodSpec. PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501 :param preemption_policy: The preemption_policy of this V1PodSpec. # noqa: E501 :type: str """ self._preemption_policy = preemption_policy @property def priority(self): """Gets the priority of this V1PodSpec. # noqa: E501 The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. # noqa: E501 :return: The priority of this V1PodSpec. # noqa: E501 :rtype: int """ return self._priority @priority.setter def priority(self, priority): """Sets the priority of this V1PodSpec. The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. # noqa: E501 :param priority: The priority of this V1PodSpec. 
# noqa: E501 :type: int """ self._priority = priority @property def priority_class_name(self): """Gets the priority_class_name of this V1PodSpec. # noqa: E501 If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501 :return: The priority_class_name of this V1PodSpec. # noqa: E501 :rtype: str """ return self._priority_class_name @priority_class_name.setter def priority_class_name(self, priority_class_name): """Sets the priority_class_name of this V1PodSpec. If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501 :param priority_class_name: The priority_class_name of this V1PodSpec. # noqa: E501 :type: str """ self._priority_class_name = priority_class_name @property def readiness_gates(self): """Gets the readiness_gates of this V1PodSpec. # noqa: E501 If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates # noqa: E501 :return: The readiness_gates of this V1PodSpec. # noqa: E501 :rtype: list[V1PodReadinessGate] """ return self._readiness_gates @readiness_gates.setter def readiness_gates(self, readiness_gates): """Sets the readiness_gates of this V1PodSpec. If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates # noqa: E501 :param readiness_gates: The readiness_gates of this V1PodSpec. # noqa: E501 :type: list[V1PodReadinessGate] """ self._readiness_gates = readiness_gates @property def resource_claims(self): """Gets the resource_claims of this V1PodSpec. # noqa: E501 ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. # noqa: E501 :return: The resource_claims of this V1PodSpec. # noqa: E501 :rtype: list[V1PodResourceClaim] """ return self._resource_claims @resource_claims.setter def resource_claims(self, resource_claims): """Sets the resource_claims of this V1PodSpec. ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. # noqa: E501 :param resource_claims: The resource_claims of this V1PodSpec. 
# noqa: E501 :type: list[V1PodResourceClaim] """ self._resource_claims = resource_claims @property def resources(self): """Gets the resources of this V1PodSpec. # noqa: E501 :return: The resources of this V1PodSpec. # noqa: E501 :rtype: V1ResourceRequirements """ return self._resources @resources.setter def resources(self, resources): """Sets the resources of this V1PodSpec. :param resources: The resources of this V1PodSpec. # noqa: E501 :type: V1ResourceRequirements """ self._resources = resources @property def restart_policy(self): """Gets the restart_policy of this V1PodSpec. # noqa: E501 Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy # noqa: E501 :return: The restart_policy of this V1PodSpec. # noqa: E501 :rtype: str """ return self._restart_policy @restart_policy.setter def restart_policy(self, restart_policy): """Sets the restart_policy of this V1PodSpec. Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy # noqa: E501 :param restart_policy: The restart_policy of this V1PodSpec. # noqa: E501 :type: str """ self._restart_policy = restart_policy @property def runtime_class_name(self): """Gets the runtime_class_name of this V1PodSpec. # noqa: E501 RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class # noqa: E501 :return: The runtime_class_name of this V1PodSpec. # noqa: E501 :rtype: str """ return self._runtime_class_name @runtime_class_name.setter def runtime_class_name(self, runtime_class_name): """Sets the runtime_class_name of this V1PodSpec. RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class # noqa: E501 :param runtime_class_name: The runtime_class_name of this V1PodSpec. # noqa: E501 :type: str """ self._runtime_class_name = runtime_class_name @property def scheduler_name(self): """Gets the scheduler_name of this V1PodSpec. # noqa: E501 If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. # noqa: E501 :return: The scheduler_name of this V1PodSpec. # noqa: E501 :rtype: str """ return self._scheduler_name @scheduler_name.setter def scheduler_name(self, scheduler_name): """Sets the scheduler_name of this V1PodSpec. If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. # noqa: E501 :param scheduler_name: The scheduler_name of this V1PodSpec. 
# noqa: E501 :type: str """ self._scheduler_name = scheduler_name @property def scheduling_gates(self): """Gets the scheduling_gates of this V1PodSpec. # noqa: E501 SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. SchedulingGates can only be set at pod creation time, and be removed only afterwards. # noqa: E501 :return: The scheduling_gates of this V1PodSpec. # noqa: E501 :rtype: list[V1PodSchedulingGate] """ return self._scheduling_gates @scheduling_gates.setter def scheduling_gates(self, scheduling_gates): """Sets the scheduling_gates of this V1PodSpec. SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. SchedulingGates can only be set at pod creation time, and be removed only afterwards. # noqa: E501 :param scheduling_gates: The scheduling_gates of this V1PodSpec. # noqa: E501 :type: list[V1PodSchedulingGate] """ self._scheduling_gates = scheduling_gates @property def security_context(self): """Gets the security_context of this V1PodSpec. # noqa: E501 :return: The security_context of this V1PodSpec. # noqa: E501 :rtype: V1PodSecurityContext """ return self._security_context @security_context.setter def security_context(self, security_context): """Sets the security_context of this V1PodSpec. :param security_context: The security_context of this V1PodSpec. # noqa: E501 :type: V1PodSecurityContext """ self._security_context = security_context @property def service_account(self): """Gets the service_account of this V1PodSpec. # noqa: E501 DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. # noqa: E501 :return: The service_account of this V1PodSpec. # noqa: E501 :rtype: str """ return self._service_account @service_account.setter def service_account(self, service_account): """Sets the service_account of this V1PodSpec. DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. # noqa: E501 :param service_account: The service_account of this V1PodSpec. # noqa: E501 :type: str """ self._service_account = service_account @property def service_account_name(self): """Gets the service_account_name of this V1PodSpec. # noqa: E501 ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ # noqa: E501 :return: The service_account_name of this V1PodSpec. # noqa: E501 :rtype: str """ return self._service_account_name @service_account_name.setter def service_account_name(self, service_account_name): """Sets the service_account_name of this V1PodSpec. ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ # noqa: E501 :param service_account_name: The service_account_name of this V1PodSpec. # noqa: E501 :type: str """ self._service_account_name = service_account_name @property def set_hostname_as_fqdn(self): """Gets the set_hostname_as_fqdn of this V1PodSpec. # noqa: E501 If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). 
In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. # noqa: E501 :return: The set_hostname_as_fqdn of this V1PodSpec. # noqa: E501 :rtype: bool """ return self._set_hostname_as_fqdn @set_hostname_as_fqdn.setter def set_hostname_as_fqdn(self, set_hostname_as_fqdn): """Sets the set_hostname_as_fqdn of this V1PodSpec. If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. # noqa: E501 :param set_hostname_as_fqdn: The set_hostname_as_fqdn of this V1PodSpec. # noqa: E501 :type: bool """ self._set_hostname_as_fqdn = set_hostname_as_fqdn @property def share_process_namespace(self): """Gets the share_process_namespace of this V1PodSpec. # noqa: E501 Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. # noqa: E501 :return: The share_process_namespace of this V1PodSpec. # noqa: E501 :rtype: bool """ return self._share_process_namespace @share_process_namespace.setter def share_process_namespace(self, share_process_namespace): """Sets the share_process_namespace of this V1PodSpec. Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. # noqa: E501 :param share_process_namespace: The share_process_namespace of this V1PodSpec. # noqa: E501 :type: bool """ self._share_process_namespace = share_process_namespace @property def subdomain(self): """Gets the subdomain of this V1PodSpec. # noqa: E501 If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all. # noqa: E501 :return: The subdomain of this V1PodSpec. # noqa: E501 :rtype: str """ return self._subdomain @subdomain.setter def subdomain(self, subdomain): """Sets the subdomain of this V1PodSpec. If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all. # noqa: E501 :param subdomain: The subdomain of this V1PodSpec. # noqa: E501 :type: str """ self._subdomain = subdomain @property def termination_grace_period_seconds(self): """Gets the termination_grace_period_seconds of this V1PodSpec. # noqa: E501 Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. 
Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. # noqa: E501 :return: The termination_grace_period_seconds of this V1PodSpec. # noqa: E501 :rtype: int """ return self._termination_grace_period_seconds @termination_grace_period_seconds.setter def termination_grace_period_seconds(self, termination_grace_period_seconds): """Sets the termination_grace_period_seconds of this V1PodSpec. Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. # noqa: E501 :param termination_grace_period_seconds: The termination_grace_period_seconds of this V1PodSpec. # noqa: E501 :type: int """ self._termination_grace_period_seconds = termination_grace_period_seconds @property def tolerations(self): """Gets the tolerations of this V1PodSpec. # noqa: E501 If specified, the pod's tolerations. # noqa: E501 :return: The tolerations of this V1PodSpec. # noqa: E501 :rtype: list[V1Toleration] """ return self._tolerations @tolerations.setter def tolerations(self, tolerations): """Sets the tolerations of this V1PodSpec. If specified, the pod's tolerations. # noqa: E501 :param tolerations: The tolerations of this V1PodSpec. # noqa: E501 :type: list[V1Toleration] """ self._tolerations = tolerations @property def topology_spread_constraints(self): """Gets the topology_spread_constraints of this V1PodSpec. # noqa: E501 TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. # noqa: E501 :return: The topology_spread_constraints of this V1PodSpec. # noqa: E501 :rtype: list[V1TopologySpreadConstraint] """ return self._topology_spread_constraints @topology_spread_constraints.setter def topology_spread_constraints(self, topology_spread_constraints): """Sets the topology_spread_constraints of this V1PodSpec. TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. # noqa: E501 :param topology_spread_constraints: The topology_spread_constraints of this V1PodSpec. # noqa: E501 :type: list[V1TopologySpreadConstraint] """ self._topology_spread_constraints = topology_spread_constraints @property def volumes(self): """Gets the volumes of this V1PodSpec. # noqa: E501 List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501 :return: The volumes of this V1PodSpec. 
# noqa: E501 :rtype: list[V1Volume] """ return self._volumes @volumes.setter def volumes(self, volumes): """Sets the volumes of this V1PodSpec. List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501 :param volumes: The volumes of this V1PodSpec. # noqa: E501 :type: list[V1Volume] """ self._volumes = volumes def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1PodSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1PodSpec): return True return self.to_dict() != other.to_dict()
V1PodSpec
python
google__jax
tests/jaxpr_effects_test.py
{ "start": 1796, "end": 3690 }
class ____(effects.JaxprInputEffect):
  pass

foo_effect = OrderedEffect("foo")
foo2_effect = OrderedEffect("foo2")
bar_effect = BasicEffect("bar")
baz_effect = UnlowerableEffect()
while_effect = WhileEffect()
while1_effect = WhileEffect()
while2_effect = WhileEffect()
log_effect = OrderedEffect("log")
unordered_log_effect = BasicEffect("unordered_log")

effects.lowerable_effects.add_type(BasicEffect)
effects.lowerable_effects.add_type(WhileEffect)
effects.ordered_effects.add_type(OrderedEffect)
effects.ordered_effects.add_type(WhileEffect)
effects.control_flow_allowed_effects.add_type(WhileEffect)
effects.remat_allowed_effects.add_type(RematEffect)
effects.control_flow_allowed_effects.add_type(InputEffect)


def trivial_effect_lowering(ctx, *, effect):
  ctx.set_tokens_out(ctx.tokens_in)
  return []
mlir.register_lowering(effect_p, trivial_effect_lowering)


def function_effect_lowering(ctx, *, effect):
  ctx.set_tokens_out(ctx.tokens_in)
  return []

callback_p = core.Primitive('callback')
callback_p.multiple_results = True


@callback_p.def_impl
def _(*args, callback, out_avals, effect):
  del out_avals, effect
  callback(*args)
  return []


@callback_p.def_effectful_abstract_eval
def _(*avals, callback, out_avals, effect):
  del avals, callback
  return out_avals, {effect}


def callback_effect_lowering(ctx: mlir.LoweringRuleContext, *args, callback, out_avals, effect):
  del out_avals
  token_in = None
  if effects.ordered_effects.contains(effect):
    token_in = ctx.tokens_in.get(effect)

  out_op, token_out, _ = cb.emit_python_callback(
      ctx, callback, token_in, list(args), list(ctx.avals_in),
      list(ctx.avals_out), has_side_effect=True, returns_token=True)
  if token_out:
    ctx.set_tokens_out(ctx.tokens_in.update_tokens(mlir.TokenSet({effect: token_out})))
  return out_op
mlir.register_lowering(callback_p, callback_effect_lowering)
InputEffect
python
getsentry__sentry
src/sentry/replays/lib/new_query/conditions.py
{ "start": 2731, "end": 3931 }
class ____(GenericBase):
    """Integer scalar condition class."""

    @staticmethod
    def visit_eq(expression: Expression, value: int) -> Condition:
        return Condition(expression, Op.EQ, value)

    @staticmethod
    def visit_neq(expression: Expression, value: int) -> Condition:
        return Condition(expression, Op.NEQ, value)

    @staticmethod
    def visit_gt(expression: Expression, value: int) -> Condition:
        return Condition(expression, Op.GT, value)

    @staticmethod
    def visit_gte(expression: Expression, value: int) -> Condition:
        return Condition(expression, Op.GTE, value)

    @staticmethod
    def visit_lt(expression: Expression, value: int) -> Condition:
        return Condition(expression, Op.LT, value)

    @staticmethod
    def visit_lte(expression: Expression, value: int) -> Condition:
        return Condition(expression, Op.LTE, value)

    @staticmethod
    def visit_in(expression: Expression, value: list[int]) -> Condition:
        return Condition(expression, Op.IN, value)

    @staticmethod
    def visit_not_in(expression: Expression, value: list[int]) -> Condition:
        return Condition(expression, Op.NOT_IN, value)
IntegerScalar
python
tiangolo__fastapi
tests/test_response_model_data_filter_no_inheritance.py
{ "start": 209, "end": 276 }
class ____(BaseModel):
    email: str
    hashed_password: str
UserDB
python
huggingface__transformers
src/transformers/models/nemotron/modeling_nemotron.py
{ "start": 3329, "end": 8564 }
class ____(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: NemotronConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        rope_init_fn: Callable = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = inv_freq

    @staticmethod
    # Ignore copy
    def compute_default_rope_parameters(
        config: Optional[NemotronConfig] = None,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
        head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        dim = int(head_dim * partial_rotary_factor)

        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    rot_dim = cos.shape[-1]
    # If q_pass/k_pass is empty, rotary pos embedding is applied to all tensor q/k
    q, q_pass = q[..., :rot_dim], q[..., rot_dim:]
    k, k_pass = k[..., :rot_dim], k[..., rot_dim:]

    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return torch.cat((q_embed, q_pass), dim=-1), torch.cat((k_embed, k_pass), dim=-1)
NemotronRotaryEmbedding
python
openai__openai-python
examples/responses/streaming.py
{ "start": 159, "end": 526 }
class ____(BaseModel):
    steps: List[Step]
    final_answer: str


client = OpenAI()

with client.responses.stream(
    input="solve 8x + 31 = 2",
    model="gpt-4o-2024-08-06",
    text_format=MathResponse,
) as stream:
    for event in stream:
        if "output_text" in event.type:
            rich.print(event)

rich.print(stream.get_final_response())
MathResponse
python
yaml__pyyaml
tests/legacy_tests/conftest.py
{ "start": 463, "end": 1425 }
class ____(pytest.Item):
    def __init__(self, parent=None, config=None, session=None, nodeid=None, function=None, filenames=None, **kwargs):
        self._function = function
        self._fargs = filenames or []
        super().__init__(os.path.basename(filenames[0]) if filenames else parent.name, parent, config, session, nodeid)

        # this is gnarly since the type of fspath is private; fixed in pytest 7 to use pathlib on the `path` attr
        if filenames:
            # pass the data file location as the test path
            self.fspath = parent.fspath.__class__(filenames[0])
            self.lineno = 1
        else:
            # pass the function location in the code
            self.fspath = parent.fspath.__class__(function.__code__.co_filename)
            self.lineno = function.__code__.co_firstlineno

    def runtest(self):
        self._function(verbose=True, *self._fargs)

    def reportinfo(self):
        return self.fspath, self.lineno, ''
PyYAMLItem
python
pytorch__pytorch
torch/distributions/transforms.py
{ "start": 33614, "end": 34288 }
class ____(Transform):
    """
    Transform from unconstrained matrices to lower-triangular matrices with
    nonnegative diagonal entries.

    This is useful for parameterizing positive definite matrices in terms of
    their Cholesky factorization.
    """

    domain = constraints.independent(constraints.real, 2)
    codomain = constraints.lower_cholesky

    def __eq__(self, other):
        return isinstance(other, LowerCholeskyTransform)

    def _call(self, x):
        return x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed()

    def _inverse(self, y):
        return y.tril(-1) + y.diagonal(dim1=-2, dim2=-1).log().diag_embed()
LowerCholeskyTransform
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-surrealdb/destination_surrealdb/destination.py
{ "start": 2765, "end": 12503 }
class ____(Destination):
    """
    Destination connector for SurrealDB.
    """

    def write(
        self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
    ) -> Iterable[AirbyteMessage]:
        """
        Reads the input stream of messages, config, and catalog to write data to the destination.

        This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received in
        the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
        successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully
        completing, then the source is given the last state message output from this method as the starting point of the next sync.

        :param config: dict of JSON configuration matching the configuration declared in spec.json
        :param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be
                                   persisted in the destination
        :param input_messages: The stream of input messages received from the source
        :return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
        """
        streams = {s.stream.name for s in configured_catalog.streams}
        logger.info("Starting write to SurrealDB with %d streams", len(streams))

        con = surrealdb_connect(config)
        namespace = str(config.get(CONFIG_SURREALDB_NAMESPACE))
        database = str(config.get(CONFIG_SURREALDB_DATABASE))
        con.query(f"DEFINE NAMESPACE IF NOT EXISTS {namespace};")
        con.query(f"DEFINE DATABASE IF NOT EXISTS {database};")
        con.use(namespace, database)

        # See https://docs.airbyte.com/release_notes/upgrading_to_destinations_v2#breakdown-of-breaking-changes
        is_legacyv1 = False
        if "airbyte_destinations_version" in config:
            is_legacyv1 = config["airbyte_destinations_version"] == "v1"
        do_write_raw = False
        if "airbyte_write_raw" in config:
            do_write_raw = config["airbyte_write_raw"]

        dest_table_definitions = {}
        for configured_stream in configured_catalog.streams:
            table_name = configured_stream.stream.name
            if configured_stream.destination_sync_mode == DestinationSyncMode.overwrite:
                # delete the tables
                logger.info("Removing table for overwrite: %s", table_name)
                con.query(f"REMOVE TABLE IF EXISTS {table_name};")
            # create the table if needed
            con.query(f"DEFINE TABLE IF NOT EXISTS {table_name};")
            looks_raw = table_name.startswith("airbyte_raw_")
            fields_to_types = {}
            if is_legacyv1:
                fields_to_types = {"_airbyte_ab_id": "string", "_airbyte_emitted_at": "datetime"}
                if looks_raw:
                    fields_to_types["_airbyte_data"] = "string"
            else:
                fields_to_types = {
                    "_airbyte_raw_id": "string",
                    "_airbyte_extracted_at": "datetime",
                }
                if looks_raw:
                    fields_to_types["_airbyte_data"] = "object"
                    fields_to_types["_airbyte_loaded_at"] = "datetime"
                else:
                    fields_to_types["_airbyte_meta"] = "object"
            stream_fields = configured_stream.stream.json_schema["properties"].keys()
            for field_name in stream_fields:
                props = configured_stream.stream.json_schema["properties"][field_name]
                tpe = props["type"]
                fmt = props["format"] if "format" in props else None
                if tpe == "string" and fmt == "date-time":
                    fields_to_types[field_name] = "datetime"
                elif tpe == "integer":
                    fields_to_types[field_name] = "int"
                else:
                    fields_to_types[field_name] = tpe
            for field_name, field_type in fields_to_types.items():
                con.query(f"DEFINE FIELD OVERWRITE {field_name} ON {table_name} TYPE {field_type};")
            dest_table_definitions[table_name] = fields_to_types

        buffer = defaultdict(lambda: defaultdict(list))
        for message in input_messages:
            if message.type == Type.STATE:
                # flush the buffer
                for stream_name in buffer.keys():
                    logger.info("flushing buffer for state: %s", message)
                    DestinationSurrealDB._flush_buffer(con=con, buffer=buffer, stream_name=stream_name)
                buffer = defaultdict(lambda: defaultdict(list))
                yield message
            elif message.type == Type.RECORD:
                data = message.record.data
                stream_name = message.record.stream
                if stream_name not in streams:
                    logger.debug("Stream %s was not present in configured streams, skipping", stream_name)
                    continue
                emitted_at = message.record.emitted_at
                emitted_at = datetime.datetime.fromtimestamp(emitted_at / 1000, datetime.timezone.utc)
                loaded_at = datetime.datetime.now(datetime.timezone.utc)
                # add to buffer
                raw_id = str(uuid.uuid4())
                if is_legacyv1:
                    # OLD Raw Table Columns
                    # See https://docs.airbyte.com/release_notes/upgrading_to_destinations_v2#breakdown-of-breaking-changes
                    buffer[stream_name]["_airbyte_ab_id"].append(raw_id)
                    buffer[stream_name]["_airbyte_emitted_at"].append(emitted_at)
                    buffer[stream_name]["_airbyte_loaded_at"].append(loaded_at)
                else:
                    record_meta: dict[str, str] = {}
                    buffer[stream_name][AB_RAW_ID_COLUMN].append(raw_id)
                    buffer[stream_name][AB_EXTRACTED_AT_COLUMN].append(loaded_at)
                    buffer[stream_name][AB_META_COLUMN].append(record_meta)
                if do_write_raw or stream_name.startswith("airbyte_raw_"):
                    if is_legacyv1:
                        # OLD Raw Table Columns
                        # See https://docs.airbyte.com/release_notes/upgrading_to_destinations_v2#breakdown-of-breaking-changes
                        buffer[stream_name]["_airbyte_data"].append(json.dumps(data))
                    else:
                        buffer[stream_name]["_airbyte_data"].append(data)
                else:
                    for field_name in data.keys():
                        raw_data = data[field_name]
                        if field_name not in dest_table_definitions[stream_name]:
                            logger.error("field %s not in dest_table_definitions[%s]", field_name, stream_name)
                            continue
                        field_type = dest_table_definitions[stream_name][field_name]
                        if field_type == "datetime":
                            # This supports the following cases:
                            # - "2022-06-20T18:56:18" in case airbyte_type is "timestamp_without_timezone"
                            raw_data = datetime.datetime.fromisoformat(raw_data)
                        buffer[stream_name][field_name].append(raw_data)
            else:
                logger.info("Message type %s not supported, skipping", message.type)

        # flush any remaining messages
        for stream_name in buffer.keys():
            DestinationSurrealDB._flush_buffer(con=con, buffer=buffer, stream_name=stream_name)

    @staticmethod
    def _flush_buffer(*, con: Surreal, buffer: Dict[str, Dict[str, List[Any]]], stream_name: str):
        table_name = stream_name
        buf = buffer[stream_name]
        field_names = buf.keys()
        id_field = "_airbyte_ab_id" if "_airbyte_ab_id" in field_names else AB_RAW_ID_COLUMN
        id_column = buf[id_field]
        for i, _id in enumerate(id_column):
            record = {}
            for field_name in field_names:
                record[field_name] = buf[field_name][i]
            try:
                con.upsert(f"{table_name}:{_id}", record)
            except Exception as e:
                logger.error("error upserting record %s: %s", record, e)

    def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
        """
        Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
            e.g: if a provided API token or password can be used to connect and write to the destination.

        :param logger: Logging object to display debug/info/error to the logs
            (logs will not be accessible via airbyte UI if they are not passed to this logger)
        :param config: Json object containing the configuration of this destination, content of this json is as specified in
        the properties of the spec.json file

        :return: AirbyteConnectionStatus indicating a Success or Failure
        """
        try:
            con = surrealdb_connect(config)
            logger.debug("Connected to SurrealDB. Running test query.")
            con.query("SELECT * FROM [1];")
            logger.debug("Test query succeeded.")
            return AirbyteConnectionStatus(status=Status.SUCCEEDED)
        except Exception as e:
            return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
DestinationSurrealDB
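The connector above buffers incoming records column-by-column per stream and only rebuilds row dicts at flush time. Below is a minimal, standalone sketch of that buffering pattern, assuming nothing beyond the standard library; the stream and field names are illustrative, and the SurrealDB upsert is replaced by returning the rows so the sketch runs without a database.

import uuid
from collections import defaultdict

# stream name -> column name -> list of values (one entry per buffered record)
buffer = defaultdict(lambda: defaultdict(list))

def buffer_record(stream_name, data):
    # Mirror the connector: every record gets a generated raw-id column.
    buffer[stream_name]["_airbyte_raw_id"].append(str(uuid.uuid4()))
    for field_name, value in data.items():
        buffer[stream_name][field_name].append(value)

def flush_stream(stream_name):
    # Rebuild one row dict per buffered id, as _flush_buffer does before upserting.
    buf = buffer[stream_name]
    return [
        {field: values[i] for field, values in buf.items()}
        for i in range(len(buf["_airbyte_raw_id"]))
    ]

buffer_record("users", {"id": 1, "name": "alice"})  # illustrative stream and fields
buffer_record("users", {"id": 2, "name": "bob"})
print(flush_stream("users"))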
python
weaviate__weaviate-python-client
weaviate/exceptions.py
{ "start": 13599, "end": 13955 }
class ____(Exception): def __init__(self, pb: version.Version, grpc: version.Version) -> None: super().__init__( f"gRPC incompatibility detected. Protobuf: {pb.base_version}, gRPC: {grpc.base_version}. Ensure that your protobuf and grpcio versions are compatible or runtime errors may occur." )
WeaviateProtobufIncompatibility
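A hedged usage sketch of the exception above: it assumes the `version` module in the signature is `packaging.version` (as the type hints suggest) and that the class is importable from `weaviate.exceptions` per the path shown; the version strings are illustrative values, not a real compatibility rule.

from packaging import version  # assumption: the Version type above comes from packaging.version

from weaviate.exceptions import WeaviateProtobufIncompatibility  # defined in weaviate/exceptions.py above

pb_version = version.parse("4.25.1")    # illustrative values only
grpc_version = version.parse("1.48.0")  # real compatibility detection lives in the client
try:
    raise WeaviateProtobufIncompatibility(pb_version, grpc_version)
except WeaviateProtobufIncompatibility as err:
    print(err)  # "gRPC incompatibility detected. Protobuf: 4.25.1, gRPC: 1.48.0. ..."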
python
doocs__leetcode
solution/2400-2499/2484.Count Palindromic Subsequences/Solution.py
{ "start": 0, "end": 1063 }
class ____:
    def countPalindromes(self, s: str) -> int:
        # Count palindromic subsequences of length 5. Such a palindrome is fixed by
        # its first two digits (j, k), a free middle digit, and the mirrored pair at the end.
        mod = 10**9 + 7
        n = len(s)
        # pre[i][j][k]: pairs of positions p < q <= i whose digits read (j, k) left to right.
        # suf[i][j][k]: pairs of positions i <= p < q whose digits read (k, j) left to right,
        # i.e. the mirror image of a prefix pair (j, k).
        pre = [[[0] * 10 for _ in range(10)] for _ in range(n + 2)]
        suf = [[[0] * 10 for _ in range(10)] for _ in range(n + 2)]
        t = list(map(int, s))
        # Build prefix pair counts left to right; c[j] = occurrences of digit j seen so far.
        c = [0] * 10
        for i, v in enumerate(t, 1):
            for j in range(10):
                for k in range(10):
                    pre[i][j][k] = pre[i - 1][j][k]
            for j in range(10):
                pre[i][j][v] += c[j]
            c[v] += 1
        # Build suffix pair counts right to left in the same way.
        c = [0] * 10
        for i in range(n, 0, -1):
            v = t[i - 1]
            for j in range(10):
                for k in range(10):
                    suf[i][j][k] = suf[i + 1][j][k]
            for j in range(10):
                suf[i][j][v] += c[j]
            c[v] += 1
        # Fix each position i as the middle digit and match every prefix pair (j, k)
        # before i with a mirrored suffix pair after i, forming j k t[i-1] k j.
        ans = 0
        for i in range(1, n + 1):
            for j in range(10):
                for k in range(10):
                    ans += pre[i - 1][j][k] * suf[i + 1][j][k]
                    ans %= mod
        return ans
Solution
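Because the counting logic above is dense, here is a tiny brute-force cross-check: it enumerates every choice of five indices and tests the palindrome condition directly. It assumes the `Solution` class above is in scope; the test strings are arbitrary short examples, and the brute force is exponential, so it is only suitable for verification on small inputs.

from itertools import combinations

def count_palindromes_bruteforce(s: str) -> int:
    # A length-5 subsequence at indices a < b < c < d < e is a palindrome
    # iff its outer digits match pairwise; the middle digit is free.
    return sum(
        s[a] == s[e] and s[b] == s[d]
        for a, b, c, d, e in combinations(range(len(s)), 5)
    ) % (10**9 + 7)

# Assumes the Solution class above has been defined in this scope.
for s in ["103301", "0000000", "9999900", "1234321"]:
    assert Solution().countPalindromes(s) == count_palindromes_bruteforce(s)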
python
doocs__leetcode
solution/2600-2699/2605.Form Smallest Number From Two Digit Arrays/Solution3.py
{ "start": 0, "end": 446 }
class ____:
    def minNumber(self, nums1: List[int], nums2: List[int]) -> int:
        # Encode the digits present in each array as bitmasks (bit x set <=> digit x occurs).
        mask1 = mask2 = 0
        for x in nums1:
            mask1 |= 1 << x
        for x in nums2:
            mask2 |= 1 << x
        # If the arrays share a digit, the smallest shared digit alone is the answer;
        # (mask & -mask) isolates the lowest set bit, i.e. the minimum digit.
        mask = mask1 & mask2
        if mask:
            return (mask & -mask).bit_length() - 1
        # Otherwise combine the smallest digit of each array into a two-digit number,
        # taking the cheaper of the two orderings.
        a = (mask1 & -mask1).bit_length() - 1
        b = (mask2 & -mask2).bit_length() - 1
        return min(a * 10 + b, b * 10 + a)
Solution
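As a sanity check on the bitmask trick above, the smallest valid answer can also be found by scanning integers upward until one contains a digit from each array. This assumes the `Solution` class above is in scope (with `List` imported from `typing`, as in the original file); the example inputs are illustrative.

def min_number_bruteforce(nums1, nums2):
    s1, s2 = set(nums1), set(nums2)
    x = 1
    while True:
        # The first integer whose digit set intersects both arrays is the answer.
        digits = set(map(int, str(x)))
        if digits & s1 and digits & s2:
            return x
        x += 1

# Assumes the Solution class above is in scope; inputs are illustrative.
assert Solution().minNumber([4, 1, 3], [5, 7]) == min_number_bruteforce([4, 1, 3], [5, 7]) == 15
assert Solution().minNumber([3, 5, 2, 6], [3, 1, 7]) == min_number_bruteforce([3, 5, 2, 6], [3, 1, 7]) == 3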