Column      Type           Value stats
language    stringclasses  1 value
repo        stringclasses  346 values
path        stringlengths  6 to 201
class_span  dict           {start, end}
source      stringlengths  21 to 2.38M
target      stringlengths  1 to 96
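Each record that follows lists those six fields in order: the language, the source repository, the file path, what appear to be character offsets of the class within that file (class_span), the class definition with its name masked as `____` (source), and the masked class name itself (target). The sketch below shows one way such a record might be consumed; it uses a toy stand-in record with hypothetical values rather than an actual row from this table.

```python
# Minimal consumption sketch for a record in this schema. The record below is
# a hypothetical toy example, not a real row; real rows carry full class
# bodies in `source` and the masked class name in `target`.
masked_source = (
    "class ____:\n"
    "    def greet(self) -> str:\n"
    "        return 'hi'\n"
)
record = {
    "language": "python",
    "repo": "example__toy-repo",                       # hypothetical
    "path": "toy/module.py",                           # hypothetical
    "class_span": {"start": 0, "end": len(masked_source)},
    "source": masked_source,
    "target": "Greeter",
}

# Fill the blank with the target name to reconstruct the original class.
unmasked = record["source"].replace("____", record["target"], 1)
namespace: dict = {}
exec(unmasked, namespace)
print(namespace[record["target"]]().greet())  # -> hi
```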
python
dagster-io__dagster
python_modules/libraries/dagster-deltalake/dagster_deltalake/config.py
{ "start": 2052, "end": 3863 }
class ____(Config): """Storage configuration for Amazon Web Services (AWS) S3 object store.""" provider: Literal["s3"] = "s3" access_key_id: Optional[str] = None """AWS access key ID""" secret_access_key: Optional[str] = None """AWS access key secret""" region: Optional[str] = None """AWS region""" bucket: Optional[str] = None """Storage bucket name""" endpoint: Optional[str] = None """Sets custom endpoint for communicating with S3.""" token: Optional[str] = None """Token to use for requests (passed to underlying provider)""" imdsv1_fallback: bool = False """Allow fall back to ImdsV1""" virtual_hosted_style_request: Optional[str] = None """Bucket is hosted under virtual-hosted-style URL""" unsigned_payload: Optional[bool] = None """Avoid computing payload checksum when calculating signature.""" checksum: Optional[str] = None """Set the checksum algorithm for this client.""" metadata_endpoint: Optional[str] = None """Instance metadata endpoint URL for fetching credentials""" container_credentials_relative_uri: Optional[str] = None """Set the container credentials relative URI https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html """ copy_if_not_exists: Optional[str] = None """Specify additional headers passed to storage backend that enable 'if_not_exists' semantics. https://docs.rs/object_store/0.7.0/object_store/aws/enum.S3CopyIfNotExists.html#variant.Header """ allow_unsafe_rename: Optional[bool] = None """Allows table writes that may conflict with concurrent writers.""" def str_dict(self) -> dict[str, str]: """Storage options as str dict.""" return _to_str_dict(self.dict())
S3Config
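For context on this first record, a small usage sketch follows. It assumes the dagster-deltalake package that this file belongs to is installed; the field values are hypothetical, and the exact `str_dict()` contents depend on the `_to_str_dict` helper, which lives elsewhere in config.py and is not part of this excerpt.

```python
# Usage sketch; assumes dagster-deltalake is installed. Field values are
# hypothetical, and the precise str_dict() output depends on _to_str_dict,
# which is defined outside the excerpt above.
from dagster_deltalake.config import S3Config

cfg = S3Config(region="us-east-1", bucket="my-bucket")
options = cfg.str_dict()  # storage options flattened to a str -> str mapping
print(options.get("region"), options.get("bucket"))
```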
python
dask__distributed
distributed/dashboard/components/scheduler.py
{ "start": 20833, "end": 22364 }
class ____(DashboardComponent): """Histogram of memory usage, showing how many workers there are in each bucket of usage. Replaces the per-worker graph when there are >= 50 workers. """ @log_errors def __init__(self, scheduler, **kwargs): self.last = 0 self.scheduler = scheduler self.source = ColumnDataSource( {"left": [1, 2], "right": [10, 10], "top": [0, 0]} ) self.root = figure( title="Bytes stored per worker", name="workers_memory", y_axis_label="frequency", tools="", **kwargs, ) self.root.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b") self.root.xaxis.ticker = AdaptiveTicker(**TICKS_1024) self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION self.root.xaxis.minor_tick_line_alpha = 0 self.root.ygrid.visible = False self.root.toolbar_location = None self.root.quad( source=self.source, left="left", right="right", bottom=0, top="top", color="deepskyblue", fill_alpha=0.5, ) @without_property_validation def update(self): nbytes = np.asarray( [ws.metrics["memory"] for ws in self.scheduler.workers.values()] ) counts, x = np.histogram(nbytes, bins=40) d = {"left": x[:-1], "right": x[1:], "top": counts} update(self.source, d)
WorkersMemoryHistogram
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1012943, "end": 1013411 }
class ____(sgqlc.types.Type): """Autogenerated return type of UnmarkProjectV2AsTemplate""" __schema__ = github_schema __field_names__ = ("client_mutation_id", "project_v2") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" project_v2 = sgqlc.types.Field("ProjectV2", graphql_name="projectV2") """The project."""
UnmarkProjectV2AsTemplatePayload
python
walkccc__LeetCode
solutions/1895. Largest Magic Square/1895.py
{ "start": 0, "end": 1617 }
class ____: def largestMagicSquare(self, grid: list[list[int]]) -> int: m = len(grid) n = len(grid[0]) # prefixRow[i][j] := the sum of the first j numbers in the i-th row prefixRow = [[0] * (n + 1) for _ in range(m)] # prefixCol[i][j] := the sum of the first j numbers in the i-th column prefixCol = [[0] * (m + 1) for _ in range(n)] for i in range(m): for j in range(n): prefixRow[i][j + 1] = prefixRow[i][j] + grid[i][j] prefixCol[j][i + 1] = prefixCol[j][i] + grid[i][j] def isMagicSquare(i: int, j: int, k: int) -> bool: """Returns True if grid[i..i + k)[j..j + k) is a magic square.""" diag, antiDiag = 0, 0 for d in range(k): diag += grid[i + d][j + d] antiDiag += grid[i + d][j + k - 1 - d] if diag != antiDiag: return False for d in range(k): if self._getSum(prefixRow, i + d, j, j + k - 1) != diag: return False if self._getSum(prefixCol, j + d, i, i + k - 1) != diag: return False return True def containsMagicSquare(k: int) -> bool: """Returns True if the grid contains any magic square of size k x k.""" for i in range(m - k + 1): for j in range(n - k + 1): if isMagicSquare(i, j, k): return True return False for k in range(min(m, n), 1, -1): if containsMagicSquare(k): return k return 1 def _getSum(self, prefix: list[list[int]], i: int, l: int, r: int) -> int: """Returns sum(grid[i][l..r]) or sum(grid[l..r][i]).""" return prefix[i][r + 1] - prefix[i][l]
Solution
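As a quick sanity check on this record, the snippet below exercises the reconstructed class (blank filled with its target name, `Solution`) on the first example from LeetCode problem 1895, from which the grid and expected answer are taken.

```python
# Assumes the class from the record above has been reconstructed under its
# target name, Solution (for instance via the replace-and-exec sketch near
# the top of this section).
grid = [
    [7, 1, 4, 5, 6],
    [2, 5, 1, 6, 4],
    [1, 5, 4, 3, 2],
    [1, 2, 7, 3, 4],
]
# Expected: 3 -- the 3x3 magic square spanning rows 1-3, columns 1-3.
print(Solution().largestMagicSquare(grid))
```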
python
PyCQA__pycodestyle
tests/test_blank_lines.py
{ "start": 9601, "end": 10114 }
class ____(object): pass """) self.assertEqual([ 'E302:9:1', # another_function 'E302:17:1', # SomeCloseClass ], result) def test_top_level_more_blank_lines(self): """ It will trigger an error when more 2 blank lines are found before top level definitions. """ result = errors_from_src("""# First comment line. # Second line of comment. def some_function(): pass def this_one_is_good(): pass
AFarEnoughClass
python
sqlalchemy__sqlalchemy
test/orm/test_options.py
{ "start": 27301, "end": 37167 }
class ____(_fixtures.FixtureTest): """test the error messages emitted when using property options in conjunction with column-only entities, or for not existing options """ run_create_tables = False run_inserts = None run_deletes = None def test_option_with_mapper_PropCompatator(self): Item = self.classes.Item self._assert_option([Item], Item.keywords) def test_option_with_mapper_then_column_PropComparator(self): Item = self.classes.Item self._assert_option([Item, Item.id], Item.keywords) def test_option_with_column_then_mapper_PropComparator(self): Item = self.classes.Item self._assert_option([Item.id, Item], Item.keywords) def test_option_with_column_PropComparator(self): Item = self.classes.Item self._assert_eager_with_just_column_exception( Item.id, Item.keywords, r"Query has only expression-based entities; attribute loader " r"options for Mapper\[Item\(items\)\] can't be applied here.", ) def test_option_against_nonexistent_PropComparator(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword], (joinedload(Item.keywords),), r"Mapped class Mapper\[Item\(items\)\] does not apply to any of " "the root entities in this query, e.g. " r"Mapper\[Keyword\(keywords\)\]. " "Please specify the full path from one of " "the root entities to the target attribute. ", ) def test_load_only_against_multi_entity_attr(self): User = self.classes.User Item = self.classes.Item self._assert_eager_with_entity_exception( [User, Item], lambda: (load_only(User.id, Item.id),), r"Can't apply wildcard \('\*'\) or load_only\(\) loader option " r"to multiple entities in the same option. Use separate options " "per entity.", ) def test_col_option_against_relationship_attr(self): Item = self.classes.Item self._assert_loader_strategy_exception( [Item], lambda: (load_only(Item.keywords),), 'Can\'t apply "column loader" strategy to property ' '"Item.keywords", which is a "relationship"; this ' 'loader strategy is intended to be used with a "column property".', ) def test_option_against_wrong_multi_entity_type_attr_one(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_loader_strategy_exception( [Keyword, Item], lambda: (joinedload(Keyword.id).joinedload(Item.keywords),), 'Can\'t apply "joined loader" strategy to property "Keyword.id", ' 'which is a "column property"; this loader strategy is intended ' 'to be used with a "relationship property".', ) def test_option_against_wrong_multi_entity_type_attr_two(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_loader_strategy_exception( [Keyword, Item], lambda: (joinedload(Keyword.keywords).joinedload(Item.keywords),), 'Can\'t apply "joined loader" strategy to property ' '"Keyword.keywords", which is a "mapped sql expression"; ' "this loader " 'strategy is intended to be used with a "relationship property".', ) def test_option_against_wrong_multi_entity_type_attr_three(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword.id, Item.id], lambda: (joinedload(Keyword.keywords),), r"Query has only expression-based entities; attribute loader " r"options for Mapper\[Keyword\(keywords\)\] can't be applied " "here.", ) @testing.combinations(True, False, argnames="first_element") def test_wrong_type_in_option_cls(self, first_element): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Item], lambda: ( (joinedload(Keyword),) if first_element else 
(Load(Item).joinedload(Keyword),) ), "expected ORM mapped attribute for loader strategy argument", ) @testing.combinations( (15,), (object(),), (type,), ({"foo": "bar"},), argnames="rando" ) @testing.combinations(True, False, argnames="first_element") def test_wrong_type_in_option_any_random_type(self, rando, first_element): Item = self.classes.Item self._assert_eager_with_entity_exception( [Item], lambda: ( (joinedload(rando),) if first_element else (Load(Item).joinedload(rando)) ), "expected ORM mapped attribute for loader strategy argument", ) @testing.combinations(True, False, argnames="first_element") def test_wrong_type_in_option_descriptor(self, first_element): OrderWProp = self.classes.OrderWProp self._assert_eager_with_entity_exception( [OrderWProp], lambda: ( (joinedload(OrderWProp.some_attr),) if first_element else (Load(OrderWProp).joinedload(OrderWProp.some_attr),) ), "expected ORM mapped attribute for loader strategy argument", ) def test_non_contiguous_all_option(self): User = self.classes.User self._assert_eager_with_entity_exception( [User], lambda: (joinedload(User.addresses).joinedload(User.orders),), r'ORM mapped entity or attribute "User.orders" does not link ' r'from relationship "User.addresses"', ) def test_non_contiguous_all_option_of_type(self): User = self.classes.User Order = self.classes.Order self._assert_eager_with_entity_exception( [User], lambda: ( joinedload(User.addresses).joinedload( User.orders.of_type(Order) ), ), r'ORM mapped entity or attribute "User.orders" does not link ' r'from relationship "User.addresses"', ) @classmethod def setup_mappers(cls): users, User, addresses, Address, orders, Order = ( cls.tables.users, cls.classes.User, cls.tables.addresses, cls.classes.Address, cls.tables.orders, cls.classes.Order, ) cls.mapper_registry.map_imperatively( User, users, properties={ "addresses": relationship(Address), "orders": relationship(Order), }, ) cls.mapper_registry.map_imperatively(Address, addresses) cls.mapper_registry.map_imperatively(Order, orders) keywords, items, item_keywords, Keyword, Item = ( cls.tables.keywords, cls.tables.items, cls.tables.item_keywords, cls.classes.Keyword, cls.classes.Item, ) cls.mapper_registry.map_imperatively( Keyword, keywords, properties={ "keywords": column_property(keywords.c.name + "some keyword") }, ) cls.mapper_registry.map_imperatively( Item, items, properties=dict( keywords=relationship(Keyword, secondary=item_keywords) ), ) class OrderWProp(cls.classes.Order): @property def some_attr(self): return "hi" cls.mapper_registry.map_imperatively( OrderWProp, None, inherits=cls.classes.Order ) def _assert_option(self, entity_list, option): Item = self.classes.Item context = ( fixture_session() .query(*entity_list) .options(joinedload(option)) ._compile_state() ) key = ("loader", (inspect(Item), inspect(Item).attrs.keywords)) assert key in context.attributes def _assert_loader_strategy_exception(self, entity_list, options, message): sess = fixture_session() with expect_raises_message(orm_exc.LoaderStrategyException, message): # accommodate Load() objects that will raise # on construction if callable(options): options = options() # accommodate UnboundLoad objects that will raise # only when compile state is set up sess.query(*entity_list).options(*options)._compile_state() def _assert_eager_with_entity_exception( self, entity_list, options, message ): sess = fixture_session() with expect_raises_message(sa.exc.ArgumentError, message): # accommodate Load() objects that will raise # on construction if 
callable(options): options = options() # accommodate UnboundLoad objects that will raise # only when compile state is set up sess.query(*entity_list).options(*options)._compile_state() def _assert_eager_with_just_column_exception( self, column, eager_option, message ): assert_raises_message( sa.exc.ArgumentError, message, fixture_session() .query(column) .options(joinedload(eager_option)) ._compile_state, )
OptionsNoPropTest
python
tensorflow__tensorflow
tensorflow/python/training/saving/saveable_object_util.py
{ "start": 27593, "end": 33071 }
class ____(trackable.Trackable): """Converts object's `SaveableObjects` to functions used in TF2 checkpointing. A class that converts a Trackable object's `SaveableObjects` to save and restore functions with the same signatures as `Trackable._serialize_to_tensors` and `Trackable._restore_from_tensors`. This class also produces a method for filling the object proto. """ __slots__ = ("_obj", "_saveables") def __init__(self, obj, saveables): """Constructor. Args: obj: A Trackable object. saveables: A list of saveables for `obj`. """ self._obj = obj self._saveables = saveables @property def obj(self): return self._obj @property def saveables(self): """Returns a list of SaveableObjects generated from the Trackable object.""" return self._saveables def _serialize_to_tensors(self): """Returns a dict of tensors to serialize.""" return saveable_object_to_tensor_dict(self.saveables) def _restore_from_tensors(self, restored_tensors): """Returns the restore ops defined in the Saveables.""" # Map restored tensors to the corresponding SaveableObjects, then call # restore. There must be an exact match between restored tensors and the # expected attributes. expected_keys = [] for saveable in self.saveables: expected_keys.extend( trackable_utils.extract_local_name(_convert_to_string(spec.name)) for spec in saveable.specs) if set(expected_keys) != restored_tensors.keys(): raise ValueError(f"Could not restore object {self._obj} because not all " "expected tensors were in the checkpoint." f"\n\tExpected: {expected_keys}" f"\n\tGot: {list(restored_tensors.keys())}") return saveable_object_to_restore_fn(self.saveables)(restored_tensors) def saveable_object_to_tensor_dict(saveables): """Converts a list of SaveableObjects to a tensor dictionary.""" tensor_dict = {} for saveable in saveables: for spec in saveable.specs: name = _convert_to_string(spec.name) slice_spec = _convert_to_string(spec.slice_spec) # Currently, tensor dict cannot handle callable tensor values (which # are needed for uninitialized variables), so keep using SaveSpec. tensor = spec if callable(spec._tensor) else spec._tensor # pylint: disable=protected-access if slice_spec: tensor_dict.setdefault(name, {})[slice_spec] = tensor else: tensor_dict[name] = tensor return tensor_dict def saveable_object_to_restore_fn(saveables): """Generates `Trackable._restore_from_tensors` from SaveableObjects.""" def _restore_from_tensors(restored_tensors): restore_ops = {} for saveable in saveables: saveable_restored_tensors = [] for spec in saveable.specs: name = trackable_utils.extract_local_name(_convert_to_string(spec.name)) slice_spec = _convert_to_string(spec.slice_spec) maybe_tensor = restored_tensors[name] if not isinstance(maybe_tensor, dict): maybe_tensor = {"": maybe_tensor} saveable_restored_tensors.append(maybe_tensor[slice_spec]) restore_ops[saveable.name] = saveable.restore( saveable_restored_tensors, restored_shapes=None) return restore_ops return _restore_from_tensors def serialized_tensors_to_saveable_cache(serialized_tensors): """Converts a tensor dict to a SaveableObject cache. Args: serialized_tensors: Map from Trackable to a tensor dict. The tensor dict maps checkpoint key (-> slice_spec) -> Tensor Returns: A dict mapping Trackable objects to a map from local savable name to SaveableObject. 
""" saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary() for obj, tensor_dict in serialized_tensors.items(): if not tensor_dict: continue if isinstance(obj, SaveableCompatibilityConverter): trackable_obj = obj.obj saveables_cache[trackable_obj] = {} for saveable in obj.saveables: local_name = trackable_utils.extract_local_name(saveable.name) saveables_cache[trackable_obj][local_name] = [saveable] continue specs = [] # The local names and prefixes are computed to ensure that the generated # SaveableObject can call `Trackable._restore_from_tensors()` local_names = [] prefix = saveable_compat.get_saveable_name(obj) or "" for checkpoint_key, maybe_tensor in tensor_dict.items(): # Make sure that `maybe_tensor` is a dict from `slice_spec` to `tensor`. if not isinstance(maybe_tensor, dict): maybe_tensor = {"": maybe_tensor} for slice_spec, tensor in maybe_tensor.items(): if isinstance(tensor, saveable_object.SaveSpec): specs.append(tensor) else: specs.append(saveable_object.SaveSpec(tensor, slice_spec, checkpoint_key)) local_names.append(trackable_utils.extract_local_name(checkpoint_key, prefix)) object_name = trackable_utils.extract_object_name( next(iter(tensor_dict.keys()))) saveables_cache[obj] = { trackable_utils.SERIALIZE_TO_TENSORS_NAME: [TrackableSaveable( obj, specs, object_name, local_names=local_names, prefix=prefix)]} return saveables_cache
SaveableCompatibilityConverter
python
falconry__falcon
falcon/asgi/response.py
{ "start": 1158, "end": 15928 }
class ____(response.Response): """Represents an HTTP response to a client request. Note: ``Response`` is not meant to be instantiated directly by responders. Keyword Arguments: options (dict): Set of global options passed from the App handler. """ # PERF(kgriffs): These will be shadowed when set on an instance; let's # us avoid having to implement __init__ and incur the overhead of # an additional function call. _sse: SSEEmitter | None = None _registered_callbacks: list[ResponseCallbacks] | None = None stream: AsyncReadableIO | AsyncIterator[bytes] | None # type: ignore[assignment] """An async iterator or generator that yields a series of byte strings that will be streamed to the ASGI server as a series of "http.response.body" events. Falcon will assume the body is complete when the iterable is exhausted or as soon as it yields ``None`` rather than an instance of ``bytes``:: async def producer(): while True: data_chunk = await read_data() if not data_chunk: break yield data_chunk resp.stream = producer Alternatively, a file-like object may be used as long as it implements an awaitable ``read()`` method:: resp.stream = await aiofiles.open('resp_data.bin', 'rb') If the object assigned to :attr:`~.stream` holds any resources (such as a file handle) that must be explicitly released, the object must implement a ``close()`` method. The ``close()`` method will be called after exhausting the iterable or file-like object. Note: In order to be compatible with Python 3.7+ and PEP 479, async iterators must return ``None`` instead of raising :class:`StopIteration`. This requirement does not apply to async generators (PEP 525). Note: If the stream length is known in advance, you may wish to also set the Content-Length header on the response. """ @property def sse(self) -> SSEEmitter | None: """A Server-Sent Event (SSE) emitter, implemented as an async iterator or generator that yields a series of of :class:`falcon.asgi.SSEvent` instances. Each event will be serialized and sent to the client as HTML5 Server-Sent Events:: async def emitter(): while True: some_event = await get_next_event() if not some_event: # Send an event consisting of a single "ping" # comment to keep the connection alive. yield SSEvent() # Alternatively, one can simply yield None and # a "ping" will also be sent as above. # yield continue yield SSEvent(json=some_event, retry=5000) # ...or yield SSEvent(data=b'something', event_id=some_id) # Alternatively, you may yield anything that implements # a serialize() method that returns a byte string # conforming to the SSE event stream format. # yield some_event resp.sse = emitter() Note: When the `sse` property is set, it supersedes both the `text` and `data` properties. Note: When hosting an app that emits Server-Sent Events, the web server should be set with a relatively long keep-alive TTL to minimize the overhead of connection renegotiations. """ # noqa: D400 D205 return self._sse @sse.setter def sse(self, value: SSEEmitter | None) -> None: self._sse = value def set_stream( self, stream: AsyncReadableIO | AsyncIterator[bytes], # type: ignore[override] content_length: int, ) -> None: """Set both `stream` and `content_length`. Although the :attr:`~falcon.asgi.Response.stream` and :attr:`~falcon.asgi.Response.content_length` properties may be set directly, using this method ensures :attr:`~falcon.asgi.Response.content_length` is not accidentally neglected when the length of the stream is known in advance. 
Using this method is also slightly more performant as compared to setting the properties individually. Note: If the stream length is unknown, you can set :attr:`~falcon.asgi.Response.stream` directly, and ignore :attr:`~falcon.asgi.Response.content_length`. In this case, the ASGI server may choose to use chunked encoding for HTTP/1.1 Args: stream: A readable, awaitable file-like object or async iterable that returns byte strings. If the object implements a close() method, it will be called after reading all of the data. content_length (int): Length of the stream, used for the Content-Length header in the response. """ self.stream = stream # PERF(kgriffs): Set directly rather than incur the overhead of # the self.content_length property. self._headers['content-length'] = str(content_length) async def render_body(self) -> bytes | None: # type: ignore[override] """Get the raw bytestring content for the response body. This coroutine can be awaited to get the raw data for the HTTP response body, taking into account the :attr:`~.text`, :attr:`~.data`, and :attr:`~.media` attributes. Note: This method ignores :attr:`~.stream`; the caller must check and handle that attribute directly. Returns: bytes: The UTF-8 encoded value of the `text` attribute, if set. Otherwise, the value of the `data` attribute if set, or finally the serialized value of the `media` attribute. If none of these attributes are set, ``None`` is returned. """ # NOTE(vytas): The code below is also inlined in asgi.App.__call__. data: bytes | None text = self.text if text is None: data = self._data if data is None and self._media is not None: # NOTE(kgriffs): We use a special _UNSET singleton since # None is ambiguous (the media handler might return None). if self._media_rendered is _UNSET: if not self.content_type: self.content_type = self.options.default_media_type handler, serialize_sync, _ = self.options.media_handlers._resolve( self.content_type, self.options.default_media_type ) if serialize_sync: self._media_rendered = serialize_sync(self._media) else: self._media_rendered = await handler.serialize_async( self._media, self.content_type ) data = self._media_rendered else: try: # NOTE(kgriffs): Normally we expect text to be a string data = text.encode() except AttributeError: # NOTE(kgriffs): Assume it was a bytes object already data = text # type: ignore[assignment] return data def schedule(self, callback: Callable[[], Awaitable[None]]) -> None: """Schedule an async callback to run soon after sending the HTTP response. This method can be used to execute a background job after the response has been returned to the client. The callback is assumed to be an async coroutine function. It will be scheduled to run on the event loop as soon as possible. The callback will be invoked without arguments. Use :any:`functools.partial` to pass arguments to the callback as needed. Note: If an unhandled exception is raised while processing the request, the callback will not be scheduled to run. Note: When an SSE emitter has been set on the response, the callback will be scheduled before the first call to the emitter. Warning: Because coroutines run on the main request thread, care should be taken to ensure they are non-blocking. Long-running operations must use async libraries or delegate to an :class:`~concurrent.futures.Executor` pool to avoid blocking the processing of subsequent requests. Args: callback(object): An async coroutine function. The callback will be invoked without arguments. 
""" if not iscoroutinefunction(callback): if iscoroutine(callback): raise TypeError( 'The callback object appears to ' 'be a coroutine, rather than a coroutine function. Please ' 'pass the function itself, rather than the result obtained ' 'by calling the function. ' ) elif is_python_func(callback): # pragma: nocover raise TypeError('The callback must be a coroutine function.') # NOTE(kgriffs): The implicit "else" branch is actually covered # by tests running in a Cython environment, but we can't # detect it with the coverage tool. rc: tuple[Callable[[], Awaitable[None]], Literal[True]] = (callback, True) if not self._registered_callbacks: self._registered_callbacks = [rc] else: self._registered_callbacks.append(rc) def schedule_sync(self, callback: Callable[[], None]) -> None: """Schedule a synchronous callback to run soon after sending the HTTP response. This method can be used to execute a background job after the response has been returned to the client. The callback is assumed to be a synchronous (non-coroutine) function. It will be scheduled on the event loop's default :class:`~concurrent.futures.Executor` (which can be overridden via :meth:`asyncio.AbstractEventLoop.set_default_executor`). The callback will be invoked without arguments. Use :any:`functools.partial` to pass arguments to the callback as needed. Note: If an unhandled exception is raised while processing the request, the callback will not be scheduled to run. Note: When an SSE emitter has been set on the response, the callback will be scheduled before the first call to the emitter. Warning: Synchronous callables run on the event loop's default :class:`~concurrent.futures.Executor`, which uses an instance of :class:`~concurrent.futures.ThreadPoolExecutor` unless :meth:`asyncio.AbstractEventLoop.set_default_executor` is used to change it to something else. Due to the GIL, CPU-bound jobs will block request processing for the current process unless the default :class:`~concurrent.futures.Executor` is changed to one that is process-based instead of thread-based (e.g., an instance of :class:`concurrent.futures.ProcessPoolExecutor`). Args: callback(object): An async coroutine function or a synchronous callable. The callback will be called without arguments. """ rc: tuple[Callable[[], None], Literal[False]] = (callback, False) if not self._registered_callbacks: self._registered_callbacks = [rc] else: self._registered_callbacks.append(rc) # ------------------------------------------------------------------------ # Helper methods # ------------------------------------------------------------------------ def _asgi_headers(self, media_type: str | None = None) -> list[tuple[bytes, bytes]]: """Convert headers into the format expected by ASGI servers. Header names must be lowercased and both name and value must be byte strings. See also: https://asgi.readthedocs.io/en/latest/specs/www.html#response-start Args: media_type: Default media type to use for the Content-Type header if the header was not set explicitly (default ``None``). """ headers = self._headers # PERF(vytas): uglier inline version of Response._set_media_type if media_type is not None and 'content-type' not in headers: headers['content-type'] = media_type try: # NOTE(vytas): Supporting ISO-8859-1 for historical reasons as per # RFC 7230, Section 3.2.4; and to strive for maximum # compatibility with WSGI. 
# PERF(vytas): On CPython, _encode_items_to_latin1 is implemented # in Cython (with a pure Python fallback), where the resulting # C code speeds up the method substantially by directly invoking # CPython's C API functions such as PyUnicode_EncodeLatin1. items = _encode_items_to_latin1(headers) except UnicodeEncodeError as ex: # TODO(vytas): In 3.1.0, update this error message to highlight the # fact that we decided to allow ISO-8859-1? raise ValueError( 'The modern series of HTTP standards require that header ' f'names and values use only ASCII characters: {ex}' ) if self._extra_headers: items += [ (n.encode('ascii'), v.encode('ascii')) for n, v in self._extra_headers ] # NOTE(kgriffs): It is important to append these after self._extra_headers # in case the latter contains Set-Cookie headers that should be # overridden by a call to unset_cookie(). if self._cookies is not None: # PERF(tbug): # The below implementation is ~23% faster than # the alternative: # # self._cookies.output().split("\\r\\n") # # Even without the .split("\\r\\n"), the below # is still ~17% faster, so don't use .output() items += [ (b'set-cookie', c.OutputString().encode('ascii')) for c in self._cookies.values() ] return items
Response
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-milvus/destination_milvus/destination.py
{ "start": 808, "end": 2681 }
class ____(Destination): indexer: Indexer embedder: Embedder def _init_indexer(self, config: ConfigModel): self.embedder = create_from_config(config.embedding, config.processing) self.indexer = MilvusIndexer(config.indexing, self.embedder.embedding_dimensions) def write( self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage] ) -> Iterable[AirbyteMessage]: config_model = ConfigModel.parse_obj(config) self._init_indexer(config_model) writer = Writer( config_model.processing, self.indexer, self.embedder, batch_size=BATCH_SIZE, omit_raw_text=config_model.omit_raw_text ) yield from writer.write(configured_catalog, input_messages) def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus: parsed_config = ConfigModel.parse_obj(config) self._init_indexer(parsed_config) checks = [self.embedder.check(), self.indexer.check(), DocumentProcessor.check_config(parsed_config.processing)] errors = [error for error in checks if error is not None] if len(errors) > 0: return AirbyteConnectionStatus(status=Status.FAILED, message="\n".join(errors)) else: return AirbyteConnectionStatus(status=Status.SUCCEEDED) def spec(self, *args: Any, **kwargs: Any) -> ConnectorSpecification: return ConnectorSpecification( documentationUrl="https://docs.airbyte.com/integrations/destinations/milvus", supportsIncremental=True, supported_destination_sync_modes=[DestinationSyncMode.overwrite, DestinationSyncMode.append, DestinationSyncMode.append_dedup], connectionSpecification=ConfigModel.schema(), # type: ignore[attr-defined] )
DestinationMilvus
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_hyperlink24.py
{ "start": 315, "end": 1461 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("hyperlink24.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with hyperlinks.""" workbook = Workbook(self.got_filename) # Turn off default URL format for testing. workbook.default_url_format = None worksheet = workbook.add_worksheet() worksheet.write_url( "A1", "http://www.example.com/some_long_url_that_is_255_characters_long_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_z#some_long_location_that_is_255_characters_long_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_abcdefgh_z", ) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
numpy__numpy
numpy/_core/tests/test_unicode.py
{ "start": 2874, "end": 2987 }
class ____(CreateZeros): """Check the creation of zero-valued arrays (size 2)""" ulen = 2
TestCreateZeros_2
python
joke2k__faker
faker/providers/ssn/zh_TW/__init__.py
{ "start": 1013, "end": 1310 }
class ____(SsnProvider): def ssn(self) -> str: ssn_without_last_char = self.numerify(self.random_uppercase_letter() + str(self.random_int(1, 2)) + "#######") last_char = str((10 - checksum(ssn_without_last_char) % 10) % 10) return ssn_without_last_char + last_char
Provider
python
google__pytype
pytype/metrics_test.py
{ "start": 4976, "end": 8216 }
class ____(unittest.TestCase): """Tests for Distribution.""" def setUp(self): super().setUp() metrics._prepare_for_test() def test_accumulation(self): d = metrics.Distribution("foo") # Check contents of an empty distribution. self.assertEqual(0, d._count) self.assertEqual(0, d._total) self.assertIsNone(d._min) self.assertIsNone(d._max) self.assertIsNone(d._mean()) self.assertIsNone(d._stdev()) # Add some values. d.add(3) d.add(2) d.add(5) # Check the final contents. self.assertEqual(3, d._count) self.assertEqual(10, d._total) self.assertEqual(2, d._min) self.assertEqual(5, d._max) self.assertAlmostEqual(10.0 / 3, d._mean()) # Stddev should be sqrt(14/9). self.assertAlmostEqual(math.sqrt(14.0 / 9), d._stdev()) def test_summary(self): d = metrics.Distribution("foo") self.assertEqual( "foo: total=0.0, count=0, min=None, max=None, mean=None, stdev=None", str(d), ) # This test is delicate because it is checking the string output of # floating point calculations. This specific data set was chosen because # the number of samples is a power of two (thus the division is exact) and # the variance is a natural square (thus the sqrt() is exact). d.add(1) d.add(5) self.assertEqual( "foo: total=6.0, count=2, min=1, max=5, mean=3.0, stdev=2.0", str(d) ) def test_disabled(self): metrics._prepare_for_test(enabled=False) d = metrics.Distribution("foo") d.add(123) self.assertEqual(0, d._count) def test_merge(self): d = metrics.Distribution("foo") # Merge two empty metrics together. other = metrics.Distribution("d_empty") d._merge(other) self.assertEqual(0, d._count) self.assertEqual(0, d._total) self.assertEqual(0, d._squared) self.assertEqual(None, d._min) self.assertEqual(None, d._max) # Merge into an empty metric (verifies the case where min/max must be # copied directly from the merged metric). other = metrics.Distribution("d2") other.add(10) other.add(20) d._merge(other) self.assertEqual(2, d._count) self.assertEqual(30, d._total) self.assertEqual(500, d._squared) self.assertEqual(10, d._min) self.assertEqual(20, d._max) # Merge into an existing metric resulting in a new min. other = metrics.Distribution("d3") other.add(5) d._merge(other) self.assertEqual(3, d._count) self.assertEqual(35, d._total) self.assertEqual(525, d._squared) self.assertEqual(5, d._min) self.assertEqual(20, d._max) # Merge into an existing metric resulting in a new max. other = metrics.Distribution("d4") other.add(30) d._merge(other) self.assertEqual(4, d._count) self.assertEqual(65, d._total) self.assertEqual(1425, d._squared) self.assertEqual(5, d._min) self.assertEqual(30, d._max) # Merge an empty metric (slopppy min/max code would fail). other = metrics.Distribution("d5") d._merge(other) self.assertEqual(4, d._count) self.assertEqual(65, d._total) self.assertEqual(1425, d._squared) self.assertEqual(5, d._min) self.assertEqual(30, d._max)
DistributionTest
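The `test_accumulation` case in this record leans on a bit of arithmetic; as a standalone check (plain Python, independent of pytype), the population variance of the samples 3, 2, 5 is 14/9, which is where the asserted stdev of sqrt(14/9) comes from.

```python
# Standalone check of the arithmetic behind test_accumulation (no pytype
# required): samples 3, 2, 5 have mean 10/3 and population variance 14/9.
import math

values = [3, 2, 5]
mean = sum(values) / len(values)                                # 10/3
variance = sum((v - mean) ** 2 for v in values) / len(values)   # population variance
print(math.isclose(variance, 14 / 9))   # True
print(math.sqrt(variance))              # ~1.247, i.e. sqrt(14/9)
```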
python
networkx__networkx
networkx/algorithms/planarity.py
{ "start": 6978, "end": 25509 }
class ____: """A class to maintain the state during planarity check.""" __slots__ = [ "G", "roots", "height", "lowpt", "lowpt2", "nesting_depth", "parent_edge", "DG", "adjs", "ordered_adjs", "ref", "side", "S", "stack_bottom", "lowpt_edge", "left_ref", "right_ref", "embedding", ] def __init__(self, G): # copy G without adding self-loops self.G = nx.Graph() self.G.add_nodes_from(G.nodes) for e in G.edges: if e[0] != e[1]: self.G.add_edge(e[0], e[1]) self.roots = [] # distance from tree root self.height = defaultdict(lambda: None) self.lowpt = {} # height of lowest return point of an edge self.lowpt2 = {} # height of second lowest return point self.nesting_depth = {} # for nesting order # None -> missing edge self.parent_edge = defaultdict(lambda: None) # oriented DFS graph self.DG = nx.DiGraph() self.DG.add_nodes_from(G.nodes) self.adjs = {} self.ordered_adjs = {} self.ref = defaultdict(lambda: None) self.side = defaultdict(lambda: 1) # stack of conflict pairs self.S = [] self.stack_bottom = {} self.lowpt_edge = {} self.left_ref = {} self.right_ref = {} self.embedding = PlanarEmbedding() def lr_planarity(self): """Execute the LR planarity test. Returns ------- embedding : dict If the graph is planar an embedding is returned. Otherwise None. """ if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: # graph is not planar return None # make adjacency lists for dfs for v in self.G: self.adjs[v] = list(self.G[v]) # orientation of the graph by depth first search traversal for v in self.G: if self.height[v] is None: self.height[v] = 0 self.roots.append(v) self.dfs_orientation(v) # Free no longer used variables self.G = None self.lowpt2 = None self.adjs = None # testing for v in self.DG: # sort the adjacency lists by nesting depth # note: this sorting leads to non linear time self.ordered_adjs[v] = sorted( self.DG[v], key=lambda x: self.nesting_depth[(v, x)] ) for v in self.roots: if not self.dfs_testing(v): return None # Free no longer used variables self.height = None self.lowpt = None self.S = None self.stack_bottom = None self.lowpt_edge = None for e in self.DG.edges: self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e] self.embedding.add_nodes_from(self.DG.nodes) for v in self.DG: # sort the adjacency lists again self.ordered_adjs[v] = sorted( self.DG[v], key=lambda x: self.nesting_depth[(v, x)] ) # initialize the embedding previous_node = None for w in self.ordered_adjs[v]: self.embedding.add_half_edge(v, w, ccw=previous_node) previous_node = w # Free no longer used variables self.DG = None self.nesting_depth = None self.ref = None # compute the complete embedding for v in self.roots: self.dfs_embedding(v) # Free no longer used variables self.roots = None self.parent_edge = None self.ordered_adjs = None self.left_ref = None self.right_ref = None self.side = None return self.embedding def lr_planarity_recursive(self): """Recursive version of :meth:`lr_planarity`.""" if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: # graph is not planar return None # orientation of the graph by depth first search traversal for v in self.G: if self.height[v] is None: self.height[v] = 0 self.roots.append(v) self.dfs_orientation_recursive(v) # Free no longer used variable self.G = None # testing for v in self.DG: # sort the adjacency lists by nesting depth # note: this sorting leads to non linear time self.ordered_adjs[v] = sorted( self.DG[v], key=lambda x: self.nesting_depth[(v, x)] ) for v in self.roots: if not self.dfs_testing_recursive(v): return None for e in 
self.DG.edges: self.nesting_depth[e] = self.sign_recursive(e) * self.nesting_depth[e] self.embedding.add_nodes_from(self.DG.nodes) for v in self.DG: # sort the adjacency lists again self.ordered_adjs[v] = sorted( self.DG[v], key=lambda x: self.nesting_depth[(v, x)] ) # initialize the embedding previous_node = None for w in self.ordered_adjs[v]: self.embedding.add_half_edge(v, w, ccw=previous_node) previous_node = w # compute the complete embedding for v in self.roots: self.dfs_embedding_recursive(v) return self.embedding def dfs_orientation(self, v): """Orient the graph by DFS, compute lowpoints and nesting order.""" # the recursion stack dfs_stack = [v] # index of next edge to handle in adjacency list of each node ind = defaultdict(lambda: 0) # boolean to indicate whether to skip the initial work for an edge skip_init = defaultdict(lambda: False) while dfs_stack: v = dfs_stack.pop() e = self.parent_edge[v] for w in self.adjs[v][ind[v] :]: vw = (v, w) if not skip_init[vw]: if (v, w) in self.DG.edges or (w, v) in self.DG.edges: ind[v] += 1 continue # the edge was already oriented self.DG.add_edge(v, w) # orient the edge self.lowpt[vw] = self.height[v] self.lowpt2[vw] = self.height[v] if self.height[w] is None: # (v, w) is a tree edge self.parent_edge[w] = vw self.height[w] = self.height[v] + 1 dfs_stack.append(v) # revisit v after finishing w dfs_stack.append(w) # visit w next skip_init[vw] = True # don't redo this block break # handle next node in dfs_stack (i.e. w) else: # (v, w) is a back edge self.lowpt[vw] = self.height[w] # determine nesting graph self.nesting_depth[vw] = 2 * self.lowpt[vw] if self.lowpt2[vw] < self.height[v]: # chordal self.nesting_depth[vw] += 1 # update lowpoints of parent edge e if e is not None: if self.lowpt[vw] < self.lowpt[e]: self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) self.lowpt[e] = self.lowpt[vw] elif self.lowpt[vw] > self.lowpt[e]: self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) else: self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) ind[v] += 1 def dfs_orientation_recursive(self, v): """Recursive version of :meth:`dfs_orientation`.""" e = self.parent_edge[v] for w in self.G[v]: if (v, w) in self.DG.edges or (w, v) in self.DG.edges: continue # the edge was already oriented vw = (v, w) self.DG.add_edge(v, w) # orient the edge self.lowpt[vw] = self.height[v] self.lowpt2[vw] = self.height[v] if self.height[w] is None: # (v, w) is a tree edge self.parent_edge[w] = vw self.height[w] = self.height[v] + 1 self.dfs_orientation_recursive(w) else: # (v, w) is a back edge self.lowpt[vw] = self.height[w] # determine nesting graph self.nesting_depth[vw] = 2 * self.lowpt[vw] if self.lowpt2[vw] < self.height[v]: # chordal self.nesting_depth[vw] += 1 # update lowpoints of parent edge e if e is not None: if self.lowpt[vw] < self.lowpt[e]: self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) self.lowpt[e] = self.lowpt[vw] elif self.lowpt[vw] > self.lowpt[e]: self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) else: self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) def dfs_testing(self, v): """Test for LR partition.""" # the recursion stack dfs_stack = [v] # index of next edge to handle in adjacency list of each node ind = defaultdict(lambda: 0) # boolean to indicate whether to skip the initial work for an edge skip_init = defaultdict(lambda: False) while dfs_stack: v = dfs_stack.pop() e = self.parent_edge[v] # to indicate whether to skip the final block after the for loop skip_final = False for w in self.ordered_adjs[v][ind[v] :]: ei = (v, w) if not 
skip_init[ei]: self.stack_bottom[ei] = top_of_stack(self.S) if ei == self.parent_edge[w]: # tree edge dfs_stack.append(v) # revisit v after finishing w dfs_stack.append(w) # visit w next skip_init[ei] = True # don't redo this block skip_final = True # skip final work after breaking break # handle next node in dfs_stack (i.e. w) else: # back edge self.lowpt_edge[ei] = ei self.S.append(ConflictPair(right=Interval(ei, ei))) # integrate new return edges if self.lowpt[ei] < self.height[v]: if w == self.ordered_adjs[v][0]: # e_i has return edge self.lowpt_edge[e] = self.lowpt_edge[ei] else: # add constraints of e_i if not self.add_constraints(ei, e): # graph is not planar return False ind[v] += 1 if not skip_final: # remove back edges returning to parent if e is not None: # v isn't root self.remove_back_edges(e) return True def dfs_testing_recursive(self, v): """Recursive version of :meth:`dfs_testing`.""" e = self.parent_edge[v] for w in self.ordered_adjs[v]: ei = (v, w) self.stack_bottom[ei] = top_of_stack(self.S) if ei == self.parent_edge[w]: # tree edge if not self.dfs_testing_recursive(w): return False else: # back edge self.lowpt_edge[ei] = ei self.S.append(ConflictPair(right=Interval(ei, ei))) # integrate new return edges if self.lowpt[ei] < self.height[v]: if w == self.ordered_adjs[v][0]: # e_i has return edge self.lowpt_edge[e] = self.lowpt_edge[ei] else: # add constraints of e_i if not self.add_constraints(ei, e): # graph is not planar return False # remove back edges returning to parent if e is not None: # v isn't root self.remove_back_edges(e) return True def add_constraints(self, ei, e): P = ConflictPair() # merge return edges of e_i into P.right while True: Q = self.S.pop() if not Q.left.empty(): Q.swap() if not Q.left.empty(): # not planar return False if self.lowpt[Q.right.low] > self.lowpt[e]: # merge intervals if P.right.empty(): # topmost interval P.right = Q.right.copy() else: self.ref[P.right.low] = Q.right.high P.right.low = Q.right.low else: # align self.ref[Q.right.low] = self.lowpt_edge[e] if top_of_stack(self.S) == self.stack_bottom[ei]: break # merge conflicting return edges of e_1,...,e_i-1 into P.L while top_of_stack(self.S).left.conflicting(ei, self) or top_of_stack( self.S ).right.conflicting(ei, self): Q = self.S.pop() if Q.right.conflicting(ei, self): Q.swap() if Q.right.conflicting(ei, self): # not planar return False # merge interval below lowpt(e_i) into P.R self.ref[P.right.low] = Q.right.high if Q.right.low is not None: P.right.low = Q.right.low if P.left.empty(): # topmost interval P.left = Q.left.copy() else: self.ref[P.left.low] = Q.left.high P.left.low = Q.left.low if not (P.left.empty() and P.right.empty()): self.S.append(P) return True def remove_back_edges(self, e): u = e[0] # trim back edges ending at parent u # drop entire conflict pairs while self.S and top_of_stack(self.S).lowest(self) == self.height[u]: P = self.S.pop() if P.left.low is not None: self.side[P.left.low] = -1 if self.S: # one more conflict pair to consider P = self.S.pop() # trim left interval while P.left.high is not None and P.left.high[1] == u: P.left.high = self.ref[P.left.high] if P.left.high is None and P.left.low is not None: # just emptied self.ref[P.left.low] = P.right.low self.side[P.left.low] = -1 P.left.low = None # trim right interval while P.right.high is not None and P.right.high[1] == u: P.right.high = self.ref[P.right.high] if P.right.high is None and P.right.low is not None: # just emptied self.ref[P.right.low] = P.left.low self.side[P.right.low] = -1 P.right.low = 
None self.S.append(P) # side of e is side of a highest return edge if self.lowpt[e] < self.height[u]: # e has return edge hl = top_of_stack(self.S).left.high hr = top_of_stack(self.S).right.high if hl is not None and (hr is None or self.lowpt[hl] > self.lowpt[hr]): self.ref[e] = hl else: self.ref[e] = hr def dfs_embedding(self, v): """Completes the embedding.""" # the recursion stack dfs_stack = [v] # index of next edge to handle in adjacency list of each node ind = defaultdict(lambda: 0) while dfs_stack: v = dfs_stack.pop() for w in self.ordered_adjs[v][ind[v] :]: ind[v] += 1 ei = (v, w) if ei == self.parent_edge[w]: # tree edge self.embedding.add_half_edge_first(w, v) self.left_ref[v] = w self.right_ref[v] = w dfs_stack.append(v) # revisit v after finishing w dfs_stack.append(w) # visit w next break # handle next node in dfs_stack (i.e. w) else: # back edge if self.side[ei] == 1: self.embedding.add_half_edge(w, v, ccw=self.right_ref[w]) else: self.embedding.add_half_edge(w, v, cw=self.left_ref[w]) self.left_ref[w] = v def dfs_embedding_recursive(self, v): """Recursive version of :meth:`dfs_embedding`.""" for w in self.ordered_adjs[v]: ei = (v, w) if ei == self.parent_edge[w]: # tree edge self.embedding.add_half_edge_first(w, v) self.left_ref[v] = w self.right_ref[v] = w self.dfs_embedding_recursive(w) else: # back edge if self.side[ei] == 1: # place v directly after right_ref[w] in embed. list of w self.embedding.add_half_edge(w, v, ccw=self.right_ref[w]) else: # place v directly before left_ref[w] in embed. list of w self.embedding.add_half_edge(w, v, cw=self.left_ref[w]) self.left_ref[w] = v def sign(self, e): """Resolve the relative side of an edge to the absolute side.""" # the recursion stack dfs_stack = [e] # dict to remember reference edges old_ref = defaultdict(lambda: None) while dfs_stack: e = dfs_stack.pop() if self.ref[e] is not None: dfs_stack.append(e) # revisit e after finishing self.ref[e] dfs_stack.append(self.ref[e]) # visit self.ref[e] next old_ref[e] = self.ref[e] # remember value of self.ref[e] self.ref[e] = None else: self.side[e] *= self.side[old_ref[e]] return self.side[e] def sign_recursive(self, e): """Recursive version of :meth:`sign`.""" if self.ref[e] is not None: self.side[e] = self.side[e] * self.sign_recursive(self.ref[e]) self.ref[e] = None return self.side[e]
LRPlanarity
python
HypothesisWorks__hypothesis
hypothesis-python/tests/django/toystore/test_basic_configuration.py
{ "start": 908, "end": 1314 }
class ____: @settings( suppress_health_check=[HealthCheck.too_slow, HealthCheck.differing_executors] ) @given(integers()) def test_is_blank_slate(self, unused): Company.objects.create(name="MickeyCo") def test_normal_test_1(self): Company.objects.create(name="MickeyCo") def test_normal_test_2(self): Company.objects.create(name="MickeyCo")
SomeStuff
python
matplotlib__matplotlib
lib/mpl_toolkits/axisartist/angle_helper.py
{ "start": 5349, "end": 8798 }
class ____: deg_mark = r"^{\circ}" min_mark = r"^{\prime}" sec_mark = r"^{\prime\prime}" fmt_d = "$%d" + deg_mark + "$" fmt_ds = r"$%d.%s" + deg_mark + "$" # %s for sign fmt_d_m = r"$%s%d" + deg_mark + r"\,%02d" + min_mark + "$" fmt_d_ms = r"$%s%d" + deg_mark + r"\,%02d.%s" + min_mark + "$" fmt_d_m_partial = "$%s%d" + deg_mark + r"\,%02d" + min_mark + r"\," fmt_s_partial = "%02d" + sec_mark + "$" fmt_ss_partial = "%02d.%s" + sec_mark + "$" def _get_number_fraction(self, factor): ## check for fractional numbers number_fraction = None # check for 60 for threshold in [1, 60, 3600]: if factor <= threshold: break d = factor // threshold int_log_d = int(np.floor(np.log10(d))) if 10**int_log_d == d and d != 1: number_fraction = int_log_d factor = factor // 10**int_log_d return factor, number_fraction return factor, number_fraction def __call__(self, direction, factor, values): if len(values) == 0: return [] ss = np.sign(values) signs = ["-" if v < 0 else "" for v in values] factor, number_fraction = self._get_number_fraction(factor) values = np.abs(values) if number_fraction is not None: values, frac_part = divmod(values, 10 ** number_fraction) frac_fmt = "%%0%dd" % (number_fraction,) frac_str = [frac_fmt % (f1,) for f1 in frac_part] if factor == 1: if number_fraction is None: return [self.fmt_d % (s * int(v),) for s, v in zip(ss, values)] else: return [self.fmt_ds % (s * int(v), f1) for s, v, f1 in zip(ss, values, frac_str)] elif factor == 60: deg_part, min_part = divmod(values, 60) if number_fraction is None: return [self.fmt_d_m % (s1, d1, m1) for s1, d1, m1 in zip(signs, deg_part, min_part)] else: return [self.fmt_d_ms % (s, d1, m1, f1) for s, d1, m1, f1 in zip(signs, deg_part, min_part, frac_str)] elif factor == 3600: if ss[-1] == -1: inverse_order = True values = values[::-1] signs = signs[::-1] else: inverse_order = False l_hm_old = "" r = [] deg_part, min_part_ = divmod(values, 3600) min_part, sec_part = divmod(min_part_, 60) if number_fraction is None: sec_str = [self.fmt_s_partial % (s1,) for s1 in sec_part] else: sec_str = [self.fmt_ss_partial % (s1, f1) for s1, f1 in zip(sec_part, frac_str)] for s, d1, m1, s1 in zip(signs, deg_part, min_part, sec_str): l_hm = self.fmt_d_m_partial % (s, d1, m1) if l_hm != l_hm_old: l_hm_old = l_hm l = l_hm + s1 else: l = "$" + s + s1 r.append(l) if inverse_order: return r[::-1] else: return r else: # factor > 3600. return [r"$%s^{\circ}$" % v for v in ss*values]
FormatterDMS
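A short usage sketch for this formatter follows; it assumes matplotlib (which ships mpl_toolkits.axisartist) is installed, and the tick values are hypothetical. Note that the `direction` argument is not used in the code shown above, so any string works for it here.

```python
# Usage sketch; assumes matplotlib is installed. Tick values are hypothetical.
# With factor=1 the values are whole degrees; with factor=60 they are in
# arc-minutes (e.g. 90 -> 1 degree 30 minutes).
from mpl_toolkits.axisartist.angle_helper import FormatterDMS

fmt = FormatterDMS()
print(fmt("bottom", 1, [10, 20]))   # ['$10^{\\circ}$', '$20^{\\circ}$']
print(fmt("bottom", 60, [90, 91]))  # degree/arc-minute labels such as '$1^{\\circ}\\,30^{\\prime}$'
```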
python
PyCQA__pylint
tests/functional/c/class_members.py
{ "start": 62, "end": 283 }
class ____: attr: int # `bar` definitely does not exist here, but in a complex scenario, # it might. We simply exclude PEP 526 class and instance variables # from `no-member`. print(Class().attr) print(Class.attr)
Class
python
ray-project__ray
python/ray/data/aggregate.py
{ "start": 45474, "end": 49247 }
class ____(AggregateFnV2[List[int], float]): """Calculates the percentage of zero values in a numeric column. This aggregation computes the percentage of zero values in a numeric dataset column. It can optionally ignore null values when calculating the percentage. The result is a percentage value between 0.0 and 100.0, where 0.0 means no zero values and 100.0 means all non-null values are zero. Example: .. testcode:: import ray from ray.data.aggregate import ZeroPercentage # Create a dataset with some zero values ds = ray.data.from_items([ {"value": 0}, {"value": 1}, {"value": 0}, {"value": 3}, {"value": 0} ]) # Calculate zero value percentage result = ds.aggregate(ZeroPercentage(on="value")) # result: 60.0 (3 out of 5 values are zero) # With null values and ignore_nulls=True (default) ds = ray.data.from_items([ {"value": 0}, {"value": None}, {"value": 0}, {"value": 3}, {"value": 0} ]) result = ds.aggregate(ZeroPercentage(on="value", ignore_nulls=True)) # result: 75.0 (3 out of 4 non-null values are zero) # Using with groupby ds = ray.data.from_items([ {"group": "A", "value": 0}, {"group": "A", "value": 1}, {"group": "B", "value": 0}, {"group": "B", "value": 0} ]) result = ds.groupby("group").aggregate(ZeroPercentage(on="value")).take_all() # result: [{'group': 'A', 'zero_pct(value)': 50.0}, # {'group': 'B', 'zero_pct(value)': 100.0}] Args: on: The name of the column to calculate zero value percentage on. Must be a numeric column. ignore_nulls: Whether to ignore null values when calculating the percentage. If True (default), null values are excluded from both numerator and denominator. If False, null values are included in the denominator but not the numerator. alias_name: Optional name for the resulting column. If not provided, defaults to "zero_pct({column_name})". """ def __init__( self, on: str, ignore_nulls: bool = True, alias_name: Optional[str] = None, ): # Initialize with a list accumulator [zero_count, non_null_count] super().__init__( alias_name if alias_name else f"zero_pct({str(on)})", on=on, ignore_nulls=ignore_nulls, zero_factory=lambda: [0, 0], ) def aggregate_block(self, block: Block) -> List[int]: column_accessor = BlockColumnAccessor.for_column(block[self._target_col_name]) count = column_accessor.count(ignore_nulls=self._ignore_nulls) if count == 0: return [0, 0] arrow_compatible = column_accessor._as_arrow_compatible() # Use PyArrow compute to count zeros # First create a boolean mask for zero values zero_mask = pc.equal(arrow_compatible, 0) # Sum the boolean mask to get count of True values (zeros) zero_count = pc.sum(zero_mask).as_py() or 0 return [zero_count, count] def combine(self, current_accumulator: List[int], new: List[int]) -> List[int]: return [ current_accumulator[0] + new[0], # Sum zero counts current_accumulator[1] + new[1], # Sum non-null counts ] def finalize(self, accumulator: List[int]) -> Optional[float]: if accumulator[1] == 0: return None return (accumulator[0] / accumulator[1]) * 100.0 @PublicAPI(stability="alpha")
ZeroPercentage
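The accumulator in this record is just a `[zero_count, non_null_count]` pair; the standalone sketch below (plain Python, no Ray required) mirrors the `combine`/`finalize` arithmetic above to show how per-block partial results become the final percentage.

```python
# Standalone mirror of the combine/finalize arithmetic above (no Ray needed).
# Each block contributes a [zero_count, non_null_count] pair; the block values
# below are hypothetical.
def combine(acc, new):
    return [acc[0] + new[0], acc[1] + new[1]]

def finalize(acc):
    return None if acc[1] == 0 else acc[0] / acc[1] * 100.0

per_block = [[2, 3], [1, 2]]  # e.g. block 1: 2 zeros out of 3 values; block 2: 1 out of 2
acc = [0, 0]                  # matches the zero_factory above
for partial in per_block:
    acc = combine(acc, partial)
print(finalize(acc))          # 60.0 -> 3 zeros out of 5 non-null values
```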
python
keras-team__keras
keras/src/ops/numpy_test.py
{ "start": 128400, "end": 194307 }
class ____(testing.TestCase): def test_mean(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.mean(x), np.mean(x)) self.assertAllClose(knp.mean(x, axis=()), np.mean(x, axis=())) self.assertAllClose(knp.mean(x, axis=1), np.mean(x, axis=1)) self.assertAllClose(knp.mean(x, axis=(1,)), np.mean(x, axis=(1,))) self.assertAllClose( knp.mean(x, axis=1, keepdims=True), np.mean(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Mean()(x), np.mean(x)) self.assertAllClose(knp.Mean(axis=1)(x), np.mean(x, axis=1)) self.assertAllClose( knp.Mean(axis=1, keepdims=True)(x), np.mean(x, axis=1, keepdims=True), ) # test overflow x = np.array([65504, 65504, 65504], dtype="float16") self.assertAllClose(knp.mean(x), np.mean(x)) def test_array_split(self): x = np.array([[1, 2, 3], [4, 5, 6]]) # Even split (axis=0) knp_res1 = knp.array_split(x, 2) np_res1 = np.array_split(x, 2) self.assertEqual(len(knp_res1), len(np_res1)) for k_arr, n_arr in zip(knp_res1, np_res1): self.assertAllClose(k_arr, n_arr) # Even split (axis=1) knp_res2 = knp.array_split(x, 3, axis=1) np_res2 = np.array_split(x, 3, axis=1) self.assertEqual(len(knp_res2), len(np_res2)) for k_arr, n_arr in zip(knp_res2, np_res2): self.assertAllClose(k_arr, n_arr) # Uneven split (axis=1) - 3 columns into 2 sections knp_res3 = knp.array_split(x, 2, axis=1) np_res3 = np.array_split(x, 2, axis=1) self.assertEqual(len(knp_res3), len(np_res3)) for k_arr, n_arr in zip(knp_res3, np_res3): self.assertAllClose(k_arr, n_arr) def test_all(self): x = np.array([[True, False, True], [True, True, True]]) self.assertAllClose(knp.all(x), np.all(x)) self.assertAllClose(knp.all(x, axis=()), np.all(x, axis=())) self.assertAllClose(knp.all(x, axis=1), np.all(x, axis=1)) self.assertAllClose(knp.all(x, axis=(1,)), np.all(x, axis=(1,))) self.assertAllClose( knp.all(x, axis=1, keepdims=True), np.all(x, axis=1, keepdims=True), ) self.assertAllClose(knp.All()(x), np.all(x)) self.assertAllClose(knp.All(axis=1)(x), np.all(x, axis=1)) self.assertAllClose( knp.All(axis=1, keepdims=True)(x), np.all(x, axis=1, keepdims=True), ) def test_any(self): x = np.array([[True, False, True], [True, True, True]]) self.assertAllClose(knp.any(x), np.any(x)) self.assertAllClose(knp.any(x, axis=()), np.any(x, axis=())) self.assertAllClose(knp.any(x, axis=1), np.any(x, axis=1)) self.assertAllClose(knp.any(x, axis=(1,)), np.any(x, axis=(1,))) self.assertAllClose( knp.any(x, axis=1, keepdims=True), np.any(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Any()(x), np.any(x)) self.assertAllClose(knp.Any(axis=1)(x), np.any(x, axis=1)) self.assertAllClose( knp.Any(axis=1, keepdims=True)(x), np.any(x, axis=1, keepdims=True), ) def test_trapezoid(self): y = np.random.random((3, 3, 3)) x = np.random.random((3, 3, 3)) dx = 2.0 self.assertAllClose(knp.trapezoid(y), np.trapezoid(y)) self.assertAllClose(knp.trapezoid(y, x=x), np.trapezoid(y, x=x)) self.assertAllClose(knp.trapezoid(y, dx=dx), np.trapezoid(y, dx=dx)) self.assertAllClose( knp.trapezoid(y, x=x, axis=1), np.trapezoid(y, x=x, axis=1), ) def test_var(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.var(x), np.var(x)) self.assertAllClose(knp.var(x, axis=()), np.var(x, axis=())) self.assertAllClose(knp.var(x, axis=1), np.var(x, axis=1)) self.assertAllClose(knp.var(x, axis=(1,)), np.var(x, axis=(1,))) self.assertAllClose( knp.var(x, axis=1, keepdims=True), np.var(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Var()(x), np.var(x)) self.assertAllClose(knp.Var(axis=1)(x), np.var(x, axis=1)) self.assertAllClose( 
knp.Var(axis=1, keepdims=True)(x), np.var(x, axis=1, keepdims=True), ) def test_sum(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.sum(x), np.sum(x)) self.assertAllClose(knp.sum(x, axis=()), np.sum(x, axis=())) self.assertAllClose(knp.sum(x, axis=1), np.sum(x, axis=1)) self.assertAllClose(knp.sum(x, axis=(1,)), np.sum(x, axis=(1,))) self.assertAllClose( knp.sum(x, axis=1, keepdims=True), np.sum(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Sum()(x), np.sum(x)) self.assertAllClose(knp.Sum(axis=1)(x), np.sum(x, axis=1)) self.assertAllClose( knp.Sum(axis=1, keepdims=True)(x), np.sum(x, axis=1, keepdims=True), ) def test_amax(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.amax(x), np.amax(x)) self.assertAllClose(knp.amax(x, axis=()), np.amax(x, axis=())) self.assertAllClose(knp.amax(x, axis=1), np.amax(x, axis=1)) self.assertAllClose(knp.amax(x, axis=(1,)), np.amax(x, axis=(1,))) self.assertAllClose( knp.amax(x, axis=1, keepdims=True), np.amax(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Amax()(x), np.amax(x)) self.assertAllClose(knp.Amax(axis=1)(x), np.amax(x, axis=1)) self.assertAllClose( knp.Amax(axis=1, keepdims=True)(x), np.amax(x, axis=1, keepdims=True), ) def test_amin(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.amin(x), np.amin(x)) self.assertAllClose(knp.amin(x, axis=()), np.amin(x, axis=())) self.assertAllClose(knp.amin(x, axis=1), np.amin(x, axis=1)) self.assertAllClose(knp.amin(x, axis=(1,)), np.amin(x, axis=(1,))) self.assertAllClose( knp.amin(x, axis=1, keepdims=True), np.amin(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Amin()(x), np.amin(x)) self.assertAllClose(knp.Amin(axis=1)(x), np.amin(x, axis=1)) self.assertAllClose( knp.Amin(axis=1, keepdims=True)(x), np.amin(x, axis=1, keepdims=True), ) def test_square(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.square(x), np.square(x)) self.assertAllClose(knp.Square()(x), np.square(x)) def test_negative(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.negative(x), np.negative(x)) self.assertAllClose(knp.Negative()(x), np.negative(x)) def test_abs(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.abs(x), np.abs(x)) self.assertAllClose(knp.Abs()(x), np.abs(x)) def test_absolute(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.absolute(x), np.absolute(x)) self.assertAllClose(knp.Absolute()(x), np.absolute(x)) def test_squeeze(self): x = np.ones([1, 3, 1, 5]) self.assertAllClose(knp.squeeze(x), np.squeeze(x)) self.assertAllClose(knp.squeeze(x, axis=0), np.squeeze(x, axis=0)) self.assertAllClose(knp.Squeeze()(x), np.squeeze(x)) self.assertAllClose(knp.Squeeze(axis=0)(x), np.squeeze(x, axis=0)) # Multiple axes x = np.ones([2, 1, 1, 1]) self.assertAllClose(knp.squeeze(x, (1, 2)), np.squeeze(x, (1, 2))) self.assertAllClose(knp.squeeze(x, (-1, -2)), np.squeeze(x, (-1, -2))) self.assertAllClose(knp.squeeze(x, (1, 2, 3)), np.squeeze(x, (1, 2, 3))) self.assertAllClose(knp.squeeze(x, (-1, 1)), np.squeeze(x, (-1, 1))) self.assertAllClose(knp.Squeeze((1, 2))(x), np.squeeze(x, (1, 2))) self.assertAllClose(knp.Squeeze((-1, -2))(x), np.squeeze(x, (-1, -2))) self.assertAllClose(knp.Squeeze((1, 2, 3))(x), np.squeeze(x, (1, 2, 3))) self.assertAllClose(knp.Squeeze((-1, 1))(x), np.squeeze(x, (-1, 1))) def test_transpose(self): x = np.ones([1, 2, 3, 4, 5]) self.assertAllClose(knp.transpose(x), np.transpose(x)) self.assertAllClose( knp.transpose(x, axes=(1, 0, 3, 2, 4)), np.transpose(x, axes=(1, 
0, 3, 2, 4)), ) self.assertAllClose(knp.Transpose()(x), np.transpose(x)) self.assertAllClose( knp.Transpose(axes=(1, 0, 3, 2, 4))(x), np.transpose(x, axes=(1, 0, 3, 2, 4)), ) def test_arccos(self): x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]]) self.assertAllClose(knp.arccos(x), np.arccos(x)) self.assertAllClose(knp.Arccos()(x), np.arccos(x)) def test_arccosh(self): x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]]) self.assertAllClose(knp.arccosh(x), np.arccosh(x)) self.assertAllClose(knp.Arccosh()(x), np.arccosh(x)) def test_arcsin(self): x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]]) self.assertAllClose(knp.arcsin(x), np.arcsin(x)) self.assertAllClose(knp.Arcsin()(x), np.arcsin(x)) def test_arcsinh(self): x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]]) self.assertAllClose(knp.arcsinh(x), np.arcsinh(x)) self.assertAllClose(knp.Arcsinh()(x), np.arcsinh(x)) def test_arctan(self): x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]]) self.assertAllClose(knp.arctan(x), np.arctan(x)) self.assertAllClose(knp.Arctan()(x), np.arctan(x)) def test_arctanh(self): x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]]) self.assertAllClose(knp.arctanh(x), np.arctanh(x)) self.assertAllClose(knp.Arctanh()(x), np.arctanh(x)) def test_argmax(self): x = np.array([[1, 2, 3], [3, 2, 1], [4, 5, 6]]) self.assertAllClose(knp.argmax(x), np.argmax(x)) self.assertAllClose(knp.argmax(x, axis=1), np.argmax(x, axis=1)) self.assertAllClose( knp.argmax(x, axis=1, keepdims=True), np.argmax(x, axis=1, keepdims=True), ) self.assertAllClose( knp.argmax(x, keepdims=True), np.argmax(x, keepdims=True) ) self.assertAllClose(knp.Argmax()(x), np.argmax(x)) self.assertAllClose(knp.Argmax(axis=1)(x), np.argmax(x, axis=1)) self.assertAllClose(knp.Argmax()(x), np.argmax(x)) self.assertAllClose( knp.Argmax(keepdims=True)(x), np.argmax(x, keepdims=True) ) def test_argmin(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.argmin(x), np.argmin(x)) self.assertAllClose(knp.argmin(x, axis=1), np.argmin(x, axis=1)) self.assertAllClose( knp.argmin(x, keepdims=True), np.argmin(x, keepdims=True) ) self.assertAllClose(knp.Argmin()(x), np.argmin(x)) self.assertAllClose(knp.Argmin(axis=1)(x), np.argmin(x, axis=1)) self.assertAllClose( knp.Argmin(keepdims=True)(x), np.argmin(x, keepdims=True) ) def test_argsort(self): x = np.array([[1, 2, 3], [4, 5, 6]]) self.assertAllClose(knp.argsort(x), np.argsort(x)) self.assertAllClose(knp.argsort(x, axis=1), np.argsort(x, axis=1)) self.assertAllClose(knp.argsort(x, axis=None), np.argsort(x, axis=None)) self.assertAllClose(knp.Argsort()(x), np.argsort(x)) self.assertAllClose(knp.Argsort(axis=1)(x), np.argsort(x, axis=1)) self.assertAllClose(knp.Argsort(axis=None)(x), np.argsort(x, axis=None)) x = np.array(1) # rank == 0 self.assertAllClose(knp.argsort(x), np.argsort(x)) self.assertAllClose(knp.Argsort()(x), np.argsort(x)) def test_array(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.array(x), np.array(x)) self.assertAllClose(knp.Array()(x), np.array(x)) self.assertTrue(backend.is_tensor(knp.array(x))) self.assertTrue(backend.is_tensor(knp.Array()(x))) # Check dtype conversion. 
x = [[1, 0, 1], [1, 1, 0]] output = knp.array(x, dtype="int32") self.assertEqual(standardize_dtype(output.dtype), "int32") x = [[1, 0, 1], [1, 1, 0]] output = knp.array(x, dtype="float32") self.assertEqual(standardize_dtype(output.dtype), "float32") x = [[1, 0, 1], [1, 1, 0]] output = knp.array(x, dtype="bool") self.assertEqual(standardize_dtype(output.dtype), "bool") def test_average(self): x = np.array([[1, 2, 3], [3, 2, 1]]) weights = np.ones([2, 3]) weights_1d = np.ones([3]) self.assertAllClose(knp.average(x), np.average(x)) self.assertAllClose(knp.average(x, axis=()), np.average(x, axis=())) self.assertAllClose(knp.average(x, axis=1), np.average(x, axis=1)) self.assertAllClose(knp.average(x, axis=(1,)), np.average(x, axis=(1,))) self.assertAllClose( knp.average(x, axis=1, weights=weights), np.average(x, axis=1, weights=weights), ) self.assertAllClose( knp.average(x, axis=1, weights=weights_1d), np.average(x, axis=1, weights=weights_1d), ) self.assertAllClose(knp.Average()(x), np.average(x)) self.assertAllClose(knp.Average(axis=1)(x), np.average(x, axis=1)) self.assertAllClose( knp.Average(axis=1)(x, weights=weights), np.average(x, axis=1, weights=weights), ) self.assertAllClose( knp.Average(axis=1)(x, weights=weights_1d), np.average(x, axis=1, weights=weights_1d), ) def test_bartlett(self): x = np.random.randint(1, 100 + 1) self.assertAllClose(knp.bartlett(x), np.bartlett(x)) self.assertAllClose(knp.Bartlett()(x), np.bartlett(x)) def test_blackman(self): x = np.random.randint(1, 100 + 1) self.assertAllClose(knp.blackman(x), np.blackman(x)) self.assertAllClose(knp.Blackman()(x), np.blackman(x)) def test_hamming(self): x = np.random.randint(1, 100 + 1) self.assertAllClose(knp.hamming(x), np.hamming(x)) self.assertAllClose(knp.Hamming()(x), np.hamming(x)) def test_hanning(self): x = np.random.randint(1, 100 + 1) self.assertAllClose(knp.hanning(x), np.hanning(x)) self.assertAllClose(knp.Hanning()(x), np.hanning(x)) def test_kaiser(self): x = np.random.randint(1, 100 + 1) beta = float(np.random.randint(10, 20 + 1)) self.assertAllClose(knp.kaiser(x, beta), np.kaiser(x, beta)) self.assertAllClose(knp.Kaiser(beta)(x), np.kaiser(x, beta)) @parameterized.named_parameters( named_product(sparse_input=(False, True), sparse_arg=(False, True)) ) def test_bincount(self, sparse_input, sparse_arg): if (sparse_input or sparse_arg) and not backend.SUPPORTS_SPARSE_TENSORS: pytest.skip("Backend does not support sparse tensors") if testing.tensorflow_uses_gpu(): self.skipTest("bincount does not work in tensorflow gpu") x = x_np = np.array([1, 1, 2, 3, 2, 4, 4, 6]) weights = weights_np = np.array([0, 0, 3, 2, 1, 1, 4, 2]) if sparse_input: indices = np.array([[1], [3], [5], [7], [9], [11], [13], [15]]) if backend.backend() == "tensorflow": import tensorflow as tf x = tf.SparseTensor(indices, x, (16,)) weights = tf.SparseTensor(indices, weights, (16,)) elif backend.backend() == "jax": from jax.experimental import sparse as jax_sparse x = jax_sparse.BCOO((x, indices), shape=(16,)) weights = jax_sparse.BCOO((weights, indices), shape=(16,)) minlength = 3 output = knp.bincount( x, weights=weights, minlength=minlength, sparse=sparse_arg ) self.assertAllClose( output, np.bincount(x_np, weights=weights_np, minlength=minlength) ) self.assertSparse(output, sparse_input or sparse_arg) output = knp.Bincount( weights=weights, minlength=minlength, sparse=sparse_arg )(x) self.assertAllClose( output, np.bincount(x_np, weights=weights_np, minlength=minlength) ) self.assertSparse(output, sparse_input or sparse_arg) x = 
knp.expand_dims(x, 0) weights = knp.expand_dims(weights, 0) expected_output = np.array([[0, 0, 4, 2, 5, 0, 2]]) output = knp.bincount( x, weights=weights, minlength=minlength, sparse=sparse_arg ) self.assertAllClose(output, expected_output) self.assertSparse(output, sparse_input or sparse_arg) output = knp.Bincount( weights=weights, minlength=minlength, sparse=sparse_arg )(x) self.assertAllClose(output, expected_output) self.assertSparse(output, sparse_input or sparse_arg) # test with weights=None expected_output = np.array([[0, 2, 2, 1, 2, 0, 1]]) output = knp.Bincount( weights=None, minlength=minlength, sparse=sparse_arg )(x) self.assertAllClose(output, expected_output) self.assertSparse(output, sparse_input or sparse_arg) def test_bitwise_invert(self): x = np.array([2, 5, 255]) self.assertAllClose(knp.bitwise_invert(x), np.bitwise_not(x)) self.assertAllClose(knp.BitwiseInvert()(x), np.bitwise_not(x)) # bitwise_not is same as bitwise_invert def test_broadcast_to(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose( knp.broadcast_to(x, [2, 2, 3]), np.broadcast_to(x, [2, 2, 3]), ) self.assertAllClose( knp.BroadcastTo([2, 2, 3])(x), np.broadcast_to(x, [2, 2, 3]), ) def test_cbrt(self): x = np.array([[-8, -1, 0], [1, 8, 27]], dtype="float32") ref_y = np.sign(x) * np.abs(x) ** (1.0 / 3.0) y = knp.cbrt(x) self.assertEqual(standardize_dtype(y.dtype), "float32") self.assertAllClose(y, ref_y) y = knp.Cbrt()(x) self.assertEqual(standardize_dtype(y.dtype), "float32") self.assertAllClose(y, ref_y) def test_ceil(self): x = np.array([[1.2, 2.1, -2.5], [2.4, -11.9, -5.5]]) self.assertAllClose(knp.ceil(x), np.ceil(x)) self.assertAllClose(knp.Ceil()(x), np.ceil(x)) def test_clip(self): x = np.array([[1.2, 2.1, 0.5], [2.4, 11.9, 0.5]]) self.assertAllClose(knp.clip(x, 1, 2), np.clip(x, 1, 2)) self.assertAllClose(knp.clip(x, 1, 2), np.clip(x, 1, 2)) self.assertAllClose(knp.Clip(0, 1)(x), np.clip(x, 0, 1)) self.assertAllClose(knp.Clip(0, 1)(x), np.clip(x, 0, 1)) def test_concatenate(self): x = np.array([[1, 2, 3], [3, 2, 1]]) y = np.array([[4, 5, 6], [6, 5, 4]]) z = np.array([[7, 8, 9], [9, 8, 7]]) self.assertAllClose( knp.concatenate([x, y], axis=0), np.concatenate([x, y], axis=0), ) self.assertAllClose( knp.concatenate([x, y, z], axis=0), np.concatenate([x, y, z], axis=0), ) self.assertAllClose( knp.concatenate([x, y], axis=1), np.concatenate([x, y], axis=1), ) self.assertAllClose( knp.Concatenate(axis=0)([x, y]), np.concatenate([x, y], axis=0), ) self.assertAllClose( knp.Concatenate(axis=0)([x, y, z]), np.concatenate([x, y, z], axis=0), ) self.assertAllClose( knp.Concatenate(axis=1)([x, y]), np.concatenate([x, y], axis=1), ) def test_view(self): x = np.array(1, dtype="int16") result = knp.view(x, dtype="float16") assert backend.standardize_dtype(result.dtype) == "float16" with self.assertRaises(Exception): result = knp.view(x, dtype="int8") with self.assertRaises(Exception): result = knp.view(x, dtype="int32") x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype="int16") result = knp.view(x, dtype="int16") assert backend.standardize_dtype(result.dtype) == "int16" self.assertEqual( backend.standardize_dtype(knp.view(x, dtype="int16").dtype), "int16" ) self.assertAllClose(knp.view(x, dtype="int16"), x.view("int16")) self.assertEqual( backend.standardize_dtype(knp.view(x, dtype="float16").dtype), "float16", ) self.assertAllClose(knp.view(x, dtype="float16"), x.view("float16")) self.assertEqual( backend.standardize_dtype(knp.view(x, dtype="int8").dtype), "int8" ) self.assertAllClose(knp.view(x, 
dtype="int8"), x.view("int8")) self.assertEqual( backend.standardize_dtype(knp.view(x, dtype="int32").dtype), "int32" ) self.assertAllClose(knp.view(x, dtype="int32"), x.view("int32")) @parameterized.named_parameters( [ {"testcase_name": "axis_0", "axis": 0}, {"testcase_name": "axis_1", "axis": 1}, ] ) @pytest.mark.skipif( not backend.SUPPORTS_SPARSE_TENSORS, reason="Backend does not support sparse tensors.", ) def test_concatenate_sparse(self, axis): if backend.backend() == "tensorflow": import tensorflow as tf x = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 3)) y = tf.SparseTensor([[0, 0], [1, 1]], [4.0, 5.0], (2, 3)) elif backend.backend() == "jax": import jax.experimental.sparse as jax_sparse x = jax_sparse.BCOO(([1.0, 2.0], [[0, 0], [1, 2]]), shape=(2, 3)) y = jax_sparse.BCOO(([4.0, 5.0], [[0, 0], [1, 1]]), shape=(2, 3)) x_np = backend.convert_to_numpy(x) y_np = backend.convert_to_numpy(y) z = np.random.rand(2, 3).astype("float32") self.assertAllClose( knp.concatenate([x, z], axis=axis), np.concatenate([x_np, z], axis=axis), ) self.assertAllClose( knp.concatenate([z, x], axis=axis), np.concatenate([z, x_np], axis=axis), ) self.assertAllClose( knp.concatenate([x, y], axis=axis), np.concatenate([x_np, y_np], axis=axis), ) self.assertAllClose( knp.Concatenate(axis=axis)([x, z]), np.concatenate([x_np, z], axis=axis), ) self.assertAllClose( knp.Concatenate(axis=axis)([z, x]), np.concatenate([z, x_np], axis=axis), ) self.assertAllClose( knp.Concatenate(axis=axis)([x, y]), np.concatenate([x_np, y_np], axis=axis), ) self.assertSparse(knp.concatenate([x, y], axis=axis)) self.assertSparse(knp.Concatenate(axis=axis)([x, y])) def test_conjugate(self): x = np.array([[1 + 2j, 2 + 3j], [3 + 4j, 4 + 5j]]) self.assertAllClose(knp.conjugate(x), np.conjugate(x)) self.assertAllClose(knp.Conjugate()(x), np.conjugate(x)) def test_conj(self): x = np.array([[1 + 2j, 2 + 3j], [3 + 4j, 4 + 5j]]) self.assertAllClose(knp.conj(x), np.conj(x)) self.assertAllClose(knp.Conj()(x), np.conj(x)) def test_copy(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.copy(x), np.copy(x)) self.assertAllClose(knp.Copy()(x), np.copy(x)) def test_corrcoef(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.corrcoef(x), np.corrcoef(x)) self.assertAllClose(knp.Corrcoef()(x), np.corrcoef(x)) def test_cos(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.cos(x), np.cos(x)) self.assertAllClose(knp.Cos()(x), np.cos(x)) def test_cosh(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.cosh(x), np.cosh(x)) self.assertAllClose(knp.Cosh()(x), np.cosh(x)) def test_count_nonzero(self): x = np.array([[0, 2, 3], [3, 2, 0]]) self.assertAllClose(knp.count_nonzero(x), np.count_nonzero(x)) self.assertAllClose( knp.count_nonzero(x, axis=()), np.count_nonzero(x, axis=()) ) self.assertAllClose( knp.count_nonzero(x, axis=1), np.count_nonzero(x, axis=1), ) self.assertAllClose( knp.count_nonzero(x, axis=(1,)), np.count_nonzero(x, axis=(1,)), ) self.assertAllClose( knp.CountNonzero()(x), np.count_nonzero(x), ) self.assertAllClose( knp.CountNonzero(axis=1)(x), np.count_nonzero(x, axis=1), ) @parameterized.product( axis=[None, 0, 1, -1], dtype=[None, "int32", "float32"], ) def test_cumprod(self, axis, dtype): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose( knp.cumprod(x, axis=axis, dtype=dtype), np.cumprod(x, axis=axis, dtype=dtype or x.dtype), ) self.assertAllClose( knp.Cumprod(axis=axis, dtype=dtype)(x), np.cumprod(x, axis=axis, dtype=dtype or x.dtype), ) 
@parameterized.product( axis=[None, 0, 1, -1], dtype=[None, "int32", "float32"], ) def test_cumsum(self, axis, dtype): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose( knp.cumsum(x, axis=axis, dtype=dtype), np.cumsum(x, axis=axis, dtype=dtype or x.dtype), ) self.assertAllClose( knp.Cumsum(axis=axis, dtype=dtype)(x), np.cumsum(x, axis=axis, dtype=dtype or x.dtype), ) def test_deg2rad(self): x = np.random.uniform(-360, 360, size=(3, 3)) self.assertAllClose(knp.deg2rad(x), np.deg2rad(x)) self.assertAllClose(knp.Deg2rad()(x), np.deg2rad(x)) def test_diag(self): x = np.array([1, 2, 3]) self.assertAllClose(knp.diag(x), np.diag(x)) self.assertAllClose(knp.diag(x, k=1), np.diag(x, k=1)) self.assertAllClose(knp.diag(x, k=-1), np.diag(x, k=-1)) self.assertAllClose(knp.Diag()(x), np.diag(x)) self.assertAllClose(knp.Diag(k=1)(x), np.diag(x, k=1)) self.assertAllClose(knp.Diag(k=-1)(x), np.diag(x, k=-1)) x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.diag(x), np.diag(x)) self.assertAllClose(knp.diag(x, k=1), np.diag(x, k=1)) self.assertAllClose(knp.diag(x, k=-1), np.diag(x, k=-1)) self.assertAllClose(knp.Diag()(x), np.diag(x)) self.assertAllClose(knp.Diag(k=1)(x), np.diag(x, k=1)) self.assertAllClose(knp.Diag(k=-1)(x), np.diag(x, k=-1)) def test_diagflat(self): x = np.array([1, 2, 3]) self.assertAllClose(knp.diagflat(x), np.diagflat(x)) self.assertAllClose(knp.diagflat(x, k=1), np.diagflat(x, k=1)) self.assertAllClose(knp.diagflat(x, k=-1), np.diagflat(x, k=-1)) x = np.array([[1, 2], [3, 4]]) self.assertAllClose(knp.diagflat(x), np.diagflat(x)) self.assertAllClose(knp.diagflat(x, k=1), np.diagflat(x, k=1)) self.assertAllClose(knp.diagflat(x, k=-1), np.diagflat(x, k=-1)) x = np.array([1, 2, 3, 4]) self.assertAllClose(knp.diagflat(x), np.diagflat(x)) self.assertAllClose(knp.diagflat(x, k=2), np.diagflat(x, k=2)) self.assertAllClose(knp.diagflat(x, k=-2), np.diagflat(x, k=-2)) x_float = np.array([1.1, 2.2, 3.3]) self.assertAllClose(knp.diagflat(x_float), np.diagflat(x_float)) x_complex = np.array([1 + 1j, 2 + 2j, 3 + 3j]) self.assertAllClose(knp.diagflat(x_complex), np.diagflat(x_complex)) x = np.array([1, 2, 3]) self.assertAllClose(knp.Diagflat()(x), np.diagflat(x)) self.assertAllClose(knp.Diagflat(k=1)(x), np.diagflat(x, k=1)) self.assertAllClose(knp.Diagflat(k=-1)(x), np.diagflat(x, k=-1)) def test_diagonal(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.diagonal(x), np.diagonal(x)) self.assertAllClose( knp.diagonal(x, offset=1), np.diagonal(x, offset=1), ) self.assertAllClose( knp.diagonal(x, offset=-1), np.diagonal(x, offset=-1) ) self.assertAllClose(knp.Diagonal()(x), np.diagonal(x)) self.assertAllClose(knp.Diagonal(offset=1)(x), np.diagonal(x, offset=1)) self.assertAllClose( knp.Diagonal(offset=-1)(x), np.diagonal(x, offset=-1) ) x = np.ones([2, 3, 4, 5]) self.assertAllClose(knp.diagonal(x), np.diagonal(x)) self.assertAllClose( knp.diagonal(x, offset=1, axis1=2, axis2=3), np.diagonal(x, offset=1, axis1=2, axis2=3), ) self.assertAllClose( knp.diagonal(x, offset=-1, axis1=2, axis2=3), np.diagonal(x, offset=-1, axis1=2, axis2=3), ) def test_diff(self): x = np.array([1, 2, 4, 7, 0]) self.assertAllClose(knp.diff(x), np.diff(x)) self.assertAllClose(knp.diff(x, n=2), np.diff(x, n=2)) self.assertAllClose(knp.diff(x, n=3), np.diff(x, n=3)) x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) self.assertAllClose(knp.diff(x), np.diff(x)) self.assertAllClose(knp.diff(x, axis=0), np.diff(x, axis=0)) self.assertAllClose(knp.diff(x, n=2, axis=0), np.diff(x, n=2, axis=0)) 
self.assertAllClose(knp.diff(x, n=2, axis=1), np.diff(x, n=2, axis=1)) # Test n=0 x = np.array([1, 2, 4, 7, 0]) self.assertAllClose(knp.diff(x, n=0), np.diff(x, n=0)) def test_dot(self): x = np.arange(24).reshape([2, 3, 4]).astype("float32") y = np.arange(12).reshape([4, 3]).astype("float32") z = np.arange(4).astype("float32") self.assertAllClose(knp.dot(x, y), np.dot(x, y)) self.assertAllClose(knp.dot(x, z), np.dot(x, z)) self.assertAllClose(knp.dot(x, 2), np.dot(x, 2)) self.assertAllClose(knp.Dot()(x, y), np.dot(x, y)) self.assertAllClose(knp.Dot()(x, z), np.dot(x, z)) self.assertAllClose(knp.Dot()(x, 2), np.dot(x, 2)) def test_exp(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.exp(x), np.exp(x)) self.assertAllClose(knp.Exp()(x), np.exp(x)) def test_exp2(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.exp2(x), np.exp2(x)) self.assertAllClose(knp.Exp2()(x), np.exp2(x)) def test_expand_dims(self): x = np.ones([2, 3, 4]) self.assertAllClose(knp.expand_dims(x, 0), np.expand_dims(x, 0)) self.assertAllClose(knp.expand_dims(x, 1), np.expand_dims(x, 1)) self.assertAllClose(knp.expand_dims(x, -2), np.expand_dims(x, -2)) self.assertAllClose(knp.ExpandDims(0)(x), np.expand_dims(x, 0)) self.assertAllClose(knp.ExpandDims(1)(x), np.expand_dims(x, 1)) self.assertAllClose(knp.ExpandDims(-2)(x), np.expand_dims(x, -2)) # Multiple axes self.assertAllClose( knp.expand_dims(x, (1, 2)), np.expand_dims(x, (1, 2)) ) self.assertAllClose( knp.expand_dims(x, (-1, -2)), np.expand_dims(x, (-1, -2)) ) self.assertAllClose( knp.expand_dims(x, (-1, 1)), np.expand_dims(x, (-1, 1)) ) self.assertAllClose( knp.ExpandDims((1, 2))(x), np.expand_dims(x, (1, 2)) ) self.assertAllClose( knp.ExpandDims((-1, -2))(x), np.expand_dims(x, (-1, -2)) ) self.assertAllClose( knp.ExpandDims((-1, 1))(x), np.expand_dims(x, (-1, 1)) ) def test_expm1(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.expm1(x), np.expm1(x)) self.assertAllClose(knp.Expm1()(x), np.expm1(x)) def test_flip(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.flip(x), np.flip(x)) self.assertAllClose(knp.flip(x, 0), np.flip(x, 0)) self.assertAllClose(knp.flip(x, 1), np.flip(x, 1)) self.assertAllClose(knp.Flip()(x), np.flip(x)) self.assertAllClose(knp.Flip(0)(x), np.flip(x, 0)) self.assertAllClose(knp.Flip(1)(x), np.flip(x, 1)) def test_floor(self): x = np.array([[1.1, 2.2, -3.3], [3.3, 2.2, -1.1]]) self.assertAllClose(knp.floor(x), np.floor(x)) self.assertAllClose(knp.Floor()(x), np.floor(x)) def test_hstack(self): x = np.array([[1, 2, 3], [3, 2, 1]]) y = np.array([[4, 5, 6], [6, 5, 4]]) self.assertAllClose(knp.hstack([x, y]), np.hstack([x, y])) self.assertAllClose(knp.Hstack()([x, y]), np.hstack([x, y])) x = np.ones([2, 3, 4]) y = np.ones([2, 5, 4]) self.assertAllClose(knp.hstack([x, y]), np.hstack([x, y])) self.assertAllClose(knp.Hstack()([x, y]), np.hstack([x, y])) def test_imag(self): x = np.array([[1 + 1j, 2 + 2j, 3 + 3j], [3 + 3j, 2 + 2j, 1 + 1j]]) self.assertAllClose(knp.imag(x), np.imag(x)) self.assertAllClose(knp.Imag()(x), np.imag(x)) def test_isfinite(self): x = np.array([[1, 2, np.inf], [np.nan, np.nan, np.nan]]) self.assertAllClose(knp.isfinite(x), np.isfinite(x)) self.assertAllClose(knp.Isfinite()(x), np.isfinite(x)) def test_isinf(self): x = np.array([[1, 2, np.inf], [np.nan, np.nan, np.nan]]) self.assertAllClose(knp.isinf(x), np.isinf(x)) self.assertAllClose(knp.Isinf()(x), np.isinf(x)) def test_isnan(self): x = np.array([[1, 2, np.inf], [np.nan, np.nan, np.nan]]) 
self.assertAllClose(knp.isnan(x), np.isnan(x)) self.assertAllClose(knp.Isnan()(x), np.isnan(x)) def test_isneginf(self): x = np.array( [[1, 2, np.inf, -np.inf], [np.nan, np.nan, np.nan, np.nan]] ) self.assertAllClose(knp.isneginf(x), np.isneginf(x)) self.assertAllClose(knp.Isneginf()(x), np.isneginf(x)) def test_isposinf(self): x = np.array( [[1, 2, np.inf, -np.inf], [np.nan, np.nan, np.nan, np.nan]] ) self.assertAllClose(knp.isposinf(x), np.isposinf(x)) self.assertAllClose(knp.Isposinf()(x), np.isposinf(x)) def test_isreal(self): x = np.array([1 + 1j, 1 + 0j, 4.5, 3, 2, 2j], dtype=complex) self.assertAllClose(knp.isreal(x), np.isreal(x)) self.assertAllClose(knp.Isreal()(x), np.isreal(x)) x = np.array([1.0, 2.0, 3.0]) self.assertAllClose(knp.isreal(x), np.isreal(x)) self.assertAllClose(knp.Isreal()(x), np.isreal(x)) def test_log(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.log(x), np.log(x)) self.assertAllClose(knp.Log()(x), np.log(x)) def test_log10(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.log10(x), np.log10(x)) self.assertAllClose(knp.Log10()(x), np.log10(x)) def test_log1p(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.log1p(x), np.log1p(x)) self.assertAllClose(knp.Log1p()(x), np.log1p(x)) def test_log2(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.log2(x), np.log2(x)) self.assertAllClose(knp.Log2()(x), np.log2(x)) def test_logaddexp(self): x = np.array([[1, 2, 3], [3, 2, 1]]) y = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.logaddexp(x, y), np.logaddexp(x, y)) self.assertAllClose(knp.Logaddexp()(x, y), np.logaddexp(x, y)) def test_logaddexp2(self): x = np.array([[1, 2, 3], [3, 2, 1]]) y = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.logaddexp2(x, y), np.logaddexp2(x, y)) self.assertAllClose(knp.Logaddexp2()(x, y), np.logaddexp2(x, y)) def test_logical_not(self): x = np.array([[True, False], [False, True]]) self.assertAllClose(knp.logical_not(x), np.logical_not(x)) self.assertAllClose(knp.LogicalNot()(x), np.logical_not(x)) def test_max(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.max(x), np.max(x)) self.assertAllClose(knp.Max()(x), np.max(x)) self.assertAllClose(knp.max(x, 0), np.max(x, 0)) self.assertAllClose(knp.Max(0)(x), np.max(x, 0)) self.assertAllClose(knp.max(x, 1), np.max(x, 1)) self.assertAllClose(knp.Max(1)(x), np.max(x, 1)) # test max with initial self.assertAllClose(knp.max(x, initial=4), 4) # test empty tensor x = np.array([[]]) self.assertAllClose(knp.max(x, initial=1), np.max(x, initial=1)) self.assertAllClose( knp.max(x, initial=1, keepdims=True), np.max(x, initial=1, keepdims=True), ) def test_min(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.min(x), np.min(x)) self.assertAllClose(knp.Min()(x), np.min(x)) self.assertAllClose(knp.min(x, axis=(0, 1)), np.min(x, (0, 1))) self.assertAllClose(knp.Min((0, 1))(x), np.min(x, (0, 1))) self.assertAllClose(knp.min(x, axis=()), np.min(x, axis=())) self.assertAllClose(knp.Min(())(x), np.min(x, axis=())) self.assertAllClose(knp.min(x, 0), np.min(x, 0)) self.assertAllClose(knp.Min(0)(x), np.min(x, 0)) self.assertAllClose(knp.min(x, 1), np.min(x, 1)) self.assertAllClose(knp.Min(1)(x), np.min(x, 1)) # test min with initial self.assertAllClose(knp.min(x, initial=0), 0) # test empty tensor x = np.array([[]]) self.assertAllClose(knp.min(x, initial=1), np.min(x, initial=1)) self.assertAllClose( knp.min(x, initial=1, keepdims=True), np.min(x, initial=1, keepdims=True), ) def 
test_median(self): x = np.array([[1, 2, 3], [3, 2, 1]]).astype("float32") self.assertAllClose(knp.median(x), np.median(x)) self.assertAllClose( knp.median(x, keepdims=True), np.median(x, keepdims=True) ) self.assertAllClose(knp.median(x, axis=1), np.median(x, axis=1)) self.assertAllClose(knp.median(x, axis=(1,)), np.median(x, axis=(1,))) self.assertAllClose( knp.median(x, axis=1, keepdims=True), np.median(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Median()(x), np.median(x)) self.assertAllClose(knp.Median(axis=1)(x), np.median(x, axis=1)) self.assertAllClose( knp.Median(axis=1, keepdims=True)(x), np.median(x, axis=1, keepdims=True), ) def test_meshgrid(self): x = np.array([1, 2, 3]) y = np.array([4, 5, 6]) z = np.array([7, 8, 9]) self.assertAllClose(knp.meshgrid(x, y), np.meshgrid(x, y)) self.assertAllClose(knp.meshgrid(x, z), np.meshgrid(x, z)) self.assertAllClose( knp.meshgrid(x, y, z, indexing="ij"), np.meshgrid(x, y, z, indexing="ij"), ) self.assertAllClose(knp.Meshgrid()(x, y), np.meshgrid(x, y)) self.assertAllClose(knp.Meshgrid()(x, z), np.meshgrid(x, z)) self.assertAllClose( knp.Meshgrid(indexing="ij")(x, y, z), np.meshgrid(x, y, z, indexing="ij"), ) if backend.backend() == "tensorflow": # Arguments to `jax.numpy.meshgrid` must be 1D now. x = np.ones([1, 2, 3]) y = np.ones([4, 5, 6, 6]) z = np.ones([7, 8]) self.assertAllClose(knp.meshgrid(x, y), np.meshgrid(x, y)) self.assertAllClose(knp.meshgrid(x, z), np.meshgrid(x, z)) self.assertAllClose( knp.meshgrid(x, y, z, indexing="ij"), np.meshgrid(x, y, z, indexing="ij"), ) self.assertAllClose(knp.Meshgrid()(x, y), np.meshgrid(x, y)) self.assertAllClose(knp.Meshgrid()(x, z), np.meshgrid(x, z)) self.assertAllClose( knp.Meshgrid(indexing="ij")(x, y, z), np.meshgrid(x, y, z, indexing="ij"), ) def test_moveaxis(self): x = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) self.assertAllClose(knp.moveaxis(x, 0, -1), np.moveaxis(x, 0, -1)) self.assertAllClose(knp.moveaxis(x, -1, 0), np.moveaxis(x, -1, 0)) self.assertAllClose( knp.moveaxis(x, (0, 1), (1, 0)), np.moveaxis(x, (0, 1), (1, 0)), ) self.assertAllClose( knp.moveaxis(x, [0, 1, 2], [2, 0, 1]), np.moveaxis(x, [0, 1, 2], [2, 0, 1]), ) self.assertAllClose(knp.Moveaxis(-1, 0)(x), np.moveaxis(x, -1, 0)) self.assertAllClose( knp.Moveaxis((0, 1), (1, 0))(x), np.moveaxis(x, (0, 1), (1, 0)), ) self.assertAllClose( knp.Moveaxis([0, 1, 2], [2, 0, 1])(x), np.moveaxis(x, [0, 1, 2], [2, 0, 1]), ) def test_ndim(self): x = np.array([1, 2, 3]) self.assertEqual(knp.ndim(x), np.ndim(x)) self.assertEqual(knp.Ndim()(x), np.ndim(x)) def test_nonzero(self): x = np.array([[0, 0, 3], [3, 0, 0]]) self.assertAllClose(knp.nonzero(x), np.nonzero(x)) self.assertAllClose(knp.Nonzero()(x), np.nonzero(x)) def test_ones_like(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.ones_like(x), np.ones_like(x)) self.assertAllClose(knp.OnesLike()(x), np.ones_like(x)) @parameterized.named_parameters( named_product( dtype=[ "float16", "float32", "float64", "uint8", "int8", "int16", "int32", ], mode=["constant", "reflect", "symmetric"], constant_values=[None, 0, 2], ) ) def test_pad(self, dtype, mode, constant_values): # 2D x = np.ones([2, 3], dtype=dtype) pad_width = ((1, 1), (1, 1)) if mode != "constant": if constant_values is not None: with self.assertRaisesRegex( ValueError, "Argument `constant_values` can only be " "provided when `mode == 'constant'`", ): knp.pad( x, pad_width, mode=mode, constant_values=constant_values ) return # constant_values is None kwargs = {} else: # mode is constant kwargs = 
{"constant_values": constant_values or 0} self.assertAllClose( knp.pad(x, pad_width, mode=mode, constant_values=constant_values), np.pad(x, pad_width, mode=mode, **kwargs), ) self.assertAllClose( knp.Pad(pad_width, mode=mode)(x, constant_values=constant_values), np.pad(x, pad_width, mode=mode, **kwargs), ) # 5D (pad last 3D) x = np.ones([2, 3, 4, 5, 6], dtype=dtype) pad_width = ((0, 0), (0, 0), (2, 3), (1, 1), (1, 1)) self.assertAllClose( knp.pad(x, pad_width, mode=mode, constant_values=constant_values), np.pad(x, pad_width, mode=mode, **kwargs), ) self.assertAllClose( knp.Pad(pad_width, mode=mode)(x, constant_values=constant_values), np.pad(x, pad_width, mode=mode, **kwargs), ) # 5D (pad arbitrary dimensions) if backend.backend() == "torch" and mode != "constant": self.skipTest( "reflect and symmetric padding for arbitrary dimensions " "are not supported by torch" ) x = np.ones([2, 3, 4, 5, 6], dtype=dtype) pad_width = ((1, 1), (2, 1), (3, 2), (4, 3), (5, 4)) self.assertAllClose( knp.pad(x, pad_width, mode=mode, constant_values=constant_values), np.pad(x, pad_width, mode=mode, **kwargs), ) self.assertAllClose( knp.Pad(pad_width, mode=mode)(x, constant_values=constant_values), np.pad(x, pad_width, mode=mode, **kwargs), ) def test_prod(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.prod(x), np.prod(x)) self.assertAllClose(knp.prod(x, axis=()), np.prod(x, axis=())) self.assertAllClose(knp.prod(x, axis=1), np.prod(x, axis=1)) self.assertAllClose(knp.prod(x, axis=(1,)), np.prod(x, axis=(1,))) self.assertAllClose( knp.prod(x, axis=1, keepdims=True), np.prod(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Prod()(x), np.prod(x)) self.assertAllClose(knp.Prod(axis=1)(x), np.prod(x, axis=1)) self.assertAllClose( knp.Prod(axis=1, keepdims=True)(x), np.prod(x, axis=1, keepdims=True), ) def test_ravel(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.ravel(x), np.ravel(x)) self.assertAllClose(knp.Ravel()(x), np.ravel(x)) def test_unravel_index(self): x = np.array([0, 1, 2, 3]) shape = (2, 2) self.assertAllClose( knp.unravel_index(x, shape), np.unravel_index(x, shape) ) x = np.array([[0, 1], [2, 3]]) shape = (2, 2) self.assertAllClose( knp.unravel_index(x, shape), np.unravel_index(x, shape) ) def test_real(self): x = np.array([[1, 2, 3 - 3j], [3, 2, 1 + 5j]]) self.assertAllClose(knp.real(x), np.real(x)) self.assertAllClose(knp.Real()(x), np.real(x)) def test_reciprocal(self): x = np.array([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]]) self.assertAllClose(knp.reciprocal(x), np.reciprocal(x)) self.assertAllClose(knp.Reciprocal()(x), np.reciprocal(x)) def test_repeat(self): x = np.array([[1, 2], [3, 4]]) self.assertAllClose(knp.repeat(x, 2), np.repeat(x, 2)) self.assertAllClose( knp.Repeat(np.array([2]))(x), np.repeat(x, np.array([2])), ) self.assertAllClose(knp.repeat(x, 3, axis=1), np.repeat(x, 3, axis=1)) self.assertAllClose( knp.repeat(x, np.array([1, 2]), axis=-1), np.repeat(x, np.array([1, 2]), axis=-1), ) self.assertAllClose(knp.Repeat(2)(x), np.repeat(x, 2)) self.assertAllClose(knp.Repeat(3, axis=1)(x), np.repeat(x, 3, axis=1)) self.assertAllClose( knp.Repeat(np.array([1, 2]), axis=0)(x), np.repeat(x, np.array([1, 2]), axis=0), ) def test_reshape(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.reshape(x, [3, 2]), np.reshape(x, [3, 2])) self.assertAllClose(knp.Reshape([3, 2])(x), np.reshape(x, [3, 2])) self.assertAllClose(knp.Reshape(-1)(x), np.reshape(x, -1)) def test_roll(self): x = np.array([[1, 2, 3], [3, 2, 1]]) 
self.assertAllClose(knp.roll(x, 1), np.roll(x, 1)) self.assertAllClose(knp.roll(x, 1, axis=1), np.roll(x, 1, axis=1)) self.assertAllClose(knp.roll(x, -1, axis=0), np.roll(x, -1, axis=0)) self.assertAllClose(knp.Roll(1)(x), np.roll(x, 1)) self.assertAllClose(knp.Roll(1, axis=1)(x), np.roll(x, 1, axis=1)) self.assertAllClose(knp.Roll(-1, axis=0)(x), np.roll(x, -1, axis=0)) def test_round(self): x = np.array([[1.1, 2.5, 3.9], [3.2, 2.3, 1.8]]) self.assertAllClose(knp.round(x), np.round(x)) self.assertAllClose(knp.Round()(x), np.round(x)) # Test with decimal=1 self.assertAllClose(knp.round(x, decimals=1), np.round(x, decimals=1)) self.assertAllClose(knp.Round(decimals=1)(x), np.round(x, decimals=1)) # Test with integers x = np.array([[1, 2, 3], [3, 2, 1]], dtype="int32") self.assertAllClose(knp.round(x, decimals=1), np.round(x, decimals=1)) self.assertAllClose(knp.Round(decimals=1)(x), np.round(x, decimals=1)) # Test with integers and decimal < 0 x = np.array([[123, 234, 345], [345, 234, 123]], dtype="int32") self.assertAllClose(knp.round(x, decimals=-1), np.round(x, decimals=-1)) self.assertAllClose(knp.Round(decimals=-1)(x), np.round(x, decimals=-1)) def test_searchsorted(self): a = np.array([1, 2, 2, 3, 4, 5, 5]) v = np.array([4, 3, 5, 1, 2]) expected = np.searchsorted(a, v).astype("int32") self.assertAllEqual(knp.searchsorted(a, v), expected) self.assertAllEqual(knp.SearchSorted()(a, v), expected) def test_sign(self): x = np.array([[1, -2, 3], [-3, 2, -1]]) self.assertAllClose(knp.sign(x), np.sign(x)) self.assertAllClose(knp.Sign()(x), np.sign(x)) def test_signbit(self): x = np.array([[0.0, -0.0, -1.1e-45], [1.1e-38, 2, -1]]) self.assertAllClose(knp.signbit(x), np.signbit(x)) self.assertAllClose(knp.Signbit()(x), np.signbit(x)) def test_sin(self): x = np.array([[1, -2, 3], [-3, 2, -1]]) self.assertAllClose(knp.sin(x), np.sin(x)) self.assertAllClose(knp.Sin()(x), np.sin(x)) def test_sinh(self): x = np.array([[1, -2, 3], [-3, 2, -1]]) self.assertAllClose(knp.sinh(x), np.sinh(x)) self.assertAllClose(knp.Sinh()(x), np.sinh(x)) def test_size(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.size(x), np.size(x)) self.assertAllClose(knp.Size()(x), np.size(x)) def test_sort(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.sort(x), np.sort(x)) self.assertAllClose(knp.Sort()(x), np.sort(x)) self.assertAllClose(knp.sort(x, axis=0), np.sort(x, axis=0)) self.assertAllClose(knp.Sort(axis=0)(x), np.sort(x, axis=0)) def test_split(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertIsInstance(knp.split(x, 2), list) self.assertAllClose(knp.split(x, 2), np.split(x, 2)) self.assertAllClose(knp.Split(2)(x), np.split(x, 2)) self.assertAllClose( knp.split(x, [1, 2], axis=1), np.split(x, [1, 2], axis=1), ) self.assertAllClose( knp.Split([1, 2], axis=1)(x), np.split(x, [1, 2], axis=1), ) # test invalid indices_or_sections with self.assertRaises(Exception): knp.split(x, 3) # test zero dimension x = np.ones(shape=(0,)) self.assertEqual(len(knp.split(x, 2)), 2) self.assertEqual(len(knp.Split(2)(x)), 2) # test indices_or_sections as tensor x = knp.array([[1, 2, 3], [3, 2, 1]]) indices_or_sections = knp.array([1, 2]) x_np = np.array([[1, 2, 3], [3, 2, 1]]) indices_or_sections_np = np.array([1, 2]) self.assertAllClose( knp.split(x, indices_or_sections, axis=1), np.split(x_np, indices_or_sections_np, axis=1), ) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="Only test tensorflow backend", ) def test_split_with_jit_in_tf(self): import tensorflow as tf x = 
knp.array([[1, 2, 3], [3, 2, 1]]) indices = knp.array([1, 2]) x_np = np.array([[1, 2, 3], [3, 2, 1]]) indices_np = np.array([1, 2]) @tf.function(jit_compile=True) def fn(x, indices, axis): return knp.split(x, indices, axis=axis) self.assertAllClose( fn(x, indices, axis=1), np.split(x_np, indices_np, axis=1), ) def test_sqrt(self): x = np.array([[1, 4, 9], [16, 25, 36]], dtype="float32") ref_y = np.sqrt(x) y = knp.sqrt(x) self.assertEqual(standardize_dtype(y.dtype), "float32") self.assertAllClose(y, ref_y) y = knp.Sqrt()(x) self.assertEqual(standardize_dtype(y.dtype), "float32") self.assertAllClose(y, ref_y) def test_sqrt_int32(self): x = np.array([[1, 4, 9], [16, 25, 36]], dtype="int32") ref_y = np.sqrt(x) y = knp.sqrt(x) self.assertEqual(standardize_dtype(y.dtype), "float32") self.assertAllClose(y, ref_y) y = knp.Sqrt()(x) self.assertEqual(standardize_dtype(y.dtype), "float32") self.assertAllClose(y, ref_y) def test_stack(self): x = np.array([[1, 2, 3], [3, 2, 1]]) y = np.array([[4, 5, 6], [6, 5, 4]]) self.assertAllClose(knp.stack([x, y]), np.stack([x, y])) self.assertAllClose(knp.stack([x, y], axis=1), np.stack([x, y], axis=1)) self.assertAllClose(knp.Stack()([x, y]), np.stack([x, y])) self.assertAllClose(knp.Stack(axis=1)([x, y]), np.stack([x, y], axis=1)) def test_std(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.std(x), np.std(x)) self.assertAllClose(knp.std(x, axis=1), np.std(x, axis=1)) self.assertAllClose( knp.std(x, axis=1, keepdims=True), np.std(x, axis=1, keepdims=True), ) self.assertAllClose(knp.Std()(x), np.std(x)) self.assertAllClose(knp.Std(axis=1)(x), np.std(x, axis=1)) self.assertAllClose( knp.Std(axis=1, keepdims=True)(x), np.std(x, axis=1, keepdims=True), ) def test_swapaxes(self): x = np.arange(24).reshape([1, 2, 3, 4]) self.assertAllClose( knp.swapaxes(x, 0, 1), np.swapaxes(x, 0, 1), ) self.assertAllClose( knp.Swapaxes(0, 1)(x), np.swapaxes(x, 0, 1), ) def test_tan(self): x = np.array([[1, -2, 3], [-3, 2, -1]]) self.assertAllClose(knp.tan(x), np.tan(x)) self.assertAllClose(knp.Tan()(x), np.tan(x)) def test_tanh(self): x = np.array([[1, -2, 3], [-3, 2, -1]]) self.assertAllClose(knp.tanh(x), np.tanh(x)) self.assertAllClose(knp.Tanh()(x), np.tanh(x)) def test_tile(self): x = np.array([[1, 2, 3], [3, 2, 1]]) self.assertAllClose(knp.tile(x, 2), np.tile(x, 2)) self.assertAllClose(knp.tile(x, [2, 3]), np.tile(x, [2, 3])) self.assertAllClose(knp.Tile([2, 3])(x), np.tile(x, [2, 3])) # If repeats.ndim > x.ndim self.assertAllClose(knp.tile(x, [2, 3, 4]), np.tile(x, [2, 3, 4])) self.assertAllClose(knp.Tile([2, 3, 4])(x), np.tile(x, [2, 3, 4])) # If repeats.ndim < x.ndim self.assertAllClose(knp.tile(x, [2]), np.tile(x, [2])) self.assertAllClose(knp.Tile([2])(x), np.tile(x, [2])) def test_trace(self): x = np.arange(24).reshape([1, 2, 3, 4]) self.assertAllClose(knp.trace(x), np.trace(x)) self.assertAllClose( knp.trace(x, axis1=2, axis2=3), np.trace(x, axis1=2, axis2=3), ) self.assertAllClose( knp.Trace(axis1=2, axis2=3)(x), np.trace(x, axis1=2, axis2=3), ) def test_tril(self): x = np.arange(24).reshape([1, 2, 3, 4]) self.assertAllClose(knp.tril(x), np.tril(x)) self.assertAllClose(knp.tril(x, -1), np.tril(x, -1)) self.assertAllClose(knp.Tril(-1)(x), np.tril(x, -1)) x = np.ones([5, 5]) self.assertAllClose(knp.tril(x), np.tril(x)) self.assertAllClose(knp.tril(x, -1), np.tril(x, -1)) self.assertAllClose(knp.Tril(-1)(x), np.tril(x, -1)) def test_tril_in_layer(self): # https://github.com/keras-team/keras/issues/18890 x = keras.Input((None, 3)) y1 = 
keras.layers.Lambda( lambda x: keras.ops.tril( keras.ops.ones((keras.ops.shape(x)[1], keras.ops.shape(x)[1])) ), output_shape=(None, None, 3), )(x) y2 = keras.layers.Lambda( lambda x: keras.ops.tril( keras.ops.ones((keras.ops.shape(x)[1], keras.ops.shape(x)[1])), k=-1, ), output_shape=(None, None, 3), )(x) model = keras.Model(x, [y1, y2]) result = model(np.ones((1, 2, 3), "float32")) self.assertAllClose( result, [np.tril(np.ones((2, 2))), np.tril(np.ones((2, 2)), k=-1)] ) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="Only test tensorflow backend", ) def test_tril_with_jit_in_tf(self): import tensorflow as tf x = knp.reshape(knp.arange(24), [1, 2, 3, 4]) k = knp.array(0) x_np = np.reshape(np.arange(24), [1, 2, 3, 4]) k_np = np.array(0) @tf.function(jit_compile=True) def fn(x, k): return knp.tril(x, k=k) self.assertAllClose(fn(x, k), np.tril(x_np, k_np)) def test_triu(self): x = np.arange(24).reshape([1, 2, 3, 4]) self.assertAllClose(knp.triu(x), np.triu(x)) self.assertAllClose(knp.triu(x, -1), np.triu(x, -1)) self.assertAllClose(knp.Triu(-1)(x), np.triu(x, -1)) x = np.ones([5, 5]) self.assertAllClose(knp.triu(x), np.triu(x)) self.assertAllClose(knp.triu(x, -1), np.triu(x, -1)) self.assertAllClose(knp.Triu(-1)(x), np.triu(x, -1)) def test_triu_in_layer(self): # https://github.com/keras-team/keras/issues/18890 x = keras.Input((None, 3)) y1 = keras.layers.Lambda( lambda x: keras.ops.triu( keras.ops.ones((keras.ops.shape(x)[1], keras.ops.shape(x)[1])) ), output_shape=(None, None, 3), )(x) y2 = keras.layers.Lambda( lambda x: keras.ops.triu( keras.ops.ones((keras.ops.shape(x)[1], keras.ops.shape(x)[1])), k=-1, ), output_shape=(None, None, 3), )(x) model = keras.Model(x, [y1, y2]) result = model(np.ones((1, 2, 3), "float32")) self.assertAllClose( result, [np.triu(np.ones((2, 2))), np.triu(np.ones((2, 2)), k=-1)] ) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="Only test tensorflow backend", ) def test_triu_with_jit_in_tf(self): import tensorflow as tf x = knp.reshape(knp.arange(24), [1, 2, 3, 4]) k = knp.array(0) x_np = np.reshape(np.arange(24), [1, 2, 3, 4]) k_np = np.array(0) @tf.function(jit_compile=True) def fn(x, k): return knp.triu(x, k=k) self.assertAllClose(fn(x, k), np.triu(x_np, k_np)) def test_trunc(self): x = np.array([-1.7, -2.5, -0.2, 0.2, 1.5, 1.7, 2.0]) self.assertAllClose(knp.trunc(x), np.trunc(x)) self.assertAllClose(knp.Trunc()(x), np.trunc(x)) x = np.array([-1, -2, -0, 0, 1, 1, 2], dtype="int32") self.assertAllClose(knp.trunc(x), np.trunc(x)) self.assertAllClose(knp.Trunc()(x), np.trunc(x)) def test_vstack(self): x = np.array([[1, 2, 3], [3, 2, 1]]) y = np.array([[4, 5, 6], [6, 5, 4]]) self.assertAllClose(knp.vstack([x, y]), np.vstack([x, y])) self.assertAllClose(knp.Vstack()([x, y]), np.vstack([x, y])) def test_floor_divide(self): x = np.array([[1, 2, 3], [3, 2, 1]]) y = np.array([[4, 5, 6], [3, 2, 1]]) z = np.array([[[1, 2, 3], [3, 2, 1]]]) self.assertAllClose(knp.floor_divide(x, y), np.floor_divide(x, y)) self.assertAllClose(knp.floor_divide(x, z), np.floor_divide(x, z)) self.assertAllClose(knp.FloorDivide()(x, y), np.floor_divide(x, y)) self.assertAllClose(knp.FloorDivide()(x, z), np.floor_divide(x, z)) def test_xor(self): x = np.array([[True, False], [True, True]]) y = np.array([[False, False], [True, False]]) self.assertAllClose(knp.logical_xor(x, y), np.logical_xor(x, y)) self.assertAllClose(knp.logical_xor(x, True), np.logical_xor(x, True)) self.assertAllClose(knp.logical_xor(True, x), np.logical_xor(True, x)) 
self.assertAllClose(knp.LogicalXor()(x, y), np.logical_xor(x, y)) self.assertAllClose(knp.LogicalXor()(x, True), np.logical_xor(x, True)) self.assertAllClose(knp.LogicalXor()(True, x), np.logical_xor(True, x)) def test_correlate(self): x = np.array([1, 2, 3]) y = np.array([0, 1, 0.5]) self.assertAllClose(knp.correlate(x, y), np.correlate(x, y)) self.assertAllClose( knp.correlate(x, y, mode="same"), np.correlate(x, y, mode="same") ) self.assertAllClose( knp.correlate(x, y, mode="full"), np.correlate(x, y, mode="full") ) self.assertAllClose(knp.Correlate()(x, y), np.correlate(x, y)) self.assertAllClose( knp.Correlate(mode="same")(x, y), np.correlate(x, y, mode="same") ) self.assertAllClose( knp.Correlate(mode="full")(x, y), np.correlate(x, y, mode="full") ) def test_correlate_different_size(self): x = np.array([1, 3, 5]) y = np.array([7, 9]) self.assertAllClose(knp.correlate(x, y), np.correlate(x, y)) self.assertAllClose( knp.correlate(x, y, mode="same"), np.correlate(x, y, mode="same") ) self.assertAllClose( knp.correlate(x, y, mode="full"), np.correlate(x, y, mode="full") ) self.assertAllClose(knp.Correlate()(x, y), np.correlate(x, y)) self.assertAllClose( knp.Correlate(mode="same")(x, y), np.correlate(x, y, mode="same") ) self.assertAllClose( knp.Correlate(mode="full")(x, y), np.correlate(x, y, mode="full") ) def test_select(self): x = np.arange(6) condlist = [x < 3, x > 3] choicelist = [x, x**2] y = knp.select(condlist, choicelist, 42) self.assertAllClose(y, [0, 1, 2, 42, 16, 25]) # Test with tuples condlist = (x < 3, x > 3) choicelist = (x, x**2) y = knp.select(condlist, choicelist, 42) self.assertAllClose(y, [0, 1, 2, 42, 16, 25]) # Test with symbolic tensors x = backend.KerasTensor((6,)) condlist = [x < 3, x > 3] choicelist = [x, x**2] y = knp.select(condlist, choicelist, 42) self.assertEqual(y.shape, (6,)) def test_slogdet(self): x = np.ones((4, 4)) * 2.0 out = knp.slogdet(x) self.assertAllClose(out[0], 0) self.assertAllClose(out[0], 0) x = backend.KerasTensor((3, 3)) out = knp.slogdet(x) self.assertEqual(out[0].shape, ()) self.assertEqual(out[1].shape, ()) x = backend.KerasTensor((2, 4, 3, 3)) out = knp.slogdet(x) self.assertEqual(out[0].shape, ()) self.assertEqual(out[1].shape, (2, 4)) def test_nan_to_num(self): x = knp.array([1.0, np.nan, np.inf, -np.inf]) self.assertAllClose( knp.nan_to_num(x), [1.0, 0.0, 3.402823e38, -3.402823e38] ) self.assertAllClose( knp.NanToNum()(x), [1.0, 0.0, 3.402823e38, -3.402823e38] ) self.assertAllClose( knp.nan_to_num(x, nan=2, posinf=3, neginf=4), [1.0, 2.0, 3.0, 4.0] ) self.assertAllClose( knp.NanToNum(nan=2, posinf=3, neginf=4)(x), [1.0, 2.0, 3.0, 4.0] ) x = backend.KerasTensor((3, 4)) self.assertEqual( knp.NanToNum(nan=2, posinf=3, neginf=4)(x).shape, (3, 4) ) def test_vectorize(self): # Basic functionality def myfunc(a, b): return a + b vfunc = np.vectorize(myfunc) y = vfunc([1, 2, 3, 4], 2) self.assertAllClose(y, [3, 4, 5, 6]) # Test signature arg vfunc = knp.vectorize(knp.trace, signature="(d,d)->()") out = vfunc(np.eye(4)) self.assertAllClose( out, np.vectorize(np.trace, signature="(d,d)->()")(np.eye(4)) ) vfunc = knp.vectorize(knp.diag, signature="(d,d)->(d)") out = vfunc(np.eye(4)) self.assertAllClose( out, np.vectorize(np.diag, signature="(d,d)->(d)")(np.eye(4)) ) def test_argpartition(self): x = np.array([3, 4, 2, 1]) self.assertAllClose(knp.argpartition(x, 2), np.argpartition(x, 2)) self.assertAllClose(knp.Argpartition(2)(x), np.argpartition(x, 2)) x = np.array([[3, 4, 2], [1, 3, 4]]) self.assertAllClose(knp.argpartition(x, 1), 
np.argpartition(x, 1)) self.assertAllClose(knp.Argpartition(1)(x), np.argpartition(x, 1)) x = np.array([[[3, 4], [2, 3]], [[1, 2], [0, 1]]]) self.assertAllClose(knp.argpartition(x, 1), np.argpartition(x, 1)) self.assertAllClose(knp.Argpartition(1)(x), np.argpartition(x, 1)) def test_angle(self): x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]]) self.assertAllClose(knp.angle(x), np.angle(x)) self.assertAllClose(knp.Angle()(x), np.angle(x))
NumpyOneInputOpsCorrectnessTest
python
readthedocs__readthedocs.org
readthedocs/builds/migrations/0015_uploading_build_state.py
{ "start": 149, "end": 923 }
class ____(migrations.Migration):
    safe = Safe.after_deploy()

    dependencies = [
        ("builds", "0014_migrate-doctype-from-project-to-version"),
    ]

    operations = [
        migrations.AlterField(
            model_name="build",
            name="state",
            field=models.CharField(
                choices=[
                    ("triggered", "Triggered"),
                    ("cloning", "Cloning"),
                    ("installing", "Installing"),
                    ("building", "Building"),
                    ("uploading", "Uploading"),
                    ("finished", "Finished"),
                ],
                default="finished",
                max_length=55,
                verbose_name="State",
            ),
        ),
    ]
Migration
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/functions.py
{ "start": 60393, "end": 60532 }
class ____(AnsiFunction[str]):
    """The SESSION_USER() SQL function."""

    type = sqltypes.String()
    inherit_cache = True
session_user
python
getsentry__sentry
tests/sentry/deletions/test_monitor_checkin.py
{ "start": 437, "end": 14318 }
class ____(APITestCase, TransactionTestCase, HybridCloudTestMixin): def test_delete_checkin_directly(self) -> None: """ Test that deleting a MonitorCheckIn directly (not via Monitor deletion) properly handles MonitorIncident children via MonitorCheckInDeletionTask. """ project = self.create_project(name="test") env = Environment.objects.create(organization_id=project.organization_id, name="prod") monitor = Monitor.objects.create( organization_id=project.organization.id, project_id=project.id, config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB}, ) monitor_env = MonitorEnvironment.objects.create( monitor=monitor, environment_id=env.id, ) # Create check-ins checkin1 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_env, project_id=project.id, status=CheckInStatus.ERROR, ) checkin2 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_env, project_id=project.id, status=CheckInStatus.OK, ) checkin3 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_env, project_id=project.id, status=CheckInStatus.ERROR, ) # Create incidents referencing checkin1 incident1 = MonitorIncident.objects.create( monitor=monitor, monitor_environment=monitor_env, starting_checkin=checkin1, resolving_checkin=checkin2, ) incident2 = MonitorIncident.objects.create( monitor=monitor, monitor_environment=monitor_env, starting_checkin=checkin3, resolving_checkin=checkin1, # checkin1 is also a resolving checkin ) detection1 = MonitorEnvBrokenDetection.objects.create( monitor_incident=incident1, ) detection2 = MonitorEnvBrokenDetection.objects.create( monitor_incident=incident2, ) # Verify initial state assert MonitorCheckIn.objects.count() == 3 assert MonitorIncident.objects.count() == 2 assert MonitorEnvBrokenDetection.objects.count() == 2 # Delete checkin1 directly (not via Monitor) self.ScheduledDeletion.schedule(instance=checkin1, days=0) with self.tasks(): run_scheduled_deletions() # Verify checkin1 is deleted assert not MonitorCheckIn.objects.filter(id=checkin1.id).exists() # Verify both incidents are deleted (incident1 has checkin1 as starting_checkin, # incident2 has checkin1 as resolving_checkin) assert not MonitorIncident.objects.filter(id=incident1.id).exists() assert not MonitorIncident.objects.filter(id=incident2.id).exists() # Verify detections are deleted assert not MonitorEnvBrokenDetection.objects.filter(id=detection1.id).exists() assert not MonitorEnvBrokenDetection.objects.filter(id=detection2.id).exists() # Verify other check-ins still exist assert MonitorCheckIn.objects.filter(id=checkin2.id).exists() assert MonitorCheckIn.objects.filter(id=checkin3.id).exists() assert MonitorCheckIn.objects.count() == 2 # Verify monitor and environment still exist assert Monitor.objects.filter(id=monitor.id).exists() assert MonitorEnvironment.objects.filter(id=monitor_env.id).exists() def test_delete_monitor_with_incidents_and_detections(self) -> None: """ Test that deleting a Monitor properly cascades to: - MonitorIncidents (deleted first via child relations) - MonitorCheckIns (bulk deleted after incidents) - MonitorEnvBrokenDetection (via MonitorIncident deletion) This verifies the ordered deletion: MonitorIncident → MonitorCheckIn → MonitorEnvironment """ project = self.create_project(name="test") env = Environment.objects.create(organization_id=project.organization_id, name="prod") monitor = Monitor.objects.create( organization_id=project.organization.id, project_id=project.id, config={"schedule": "* * * * *", 
"schedule_type": ScheduleType.CRONTAB}, ) monitor_env = MonitorEnvironment.objects.create( monitor=monitor, environment_id=env.id, ) # Create multiple check-ins to test bulk deletion checkin1 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_env, project_id=project.id, status=CheckInStatus.ERROR, ) checkin2 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_env, project_id=project.id, status=CheckInStatus.ERROR, ) checkin3 = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_env, project_id=project.id, status=CheckInStatus.OK, ) # Create incidents - one starting with checkin1, resolving with checkin3 incident1 = MonitorIncident.objects.create( monitor=monitor, monitor_environment=monitor_env, starting_checkin=checkin1, resolving_checkin=checkin3, ) # Create another incident - starting with checkin2, not yet resolved incident2 = MonitorIncident.objects.create( monitor=monitor, monitor_environment=monitor_env, starting_checkin=checkin2, resolving_checkin=None, ) # Create MonitorEnvBrokenDetection pointing to incidents detection1 = MonitorEnvBrokenDetection.objects.create( monitor_incident=incident1, ) detection2 = MonitorEnvBrokenDetection.objects.create( monitor_incident=incident2, ) # Verify initial state assert MonitorCheckIn.objects.filter(monitor=monitor).count() == 3 assert MonitorIncident.objects.filter(monitor=monitor).count() == 2 assert MonitorEnvBrokenDetection.objects.count() == 2 # Schedule monitor for deletion (which should cascade to check-ins) self.ScheduledDeletion.schedule(instance=monitor, days=0) with self.tasks(): run_scheduled_deletions() # Verify everything is deleted assert not Monitor.objects.filter(id=monitor.id).exists() assert not MonitorEnvironment.objects.filter(id=monitor_env.id).exists() assert not MonitorCheckIn.objects.filter( id__in=[checkin1.id, checkin2.id, checkin3.id] ).exists() assert not MonitorIncident.objects.filter(id__in=[incident1.id, incident2.id]).exists() assert not MonitorEnvBrokenDetection.objects.filter( id__in=[detection1.id, detection2.id] ).exists() # Shared objects should continue to exist assert Environment.objects.filter(id=env.id).exists() assert self.project.__class__.objects.filter(id=self.project.id).exists() def test_delete_monitor_with_shared_incident(self) -> None: """ Test that deleting a Monitor handles edge case where one incident references multiple check-ins (starting_checkin != resolving_checkin). 
""" project = self.create_project(name="test") env = Environment.objects.create(organization_id=project.organization_id, name="prod") monitor = Monitor.objects.create( organization_id=project.organization.id, project_id=project.id, config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB}, ) monitor_env = MonitorEnvironment.objects.create( monitor=monitor, environment_id=env.id, ) # Create check-ins failed_checkin = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_env, project_id=project.id, status=CheckInStatus.ERROR, ) ok_checkin = MonitorCheckIn.objects.create( monitor=monitor, monitor_environment=monitor_env, project_id=project.id, status=CheckInStatus.OK, ) # Create incident that references BOTH check-ins incident = MonitorIncident.objects.create( monitor=monitor, monitor_environment=monitor_env, starting_checkin=failed_checkin, resolving_checkin=ok_checkin, ) detection = MonitorEnvBrokenDetection.objects.create( monitor_incident=incident, ) # Schedule monitor for deletion self.ScheduledDeletion.schedule(instance=monitor, days=0) with self.tasks(): run_scheduled_deletions() # Verify complete deletion assert not MonitorCheckIn.objects.filter(id__in=[failed_checkin.id, ok_checkin.id]).exists() assert not MonitorIncident.objects.filter(id=incident.id).exists() assert not MonitorEnvBrokenDetection.objects.filter(id=detection.id).exists() def test_delete_monitor_only_affects_its_own_data(self) -> None: """ Test that deleting one Monitor doesn't affect another Monitor's data. This verifies that deletion queries are properly scoped by monitor_id. """ project = self.create_project(name="test") env = Environment.objects.create(organization_id=project.organization_id, name="prod") # Create first monitor with check-ins and incidents monitor1 = Monitor.objects.create( organization_id=project.organization.id, project_id=project.id, name="Monitor 1", config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB}, ) monitor_env1 = MonitorEnvironment.objects.create( monitor=monitor1, environment_id=env.id, ) checkin1_m1 = MonitorCheckIn.objects.create( monitor=monitor1, monitor_environment=monitor_env1, project_id=project.id, status=CheckInStatus.ERROR, ) checkin2_m1 = MonitorCheckIn.objects.create( monitor=monitor1, monitor_environment=monitor_env1, project_id=project.id, status=CheckInStatus.OK, ) incident1_m1 = MonitorIncident.objects.create( monitor=monitor1, monitor_environment=monitor_env1, starting_checkin=checkin1_m1, resolving_checkin=checkin2_m1, ) detection1_m1 = MonitorEnvBrokenDetection.objects.create( monitor_incident=incident1_m1, ) # Create second monitor with check-ins and incidents monitor2 = Monitor.objects.create( organization_id=project.organization.id, project_id=project.id, name="Monitor 2", config={"schedule": "0 * * * *", "schedule_type": ScheduleType.CRONTAB}, ) monitor_env2 = MonitorEnvironment.objects.create( monitor=monitor2, environment_id=env.id, ) checkin1_m2 = MonitorCheckIn.objects.create( monitor=monitor2, monitor_environment=monitor_env2, project_id=project.id, status=CheckInStatus.ERROR, ) checkin2_m2 = MonitorCheckIn.objects.create( monitor=monitor2, monitor_environment=monitor_env2, project_id=project.id, status=CheckInStatus.OK, ) incident1_m2 = MonitorIncident.objects.create( monitor=monitor2, monitor_environment=monitor_env2, starting_checkin=checkin1_m2, resolving_checkin=checkin2_m2, ) detection1_m2 = MonitorEnvBrokenDetection.objects.create( monitor_incident=incident1_m2, ) # Verify initial state - both 
monitors exist with their data assert MonitorCheckIn.objects.count() == 4 assert MonitorIncident.objects.count() == 2 assert MonitorEnvBrokenDetection.objects.count() == 2 # Delete only monitor1 self.ScheduledDeletion.schedule(instance=monitor1, days=0) with self.tasks(): run_scheduled_deletions() # Verify monitor1 and its data are deleted assert not Monitor.objects.filter(id=monitor1.id).exists() assert not MonitorEnvironment.objects.filter(id=monitor_env1.id).exists() assert not MonitorCheckIn.objects.filter(id__in=[checkin1_m1.id, checkin2_m1.id]).exists() assert not MonitorIncident.objects.filter(id=incident1_m1.id).exists() assert not MonitorEnvBrokenDetection.objects.filter(id=detection1_m1.id).exists() # Verify monitor2 and ALL its data still exist (unaffected) assert Monitor.objects.filter(id=monitor2.id).exists() assert MonitorEnvironment.objects.filter(id=monitor_env2.id).exists() assert MonitorCheckIn.objects.filter(id=checkin1_m2.id).exists() assert MonitorCheckIn.objects.filter(id=checkin2_m2.id).exists() assert MonitorIncident.objects.filter(id=incident1_m2.id).exists() assert MonitorEnvBrokenDetection.objects.filter(id=detection1_m2.id).exists() # Verify counts - should only have monitor2's data remaining assert MonitorCheckIn.objects.count() == 2 assert MonitorIncident.objects.count() == 1 assert MonitorEnvBrokenDetection.objects.count() == 1
DeleteMonitorCheckInTest
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/type_api.py
{ "start": 54693, "end": 55830 }
class ____(TypeEngineMixin):
    """Indicates DB-native types supported by an :class:`.Emulated` type."""

    @classmethod
    def adapt_native_to_emulated(
        cls,
        impl: Union[TypeEngine[Any], TypeEngineMixin],
        **kw: Any,
    ) -> TypeEngine[Any]:
        """Given an impl, adapt this type's class to the impl assuming
        "emulated".
        """
        impltype = impl.__class__
        return impl.adapt(impltype, **kw)

    @classmethod
    def adapt_emulated_to_native(
        cls,
        impl: Union[TypeEngine[Any], TypeEngineMixin],
        **kw: Any,
    ) -> TypeEngine[Any]:
        """Given an impl, adapt this type's class to the impl assuming "native".

        The impl will be an :class:`.Emulated` class but not a
        :class:`.NativeForEmulated`.

        e.g.: postgresql.ENUM produces a type given an Enum instance.

        """
        # dmypy seems to crash on this
        return cls(**kw)  # type: ignore

    # dmypy seems to crash with this, on repeated runs with changes
    # if TYPE_CHECKING:
    #     def __init__(self, **kw: Any):
    #         ...
NativeForEmulated
python
huggingface__transformers
src/transformers/models/phimoe/modeling_phimoe.py
{ "start": 24200, "end": 25854 }
class ____(GradientCheckpointingLayer):
    def __init__(self, config: PhimoeConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = PhimoeAttention(config, layer_idx)

        self.mlp = PhimoeSparseMoeBlock(config)
        self.input_layernorm = PhimoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = PhimoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
PhimoeDecoderLayer
python
huggingface__transformers
src/transformers/models/colpali/modular_colpali.py
{ "start": 1467, "end": 15474 }
class ____(PaliGemmaProcessor): r""" Constructs a ColPali processor which wraps a PaliGemmaProcessor and special methods to process images and queries, as well as to compute the late-interaction retrieval score. [`ColPaliProcessor`] offers all the functionalities of [`PaliGemmaProcessor`]. See the [`~PaliGemmaProcessor.__call__`] for more information. Args: image_processor ([`SiglipImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. visual_prompt_prefix (`str`, *optional*, defaults to `"Describe the image."`): A string that gets tokenized and prepended to the image tokens. query_prefix (`str`, *optional*, defaults to `"Question: "`): A prefix to be used for the query. """ def __init__( self, image_processor=None, tokenizer=None, chat_template=None, visual_prompt_prefix: str = "Describe the image.", query_prefix: str = "Question: ", ): self.visual_prompt_prefix = visual_prompt_prefix self.query_prefix = query_prefix super().__init__(image_processor=image_processor, tokenizer=tokenizer, chat_template=chat_template) @property def query_augmentation_token(self) -> str: """ Return the query augmentation token. Query augmentation buffers are used as reasoning buffers during inference. """ return self.tokenizer.pad_token def __call__( self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, **kwargs: Unpack[ColPaliProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model either (1) one or several texts, either (2) one or several image(s). This method is a custom wrapper around the PaliGemmaProcessor's [`~PaliGemmaProcessor.__call__`] method adapted for the ColPali model. It cannot process both text and images at the same time. When preparing the text(s), this method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`]. When preparing the image(s), this method forwards the `images` and `kwargs` arguments to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`]. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. 
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs( ColPaliProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) suffix = output_kwargs["text_kwargs"].pop("suffix", None) return_token_type_ids = True if text is None and images is None: raise ValueError("Either text or images must be provided") if text is not None and images is not None: raise ValueError("Only one of text or images can be processed at a time") if images is not None: images = self.image_processor.fetch_images(images) images = make_flat_list_of_images(images) texts_doc = [self.visual_prompt_prefix] * len(images) images = [image.convert("RGB") for image in images] input_strings = [ build_string_from_input( prompt=prompt, bos_token=self.tokenizer.bos_token, image_seq_len=self.image_seq_length, image_token=IMAGE_TOKEN, num_images=len(image_list) if isinstance(image_list, list) else 1, ) for prompt, image_list in zip(texts_doc, images) ] pixel_values = self.image_processor(images, **output_kwargs["images_kwargs"])["pixel_values"] # max_length has to account for the image tokens if output_kwargs["text_kwargs"].get("max_length", None) is not None: output_kwargs["text_kwargs"]["max_length"] += self.image_seq_length inputs = self.tokenizer( input_strings, return_token_type_ids=return_token_type_ids, **output_kwargs["text_kwargs"], ) return_data = {**inputs, "pixel_values": pixel_values} if return_token_type_ids: labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100) return_data.update({"labels": labels}) return BatchFeature(data=return_data) elif text is not None: if isinstance(text, str): text = [text] elif not (isinstance(text, list) and isinstance(text[0], str)): raise ValueError("Text must be a string or a list of strings") if suffix is None: suffix = self.query_augmentation_token * 10 texts_query: list[str] = [] for query in text: query = self.tokenizer.bos_token + self.query_prefix + query + suffix + "\n" texts_query.append(query) output_kwargs["text_kwargs"]["max_length"] = output_kwargs["text_kwargs"].get("max_length", 50) batch_query = self.tokenizer( texts_query, return_token_type_ids=return_token_type_ids, **output_kwargs["text_kwargs"], ) return batch_query def process_images( self, images: Optional[ImageInput] = None, **kwargs: Unpack[ColPaliProcessorKwargs], ) -> BatchFeature: """ Prepare for the model one or several image(s). This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `images` and `kwargs` arguments to the image processor. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. 
Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ return self.__call__(images=images, **kwargs) def process_queries( self, text: Union[TextInput, list[TextInput]], **kwargs: Unpack[ColPaliProcessorKwargs], ) -> BatchFeature: """ Prepare for the model one or several texts. This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `text` and `kwargs` arguments to the tokenizer. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). """ return self.__call__(text=text, **kwargs) def score_retrieval( self, query_embeddings: Union["torch.Tensor", list["torch.Tensor"]], passage_embeddings: Union["torch.Tensor", list["torch.Tensor"]], batch_size: int = 128, output_dtype: Optional["torch.dtype"] = None, output_device: Union["torch.device", str] = "cpu", ) -> "torch.Tensor": """ Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector query embeddings (`qs`) and passage embeddings (`ps`). For ColPali, a passage is the image of a document page. Because the embedding tensors are multi-vector and can thus have different shapes, they should be fed as: (1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim) (2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually obtained by padding the list of tensors. Args: query_embeddings (`Union[torch.Tensor, list[torch.Tensor]`): Query embeddings. passage_embeddings (`Union[torch.Tensor, list[torch.Tensor]`): Passage embeddings. batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores. output_dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): The dtype of the output tensor. If `None`, the dtype of the input embeddings is used. output_device (`torch.device` or `str`, *optional*, defaults to "cpu"): The device of the output tensor. Returns: `torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score tensor is saved on the "cpu" device. 
""" if len(query_embeddings) == 0: raise ValueError("No queries provided") if len(passage_embeddings) == 0: raise ValueError("No passages provided") if query_embeddings[0].device != passage_embeddings[0].device: raise ValueError("Queries and passages must be on the same device") if query_embeddings[0].dtype != passage_embeddings[0].dtype: raise ValueError("Queries and passages must have the same dtype") if output_dtype is None: output_dtype = query_embeddings[0].dtype scores: list[torch.Tensor] = [] for i in range(0, len(query_embeddings), batch_size): batch_scores: list[torch.Tensor] = [] batch_queries = torch.nn.utils.rnn.pad_sequence( query_embeddings[i : i + batch_size], batch_first=True, padding_value=0 ) for j in range(0, len(passage_embeddings), batch_size): batch_passages = torch.nn.utils.rnn.pad_sequence( passage_embeddings[j : j + batch_size], batch_first=True, padding_value=0 ) batch_scores.append( torch.einsum("bnd,csd->bcns", batch_queries, batch_passages).max(dim=3)[0].sum(dim=2) ) scores.append(torch.cat(batch_scores, dim=1).to(output_dtype).to(output_device)) return torch.cat(scores, dim=0) __all__ = [ "ColPaliProcessor", ]
ColPaliProcessor
python
walkccc__LeetCode
solutions/1636. Sort Array by Increasing Frequency/1636.py
{ "start": 222, "end": 555 }
class ____:
    def frequencySort(self, nums: list[int]) -> list[int]:
        ans = []
        heap = []

        for num, freq in collections.Counter(nums).items():
            heapq.heappush(heap, T(num, freq))

        while len(heap) > 0:
            num = heap[0].num
            freq = heapq.heappop(heap).freq
            ans.extend([num] * freq)

        return ans
Solution
python
pytorch__pytorch
benchmarks/operator_benchmark/pt/addmm_test.py
{ "start": 2300, "end": 4614 }
class ____(op_bench.TorchBenchmarkBase): def init(self, B, M, N, K, device, dtype): self.inputs = { "input_one": torch.rand( (M, N), device=device, requires_grad=self.auto_set(), dtype=dtype ), "batch1": torch.rand( (B, M, K), device=device, requires_grad=self.auto_set(), dtype=dtype ), "batch2": torch.rand( ( B, K, N, ), device=device, requires_grad=self.auto_set(), dtype=dtype, ), } self.set_module_name("addbmm") def forward(self, input_one, batch1, batch2): return torch.addbmm(input_one, batch1, batch2) def get_memory_traffic_bytes(self): """Override for addbmm: input + sum(batch1[i] @ batch2[i]) -> (M, N) addbmm computes: input_one (M, N) + sum over batch of batch1 (B, M, K) @ batch2 (B, K, N) Memory traffic: read(M*N + B*M*K + B*K*N) + write(M*N) """ input_one = self.inputs["input_one"] batch1 = self.inputs["batch1"] batch2 = self.inputs["batch2"] M, N = input_one.shape B, M_check, K = batch1.shape B_check, K_check, N_check = batch2.shape assert M == M_check and N == N_check and B == B_check and K == K_check, ( "Dimensions must match" ) bytes_per_element = input_one.element_size() total_elements = M * N + B * M * K + B * K * N + M * N return total_elements * bytes_per_element addbmm_long_configs = op_bench.cross_product_configs( B=[8, 32], M=[256, 1024], N=[256, 1024], K=[64, 128], device=["cuda"], dtype=[torch.float16, torch.bfloat16, torch.float32], tags=["long"], ) addbmm_short_configs = op_bench.cross_product_configs( B=[1, 8], M=[8, 128], N=[32, 64], K=[256, 512], device=["cpu", "cuda"], dtype=[torch.float16, torch.bfloat16, torch.float32], tags=["short"], ) op_bench.generate_pt_test(addbmm_long_configs + addbmm_short_configs, AddbmmBenchmark) op_bench.generate_pt_gradient_test(addbmm_long_configs, AddbmmBenchmark) if __name__ == "__main__": op_bench.benchmark_runner.main()
AddbmmBenchmark
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/context/invocation.py
{ "start": 2924, "end": 5087 }
class ____:
    """Base class for any direct invocation execution contexts. Each type of execution context
    (ex. OpExecutionContext, AssetExecutionContext) needs to have a variant for direct invocation.
    Those direct invocation contexts have some methods that are not available until the context is
    bound to a particular op/asset. The "bound" properties are held in PerInvocationProperties.
    There are also some properties that are specific to a particular execution of an op/asset,
    these properties are held in DirectExecutionProperties. Direct invocation contexts must be
    able to be bound and unbound from a particular op/asset. Additionally, there are some methods
    that all direct invocation contexts must implement so that the will be usable in the execution
    code path.
    """

    @abstractmethod
    def bind(
        self,
        op_def: OpDefinition,
        pending_invocation: Optional[PendingNodeInvocation[OpDefinition]],
        assets_def: Optional[AssetsDefinition],
        config_from_args: Optional[Mapping[str, Any]],
        resources_from_args: Optional[Mapping[str, Any]],
    ):
        """Subclasses of BaseDirectExecutionContext must implement bind."""

    @abstractmethod
    def unbind(self):
        """Subclasses of BaseDirectExecutionContext must implement unbind."""

    @property
    @abstractmethod
    def per_invocation_properties(self) -> "PerInvocationProperties":
        """Subclasses of BaseDirectExecutionContext must contain a PerInvocationProperties object."""

    @property
    @abstractmethod
    def execution_properties(self) -> "DirectExecutionProperties":
        """Subclasses of BaseDirectExecutionContext must contain a DirectExecutionProperties object."""

    @abstractmethod
    def for_type(self, dagster_type: DagsterType) -> TypeCheckContext:
        """Subclasses of BaseDirectExecutionContext must implement for_type."""
        pass

    @abstractmethod
    def observe_output(self, output_name: str, mapping_key: Optional[str] = None) -> None:
        """Subclasses of BaseDirectExecutionContext must implement observe_output."""
        pass
BaseDirectExecutionContext
python
google__pytype
pytype/pyc/opcodes.py
{ "start": 8226, "end": 8424 }
class ____(Opcode):
    _FLAGS = HAS_JUNKNOWN
    __slots__ = ()


# NOTE: GET_AWAITABLE gains an argument in Python 3.11, but adding the argument
# here causes tests in earlier versions to fail.
YIELD_FROM
python
ray-project__ray
python/ray/dag/compiled_dag_node.py
{ "start": 15138, "end": 16138 }
class ____:
    """Represents an input to an ExecutableTask.

    Args:
        input_variant: either an unresolved input (when type is ChannelInterface)
            , or a resolved input value (when type is Any)
        channel_idx: if input_variant is an unresolved input, this is the index
            into the input channels list.
    """

    def __init__(
        self,
        input_variant: Union[ChannelInterface, Any],
        channel_idx: Optional[int],
    ):
        self.input_variant = input_variant
        self.channel_idx = channel_idx

    def resolve(self, channel_results: Any) -> Any:
        """
        Resolve the input value from the channel results.

        Args:
            channel_results: The results from reading the input channels.
        """
        if isinstance(self.input_variant, ChannelInterface):
            value = channel_results[self.channel_idx]
        else:
            value = self.input_variant
        return value


@DeveloperAPI
_ExecutableTaskInput
python
psf__requests
tests/test_utils.py
{ "start": 9555, "end": 10512 }
class ____:
    @pytest.mark.parametrize(
        "path",
        (
            "/",
            __file__,
            pytest.__file__,
            "/etc/invalid/location",
        ),
    )
    def test_unzipped_paths_unchanged(self, path):
        assert path == extract_zipped_paths(path)

    def test_zipped_paths_extracted(self, tmpdir):
        zipped_py = tmpdir.join("test.zip")
        with zipfile.ZipFile(zipped_py.strpath, "w") as f:
            f.write(__file__)

        _, name = os.path.splitdrive(__file__)
        zipped_path = os.path.join(zipped_py.strpath, name.lstrip(r"\/"))
        extracted_path = extract_zipped_paths(zipped_path)

        assert extracted_path != zipped_path
        assert os.path.exists(extracted_path)
        assert filecmp.cmp(extracted_path, __file__)

    def test_invalid_unc_path(self):
        path = r"\\localhost\invalid\location"
        assert extract_zipped_paths(path) == path
TestExtractZippedPaths
python
dask__dask
dask/dataframe/tseries/resample.py
{ "start": 6326, "end": 6382 }
class ____(ResampleReduction):
    how = "std"
ResampleStd
python
doocs__leetcode
lcof/面试题58 - I. 翻转单词顺序/Solution.py
{ "start": 0, "end": 400 }
class ____:
    def reverseWords(self, s: str) -> str:
        words = []
        i, n = 0, len(s)
        while i < n:
            while i < n and s[i] == " ":
                i += 1
            if i < n:
                j = i
                while j < n and s[j] != " ":
                    j += 1
                words.append(s[i:j])
                i = j
        return " ".join(words[::-1])
Solution
python
doocs__leetcode
solution/1700-1799/1702.Maximum Binary String After Change/Solution.py
{ "start": 0, "end": 245 }
class ____:
    def maximumBinaryString(self, binary: str) -> str:
        k = binary.find('0')
        if k == -1:
            return binary
        k += binary[k + 1 :].count('0')
        return '1' * k + '0' + '1' * (len(binary) - k - 1)
Solution
python
davidhalter__jedi
jedi/inference/gradual/typing.py
{ "start": 8912, "end": 10166 }
class ____(LazyValueWrapper):
    def __init__(self, parent_context, origin_tree_name, actual):
        self.inference_state = parent_context.inference_state
        self.parent_context = parent_context
        self._origin_tree_name = origin_tree_name
        self._actual = actual  # e.g. builtins.list

    @property
    def name(self):
        return ValueName(self, self._origin_tree_name)

    def py__name__(self):
        return self.name.string_name

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._actual)

    def _get_wrapped_value(self):
        module_name, class_name = self._actual.split('.')

        # TODO use inference_state.import_module?
        from jedi.inference.imports import Importer
        module, = Importer(
            self.inference_state, [module_name], self.inference_state.builtins_module
        ).follow()
        classes = module.py__getattribute__(class_name)
        # There should only be one, because it's code that we control.
        assert len(classes) == 1, classes
        cls = next(iter(classes))
        return cls

    def gather_annotation_classes(self):
        return ValueSet([self._get_wrapped_value()])

    def get_signatures(self):
        return []
TypeAlias
python
apache__airflow
airflow-core/src/airflow/jobs/dag_processor_job_runner.py
{ "start": 1184, "end": 2366 }
class ____(BaseJobRunner, LoggingMixin):
    """
    DagProcessorJobRunner is a job runner that runs a DagFileProcessorManager processor.

    :param job: Job instance to use
    :param processor: DagFileProcessorManager instance to use
    """

    job_type = "DagProcessorJob"

    def __init__(
        self,
        job: Job,
        processor: DagFileProcessorManager,
        *args,
        **kwargs,
    ):
        super().__init__(job)
        self.processor = processor
        self.processor.heartbeat = lambda: perform_heartbeat(
            job=self.job,
            heartbeat_callback=self.heartbeat_callback,
            only_if_necessary=True,
        )

    def _execute(self) -> int | None:
        self.log.info("Starting the Dag Processor Job")
        try:
            self.processor.run()
        except Exception:
            self.log.exception("Exception when executing DagProcessorJob")
            raise
        finally:
            self.processor.terminate()
            self.processor.end()
        return None

    def heartbeat_callback(self, session: Session | None = None) -> None:
        Stats.incr("dag_processor_heartbeat", 1, 1)
DagProcessorJobRunner
python
pytorch__pytorch
torch/profiler/_memory_profiler.py
{ "start": 1259, "end": 1517 }
class ____(enum.Enum):
    PREEXISTING = enum.auto()
    CREATE = enum.auto()
    INCREMENT_VERSION = enum.auto()
    DESTROY = enum.auto()


_ACTION_TO_INDEX = {i: i.value for i in Action}


@dataclasses.dataclass(eq=True, unsafe_hash=False, frozen=True)
Action
python
pytorch__pytorch
test/dynamo/test_functions.py
{ "start": 85475, "end": 86699 }
class ____(torch.nn.Module): def forward(self, s9: "Sym(s9)", L_lambda0_keywords_y_: "f32[s9, s9]"): l_lambda0_keywords_y_ = L_lambda0_keywords_y_ mul: "f32[s9, s9]" = l_lambda0_keywords_y_ * l_lambda0_keywords_y_ add: "f32[s9, s9]" = l_lambda0_keywords_y_ + l_lambda0_keywords_y_; l_lambda0_keywords_y_ = None mul_1: "f32[s9, s9]" = torch.mul(mul, add); mul = add = None return (mul_1,) """, ) def test_partials_graph_break_reconstruct_args_and_kwargs(self): def fn(udf_mul_0, x): lambda0 = functools.partial(udf_mul_0, x, 4, z=x) lambda1 = functools.partial(udf_mul_0, 4, z=x) return torch.mul(lambda0(), lambda1(5)) backend = EagerAndRecordGraphs() cnts = CompileCounterWithBackend(backend) x = torch.randn(2, 2) dynamo_result = torch.compile(fn, backend=cnts)(udf_mul2, x) eager_result = fn(udf_mul2, x) self.assertEqual(eager_result, dynamo_result) if torch._dynamo.config.assume_static_by_default: self.assertExpectedInline( normalize_gm(backend.graphs[0].print_readable(print_output=False)), """\
GraphModule
python
Textualize__textual
tests/test_app_focus_blur.py
{ "start": 186, "end": 2840 }
class ____(App[None]): AUTO_FOCUS = "#input-4" def compose(self) -> ComposeResult: for n in range(10): yield Input(id=f"input-{n}") async def test_app_blur() -> None: """Test that AppBlur removes focus.""" async with FocusBlurApp().run_test() as pilot: assert pilot.app.focused is not None assert pilot.app.focused.id == "input-4" pilot.app.post_message(AppBlur()) await pilot.pause() assert pilot.app.focused is None async def test_app_focus_restores_focus() -> None: """Test that AppFocus restores the correct focus.""" async with FocusBlurApp().run_test() as pilot: assert pilot.app.focused is not None assert pilot.app.focused.id == "input-4" pilot.app.post_message(AppBlur()) await pilot.pause() assert pilot.app.focused is None pilot.app.post_message(AppFocus()) await pilot.pause() assert pilot.app.focused is not None assert pilot.app.focused.id == "input-4" async def test_app_focus_restores_none_focus() -> None: """Test that AppFocus doesn't set focus if nothing was focused.""" async with FocusBlurApp().run_test() as pilot: pilot.app.screen.focused = None pilot.app.post_message(AppBlur()) await pilot.pause() assert pilot.app.focused is None pilot.app.post_message(AppFocus()) await pilot.pause() assert pilot.app.focused is None async def test_app_focus_handles_missing_widget() -> None: """Test that AppFocus works even when the last-focused widget has gone away.""" async with FocusBlurApp().run_test() as pilot: assert pilot.app.focused is not None assert pilot.app.focused.id == "input-4" pilot.app.post_message(AppBlur()) await pilot.pause() assert pilot.app.focused is None await pilot.app.query_one("#input-4").remove() pilot.app.post_message(AppFocus()) await pilot.pause() assert pilot.app.focused is None async def test_app_focus_defers_to_new_focus() -> None: """Test that AppFocus doesn't undo a fresh focus done while the app is in AppBlur state.""" async with FocusBlurApp().run_test() as pilot: assert pilot.app.focused is not None assert pilot.app.focused.id == "input-4" pilot.app.post_message(AppBlur()) await pilot.pause() assert pilot.app.focused is None pilot.app.query_one("#input-1").focus() await pilot.pause() pilot.app.post_message(AppFocus()) await pilot.pause() assert pilot.app.focused.id == "input-1"
FocusBlurApp
python
getsentry__sentry
src/sentry/issues/endpoints/related_issues.py
{ "start": 913, "end": 2556 }
class ____(GroupEndpoint): owner = ApiOwner.ISSUES publish_status = {"GET": ApiPublishStatus.EXPERIMENTAL} enforce_rate_limit = True rate_limits = RateLimitConfig( limit_overrides={ "GET": { RateLimitCategory.IP: RateLimit(limit=15, window=5), RateLimitCategory.USER: RateLimit(limit=15, window=5), RateLimitCategory.ORGANIZATION: RateLimit(limit=15, window=1), } } ) # We get a Group object since the endpoint is /issues/{issue_id}/related-issues def get(self, request: Request, group: Group) -> Response: """ Retrieve related issues for a Group ```````````````````````````````````` Related issues can be based on the same root cause or trace connected. :pparam Request request: the request object :pparam Group group: the group object """ serializer = RequestSerializer(data=request.query_params) if not serializer.is_valid(): return Response(serializer.errors, status=400) _data = serializer.validated_data related_type = _data["type"] try: data, meta = ( same_root_cause_analysis(group) if related_type == "same_root_cause" else trace_connected_analysis( group, event_id=_data.get("event_id"), project_id=_data.get("project_id") ) ) return Response({"type": related_type, "data": data, "meta": meta}) except AssertionError: return Response({}, status=400)
RelatedIssuesEndpoint
python
kamyu104__LeetCode-Solutions
Python/number-of-excellent-pairs.py
{ "start": 128, "end": 591 }
class ____(object):
    def countExcellentPairs(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        def popcount(x):
            return bin(x)[2:].count('1')

        cnt = collections.Counter(popcount(x) for x in set(nums))
        return sum(cnt[i]*cnt[j] for i in cnt.iterkeys() for j in cnt.iterkeys() if i+j >= k)


# Time: O(nlogn)
# Space: O(n)
# bit manipulation, sort, two pointers
Solution
python
kamyu104__LeetCode-Solutions
Python/find-array-given-subset-sums.py
{ "start": 5679, "end": 7009 }
class ____(object):
    def recoverArray(self, n, sums):
        """
        :type n: int
        :type sums: List[int]
        :rtype: List[int]
        """
        dp = {k: v for k, v in collections.Counter(sums).iteritems()}
        sorted_sums = sorted(dp.iterkeys())  # Time: O(2^n * log(2^n)) = O(n * 2^n)
        shift = 0
        result = []
        for _ in xrange(n):  # log(2^n) times, each time costs O(2^(n-len(result))), Total Time: O(2^n)
            new_dp = {}
            new_sorted_sums = []
            new_shift = sorted_sums[0]-sorted_sums[1] if dp[sorted_sums[0]] == 1 else 0
            assert(new_shift <= 0)
            for x in sorted_sums:
                if not dp[x]:
                    continue
                dp[x-new_shift] -= dp[x] if new_shift else dp[x]//2
                new_dp[x-new_shift] = dp[x]
                new_sorted_sums.append(x-new_shift)
            dp = new_dp
            sorted_sums = new_sorted_sums
            if shift in dp:  # contain 0, choose this side
                result.append(new_shift)
            else:  # contain no 0, choose another side and shift 0 offset
                result.append(-new_shift)
                shift -= new_shift
        return result


# Time: O(n * 2^n), len(sums) = 2^n
# Space: O(2^n)
import collections


# runtime: 1720 ms
Solution4
python
weaviate__weaviate-python-client
weaviate/backup/backup.py
{ "start": 1998, "end": 2215 }
class ____(BaseModel):
    """Return type of the backup status methods."""

    error: Optional[str] = Field(default=None)
    status: BackupStatus
    path: str
    backup_id: str = Field(alias="id")
BackupStatusReturn
python
bokeh__bokeh
src/bokeh/client/states.py
{ "start": 1815, "end": 1928 }
class ____(Enum):
    NO_ERROR = auto()
    HTTP_ERROR = auto()
    NETWORK_ERROR = auto()
ErrorReason
python
huggingface__transformers
src/transformers/models/timesformer/modeling_timesformer.py
{ "start": 9069, "end": 9711 }
class ____(nn.Module):
    """
    The residual connection is defined in TimesformerLayer instead of here (as is the case with other models), due to
    the layernorm applied before each block.
    """

    def __init__(self, config: TimesformerConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states
TimesformerSelfOutput
python
doocs__leetcode
solution/2700-2799/2787.Ways to Express an Integer as Sum of Powers/Solution.py
{ "start": 0, "end": 408 }
class ____:
    def numberOfWays(self, n: int, x: int) -> int:
        mod = 10**9 + 7
        f = [[0] * (n + 1) for _ in range(n + 1)]
        f[0][0] = 1
        for i in range(1, n + 1):
            k = pow(i, x)
            for j in range(n + 1):
                f[i][j] = f[i - 1][j]
                if k <= j:
                    f[i][j] = (f[i][j] + f[i - 1][j - k]) % mod
        return f[n][n]
Solution
python
mlflow__mlflow
mlflow/types/responses_helpers.py
{ "start": 6101, "end": 6535 }
class ____(BaseModel):
    tool_choice: str | ToolChoiceFunction | None = None

    @model_validator(mode="after")
    def check_tool_choice(self) -> "ToolChoice":
        if (
            self.tool_choice
            and isinstance(self.tool_choice, str)
            and self.tool_choice not in {"none", "auto", "required"}
        ):
            warnings.warn(f"Invalid tool choice: {self.tool_choice}")
        return self
ToolChoice
python
ansible__ansible
lib/ansible/plugins/action/set_fact.py
{ "start": 945, "end": 2186 }
class ____(ActionBase):

    TRANSFERS_FILES = False
    _requires_connection = False

    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        facts = {}
        cacheable = boolean(self._task.args.pop('cacheable', False))

        if self._task.args:
            for (k, v) in self._task.args.items():
                k = self._templar.template(k)  # a rare case where key templating is allowed; backward-compatibility for dynamic storage

                validate_variable_name(k)

                facts[k] = v
        else:
            raise AnsibleActionFail('No key/value pairs provided, at least one is required for this action to succeed')

        if facts:
            # just as _facts actions, we don't set changed=true as we are not modifying the actual host
            result['ansible_facts'] = facts
            result['_ansible_facts_cacheable'] = cacheable
        else:
            # this should not happen, but JIC we get here
            raise AnsibleActionFail('Unable to create any variables with provided arguments')

        return result
ActionModule
python
python-poetry__poetry
src/poetry/console/commands/env/info.py
{ "start": 288, "end": 2688 }
class ____(Command): name = "env info" description = "Displays information about the current environment." options: ClassVar[list[Option]] = [ option("path", "p", "Only display the environment's path."), option( "executable", "e", "Only display the environment's python executable path." ), ] def handle(self) -> int: from poetry.utils.env import EnvManager env = EnvManager(self.poetry).get() if self.option("path"): if not env.is_venv(): return 1 self.line(str(env.path)) return 0 if self.option("executable"): if not env.is_venv(): return 1 self.line(str(env.python)) return 0 self._display_complete_info(env) return 0 def _display_complete_info(self, env: Env) -> None: env_python_version = ".".join(str(s) for s in env.version_info[:3]) self.line("") self.line("<b>Virtualenv</b>") listing = [ f"<info>Python</info>: <comment>{env_python_version}</>", f"<info>Implementation</info>: <comment>{env.python_implementation}</>", ( "<info>Path</info>: " f" <comment>{env.path if env.is_venv() else 'NA'}</>" ), ( "<info>Executable</info>: " f" <comment>{env.python if env.is_venv() else 'NA'}</>" ), ] if env.is_venv(): listing.append( "<info>Valid</info>: " f" <{'comment' if env.is_sane() else 'error'}>{env.is_sane()}</>" ) self.line("\n".join(listing)) self.line("") base_env = env.parent_env python = ".".join(str(v) for v in base_env.version_info[:3]) self.line("<b>Base</b>") self.line( "\n".join( [ f"<info>Platform</info>: <comment>{env.platform}</>", f"<info>OS</info>: <comment>{env.os}</>", f"<info>Python</info>: <comment>{python}</>", f"<info>Path</info>: <comment>{base_env.path}</>", f"<info>Executable</info>: <comment>{base_env.python}</>", ] ) )
EnvInfoCommand
python
pypa__pip
src/pip/_vendor/rich/layout.py
{ "start": 883, "end": 947 }
class ____(Exception):
    """Layout related error."""
LayoutError
python
astropy__astropy
astropy/time/formats.py
{ "start": 3359, "end": 17488 }
class ____: """ Base class for time representations. Parameters ---------- val1 : numpy ndarray, list, number, str, or bytes Values to initialize the time or times. Bytes are decoded as ascii. Quantities with time units are allowed for formats where the interpretation is unambiguous. val2 : numpy ndarray, list, or number; optional Value(s) to initialize the time or times. Only used for numerical input, to help preserve precision. scale : str Time scale of input value(s) precision : int Precision for seconds as floating point in_subfmt : str Select subformat for inputting string times out_subfmt : str Select subformat for outputting string times from_jd : bool If true then val1, val2 are jd1, jd2 """ _default_scale = "utc" # As of astropy 0.4 _default_precision = 3 _min_precision = 0 _max_precision = 9 subfmts = () _registry = TIME_FORMATS # Check that numeric inputs are finite (not nan or inf). This is overridden in # subclasses in which nan and inf are valid inputs. _check_finite = True def __init__( self, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False ): self.scale = scale # validation of scale done later with _check_scale self.precision = precision self.in_subfmt = in_subfmt self.out_subfmt = out_subfmt self._jd1, self._jd2 = None, None if from_jd: self.jd1 = val1 self.jd2 = val2 else: val1, val2 = self._check_val_type(val1, val2) self.set_jds(val1, val2) def __init_subclass__(cls, **kwargs): # Register time formats that define a name, but leave out astropy_time since # it is not a user-accessible format and is only used for initialization into # a different format. if "name" in cls.__dict__ and cls.name != "astropy_time": # FIXME: check here that we're not introducing a collision with # an existing method or attribute; problem is it could be either # astropy.time.Time or astropy.time.TimeDelta, and at the point # where this is run neither of those classes have necessarily been # constructed yet. if "value" in cls.__dict__ and not hasattr(cls.value, "fget"): raise ValueError("If defined, 'value' must be a property") cls._registry[cls.name] = cls # If this class defines its own subfmts, preprocess the definitions. if "subfmts" in cls.__dict__: cls.subfmts = _regexify_subfmts(cls.subfmts) super().__init_subclass__(**kwargs) @classmethod def _get_allowed_subfmt(cls, subfmt): """Get an allowed subfmt for this class, either the input ``subfmt`` if this is valid or '*' as a default. This method gets used in situations where the format of an existing Time object is changing and so the out_ or in_subfmt may need to be coerced to the default '*' if that ``subfmt`` is no longer valid. """ try: cls._select_subfmts(subfmt) except ValueError: subfmt = "*" return subfmt @property def in_subfmt(self): return self._in_subfmt @in_subfmt.setter def in_subfmt(self, subfmt): # Validate subfmt value for this class, raises ValueError if not. self._select_subfmts(subfmt) self._in_subfmt = subfmt @property def out_subfmt(self): return self._out_subfmt @out_subfmt.setter def out_subfmt(self, subfmt): # Validate subfmt value for this class, raises ValueError if not. 
self._select_subfmts(subfmt) self._out_subfmt = subfmt @property def jd1(self): return self._jd1 @jd1.setter def jd1(self, jd1): self._jd1 = _validate_jd_for_storage(jd1) if self._jd2 is not None: self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2) @property def jd2(self): return self._jd2 @jd2.setter def jd2(self, jd2): self._jd2 = _validate_jd_for_storage(jd2) if self._jd1 is not None: self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2) @classmethod @functools.cache def fill_value(cls, subfmt): """ Return a value corresponding to J2000 (2000-01-01 12:00:00) in this format. This is used as a fill value for masked arrays to ensure that any ERFA operations on the masked array will not fail due to the masked value. """ tm = Time(2451545.0, format="jd", scale="utc") return tm.to_value(format=cls.name, subfmt=subfmt) def __len__(self): return len(self.jd1) @property def scale(self): """Time scale.""" self._scale = self._check_scale(self._scale) return self._scale @scale.setter def scale(self, val): self._scale = val @property def precision(self): return self._precision @precision.setter def precision(self, val): if val is None: val = self._default_precision # Verify precision is 0-9 (inclusive) if not ( isinstance(val, int) and self._min_precision <= val <= self._max_precision ): raise ValueError( "precision attribute must be an int between " f"{self._min_precision} and {self._max_precision}" ) self._precision = val def _check_finite_vals(self, val1, val2): """A helper function to TimeFormat._check_val_type that's meant to be optionally bypassed in subclasses that have _check_finite=False """ # val1 cannot contain nan, but val2 can contain nan isfinite1 = np.isfinite(val1) if val1.size > 1: # Calling .all() on a scalar is surprisingly slow isfinite1 = ( isfinite1.all() ) # Note: arr.all() about 3x faster than np.all(arr) elif val1.size == 0: isfinite1 = False ok1 = ( val1.dtype.kind == "f" and val1.dtype.itemsize >= 8 and isfinite1 ) or val1.size == 0 ok2 = ( val2 is None or ( val2.dtype.kind == "f" and val2.dtype.itemsize >= 8 and not np.any(np.isinf(val2)) ) or val2.size == 0 ) if not (ok1 and ok2): raise TypeError( f"Input values for {self.name} class must be finite doubles" ) def _check_val_type(self, val1, val2): """Input value validation, typically overridden by derived classes.""" if self.__class__._check_finite: self._check_finite_vals(val1, val2) if getattr(val1, "unit", None) is not None: # Convert any quantity-likes to days first, attempting to be # careful with the conversion, so that, e.g., large numbers of # seconds get converted without losing precision because # 1/86400 is not exactly representable as a float. val1 = u.Quantity(val1, copy=False) if val2 is not None: val2 = u.Quantity(val2, copy=False) try: val1, val2 = quantity_day_frac(val1, val2) except u.UnitsError: raise u.UnitConversionError( "only quantities with time units can be " "used to instantiate Time instances." ) # We now have days, but the format may expect another unit. # On purpose, multiply with 1./day_unit because typically it is # 1./erfa.DAYSEC, and inverting it recovers the integer. # (This conversion will get undone in format's set_jds, hence # there may be room for optimizing this.) 
factor = 1.0 / getattr(self, "unit", 1.0) if factor != 1.0: val1, carry = two_product(val1, factor) carry += val2 * factor val1, val2 = two_sum(val1, carry) elif getattr(val2, "unit", None) is not None: raise TypeError("Cannot mix float and Quantity inputs") if val2 is None: val2 = np.array(0, dtype=val1.dtype) def asarray_or_scalar(val): """ Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray or a Python or numpy scalar. """ return val.view(np.ndarray) if isinstance(val, np.ndarray) else val return asarray_or_scalar(val1), asarray_or_scalar(val2) def _check_scale(self, scale): """ Return a validated scale value. If there is a class attribute 'scale' then that defines the default / required time scale for this format. In this case if a scale value was provided that needs to match the class default, otherwise return the class default. Otherwise just make sure that scale is in the allowed list of scales. Provide a different error message if `None` (no value) was supplied. """ if scale is None: scale = self._default_scale if scale not in TIME_SCALES: raise ScaleValueError( f"Scale value '{scale}' not in allowed values {TIME_SCALES}" ) return scale def set_jds(self, val1, val2): """ Set internal jd1 and jd2 from val1 and val2. Must be provided by derived classes. """ raise NotImplementedError def to_value(self, parent=None, out_subfmt=None): """ Return time representation from internal jd1 and jd2 in specified ``out_subfmt``. This is the base method that ignores ``parent`` and uses the ``value`` property to compute the output. This is done by temporarily setting ``self.out_subfmt`` and calling ``self.value``. This is required for legacy Format subclasses prior to astropy 4.0 New code should instead implement the value functionality in ``to_value()`` and then make the ``value`` property be a simple call to ``self.to_value()``. Parameters ---------- parent : object Parent `~astropy.time.Time` object associated with this `~astropy.time.TimeFormat` object out_subfmt : str or None Output subformt (use existing self.out_subfmt if `None`) Returns ------- value : numpy.array, numpy.ma.array Array or masked array of formatted time representation values """ # Get value via ``value`` property, overriding out_subfmt temporarily if needed. if out_subfmt is not None: out_subfmt_orig = self.out_subfmt try: self.out_subfmt = out_subfmt value = self.value finally: self.out_subfmt = out_subfmt_orig else: value = self.value return value @property def value(self): raise NotImplementedError @classmethod def _select_subfmts(cls, pattern): """ Return a list of subformats where name matches ``pattern`` using fnmatch. If no subformat matches pattern then a ValueError is raised. A special case is a format with no allowed subformats, i.e. subfmts=(), and pattern='*'. This is OK and happens when this method is used for validation of an out_subfmt. """ if not isinstance(pattern, str): raise ValueError("subfmt attribute must be a string") elif pattern == "*": return cls.subfmts subfmts = [x for x in cls.subfmts if fnmatch.fnmatchcase(x[0], pattern)] if len(subfmts) == 0: if len(cls.subfmts) == 0: raise ValueError(f"subformat not allowed for format {cls.name}") else: subfmt_names = [x[0] for x in cls.subfmts] raise ValueError( f"subformat {pattern!r} must match one of " f"{subfmt_names} for format {cls.name}" ) return subfmts @classmethod def _fill_masked_values(cls, val, val2, mask, in_subfmt): """Fill masked values with the fill value for this format. 
This also takes care of broadcasting the outputs to the correct shape. Parameters ---------- val : ndarray Array of values val2 : ndarray, None Array of second values (or None) mask : ndarray Mask array in_subfmt : str Input subformat Returns ------- val, val2 : ndarray Arrays with masked values filled with the fill value for this format. These are copies of the originals. """ if val2 is None: val, mask = np.broadcast_arrays(val, mask) else: val, val2, mask = np.broadcast_arrays(val, val2, mask) val2 = val2.copy() val2[mask] = np.zeros_like(val2, shape=()) # Fill value needs to comply with the specified input subformat. Usually this # is "*" for any matching input, but for a custom subformat the fill value # needs to be compatible with the specified subformat. fill_value = cls.fill_value(in_subfmt) # For string types ensure that the numpy string length is long enough to # hold the fill value for the specified subformat. if (val_kind := val.dtype.kind) in ("U", "S") and ( new_width := len(fill_value) ) > val.dtype.itemsize // (4 if val_kind == "U" else 1): val = val.astype(f"{val_kind}{new_width}") # Makes copy. else: val = val.copy() val[mask] = fill_value return val, val2
TimeFormat
python
getsentry__sentry
src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py
{ "start": 458, "end": 2100 }
class ____(Bias):
    """
    Boosts to 100% sample rate all the traces matching an active custom rule.
    """

    def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]:
        rules = CustomDynamicSamplingRule.get_project_rules(project)
        ret_val: list[PolymorphicRule] = []
        for rule in rules:
            try:
                validate_rule_condition(rule.condition)
            except ValueError:
                logger.exception(
                    "Custom rule with invalid condition found",
                    extra={"rule_id": rule.rule_id, "condition": rule.condition},
                )
                continue

            try:
                condition = cast(RuleCondition, orjson.loads(rule.condition))
                ret_val.append(
                    {
                        "samplingValue": {"type": "reservoir", "limit": rule.num_samples},
                        "type": "transaction",
                        "id": rule.external_rule_id,
                        "condition": condition,
                        "timeRange": {
                            "start": rule.start_date.strftime(CUSTOM_RULE_DATE_FORMAT),
                            "end": rule.end_date.strftime(CUSTOM_RULE_DATE_FORMAT),
                        },
                    }
                )
            except orjson.JSONDecodeError:
                logger.exception(
                    "Custom rule with invalid json found",
                    extra={"rule_id": rule.rule_id, "condition": rule.condition},
                )
                continue

        return ret_val
CustomRuleBias
python
mlflow__mlflow
mlflow/utils/lazy_load.py
{ "start": 80, "end": 1726 }
class ____(types.ModuleType):
    """Class for module lazy loading.

    This class helps lazily load modules at package level, which avoids pulling in large
    dependencies like `tensorflow` or `torch`. This class is mirrored from wandb's LazyLoader:
    https://github.com/wandb/wandb/blob/79b2d4b73e3a9e4488e503c3131ff74d151df689/wandb/sdk/lib/lazyloader.py#L9
    """

    def __init__(self, local_name, parent_module_globals, name):
        self._local_name = local_name
        self._parent_module_globals = parent_module_globals
        self._module = None

        super().__init__(str(name))

    def _load(self):
        """Load the module and insert it into the parent's globals."""
        if self._module:
            # If already loaded, return the loaded module.
            return self._module

        # Import the target module and insert it into the parent's namespace
        module = importlib.import_module(self.__name__)
        self._parent_module_globals[self._local_name] = module
        sys.modules[self._local_name] = module

        # Update this object's dict so that if someone keeps a reference to the `LazyLoader`,
        # lookups are efficient (`__getattr__` is only called on lookups that fail).
        self.__dict__.update(module.__dict__)

        return module

    def __getattr__(self, item):
        module = self._load()
        return getattr(module, item)

    def __dir__(self):
        module = self._load()
        return dir(module)

    def __repr__(self):
        if not self._module:
            return f"<module '{self.__name__} (Not loaded yet)'>"
        return repr(self._module)
LazyLoader
python
huggingface__transformers
tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py
{ "start": 7966, "end": 11046 }
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) test_missing_keys = False def setUp(self): self.model_tester = GPTNeoXJapaneseModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(config, input_ids, input_mask) def test_model_as_decoder(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_model_as_decoder_with_default_input_mask(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_decoder_model_past_large_inputs(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask) def test_model_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) @slow def test_generation(self): model_id = "abeja/gpt-neox-japanese-2.7b" prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] # fmt: skip EXPECTED_OUTPUTS = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id) model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id) predicted_outputs = [] for prompt in prompts: input_ids = tokenizer(prompt, return_tensors="pt").input_ids generated_ids = model.generate(input_ids, max_length=50) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) @unittest.skip("GPTNeoXJapanese applies bias to attention scores") def test_custom_4d_attention_mask(self): pass
GPTNeoXModelJapaneseTest
python
cython__cython
Cython/Compiler/Nodes.py
{ "start": 12223, "end": 14177 }
class ____(Node): """ Sets compiler directives for the children nodes """ # directives {string:value} A dictionary holding the right value for # *all* possible directives. # body Node child_attrs = ["body"] @classmethod def for_directives(cls, body, env, **directives): new_directives = Options.copy_inherited_directives(env.directives, **directives) return cls(body.pos, body=body, directives=new_directives, is_terminator=body.is_terminator) @classmethod def for_internal(cls, body, env): new_directives = Options.copy_for_internal(env.directives) return cls(body.pos, body=body, directives=new_directives, is_terminator=body.is_terminator) def analyse_declarations(self, env): old = env.directives env.directives = self.directives self.body.analyse_declarations(env) env.directives = old def analyse_expressions(self, env): old = env.directives env.directives = self.directives self.body = self.body.analyse_expressions(env) env.directives = old return self def generate_function_definitions(self, env, code): env_old = env.directives code_old = code.globalstate.directives code.globalstate.directives = self.directives self.body.generate_function_definitions(env, code) env.directives = env_old code.globalstate.directives = code_old def generate_execution_code(self, code): old = code.globalstate.directives code.globalstate.directives = self.directives self.body.generate_execution_code(code) code.globalstate.directives = old def annotate(self, code): old = code.globalstate.directives code.globalstate.directives = self.directives self.body.annotate(code) code.globalstate.directives = old
CompilerDirectivesNode
python
huggingface__transformers
src/transformers/models/mixtral/modular_mixtral.py
{ "start": 8316, "end": 9274 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.jitter_noise = config.router_jitter_noise
        self.gate = MixtralTopKRouter(config)
        self.experts = MixtralExperts(config)

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        if self.training and self.jitter_noise > 0:
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])

        top_k_weights, top_k_index = self.gate(hidden_states)
        hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
        hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return hidden_states
MixtralSparseMoeBlock
python
pennersr__django-allauth
tests/apps/socialaccount/providers/wahoo/tests.py
{ "start": 238, "end": 981 }
class ____(OAuth2TestsMixin, TestCase):
    provider_id = WahooProvider.id

    def get_mocked_response(self):
        # https://cloud-api.wahooligan.com/#users
        return MockedResponse(
            HTTPStatus.OK,
            """
            {
              "id": 60462,
              "height": "2.0",
              "weight": "80.0",
              "first": "Bob",
              "last": "Smith",
              "email": "sample@test-domain.com",
              "birth": "1980-10-02",
              "gender": 1,
              "created_at": "2018-10-23T15:38:23.000Z",
              "updated_at": "2018-10-24T20:46:40.000Z"
            }
            """,
        )

    def get_expected_to_str(self):
        return "sample@test-domain.com"
WahooTests
python
readthedocs__readthedocs.org
readthedocs/config/tests/test_validation.py
{ "start": 3067, "end": 3757 }
class ____:
    def test_it_accepts_unicode(self):
        result = validate_string("Unicöde")
        assert isinstance(result, str)

    def test_it_accepts_nonunicode(self):
        result = validate_string("Unicode")
        assert isinstance(result, str)

    def test_it_rejects_float(self):
        with raises(ConfigValidationError) as excinfo:
            validate_string(123.456)
        assert excinfo.value.message_id == ConfigValidationError.INVALID_STRING

    def test_it_rejects_none(self):
        with raises(ConfigValidationError) as excinfo:
            validate_string(None)
        assert excinfo.value.message_id == ConfigValidationError.INVALID_STRING
TestValidateString
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/array_ops_test.py
{ "start": 46265, "end": 47662 }
class ____(object): def __init__(self, test, x, tensor_type=dtypes.float32, use_resource=False): self.tensor_type = tensor_type self.test = test self._use_resource = use_resource self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype) # Give the value a non-zero imaginary component for complex types. if tensor_type.is_complex: self.x_np -= 1j * self.x_np self.x = constant_op.constant(self.x_np, dtype=tensor_type) def __setitem__(self, index, value): value = np.array(value).astype(self.tensor_type.as_numpy_dtype) # Give the value a non-zero imaginary component for complex types. if self.tensor_type.is_complex: value -= 1j * value config.set_soft_device_placement(True) with test_util.device(use_gpu=True): if self._use_resource: var = resource_variable_ops.ResourceVariable(self.x) else: var = variables.Variable(self.x) self.test.evaluate(var.initializer) val = self.test.evaluate(var[index].assign(value)) # val_copy is used to check that tf.compat.v1.assign works equivalently # to the assign method above. val_copy = self.test.evaluate(state_ops.assign(var[index], value)) valnp = np.copy(self.x_np) valnp[index] = np.array(value) self.test.assertAllEqual(val, valnp) self.test.assertAllEqual(val_copy, valnp)
StridedSliceAssignChecker
python
geekcomputers__Python
insta_monitering/insta_datafetcher.py
{ "start": 8503, "end": 9217 }
class ____(multiprocessing.Process): def __init__(self, user, tags, type, productId): try: multiprocessing.Process.__init__(self) self.user = user self.tags = tags self.type = type self.productId = productId except Exception as err: print(f"exception : {err}\n") print("errorthreadPorcess:>>", sys.exc_info()[1]) def run(self): try: hashtags( user=self.user, tags=self.tags, type=self.type, productId=self.productId ) except Exception as err: print(f"exception : {err}\n") print("error::run>>", sys.exc_info()[1])
theradPorcess
python
kamyu104__LeetCode-Solutions
Python/solve-the-equation.py
{ "start": 41, "end": 566 }
class ____(object): def solveEquation(self, equation): """ :type equation: str :rtype: str """ a, b, side = 0, 0, 1 for eq, sign, num, isx in re.findall('(=)|([-+]?)(\d*)(x?)', equation): if eq: side = -1 elif isx: a += side * int(sign + '1') * int(num or 1) elif num: b -= side * int(sign + num) return 'x=%d' % (b / a) if a else 'No solution' if b else 'Infinite solutions'
Solution
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py
{ "start": 6488, "end": 7798 }
class ____: @mock.patch(MANAGED_KAFKA_PATH.format("types.Cluster.to_dict")) @mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook")) def test_execute(self, mock_hook, to_dict_mock): op = ManagedKafkaUpdateClusterOperator( task_id=TASK_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, project_id=GCP_PROJECT, location=GCP_LOCATION, cluster_id=TEST_CLUSTER_ID, cluster=TEST_UPDATED_CLUSTER, update_mask=TEST_CLUSTER_UPDATE_MASK, request_id=None, retry=RETRY, timeout=TIMEOUT, metadata=METADATA, ) op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()}) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN) mock_hook.return_value.update_cluster.assert_called_once_with( project_id=GCP_PROJECT, location=GCP_LOCATION, cluster_id=TEST_CLUSTER_ID, cluster=TEST_UPDATED_CLUSTER, update_mask=TEST_CLUSTER_UPDATE_MASK, request_id=None, retry=RETRY, timeout=TIMEOUT, metadata=METADATA, )
TestManagedKafkaUpdateClusterOperator
python
huggingface__transformers
src/transformers/models/swin2sr/modeling_swin2sr.py
{ "start": 33714, "end": 35092 }
class ____(nn.Module): """Upsample module. Args: scale (`int`): Scale factor. Supported scales: 2^n and 3. num_features (`int`): Channel number of intermediate features. """ def __init__(self, scale, num_features): super().__init__() self.scale = scale if (scale & (scale - 1)) == 0: # scale = 2^n for i in range(int(math.log2(scale))): self.add_module(f"convolution_{i}", nn.Conv2d(num_features, 4 * num_features, 3, 1, 1)) self.add_module(f"pixelshuffle_{i}", nn.PixelShuffle(2)) elif scale == 3: self.convolution = nn.Conv2d(num_features, 9 * num_features, 3, 1, 1) self.pixelshuffle = nn.PixelShuffle(3) else: raise ValueError(f"Scale {scale} is not supported. Supported scales: 2^n and 3.") def forward(self, hidden_state): if (self.scale & (self.scale - 1)) == 0: for i in range(int(math.log2(self.scale))): hidden_state = self.__getattr__(f"convolution_{i}")(hidden_state) hidden_state = self.__getattr__(f"pixelshuffle_{i}")(hidden_state) elif self.scale == 3: hidden_state = self.convolution(hidden_state) hidden_state = self.pixelshuffle(hidden_state) return hidden_state
Upsample
python
google__pytype
pytype/abstract/_singletons.py
{ "start": 6457, "end": 7948 }
class ____(_base.BaseValue): """A Singleton class must only be instantiated once. This is essentially an ABC for Unsolvable, Empty, and others. """ # TODO: b/350643999 - Should rather be a ClassVar but it breaks build # investigate and fix. _instance: Optional["Singleton"] = None def __new__(cls, *args, **kwargs): # If cls is a subclass of a subclass of Singleton, cls._instance will be # filled by its parent. cls needs to be given its own instance. if not cls._instance or type(cls._instance) != cls: # pylint: disable=unidiomatic-typecheck log.debug("Singleton: Making new instance for %s", cls) cls._instance = super().__new__(cls) # pylint: disable=no-value-for-parameter return cls._instance def get_special_attribute( self, node: cfg.CFGNode, name: str, valself: cfg.Variable ) -> cfg.Variable | None: del name, valself return self.to_variable(node) def compute_mro(self) -> tuple[_base.BaseValue, _base.BaseValue]: return self.default_mro() def call( self, node: cfg.CFGNode, func: cfg.Binding, args: "function.Args", alias_map: datatypes.UnionFind | None = None, ) -> tuple[cfg.CFGNode, cfg.Variable]: del func, args return node, self.to_variable(node) def instantiate( self, node: cfg.CFGNode, container: "_instance_base.SimpleValue | abstract_utils.DummyContainer | None" = None, ) -> cfg.Variable: return self.to_variable(node)
Singleton
python
huggingface__transformers
src/transformers/models/zamba2/modeling_zamba2.py
{ "start": 78452, "end": 83455 }
class ____(Zamba2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = Zamba2Model(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = ["Zamba2ForCausalLM", "Zamba2ForSequenceClassification", "Zamba2Model", "Zamba2PreTrainedModel"]
Zamba2ForSequenceClassification
python
cython__cython
Cython/Debugger/Tests/test_libcython_in_gdb.py
{ "start": 13922, "end": 15247 }
class ____(DebugTestCase): def setUp(self): super().setUp() self.fd, self.tmpfilename = tempfile.mkstemp() self.tmpfile = os.fdopen(self.fd, 'r+') def tearDown(self): super().tearDown() try: self.tmpfile.close() finally: os.remove(self.tmpfilename) def eval_command(self, command): gdb.execute('cy exec open(%r, "w").write(str(%s))' % (self.tmpfilename, command)) return self.tmpfile.read().strip() def test_cython_exec(self): self.break_and_run('os.path.join("foo", "bar")') # test normal behaviour self.assertEqual("[0]", self.eval_command('[a]')) return #The test after this return freezes gdb, so I temporarily removed it. # test multiline code result = gdb.execute(textwrap.dedent('''\ cy exec pass "nothing" end ''')) result = self.tmpfile.read().rstrip() self.assertEqual('', result) def test_python_exec(self): self.break_and_run('os.path.join("foo", "bar")') gdb.execute('cy step') gdb.execute('cy exec some_random_var = 14') self.assertEqual('14', self.eval_command('some_random_var'))
TestExec
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta_authentication_error.py
{ "start": 199, "end": 301 }
class ____(BaseModel): message: str type: Literal["authentication_error"]
BetaAuthenticationError
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_unittest.py
{ "start": 1230, "end": 1588 }
class ____(unittest.TestCase): @fails_with(FailedHealthCheck) @given(st.integers()) def setUp(self, i): pass def test(self): """Provide something to set up for, so the setUp method is called.""" SUBTEST_SUITE = """ import unittest from hypothesis import given, settings, strategies as st
test_given_on_setUp_fails_health_check
python
huggingface__transformers
src/transformers/models/nanochat/modeling_nanochat.py
{ "start": 12700, "end": 14461 }
class ____(GradientCheckpointingLayer): def __init__(self, config: NanoChatConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = NanoChatAttention(config=config, layer_idx=layer_idx) self.mlp = NanoChatMLP(config) self.input_layernorm = NanoChatRMSNorm(eps=config.rms_norm_eps) self.post_attention_layernorm = NanoChatRMSNorm(eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring
NanoChatDecoderLayer
python
apache__airflow
airflow-core/tests/unit/core/test_stats.py
{ "start": 15627, "end": 16230 }
class ____: def setup_method(self): pytest.importorskip("datadog") from datadog import DogStatsd self.dogstatsd_client = Mock(spec=DogStatsd) self.dogstatsd = SafeDogStatsdLogger(self.dogstatsd_client, metrics_tags=True) def test_does_send_stats_using_dogstatsd_with_tags(self): self.dogstatsd.incr("empty_key", 1, 1, tags={"key1": "value1", "key2": "value2"}) self.dogstatsd_client.increment.assert_called_once_with( metric="empty_key", sample_rate=1, tags=["key1:value1", "key2:value2"], value=1 )
TestDogStatsWithMetricsTags
python
tensorflow__tensorflow
tensorflow/python/distribute/numpy_dataset.py
{ "start": 3647, "end": 3800 }
class ____(object): """Used with `colocate_with` to create a non-mirrored variable.""" def __init__(self, device): self.device = device
SingleDevice
python
apache__airflow
providers/dingding/tests/unit/dingding/operators/test_dingding.py
{ "start": 1043, "end": 2374 }
class ____: _config = { "dingding_conn_id": "dingding_default", "message_type": "text", "message": "Airflow dingding webhook test", "at_mobiles": ["123", "456"], "at_all": False, } def setup_method(self): args = {"owner": "airflow", "start_date": DEFAULT_DATE} self.dag = DAG("test_dag_id", schedule=None, default_args=args) @mock.patch("airflow.providers.dingding.operators.dingding.DingdingHook") def test_execute(self, mock_hook): operator = DingdingOperator(task_id="dingding_task", dag=self.dag, **self._config) assert operator is not None assert self._config["dingding_conn_id"] == operator.dingding_conn_id assert self._config["message_type"] == operator.message_type assert self._config["message"] == operator.message assert self._config["at_mobiles"] == operator.at_mobiles assert self._config["at_all"] == operator.at_all operator.execute(None) mock_hook.assert_called_once_with( self._config["dingding_conn_id"], self._config["message_type"], self._config["message"], self._config["at_mobiles"], self._config["at_all"], ) mock_hook.return_value.send.assert_called_once_with()
TestDingdingOperator
python
pypa__setuptools
setuptools/tests/config/test_apply_pyprojecttoml.py
{ "start": 19420, "end": 19933 }
class ____: def test_namespace_packages(self, tmp_path): pyproject = tmp_path / "pyproject.toml" config = """ [project] name = "myproj" version = "42" [tool.setuptools] namespace-packages = ["myproj.pkg"] """ pyproject.write_text(cleandoc(config), encoding="utf-8") with pytest.raises(RemovedConfigError, match="namespace-packages"): pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
TestDeprecatedFields
python
pydantic__pydantic
pydantic/_internal/_core_metadata.py
{ "start": 295, "end": 5162 }
class ____(TypedDict, total=False): """A `TypedDict` for holding the metadata dict of the schema. Attributes: pydantic_js_functions: List of JSON schema functions that resolve refs during application. pydantic_js_annotation_functions: List of JSON schema functions that don't resolve refs during application. pydantic_js_prefer_positional_arguments: Whether JSON schema generator will prefer positional over keyword arguments for an 'arguments' schema. custom validation function. Only applies to before, plain, and wrap validators. pydantic_js_updates: key / value pair updates to apply to the JSON schema for a type. pydantic_js_extra: WIP, either key/value pair updates to apply to the JSON schema, or a custom callable. pydantic_internal_union_tag_key: Used internally by the `Tag` metadata to specify the tag used for a discriminated union. pydantic_internal_union_discriminator: Used internally to specify the discriminator value for a discriminated union when the discriminator was applied to a `'definition-ref'` schema, and that reference was missing at the time of the annotation application. TODO: Perhaps we should move this structure to pydantic-core. At the moment, though, it's easier to iterate on if we leave it in pydantic until we feel there is a semi-stable API. TODO: It's unfortunate how functionally oriented JSON schema generation is, especially that which occurs during the core schema generation process. It's inevitable that we need to store some json schema related information on core schemas, given that we generate JSON schemas directly from core schemas. That being said, debugging related issues is quite difficult when JSON schema information is disguised via dynamically defined functions. """ pydantic_js_functions: list[GetJsonSchemaFunction] pydantic_js_annotation_functions: list[GetJsonSchemaFunction] pydantic_js_prefer_positional_arguments: bool pydantic_js_updates: JsonDict pydantic_js_extra: JsonDict | JsonSchemaExtraCallable pydantic_internal_union_tag_key: str pydantic_internal_union_discriminator: str def update_core_metadata( core_metadata: Any, /, *, pydantic_js_functions: list[GetJsonSchemaFunction] | None = None, pydantic_js_annotation_functions: list[GetJsonSchemaFunction] | None = None, pydantic_js_updates: JsonDict | None = None, pydantic_js_extra: JsonDict | JsonSchemaExtraCallable | None = None, ) -> None: from ..json_schema import PydanticJsonSchemaWarning """Update CoreMetadata instance in place. When we make modifications in this function, they take effect on the `core_metadata` reference passed in as the first (and only) positional argument. First, cast to `CoreMetadata`, then finish with a cast to `dict[str, Any]` for core schema compatibility. We do this here, instead of before / after each call to this function so that this typing hack can be easily removed if/when we move `CoreMetadata` to `pydantic-core`. For parameter descriptions, see `CoreMetadata` above. 
""" core_metadata = cast(CoreMetadata, core_metadata) if pydantic_js_functions: core_metadata.setdefault('pydantic_js_functions', []).extend(pydantic_js_functions) if pydantic_js_annotation_functions: core_metadata.setdefault('pydantic_js_annotation_functions', []).extend(pydantic_js_annotation_functions) if pydantic_js_updates: if (existing_updates := core_metadata.get('pydantic_js_updates')) is not None: core_metadata['pydantic_js_updates'] = {**existing_updates, **pydantic_js_updates} else: core_metadata['pydantic_js_updates'] = pydantic_js_updates if pydantic_js_extra is not None: existing_pydantic_js_extra = core_metadata.get('pydantic_js_extra') if existing_pydantic_js_extra is None: core_metadata['pydantic_js_extra'] = pydantic_js_extra if isinstance(existing_pydantic_js_extra, dict): if isinstance(pydantic_js_extra, dict): core_metadata['pydantic_js_extra'] = {**existing_pydantic_js_extra, **pydantic_js_extra} if callable(pydantic_js_extra): warn( 'Composing `dict` and `callable` type `json_schema_extra` is not supported.' 'The `callable` type is being ignored.' "If you'd like support for this behavior, please open an issue on pydantic.", PydanticJsonSchemaWarning, ) if callable(existing_pydantic_js_extra): # if ever there's a case of a callable, we'll just keep the last json schema extra spec core_metadata['pydantic_js_extra'] = pydantic_js_extra
CoreMetadata
python
matplotlib__matplotlib
lib/matplotlib/cbook.py
{ "start": 5565, "end": 13295 }
class ____: """ Handle registering, processing, blocking, and disconnecting for a set of signals and callbacks: >>> def oneat(x): ... print('eat', x) >>> def ondrink(x): ... print('drink', x) >>> from matplotlib.cbook import CallbackRegistry >>> callbacks = CallbackRegistry() >>> id_eat = callbacks.connect('eat', oneat) >>> id_drink = callbacks.connect('drink', ondrink) >>> callbacks.process('drink', 123) drink 123 >>> callbacks.process('eat', 456) eat 456 >>> callbacks.process('be merry', 456) # nothing will be called >>> callbacks.disconnect(id_eat) >>> callbacks.process('eat', 456) # nothing will be called >>> with callbacks.blocked(signal='drink'): ... callbacks.process('drink', 123) # nothing will be called >>> callbacks.process('drink', 123) drink 123 In practice, one should always disconnect all callbacks when they are no longer needed to avoid dangling references (and thus memory leaks). However, real code in Matplotlib rarely does so, and due to its design, it is rather difficult to place this kind of code. To get around this, and prevent this class of memory leaks, we instead store weak references to bound methods only, so when the destination object needs to die, the CallbackRegistry won't keep it alive. Parameters ---------- exception_handler : callable, optional If not None, *exception_handler* must be a function that takes an `Exception` as single parameter. It gets called with any `Exception` raised by the callbacks during `CallbackRegistry.process`, and may either re-raise the exception or handle it in another manner. The default handler prints the exception (with `traceback.print_exc`) if an interactive event loop is running; it re-raises the exception if no interactive event loop is running. signals : list, optional If not None, *signals* is a list of signals that this registry handles: attempting to `process` or to `connect` to a signal not in the list throws a `ValueError`. The default, None, does not restrict the handled signals. """ # We maintain two mappings: # callbacks: signal -> {cid -> weakref-to-callback} # _func_cid_map: {(signal, weakref-to-callback) -> cid} def __init__(self, exception_handler=_exception_printer, *, signals=None): self._signals = None if signals is None else list(signals) # Copy it. self.exception_handler = exception_handler self.callbacks = {} self._cid_gen = itertools.count() self._func_cid_map = _UnhashDict([]) # A hidden variable that marks cids that need to be pickled. self._pickled_cids = set() def __getstate__(self): return { **vars(self), # In general, callbacks may not be pickled, so we just drop them, # unless directed otherwise by self._pickled_cids. "callbacks": {s: {cid: proxy() for cid, proxy in d.items() if cid in self._pickled_cids} for s, d in self.callbacks.items()}, # It is simpler to reconstruct this from callbacks in __setstate__. 
"_func_cid_map": None, "_cid_gen": next(self._cid_gen) } def __setstate__(self, state): cid_count = state.pop('_cid_gen') vars(self).update(state) self.callbacks = { s: {cid: _weak_or_strong_ref(func, functools.partial(self._remove_proxy, s)) for cid, func in d.items()} for s, d in self.callbacks.items()} self._func_cid_map = _UnhashDict( ((s, proxy), cid) for s, d in self.callbacks.items() for cid, proxy in d.items()) self._cid_gen = itertools.count(cid_count) def connect(self, signal, func): """Register *func* to be called when signal *signal* is generated.""" if self._signals is not None: _api.check_in_list(self._signals, signal=signal) proxy = _weak_or_strong_ref(func, functools.partial(self._remove_proxy, signal)) try: return self._func_cid_map[signal, proxy] except KeyError: cid = self._func_cid_map[signal, proxy] = next(self._cid_gen) self.callbacks.setdefault(signal, {})[cid] = proxy return cid def _connect_picklable(self, signal, func): """ Like `.connect`, but the callback is kept when pickling/unpickling. Currently internal-use only. """ cid = self.connect(signal, func) self._pickled_cids.add(cid) return cid # Keep a reference to sys.is_finalizing, as sys may have been cleared out # at that point. def _remove_proxy(self, signal, proxy, *, _is_finalizing=sys.is_finalizing): if _is_finalizing(): # Weakrefs can't be properly torn down at that point anymore. return cid = self._func_cid_map.pop((signal, proxy), None) if cid is not None: del self.callbacks[signal][cid] self._pickled_cids.discard(cid) else: # Not found return if len(self.callbacks[signal]) == 0: # Clean up empty dicts del self.callbacks[signal] def disconnect(self, cid): """ Disconnect the callback registered with callback id *cid*. No error is raised if such a callback does not exist. """ self._pickled_cids.discard(cid) for signal, proxy in self._func_cid_map: if self._func_cid_map[signal, proxy] == cid: break else: # Not found return assert self.callbacks[signal][cid] == proxy del self.callbacks[signal][cid] self._func_cid_map.pop((signal, proxy)) if len(self.callbacks[signal]) == 0: # Clean up empty dicts del self.callbacks[signal] def process(self, s, *args, **kwargs): """ Process signal *s*. All of the functions registered to receive callbacks on *s* will be called with ``*args`` and ``**kwargs``. """ if self._signals is not None: _api.check_in_list(self._signals, signal=s) for ref in list(self.callbacks.get(s, {}).values()): func = ref() if func is not None: try: func(*args, **kwargs) # this does not capture KeyboardInterrupt, SystemExit, # and GeneratorExit except Exception as exc: if self.exception_handler is not None: self.exception_handler(exc) else: raise @contextlib.contextmanager def blocked(self, *, signal=None): """ Block callback signals from being processed. A context manager to temporarily block/disable callback signals from being processed by the registered listeners. Parameters ---------- signal : str, optional The callback signal to block. The default is to block all signals. """ orig = self.callbacks try: if signal is None: # Empty out the callbacks self.callbacks = {} else: # Only remove the specific signal self.callbacks = {k: orig[k] for k in orig if k != signal} yield finally: self.callbacks = orig
CallbackRegistry
python
falconry__falcon
tests/test_middleware.py
{ "start": 529, "end": 628 }
class ____: def process_request(self, req, resp): self.req = req
CaptureRequestMiddleware
python
huggingface__transformers
src/transformers/models/ernie/modular_ernie.py
{ "start": 4932, "end": 4972 }
class ____(BertLayer): pass
ErnieLayer
python
keras-team__keras
keras/src/metrics/reduction_metrics.py
{ "start": 3171, "end": 5119 }
class ____(Metric): """Compute the (weighted) mean of the given values. For example, if values is `[1, 3, 5, 7]` then the mean is 4. If `sample_weight` was specified as `[1, 1, 0, 0]` then the mean would be 2. This metric creates two variables, `total` and `count`. The mean value returned is simply `total` divided by `count`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Example: >>> m = Mean() >>> m.update_state([1, 3, 5, 7]) >>> m.result() 4.0 >>> m.reset_state() >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0]) >>> m.result() 2.0 """ def __init__(self, name="mean", dtype=None): super().__init__(name=name, dtype=dtype) self.total = self.add_variable( shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name="total", ) self.count = self.add_variable( shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name="count", ) def update_state(self, values, sample_weight=None): values, sample_weight = reduce_to_samplewise_values( values, sample_weight, reduce_fn=ops.mean, dtype=self.dtype ) self.total.assign_add(ops.sum(values)) if sample_weight is not None: num_samples = ops.sum(sample_weight) elif len(values.shape) >= 1: num_samples = ops.shape(values)[0] else: num_samples = 1 self.count.assign_add(ops.cast(num_samples, dtype=self.dtype)) def reset_state(self): self.total.assign(0) self.count.assign(0) def result(self): return ops.divide_no_nan( self.total, ops.cast(self.count, dtype=self.dtype) ) @keras_export("keras.metrics.MeanMetricWrapper")
Mean
python
apache__airflow
airflow-core/src/airflow/models/xcom_arg.py
{ "start": 4929, "end": 8971 }
class ____(SchedulerXComArg): args: Sequence[SchedulerXComArg] fillvalue: Any @classmethod def _deserialize(cls, data: dict[str, Any], dag: SerializedDAG) -> Self: return cls( [deserialize_xcom_arg(arg, dag) for arg in data["args"]], fillvalue=data.get("fillvalue", NOTSET), ) def iter_references(self) -> Iterator[tuple[Operator, str]]: for arg in self.args: yield from arg.iter_references() @singledispatch def get_task_map_length(xcom_arg: SchedulerXComArg, run_id: str, *, session: Session) -> int | None: # The base implementation -- specific XComArg subclasses have specialised implementations raise NotImplementedError(f"get_task_map_length not implemented for {type(xcom_arg)}") @get_task_map_length.register def _(xcom_arg: SchedulerPlainXComArg, run_id: str, *, session: Session) -> int | None: from airflow.models.mappedoperator import is_mapped from airflow.models.taskinstance import TaskInstance from airflow.models.taskmap import TaskMap from airflow.models.xcom import XComModel dag_id = xcom_arg.operator.dag_id task_id = xcom_arg.operator.task_id if is_mapped(xcom_arg.operator): unfinished_ti_exists = exists_query( TaskInstance.dag_id == dag_id, TaskInstance.run_id == run_id, TaskInstance.task_id == task_id, # Special NULL treatment is needed because 'state' can be NULL. # The "IN" part would produce "NULL NOT IN ..." and eventually # "NULl = NULL", which is a big no-no in SQL. or_( TaskInstance.state.is_(None), TaskInstance.state.in_(s.value for s in State.unfinished if s is not None), ), session=session, ) if unfinished_ti_exists: return None # Not all of the expanded tis are done yet. query = select(func.count(XComModel.map_index)).where( XComModel.dag_id == dag_id, XComModel.run_id == run_id, XComModel.task_id == task_id, XComModel.map_index >= 0, XComModel.key == XCOM_RETURN_KEY, ) else: query = select(TaskMap.length).where( TaskMap.dag_id == dag_id, TaskMap.run_id == run_id, TaskMap.task_id == task_id, TaskMap.map_index < 0, ) return session.scalar(query) @get_task_map_length.register def _(xcom_arg: SchedulerMapXComArg, run_id: str, *, session: Session) -> int | None: return get_task_map_length(xcom_arg.arg, run_id, session=session) @get_task_map_length.register def _(xcom_arg: SchedulerZipXComArg, run_id: str, *, session: Session) -> int | None: all_lengths = (get_task_map_length(arg, run_id, session=session) for arg in xcom_arg.args) ready_lengths = [length for length in all_lengths if length is not None] if len(ready_lengths) != len(xcom_arg.args): return None # If any of the referenced XComs is not ready, we are not ready either. if is_arg_set(xcom_arg.fillvalue): return max(ready_lengths) return min(ready_lengths) @get_task_map_length.register def _(xcom_arg: SchedulerConcatXComArg, run_id: str, *, session: Session) -> int | None: all_lengths = (get_task_map_length(arg, run_id, session=session) for arg in xcom_arg.args) ready_lengths = [length for length in all_lengths if length is not None] if len(ready_lengths) != len(xcom_arg.args): return None # If any of the referenced XComs is not ready, we are not ready either. return sum(ready_lengths) def deserialize_xcom_arg(data: dict[str, Any], dag: SerializedDAG): """DAG serialization interface.""" klass = _XCOM_ARG_TYPES[data.get("type", "")] return klass._deserialize(data, dag) _XCOM_ARG_TYPES: dict[str, type[SchedulerXComArg]] = { "": SchedulerPlainXComArg, "concat": SchedulerConcatXComArg, "map": SchedulerMapXComArg, "zip": SchedulerZipXComArg, }
SchedulerZipXComArg
python
getsentry__sentry
tests/sentry/models/test_apitoken.py
{ "start": 7449, "end": 9161 }
class ____(TestCase): def setUp(self) -> None: self.user = self.create_user() self.proxy = self.create_user() self.org = self.create_organization() self.internal_app = self.create_internal_integration( name="Internal App", organization=self.org, ) self.install = SentryAppInstallation.objects.get(sentry_app=self.internal_app) def test_multiple_tokens_have_correct_organization_id(self) -> None: # First token is no longer created automatically with the application, so we must manually # create multiple tokens that aren't directly linked from the SentryAppInstallation model. token_1 = self.create_internal_integration_token(install=self.install, user=self.user) token_2 = self.create_internal_integration_token(install=self.install, user=self.user) assert token_1.organization_id == self.org.id assert token_2.organization_id == self.org.id with assume_test_silo_mode(SiloMode.REGION): assert ( ApiTokenReplica.objects.get(apitoken_id=token_1.id).organization_id == self.org.id ) assert ( ApiTokenReplica.objects.get(apitoken_id=token_2.id).organization_id == self.org.id ) with outbox_runner(): for install_token in SentryAppInstallationToken.objects.all(): install_token.delete() with assume_test_silo_mode(SiloMode.REGION): assert ApiTokenReplica.objects.get(apitoken_id=token_1.id).organization_id is None assert ApiTokenReplica.objects.get(apitoken_id=token_2.id).organization_id is None
ApiTokenInternalIntegrationTest
python
numba__numba
numba/cuda/cudadrv/devicearray.py
{ "start": 18521, "end": 24888 }
class ____(DeviceNDArrayBase): ''' An on-GPU array type ''' def is_f_contiguous(self): ''' Return true if the array is Fortran-contiguous. ''' return self._dummy.is_f_contig @property def flags(self): """ For `numpy.ndarray` compatibility. Ideally this would return a `np.core.multiarray.flagsobj`, but that needs to be constructed with an existing `numpy.ndarray` (as the C- and F- contiguous flags aren't writeable). """ return dict(self._dummy.flags) # defensive copy def is_c_contiguous(self): ''' Return true if the array is C-contiguous. ''' return self._dummy.is_c_contig def __array__(self, dtype=None): """ :return: an `numpy.ndarray`, so copies to the host. """ if dtype: return self.copy_to_host().__array__(dtype) else: return self.copy_to_host().__array__() def __len__(self): return self.shape[0] def reshape(self, *newshape, **kws): """ Reshape the array without changing its contents, similarly to :meth:`numpy.ndarray.reshape`. Example:: d_arr = d_arr.reshape(20, 50, order='F') """ if len(newshape) == 1 and isinstance(newshape[0], (tuple, list)): newshape = newshape[0] cls = type(self) if newshape == self.shape: # nothing to do return cls(shape=self.shape, strides=self.strides, dtype=self.dtype, gpu_data=self.gpu_data) newarr, extents = self._dummy.reshape(*newshape, **kws) if extents == [self._dummy.extent]: return cls(shape=newarr.shape, strides=newarr.strides, dtype=self.dtype, gpu_data=self.gpu_data) else: raise NotImplementedError("operation requires copying") def ravel(self, order='C', stream=0): ''' Flattens a contiguous array without changing its contents, similar to :meth:`numpy.ndarray.ravel`. If the array is not contiguous, raises an exception. ''' stream = self._default_stream(stream) cls = type(self) newarr, extents = self._dummy.ravel(order=order) if extents == [self._dummy.extent]: return cls(shape=newarr.shape, strides=newarr.strides, dtype=self.dtype, gpu_data=self.gpu_data, stream=stream) else: raise NotImplementedError("operation requires copying") @devices.require_context def __getitem__(self, item): return self._do_getitem(item) @devices.require_context def getitem(self, item, stream=0): """Do `__getitem__(item)` with CUDA stream """ return self._do_getitem(item, stream) def _do_getitem(self, item, stream=0): stream = self._default_stream(stream) arr = self._dummy.__getitem__(item) extents = list(arr.iter_contiguous_extent()) cls = type(self) if len(extents) == 1: newdata = self.gpu_data.view(*extents[0]) if not arr.is_array: # Check for structured array type (record) if self.dtype.names is not None: return DeviceRecord(dtype=self.dtype, stream=stream, gpu_data=newdata) else: # Element indexing hostary = np.empty(1, dtype=self.dtype) _driver.device_to_host(dst=hostary, src=newdata, size=self._dummy.itemsize, stream=stream) return hostary[0] else: return cls(shape=arr.shape, strides=arr.strides, dtype=self.dtype, gpu_data=newdata, stream=stream) else: newdata = self.gpu_data.view(*arr.extent) return cls(shape=arr.shape, strides=arr.strides, dtype=self.dtype, gpu_data=newdata, stream=stream) @devices.require_context def __setitem__(self, key, value): return self._do_setitem(key, value) @devices.require_context def setitem(self, key, value, stream=0): """Do `__setitem__(key, value)` with CUDA stream """ return self._do_setitem(key, value, stream=stream) def _do_setitem(self, key, value, stream=0): stream = self._default_stream(stream) # If the array didn't have a default stream, and the user didn't provide # a stream, then we will use the default stream for the 
assignment # kernel and synchronize on it. synchronous = not stream if synchronous: ctx = devices.get_context() stream = ctx.get_default_stream() # (1) prepare LHS arr = self._dummy.__getitem__(key) newdata = self.gpu_data.view(*arr.extent) if isinstance(arr, dummyarray.Element): # convert to a 0d array shape = () strides = () else: shape = arr.shape strides = arr.strides lhs = type(self)( shape=shape, strides=strides, dtype=self.dtype, gpu_data=newdata, stream=stream) # (2) prepare RHS rhs, _ = auto_device(value, stream=stream, user_explicit=True) if rhs.ndim > lhs.ndim: raise ValueError("Can't assign %s-D array to %s-D self" % ( rhs.ndim, lhs.ndim)) rhs_shape = np.ones(lhs.ndim, dtype=np.int64) # negative indices would not work if rhs.ndim == 0 rhs_shape[lhs.ndim - rhs.ndim:] = rhs.shape rhs = rhs.reshape(*rhs_shape) for i, (l, r) in enumerate(zip(lhs.shape, rhs.shape)): if r != 1 and l != r: raise ValueError("Can't copy sequence with size %d to array " "axis %d with dimension %d" % ( r, i, l)) # (3) do the copy n_elements = functools.reduce(operator.mul, lhs.shape, 1) _assign_kernel(lhs.ndim).forall(n_elements, stream=stream)(lhs, rhs) if synchronous: stream.synchronize()
DeviceNDArray
python
scrapy__scrapy
tests/test_responsetypes.py
{ "start": 175, "end": 4983 }
class ____: def test_from_filename(self): mappings = [ ("data.bin", Response), ("file.txt", TextResponse), ("file.xml.gz", Response), ("file.xml", XmlResponse), ("file.html", HtmlResponse), ("file.unknownext", Response), ] for source, cls in mappings: retcls = responsetypes.from_filename(source) assert retcls is cls, f"{source} ==> {retcls} != {cls}" def test_from_content_disposition(self): mappings = [ (b'attachment; filename="data.xml"', XmlResponse), (b"attachment; filename=data.xml", XmlResponse), ("attachment;filename=data£.tar.gz".encode(), Response), ("attachment;filename=dataµ.tar.gz".encode("latin-1"), Response), ("attachment;filename=data高.doc".encode("gbk"), Response), ("attachment;filename=دورهdata.html".encode("cp720"), HtmlResponse), ( "attachment;filename=日本語版Wikipedia.xml".encode("iso2022_jp"), XmlResponse, ), ] for source, cls in mappings: retcls = responsetypes.from_content_disposition(source) assert retcls is cls, f"{source} ==> {retcls} != {cls}" def test_from_content_type(self): mappings = [ ("text/html; charset=UTF-8", HtmlResponse), ("text/xml; charset=UTF-8", XmlResponse), ("application/xhtml+xml; charset=UTF-8", HtmlResponse), ("application/vnd.wap.xhtml+xml; charset=utf-8", HtmlResponse), ("application/xml; charset=UTF-8", XmlResponse), ("application/octet-stream", Response), ("application/json; encoding=UTF8;charset=UTF-8", JsonResponse), ("application/x-json; encoding=UTF8;charset=UTF-8", JsonResponse), ("application/json-amazonui-streaming;charset=UTF-8", JsonResponse), (b"application/x-download; filename=\x80dummy.txt", Response), ] for source, cls in mappings: retcls = responsetypes.from_content_type(source) assert retcls is cls, f"{source} ==> {retcls} != {cls}" def test_from_body(self): mappings = [ (b"\x03\x02\xdf\xdd\x23", Response), (b"Some plain text\ndata with tabs\t and null bytes\0", TextResponse), (b"<html><head><title>Hello</title></head>", HtmlResponse), # https://codersblock.com/blog/the-smallest-valid-html5-page/ (b"<!DOCTYPE html>\n<title>.</title>", HtmlResponse), (b'<?xml version="1.0" encoding="utf-8"', XmlResponse), ] for source, cls in mappings: retcls = responsetypes.from_body(source) assert retcls is cls, f"{source} ==> {retcls} != {cls}" def test_from_headers(self): mappings = [ ({"Content-Type": ["text/html; charset=utf-8"]}, HtmlResponse), ( { "Content-Type": ["text/html; charset=utf-8"], "Content-Encoding": ["gzip"], }, Response, ), ( { "Content-Type": ["application/octet-stream"], "Content-Disposition": ["attachment; filename=data.txt"], }, TextResponse, ), ] for source, cls in mappings: source = Headers(source) retcls = responsetypes.from_headers(source) assert retcls is cls, f"{source} ==> {retcls} != {cls}" def test_from_args(self): # TODO: add more tests that check precedence between the different arguments mappings = [ ({"url": "http://www.example.com/data.csv"}, TextResponse), # headers takes precedence over url ( { "headers": Headers({"Content-Type": ["text/html; charset=utf-8"]}), "url": "http://www.example.com/item/", }, HtmlResponse, ), ( { "headers": Headers( {"Content-Disposition": ['attachment; filename="data.xml.gz"']} ), "url": "http://www.example.com/page/", }, Response, ), ] for source, cls in mappings: retcls = responsetypes.from_args(**source) assert retcls is cls, f"{source} ==> {retcls} != {cls}" def test_custom_mime_types_loaded(self): # check that mime.types files shipped with scrapy are loaded assert responsetypes.mimetypes.guess_type("x.scrapytest")[0] == "x-scrapy/test"
TestResponseTypes
python
walkccc__LeetCode
solutions/1248. Count Number of Nice Subarrays/1248.py
{ "start": 0, "end": 510 }
class ____: def numberOfSubarrays(self, nums: list[int], k: int) -> int: def numberOfSubarraysAtMost(k: int) -> int: ans = 0 l = 0 r = 0 while r <= len(nums): if k >= 0: ans += r - l if r == len(nums): break if nums[r] & 1: k -= 1 r += 1 else: if nums[l] & 1: k += 1 l += 1 return ans return numberOfSubarraysAtMost(k) - numberOfSubarraysAtMost(k - 1)
Solution
python
pytorch__pytorch
torch/nn/modules/padding.py
{ "start": 13813, "end": 14075 }
class ____(Module): __constants__ = ["padding"] padding: Sequence[int] def forward(self, input: Tensor) -> Tensor: return F.pad(input, self.padding, "reflect") def extra_repr(self) -> str: return f"{self.padding}"
_ReflectionPadNd
python
django__django
django/views/generic/detail.py
{ "start": 6748, "end": 7040 }
class ____(SingleObjectTemplateResponseMixin, BaseDetailView): """ Render a "detail" view of an object. By default this is a model instance looked up from `self.queryset`, but the view will support display of *any* object by overriding `self.get_object()`. """
DetailView
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 272130, "end": 272486 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("client_mutation_id", "owner") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") owner = sgqlc.types.Field("VerifiableDomainOwner", graphql_name="owner")
DeleteVerifiableDomainPayload
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/exc.py
{ "start": 10302, "end": 10462 }
class ____(InvalidRequestError): """An operation was requested from a connection, cursor, or other object that's in a closed state."""
ResourceClosedError
python
pandas-dev__pandas
asv_bench/benchmarks/gil.py
{ "start": 2055, "end": 2851 }
class ____: params = ([2, 4, 8], ["count", "last", "max", "mean", "min", "prod", "sum", "var"]) param_names = ["threads", "method"] def setup(self, threads, method): N = 10**6 ngroups = 10**3 df = DataFrame( {"key": np.random.randint(0, ngroups, size=N), "data": np.random.randn(N)} ) @run_parallel(num_threads=threads) def parallel(): getattr(df.groupby("key")["data"], method)() self.parallel = parallel def loop(): getattr(df.groupby("key")["data"], method)() self.loop = loop def time_parallel(self, threads, method): self.parallel() def time_loop(self, threads, method): for i in range(threads): self.loop()
ParallelGroupbyMethods
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_format24.py
{ "start": 315, "end": 994 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("format24.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with automatic color.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() format1 = workbook.add_format( {"rotation": 270, "indent": 1, "align": "center", "valign": "top"} ) worksheet.set_row(0, 75) worksheet.write(0, 0, "ABCD", format1) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
airbytehq__airbyte
airbyte-integrations/connectors/source-recharge/unit_tests/integration/streams/test_credit_adjustments.py
{ "start": 1848, "end": 3834 }
class ____(StreamTestCase): _STREAM_NAME = "credit_adjustments" @HttpMocker() def test_state_message_produced_while_read_and_state_match_latest_record(self, http_mocker: HttpMocker) -> None: min_cursor_value = "2025-04-13T00:00:00+00:00" max_cursor_value = "2025-05-13T00:00:00+00:00" http_mocker.get( self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(), get_stream_response(_STREAM_NAME) .with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD).with_cursor(min_cursor_value)) .with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD).with_cursor(max_cursor_value)) .build(), ) output = read_incremental(self._config, _STREAM_NAME) test_cursor_value = get_cursor_value_from_state_message(output, _CURSOR_FIELD) assert test_cursor_value == max_cursor_value @HttpMocker() def test_given_multiple_pages_when_read_then_return_records_with_state(self, http_mocker: HttpMocker) -> None: min_cursor_value = "2025-04-13T00:00:00+00:00" max_cursor_value = "2025-05-13T00:00:00+00:00" http_mocker.get( self.stream_request().with_limit(250).with_next_page_token(NEXT_PAGE_TOKEN).build(), get_stream_response(_STREAM_NAME).with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(), ) http_mocker.get( self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(), get_stream_response(_STREAM_NAME) .with_pagination() .with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD).with_cursor(min_cursor_value)) .with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD).with_cursor(max_cursor_value)) .build(), ) output = read_incremental(self._config, _STREAM_NAME) assert len(output.records) == 3
TestIncremental
python
agronholm__apscheduler
src/apscheduler/abc.py
{ "start": 602, "end": 1621 }
class ____(Iterator[datetime], metaclass=ABCMeta): """ Abstract base class that defines the interface that every trigger must implement. """ __slots__ = () @abstractmethod def next(self) -> datetime | None: """ Return the next datetime to fire on. If no such datetime can be calculated, ``None`` is returned. :raises MaxIterationsReached: if the trigger's internal logic has exceeded a set maximum of iterations (used to detect potentially infinite loops) """ @abstractmethod def __getstate__(self) -> Any: """Return the serializable state of the trigger.""" @abstractmethod def __setstate__(self, state: Any) -> None: """Initialize an empty instance from an existing state.""" def __iter__(self) -> Self: return self def __next__(self) -> datetime: dateval = self.next() if dateval is None: raise StopIteration else: return dateval
Trigger
python
huggingface__transformers
src/transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py
{ "start": 1674, "end": 7538 }
class ____(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"height": 384, "width": 384} min_size = 14 do_resize = True do_rescale = True do_normalize = True do_pad = True valid_kwargs = DeepseekVLImageProcessorKwargs def __init__(self, **kwargs: Unpack[DeepseekVLImageProcessorKwargs]): super().__init__(**kwargs) if kwargs.get("image_mean") is None: background_color = (127, 127, 127) else: background_color = tuple(int(x * 255) for x in kwargs.get("image_mean")) self.background_color = tuple(background_color) def resize( self, image: "torch.Tensor", size: SizeDict, min_size: int, interpolation: Optional["F.InterpolationMode"] = None, antialias: bool = True, **kwargs, ) -> "torch.Tensor": if size.height is None or size.width is None or size.height != size.width: raise ValueError( f"Output height and width must be the same. Got height={size['height']} and width={size['width']}" ) size = size.height height, width = image.shape[-2:] max_size = max(height, width) delta = size / max_size # Largest side becomes `size` and the other side is scaled according to the aspect ratio. output_size_nonpadded = SizeDict( height=max(int(height * delta), min_size), width=max(int(width * delta), min_size), ) return super().resize(image, size=output_size_nonpadded, interpolation=interpolation, antialias=antialias) def pad_to_square( self, images: "torch.Tensor", background_color: Union[int, tuple[int, int, int]] = 0, ) -> "torch.Tensor": """ Pads an image to a square based on the longest edge. Args: images (`torch.Tensor`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in multi-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. 
""" height, width = images.shape[-2:] num_channels = images.shape[1] batch_size = images.shape[0] if height == width: return images max_dim = max(height, width) # Ensure background_color is the correct shape if isinstance(background_color, int): background_color = [background_color] elif len(background_color) != num_channels: raise ValueError( f"background_color must have no more than {num_channels} elements to match the number of channels" ) padded_images = torch.zeros( (batch_size, num_channels, max_dim, max_dim), dtype=images.dtype, device=images.device ) for i, color in enumerate(background_color): padded_images[:, i, :, :] = color if width > height: start = (max_dim - height) // 2 padded_images[:, :, start : start + height, :] = images else: start = (max_dim - width) // 2 padded_images[:, :, :, start : start + width] = images return padded_images def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, min_size: int, interpolation: Optional["F.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], do_pad: bool = True, **kwargs, ) -> BatchFeature: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize( image=stacked_images, size=size, min_size=min_size, interpolation=interpolation ) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_pad: stacked_images = self.pad_to_square(stacked_images, background_color=self.background_color) # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["DeepseekVLImageProcessorFast"]
DeepseekVLImageProcessorFast
python
numba__numba
numba/cuda/cudadrv/nvvm.py
{ "start": 1742, "end": 6589 }
class ____(object): '''Process-wide singleton. ''' _PROTOTYPES = { # nvvmResult nvvmVersion(int *major, int *minor) 'nvvmVersion': (nvvm_result, POINTER(c_int), POINTER(c_int)), # nvvmResult nvvmCreateProgram(nvvmProgram *cu) 'nvvmCreateProgram': (nvvm_result, POINTER(nvvm_program)), # nvvmResult nvvmDestroyProgram(nvvmProgram *cu) 'nvvmDestroyProgram': (nvvm_result, POINTER(nvvm_program)), # nvvmResult nvvmAddModuleToProgram(nvvmProgram cu, const char *buffer, # size_t size, const char *name) 'nvvmAddModuleToProgram': ( nvvm_result, nvvm_program, c_char_p, c_size_t, c_char_p), # nvvmResult nvvmLazyAddModuleToProgram(nvvmProgram cu, # const char* buffer, # size_t size, # const char *name) 'nvvmLazyAddModuleToProgram': ( nvvm_result, nvvm_program, c_char_p, c_size_t, c_char_p), # nvvmResult nvvmCompileProgram(nvvmProgram cu, int numOptions, # const char **options) 'nvvmCompileProgram': ( nvvm_result, nvvm_program, c_int, POINTER(c_char_p)), # nvvmResult nvvmGetCompiledResultSize(nvvmProgram cu, # size_t *bufferSizeRet) 'nvvmGetCompiledResultSize': ( nvvm_result, nvvm_program, POINTER(c_size_t)), # nvvmResult nvvmGetCompiledResult(nvvmProgram cu, char *buffer) 'nvvmGetCompiledResult': (nvvm_result, nvvm_program, c_char_p), # nvvmResult nvvmGetProgramLogSize(nvvmProgram cu, # size_t *bufferSizeRet) 'nvvmGetProgramLogSize': (nvvm_result, nvvm_program, POINTER(c_size_t)), # nvvmResult nvvmGetProgramLog(nvvmProgram cu, char *buffer) 'nvvmGetProgramLog': (nvvm_result, nvvm_program, c_char_p), # nvvmResult nvvmIRVersion (int* majorIR, int* minorIR, int* majorDbg, # int* minorDbg ) 'nvvmIRVersion': (nvvm_result, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)), # nvvmResult nvvmVerifyProgram (nvvmProgram prog, int numOptions, # const char** options) 'nvvmVerifyProgram': (nvvm_result, nvvm_program, c_int, POINTER(c_char_p)) } # Singleton reference __INSTANCE = None def __new__(cls): with _nvvm_lock: if cls.__INSTANCE is None: cls.__INSTANCE = inst = object.__new__(cls) try: inst.driver = open_cudalib('nvvm') except OSError as e: cls.__INSTANCE = None errmsg = ("libNVVM cannot be found. Do `conda install " "cudatoolkit`:\n%s") raise NvvmSupportError(errmsg % e) # Find & populate functions for name, proto in inst._PROTOTYPES.items(): func = getattr(inst.driver, name) func.restype = proto[0] func.argtypes = proto[1:] setattr(inst, name, func) return cls.__INSTANCE def __init__(self): ir_versions = self.get_ir_version() self._majorIR = ir_versions[0] self._minorIR = ir_versions[1] self._majorDbg = ir_versions[2] self._minorDbg = ir_versions[3] self._supported_ccs = get_supported_ccs() @property def data_layout(self): if (self._majorIR, self._minorIR) < (1, 8): return _datalayout_original else: return _datalayout_i128 @property def supported_ccs(self): return self._supported_ccs def get_version(self): major = c_int() minor = c_int() err = self.nvvmVersion(byref(major), byref(minor)) self.check_error(err, 'Failed to get version.') return major.value, minor.value def get_ir_version(self): majorIR = c_int() minorIR = c_int() majorDbg = c_int() minorDbg = c_int() err = self.nvvmIRVersion(byref(majorIR), byref(minorIR), byref(majorDbg), byref(minorDbg)) self.check_error(err, 'Failed to get IR version.') return majorIR.value, minorIR.value, majorDbg.value, minorDbg.value def check_error(self, error, msg, exit=False): if error: exc = NvvmError(msg, RESULT_CODE_NAMES[error]) if exit: print(exc) sys.exit(1) else: raise exc
NVVM
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/black/cases/annotations.py
{ "start": 28, "end": 224 }
class ____: def foo(self): if True: content_ids: Mapping[ str, Optional[ContentId] ] = self.publisher_content_store.store_config_contents(files)
Foo
python
doocs__leetcode
solution/0100-0199/0146.LRU Cache/Solution.py
{ "start": 0, "end": 162 }
class ____: def __init__(self, key: int = 0, val: int = 0): self.key = key self.val = val self.prev = None self.next = None
Node
python
Lightning-AI__lightning
tests/tests_pytorch/strategies/test_single_device.py
{ "start": 2415, "end": 3073 }
class ____: ... def test_strategy_pickle(): strategy = SingleDeviceStrategy("cpu") optimizer = MockOptimizer() strategy.optimizers = [optimizer] assert isinstance(strategy.optimizers[0], MockOptimizer) assert isinstance(strategy._lightning_optimizers[0], LightningOptimizer) state = pickle.dumps(strategy) # dumping did not get rid of the lightning optimizers assert isinstance(strategy._lightning_optimizers[0], LightningOptimizer) strategy_reloaded = pickle.loads(state) # loading restores the lightning optimizers assert isinstance(strategy_reloaded._lightning_optimizers[0], LightningOptimizer)
MockOptimizer
python
sphinx-doc__sphinx
tests/roots/test-ext-autodoc/target/docstring_signature.py
{ "start": 0, "end": 33 }
class ____: """A(foo, bar)"""
A
python
mlflow__mlflow
mlflow/gateway/app.py
{ "start": 1795, "end": 7308 }
class ____(FastAPI): def __init__(self, config: GatewayConfig, limiter: Limiter, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) self.state.limiter = limiter self.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) self.dynamic_endpoints: dict[str, EndpointConfig] = { endpoint.name: endpoint for endpoint in config.endpoints } self.traffic_routes: dict[str, TrafficRouteConfig] = { route.name: route for route in (config.routes or []) } # config API routes for name in self.dynamic_endpoints.keys() | self.traffic_routes.keys(): # TODO: Remove deployments server URLs after deprecation window elapses self.add_api_route( path=(MLFLOW_DEPLOYMENTS_ENDPOINTS_BASE + name + MLFLOW_DEPLOYMENTS_QUERY_SUFFIX), endpoint=_get_endpoint_handler(self, name, limiter, "deployments"), methods=["POST"], ) self.add_api_route( path=f"{MLFLOW_GATEWAY_ROUTE_BASE}{name}{MLFLOW_QUERY_SUFFIX}", endpoint=_get_endpoint_handler(self, name, limiter, "gateway"), methods=["POST"], include_in_schema=False, ) def _get_provider_by_name(self, name: str) -> tuple[Provider, EndpointType]: """ If the name is an endpoint name, return the endpoint's provider If the name is a traffic route name, return a `TrafficRouteProvider` """ from mlflow.gateway.providers.base import TrafficRouteProvider if name in self.dynamic_endpoints: config = self.dynamic_endpoints[name] return get_provider(config.model.provider)(config), config.endpoint_type if name in self.traffic_routes: route_config = self.traffic_routes[name] endpoint_configs = [ self.dynamic_endpoints[destination.name] for destination in route_config.destinations ] traffic_splits = [ destination.traffic_percentage for destination in route_config.destinations ] return TrafficRouteProvider( endpoint_configs, traffic_splits, route_config.routing_strategy, ), route_config.task_type raise MlflowException.invalid_parameter_value(f"Invalid endpoint / route name: '{name}'") def get_dynamic_endpoint(self, endpoint_name: str) -> Endpoint | None: return r.to_endpoint() if (r := self.dynamic_endpoints.get(endpoint_name)) else None def _get_legacy_dynamic_route(self, route_name: str) -> _LegacyRoute | None: return r._to_legacy_route() if (r := self.dynamic_endpoints.get(route_name)) else None def _translate_http_exception(func): """ Decorator for translating MLflow exceptions to HTTP exceptions """ @functools.wraps(func) async def wrapper(*args, **kwargs): try: return await func(*args, **kwargs) except AIGatewayException as e: raise HTTPException(status_code=e.status_code, detail=e.detail) return wrapper def _create_chat_endpoint(prov: Provider): # https://slowapi.readthedocs.io/en/latest/#limitations-and-known-issues @_translate_http_exception async def _chat( request: Request, payload: chat.RequestPayload ) -> chat.ResponsePayload | chat.StreamResponsePayload: if payload.stream: return await make_streaming_response(prov.chat_stream(payload)) else: return await prov.chat(payload) return _chat def _create_completions_endpoint(prov: Provider): @_translate_http_exception async def _completions( request: Request, payload: completions.RequestPayload ) -> completions.ResponsePayload | completions.StreamResponsePayload: if payload.stream: return await make_streaming_response(prov.completions_stream(payload)) else: return await prov.completions(payload) return _completions def _create_embeddings_endpoint(prov: Provider): @_translate_http_exception async def _embeddings( request: Request, payload: embeddings.RequestPayload ) -> embeddings.ResponsePayload: return await prov.embeddings(payload) return _embeddings async def _custom(request: Request): return request.json() def _get_endpoint_handler(gateway_api: GatewayAPI, name: str, limiter: Limiter, key: str): endpoint_type_to_factory = { EndpointType.LLM_V1_CHAT: _create_chat_endpoint, EndpointType.LLM_V1_COMPLETIONS: _create_completions_endpoint, EndpointType.LLM_V1_EMBEDDINGS: _create_embeddings_endpoint, } provider, endpoint_type = gateway_api._get_provider_by_name(name) if factory := endpoint_type_to_factory.get(endpoint_type): handler = factory(provider) if name in gateway_api.dynamic_endpoints: limit = gateway_api.dynamic_endpoints[name].limit else: limit = None if limit: limit_value = f"{limit.calls}/{limit.renewal_period}" handler.__name__ = f"{handler.__name__}_{name}_{key}" return limiter.limit(limit_value)(handler) else: return handler raise HTTPException( status_code=404, detail=f"Unexpected route type {endpoint_type!r} for route {name!r}.", )
GatewayAPI
python
cython__cython
tests/run/test_grammar.py
{ "start": 10420, "end": 10624 }
class ____: def __init__(self): self._dct = {} def __setitem__(self, item, value): self._dct[item.lower()] = value def __getitem__(self, item): return self._dct[item]
CNS
python
huggingface__transformers
tests/repo_utils/test_check_copies.py
{ "start": 4681, "end": 5460 }
class ____: attr_1 = 1 attr_2 = 3 def __init__(self, a=1, b=2): self.a = a self.b = b # Ignore copy def only_in_roberta_to_be_ignored(self, c): return 3 # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def only_in_roberta_not_ignored(self, c): return 2 def existing_common(self, c): return 4 def existing_diff_not_ignored(self, c): return 5 # Ignore copy def existing_diff_to_be_ignored(self, c): return 6 """ EXPECTED_REPLACED_CODE = """ # Copied from transformers.models.dummy_bert_no_match.modeling_dummy_bert_no_match.BertDummyModel with BertDummy->RobertaBertDummy
RobertaBertDummyModel