column       type            values / lengths
language     stringclasses   1 value
repo         stringclasses   346 values
path         stringlengths   6-201
class_span   dict
source       stringlengths   21-2.38M
target       stringlengths   1-96
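Read as a table, each record below supplies one masked Python class: `source` holds the class body with its name replaced by the placeholder `____`, `target` holds the removed name, and `class_span` appears to give the character offsets of the class within the file at `path`. As a rough illustration (not the official loader), the sketch below reads one record with the Hugging Face `datasets` library; the dataset identifier and the `train` split are placeholders, not names taken from this document.

```python
# Minimal sketch, assuming the data is published as a Hugging Face dataset.
# "your-org/masked-class-names" and the "train" split are assumptions.
from datasets import load_dataset

ds = load_dataset("your-org/masked-class-names", split="train")
row = ds[0]

print(row["repo"], row["path"])  # repository slug and file path of the class
print(row["class_span"])         # character offsets of the class in that file
print(row["target"])             # the masked class name
print(row["source"][:80])        # class definition with the name replaced by "____"
```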

language:   python
repo:       walkccc__LeetCode
path:       solutions/2714. Find Shortest Path with K Hops/2714.py
class_span: { "start": 0, "end": 1172 }
source:
class ____: # Similar to 787. Cheapest Flights Within K Stops def shortestPathWithHops( self, n: int, edges: list[list[int]], s: int, d: int, k: int, ) -> int: graph = [[] for _ in range(n)] for u, v, w in edges: graph[u].append((v, w)) graph[v].append((u, w)) return self._dijkstra(graph, s, d, k) def _dijkstra( self, graph: list[list[tuple[int, int]]], src: int, dst: int, k: int, ) -> int: dist = [[math.inf for _ in range(k + 1)] for _ in range(len(graph))] dist[src][k] = 0 minHeap = [(dist[src][k], src, k)] # (d, u, hops) while minHeap: d, u, hops = heapq.heappop(minHeap) if u == dst: return d if dist[u][hops] > d: continue for v, w in graph[u]: # Go from u -> v with w cost. if d + w < dist[v][hops]: dist[v][hops] = d + w heapq.heappush(minHeap, (dist[v][hops], v, hops)) # Hop from u -> v with 0 cost. if hops > 0 and d < dist[v][hops - 1]: dist[v][hops - 1] = d heapq.heappush(minHeap, (dist[v][hops - 1], v, hops - 1))
target:     Solution
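The record above shows the masking convention in full: the original `class Solution:` header has become `class ____:` in `source`, while `Solution` is stored separately in `target`. A minimal sketch of undoing the mask for inspection or evaluation, assuming exactly one `____` placeholder per record (which appears to hold for the rows shown here):

```python
def unmask(row: dict) -> str:
    """Substitute the target class name back into the masked source.

    Assumes a single `____` placeholder per record.
    """
    return row["source"].replace("____", row["target"], 1)

# For the record above, this yields source text beginning with "class Solution:".
```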

language:   python
repo:       dagster-io__dagster
path:       examples/project_analytics/dagster_pypi/resources.py
class_span: { "start": 942, "end": 1321 }
source:
class ____(PyPiResource): input_file: str = Field(description="Path to the sample pypi input file") def get_pypi_download_counts(self, date) -> pd.DataFrame: print("Pretending to fetch for a given date: ", date) df = pd.read_csv(self.input_file) df["download_date"] = datetime.datetime.strptime(date, "%Y-%m-%d") return df
target:     PyPiLocalResource

language:   python
repo:       Textualize__textual
path:       docs/examples/widgets/progress_bar_isolated.py
class_span: { "start": 171, "end": 1047 }
source:
class ____(App[None]): BINDINGS = [("s", "start", "Start")] progress_timer: Timer """Timer to simulate progress happening.""" def compose(self) -> ComposeResult: with Center(): with Middle(): yield ProgressBar() yield Footer() def on_mount(self) -> None: """Set up a timer to simulate progess happening.""" self.progress_timer = self.set_interval(1 / 10, self.make_progress, pause=True) def make_progress(self) -> None: """Called automatically to advance the progress bar.""" self.query_one(ProgressBar).advance(1) def action_start(self) -> None: """Start the progress tracking.""" self.query_one(ProgressBar).update(total=100) self.progress_timer.resume() if __name__ == "__main__": IndeterminateProgressBar().run()
target:     IndeterminateProgressBar

language:   python
repo:       kamyu104__LeetCode-Solutions
path:       Python/sum-of-consecutive-subsequences.py
class_span: { "start": 94, "end": 746 }
source:
class ____(object): def getSum(self, nums): """ :type nums: List[int] :rtype: int """ def count(d): result = 0 cnt = collections.defaultdict(int) prefix = collections.defaultdict(int) for x in nums: c = (cnt[x-d]+1)%MOD cnt[x] = (cnt[x]+c)%MOD total = (prefix[x-d]+x*c)%MOD prefix[x] = (prefix[x]+total)%MOD result = (result+total)%MOD return result MOD = 10**9+7 return (count(+1)+count(-1)-reduce(lambda accu, x: (accu+x)%MOD, nums, 0))%MOD
target:     Solution

language:   python
repo:       sphinx-doc__sphinx
path:       sphinx/domains/python/__init__.py
class_span: { "start": 1699, "end": 1823 }
source:
class ____(NamedTuple): docname: str node_id: str synopsis: str platform: str deprecated: bool
target:     ModuleEntry

language:   python
repo:       airbytehq__airbyte
path:       airbyte-integrations/connectors/source-github/source_github/github_schema.py
class_span: { "start": 837506, "end": 839069 }
source:
class ____(sgqlc.types.Type): """A curatable list of repositories relating to a repository owner, which defaults to showing the most popular repositories they own. """ __schema__ = github_schema __field_names__ = ("has_pinned_items", "items") has_pinned_items = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasPinnedItems") """Whether or not the owner has pinned any repositories or gists.""" items = sgqlc.types.Field( sgqlc.types.non_null(PinnableItemConnection), graphql_name="items", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """The repositories and gists in the showcase. If the profile owner has any pinned items, those will be returned. Otherwise, the profile owner's popular repositories will be returned. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """
target:     ProfileItemShowcase

language:   python
repo:       pola-rs__polars
path:       py-polars/src/polars/_utils/udfs.py
class_span: { "start": 1483, "end": 10357 }
source:
class ____: BINARY: ClassVar[dict[str, str]] = { "BINARY_ADD": "+", "BINARY_AND": "&", "BINARY_FLOOR_DIVIDE": "//", "BINARY_LSHIFT": "<<", "BINARY_RSHIFT": ">>", "BINARY_MODULO": "%", "BINARY_MULTIPLY": "*", "BINARY_OR": "|", "BINARY_POWER": "**", "BINARY_SUBTRACT": "-", "BINARY_TRUE_DIVIDE": "/", "BINARY_XOR": "^", } CALL = frozenset({"CALL"} if _MIN_PY311 else {"CALL_FUNCTION", "CALL_METHOD"}) CONTROL_FLOW: ClassVar[dict[str, str]] = ( { "POP_JUMP_FORWARD_IF_FALSE": "&", "POP_JUMP_FORWARD_IF_TRUE": "|", "JUMP_IF_FALSE_OR_POP": "&", "JUMP_IF_TRUE_OR_POP": "|", } # note: 3.12 dropped POP_JUMP_FORWARD_IF_* opcodes if _MIN_PY311 and not _MIN_PY312 else { "POP_JUMP_IF_FALSE": "&", "POP_JUMP_IF_TRUE": "|", "JUMP_IF_FALSE_OR_POP": "&", "JUMP_IF_TRUE_OR_POP": "|", } ) LOAD_VALUES = frozenset(("LOAD_CONST", "LOAD_DEREF", "LOAD_FAST", "LOAD_GLOBAL")) LOAD_ATTR = frozenset({"LOAD_METHOD", "LOAD_ATTR"}) LOAD = LOAD_VALUES | LOAD_ATTR SIMPLIFY_SPECIALIZED: ClassVar[dict[str, str]] = { "LOAD_FAST_BORROW": "LOAD_FAST", "LOAD_SMALL_INT": "LOAD_CONST", } SYNTHETIC: ClassVar[dict[str, int]] = { "POLARS_EXPRESSION": 1, } UNARY: ClassVar[dict[str, str]] = { "UNARY_NEGATIVE": "-", "UNARY_POSITIVE": "+", "UNARY_NOT": "~", } PARSEABLE_OPS = frozenset( {"BINARY_OP", "BINARY_SUBSCR", "COMPARE_OP", "CONTAINS_OP", "IS_OP"} | set(UNARY) | set(CONTROL_FLOW) | set(SYNTHETIC) | LOAD_VALUES ) MATCHABLE_OPS = ( set(SIMPLIFY_SPECIALIZED) | PARSEABLE_OPS | set(BINARY) | LOAD_ATTR | CALL ) UNARY_VALUES = frozenset(UNARY.values()) # math module funcs that we can map to native expressions _MATH_FUNCTIONS = frozenset( ( "acos", "acosh", "asin", "asinh", "atan", "atanh", "cbrt", "ceil", "cos", "cosh", "degrees", "exp", "floor", "log", "log10", "log1p", "pow", "radians", "sin", "sinh", "sqrt", "tan", "tanh", ) ) # numpy functions that we can map to native expressions _NUMPY_MODULE_ALIASES = frozenset(("np", "numpy")) _NUMPY_FUNCTIONS = frozenset( ( # "abs", # TODO: this one clashes with Python builtin abs "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctanh", "cbrt", "ceil", "cos", "cosh", "degrees", "exp", "floor", "log", "log10", "log1p", "radians", "sign", "sin", "sinh", "sqrt", "tan", "tanh", ) ) # python attrs/funcs that map to native expressions _PYTHON_ATTRS_MAP = { "date": "dt.date()", "day": "dt.day()", "hour": "dt.hour()", "microsecond": "dt.microsecond()", "minute": "dt.minute()", "month": "dt.month()", "second": "dt.second()", "year": "dt.year()", } _PYTHON_CASTS_MAP = {"float": "Float64", "int": "Int64", "str": "String"} _PYTHON_BUILTINS = frozenset(_PYTHON_CASTS_MAP) | {"abs"} _PYTHON_METHODS_MAP = { # string "endswith": "str.ends_with", "lower": "str.to_lowercase", "lstrip": "str.strip_chars_start", "removeprefix": "str.strip_prefix", "removesuffix": "str.strip_suffix", "replace": "str.replace", "rstrip": "str.strip_chars_end", "startswith": "str.starts_with", "strip": "str.strip_chars", "title": "str.to_titlecase", "upper": "str.to_uppercase", "zfill": "str.zfill", # temporal "date": "dt.date", "day": "dt.day", "hour": "dt.hour", "isoweekday": "dt.weekday", "microsecond": "dt.microsecond", "month": "dt.month", "second": "dt.second", "strftime": "dt.strftime", "time": "dt.time", "year": "dt.year", } _MODULE_FUNCTIONS: list[dict[str, list[AbstractSet[str]]]] = [ # lambda x: numpy.func(x) # lambda x: numpy.func(CONSTANT) { "argument_1_opname": [{"LOAD_FAST", "LOAD_CONST"}], "argument_2_opname": [], "module_opname": [OpNames.LOAD_ATTR], "attribute_opname": [], "module_name": [_NUMPY_MODULE_ALIASES], 
"attribute_name": [], "function_name": [_NUMPY_FUNCTIONS], }, # lambda x: math.func(x) # lambda x: math.func(CONSTANT) { "argument_1_opname": [{"LOAD_FAST", "LOAD_CONST"}], "argument_2_opname": [], "module_opname": [OpNames.LOAD_ATTR], "attribute_opname": [], "module_name": [{"math"}], "attribute_name": [], "function_name": [_MATH_FUNCTIONS], }, # lambda x: json.loads(x) { "argument_1_opname": [{"LOAD_FAST"}], "argument_2_opname": [], "module_opname": [OpNames.LOAD_ATTR], "attribute_opname": [], "module_name": [{"json"}], "attribute_name": [], "function_name": [{"loads"}], }, # lambda x: datetime.strptime(x, CONSTANT) { "argument_1_opname": [{"LOAD_FAST"}], "argument_2_opname": [{"LOAD_CONST"}], "module_opname": [OpNames.LOAD_ATTR], "attribute_opname": [], "module_name": [{"datetime"}], "attribute_name": [], "function_name": [{"strptime"}], "check_load_global": False, # type: ignore[dict-item] }, # lambda x: module.attribute.func(x, CONSTANT) { "argument_1_opname": [{"LOAD_FAST"}], "argument_2_opname": [{"LOAD_CONST"}], "module_opname": [{"LOAD_ATTR"}], "attribute_opname": [OpNames.LOAD_ATTR], "module_name": [{"datetime", "dt"}], "attribute_name": [{"datetime"}], "function_name": [{"strptime"}], "check_load_global": False, # type: ignore[dict-item] }, ] # In addition to `lambda x: func(x)`, also support cases when a unary operation # has been applied to `x`, like `lambda x: func(-x)` or `lambda x: func(~x)`. _MODULE_FUNCTIONS = [ {**kind, "argument_1_unary_opname": unary} # type: ignore[dict-item] for kind in _MODULE_FUNCTIONS for unary in [[set(OpNames.UNARY)], []] ] # Lookup for module functions that have different names as polars expressions _MODULE_FUNC_TO_EXPR_NAME = { "math.acos": "arccos", "math.acosh": "arccosh", "math.asin": "arcsin", "math.asinh": "arcsinh", "math.atan": "arctan", "math.atanh": "arctanh", "json.loads": "str.json_decode", } _RE_IMPLICIT_BOOL = re.compile(r'pl\.col\("([^"]*)"\) & pl\.col\("\1"\)\.(.+)') _RE_SERIES_NAMES = re.compile(r"^(s|srs\d?|series)\.") _RE_STRIP_BOOL = re.compile(r"^bool\((.+)\)$") def _get_all_caller_variables() -> dict[str, Any]: """Get all local and global variables from caller's frame.""" pkg_dir = Path(__file__).parent.parent # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 try: while frame: fname = inspect.getfile(frame) if fname.startswith(str(pkg_dir)): frame = frame.f_back n += 1 else: break variables: dict[str, Any] if frame is None: variables = {} else: variables = {**frame.f_locals, **frame.f_globals} finally: # https://docs.python.org/3/library/inspect.html # > Though the cycle detector will catch these, destruction of the frames # > (and local variables) can be made deterministic by removing the cycle # > in a finally clause. del frame return variables def _get_target_name(col: str, expression: str, map_target: str) -> str: """The name of the object against which the 'map' is being invoked.""" col_expr = f'pl.col("{col}")' if map_target == "expr": return col_expr elif map_target == "series": if _RE_SERIES_NAMES.match(expression): return expression.split(".", 1)[0] # note: handle overlapping name from global variables; fallback # through "s", "srs", "series" and (finally) srs0 -> srsN... 
search_expr = expression.replace(col_expr, "") for name in ("s", "srs", "series"): if not re.search(rf"\b{name}\b", search_expr): return name n = count() while True: name = f"srs{next(n)}" if not re.search(rf"\b{name}\b", search_expr): return name msg = f"TODO: map_target = {map_target!r}" raise NotImplementedError(msg)
target:     OpNames

language:   python
repo:       skorch-dev__skorch
path:       skorch/hf.py
class_span: { "start": 29739, "end": 42162 }
source:
class ____: """Mixin class to add support for Hugging Face accelerate This is an *experimental* feature. Use this mixin class with one of the neural net classes (e.g. ``NeuralNet``, ``NeuralNetClassifier``, or ``NeuralNetRegressor``) and pass an instance of ``Accelerator`` for mixed precision, multi-GPU, or TPU training. Install the accelerate library using: .. code-block:: python -m pip install accelerate skorch does not itself provide any facilities to enable these training features. A lot of them can still be implemented by the user with a little bit of extra work but it can be a daunting task. That is why this helper class was added: Using this mixin in conjunction with the accelerate library should cover a lot of common use cases. .. note:: Under the hood, accelerate uses :class:`~torch.cuda.amp.GradScaler`, which does not support passing the training step as a closure. Therefore, if your optimizer requires that (e.g. :class:`torch.optim.LBFGS`), you cannot use accelerate. .. warning:: Since accelerate is still quite young and backwards compatiblity breaking features might be added, we treat its integration as an experimental feature. When accelerate's API stabilizes, we will consider adding it to skorch proper. Also, models accelerated this way cannot be pickled. If you need to save and load the net, either use :py:meth:`skorch.net.NeuralNet.save_params` and :py:meth:`skorch.net.NeuralNet.load_params` or don't use ``accelerate``. Examples -------- >>> from skorch import NeuralNetClassifier >>> from skorch.hf import AccelerateMixin >>> from accelerate import Accelerator >>> >>> class AcceleratedNet(AccelerateMixin, NeuralNetClassifier): ... '''NeuralNetClassifier with accelerate support''' >>> >>> accelerator = Accelerator(...) >>> # you may pass gradient_accumulation_steps to enable grad accumulation >>> net = AcceleratedNet(MyModule, accelerator=accelerator) >>> net.fit(X, y) The same approach works with all the other skorch net classes. Parameters ---------- accelerator : accelerate.Accelerator In addition to the usual parameters, pass an instance of ``accelerate.Accelerator`` with the desired settings. device : str, torch.device, or None (default=None) The compute device to be used. When using accelerate, it is recommended to leave device handling to accelerate. Therefore, it is best to leave this argument to be None, which means that skorch does not set the device. unwrap_after_train : bool (default=True) By default, with this option being ``True``, the module(s) and criterion are automatically "unwrapped" after training. This means that their initial state -- from before they were prepared by the ``accelerator`` -- is restored. This is necessary to pickle the net. There are circumstances where you might want to disable this behavior. For instance, when you want to further train the model with AMP enabled (using ``net.partial_fit`` or ``warm_start=True``). Also, unwrapping the modules means that the advantage of using mixed precision is lost during inference. In those cases, if you don't need to pickle the net, you should set ``unwrap_after_train=False``. callbacks__print_log__sink : 'auto' or callable If 'auto', uses the ``print`` function of the accelerator, if it has one. This avoids printing the same output multiple times when training concurrently on multiple machines. If the accelerator does not have a ``print`` function, use Python's ``print`` function instead. 
""" def __init__( self, *args, accelerator, device=None, unwrap_after_train=True, callbacks__print_log__sink='auto', **kwargs ): super().__init__( *args, device=device, callbacks__print_log__sink=callbacks__print_log__sink, **kwargs ) self.accelerator = accelerator self.unwrap_after_train = unwrap_after_train self._wrapped_with_accelerator = False def _validate_params(self): super()._validate_params() if self.accelerator.device_placement and (self.device is not None): raise ValueError( "When device placement is performed by the accelerator, set device=None" ) def _initialize_accelerator(self): """Prepare everything for use with accelerate""" if self._wrapped_with_accelerator: return self with self._current_init_context('criterion'): for name in self._criteria: criterion = getattr(self, name + '_') if isinstance(criterion, torch.nn.Module): setattr(self, name + '_', self.accelerator.prepare(criterion)) with self._current_init_context('module'): for name in self._modules: module = getattr(self, name + '_') if isinstance(module, torch.nn.Module): setattr(self, name + '_', self.accelerator.prepare(module)) with self._current_init_context('optimizer'): for name in self._optimizers: optimizer = getattr(self, name + '_') if isinstance(optimizer, torch.optim.Optimizer): setattr(self, name + '_', self.accelerator.prepare(optimizer)) for _, callback in self.callbacks_: if isinstance(callback, LRScheduler): callback.policy_ = self.accelerator.prepare(callback.policy_) self._wrapped_with_accelerator = True return self def initialize(self): """Initializes all of its components and returns self.""" # this should be the same as the parent class, except for the one marked # line self.check_training_readiness() self._initialize_virtual_params() self._initialize_callbacks() self._initialize_module() self._initialize_criterion() self._initialize_optimizer() self._initialize_history() self._initialize_accelerator() # <= added self._validate_params() self.initialized_ = True return self def _initialize_callbacks(self): if self.callbacks__print_log__sink == 'auto': print_func = getattr(self.accelerator, 'print', print) self.callbacks__print_log__sink = print_func super()._initialize_callbacks() return self def train_step(self, batch, **fit_params): # Call training step within the accelerator context manager with self.accelerator.accumulate(self.module_): # Why are we passing only module_ here, even though there might be # other modules as well? First of all, there is no possibility to # pass multiple modules. Second, the module_ is only used to # determine if Distributed Data Parallel is being used, not for # anything else. Therefore, passing module_ should be sufficient # most of the time. return super().train_step(batch, **fit_params) def train_step_single(self, batch, **fit_params): self._set_training(True) Xi, yi = unpack_data(batch) with self.accelerator.autocast(): y_pred = self.infer(Xi, **fit_params) loss = self.get_loss(y_pred, yi, X=Xi, training=True) self.accelerator.backward(loss) return { 'loss': loss, 'y_pred': y_pred, } def get_iterator(self, *args, **kwargs): iterator = super().get_iterator(*args, **kwargs) iterator = self.accelerator.prepare(iterator) return iterator def _step_optimizer(self, step_fn): # We cannot step_fn as a 'closure' to .step because GradScaler doesn't # suppor it: # https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler.step # Therefore, we need to call step_fn explicitly and step without # argument. 
step_fn() for name in self._optimizers: optimizer = getattr(self, name + '_') optimizer.step() def _unwrap_accelerator(self): if not self._wrapped_with_accelerator: return for name in self._modules + self._criteria: module = getattr(self, name + '_') if isinstance(module, torch.nn.Module): orig = self.accelerator.unwrap_model(module, keep_fp32_wrapper=False) setattr(self, name + '_', orig) self._wrapped_with_accelerator = False # pylint: disable=unused-argument def on_train_end(self, net, X=None, y=None, **kwargs): self.accelerator.wait_for_everyone() super().on_train_end(net, X=X, y=y, **kwargs) if self.unwrap_after_train: self._unwrap_accelerator() return self def evaluation_step(self, batch, training=False): # More context: # https://github.com/skorch-dev/skorch/issues/944 # https://huggingface.co/docs/accelerate/quicktour#distributed-evaluation output = super().evaluation_step(batch, training=training) y_pred = self.accelerator.gather_for_metrics(output) return y_pred # pylint: disable=missing-function-docstring def save_params(self, *args, **kwargs): # has to be called even if not main process, or else there is a dead lock self.accelerator.wait_for_everyone() if not self._wrapped_with_accelerator: if self.accelerator.is_main_process: super().save_params(*args, **kwargs) else: # A potential issue with using accelerate is that a model that has # been prepared with accelerate is wrapped, so that the keys of the # state dict have an additional prefix, "module.". Therefore, when # the model is unwrapped when saving and wrapped when loading, or # vice versa, there will be a mismatch in the state dict keys. To # prevent this, always unwrap before saving. During loading, in case # the model is wrapped, this would result in an error, but we take # care of unwrapping the model in that case during loading. self._unwrap_accelerator() try: # note: although saving is only done on the main process, # unwrapping+wrapping has to be done on all processes, or else # there is an error, not sure why if self.accelerator.is_main_process: super().save_params(*args, **kwargs) finally: self._initialize_accelerator() # pylint: disable=missing-function-docstring def load_params(self, *args, **kwargs): self.accelerator.wait_for_everyone() prev_device = self.device if self.device is None: self.device = 'cpu' try: if not self._wrapped_with_accelerator: super().load_params(*args, **kwargs) else: # A potential issue with using accelerate is that a model that # has been prepared with accelerate is wrapped, so that the keys # of the state dict have an additional prefix, "module.". # Therefore, when the model is unwrapped when saving and wrapped # when loading, or vice versa, there will be a mismatch in the # state dict keys. Here, we always unwrap the model first before # loading (1st case). This would still result in an error in the # 2nd case, but we take care of unwrapping the model in that # case during saving. self._unwrap_accelerator() try: super().load_params(*args, **kwargs) finally: self._initialize_accelerator() finally: # ensure that the device remains unchanged in case it was None # before calling load_params self.device = prev_device
target:     AccelerateMixin

language:   python
repo:       huggingface__transformers
path:       src/transformers/models/kosmos2_5/modeling_kosmos2_5.py
class_span: { "start": 68900, "end": 75303 }
source:
class ____(Kosmos2_5PreTrainedModel): config_class = Kosmos2_5TextConfig input_modalities = ("text",) _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} def __init__(self, config: Kosmos2_5TextConfig): super().__init__(config) self.model = Kosmos2_5TextTransformer(config) self.lm_head = nn.Linear(in_features=config.embed_dim, out_features=config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self) -> nn.Module: return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(KOSMOS2_5_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=Kosmos2_5TextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithCrossAttentions: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: """ if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False outputs: BaseModelOutputWithPastAndCrossAttentions = self.model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, image_embeds=None, image_embeds_position_mask=None, past_key_values=None, attention_mask=None, use_cache=None, cache_position=None, position_ids=None, **model_kwargs, ): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = 
input_ids.new_ones(input_shape) position_ids = None # cut input_ids if past_key_values is used if past_key_values is not None: position_ids = Kosmos2_5TextSinusoidalPositionalEmbedding.create_position_ids_from_input_ids( input_ids, padding_idx=self.config.pad_token_id, past_key_values_length=0, )[:, -cache_position.shape[0] :] input_ids = input_ids[:, -cache_position.shape[0] :] # the image info. is already encoded into the past keys/values if past_key_values.get_seq_length() > 0: image_embeds = None image_embeds_position_mask = None elif image_embeds_position_mask is not None: # appending `False` to `image_embeds_position_mask` (because `input_ids` grows during generation) batch_size, seq_len = input_ids.size() mask_len = image_embeds_position_mask.size()[-1] image_embeds_position_mask = torch.cat( ( image_embeds_position_mask, torch.zeros(size=(batch_size, seq_len - mask_len), dtype=torch.bool, device=input_ids.device), ), dim=1, ) model_inputs = { "input_ids": input_ids, "image_embeds": image_embeds, "image_embeds_position_mask": image_embeds_position_mask, "past_key_values": past_key_values, "attention_mask": attention_mask, "position_ids": position_ids, "use_cache": use_cache, } # Forward ALL kwargs that are uninitialized (e.g. `use_cache`). for key, value in model_kwargs.items(): if key not in model_inputs: model_inputs[key] = value return model_inputs @add_start_docstrings( """ KOSMOS-2.5 Model for generating text and bounding boxes given an image. The model consists of a vision encoder and a language model. """, KOSMOS2_5_START_DOCSTRING, )
target:     Kosmos2_5TextForCausalLM

language:   python
repo:       dagster-io__dagster
path:       python_modules/libraries/dagster-shared/dagster_shared/serdes/serdes.py
class_span: { "start": 27782, "end": 29256 }
source:
class ____(ObjectSerializer[T_NamedTuple]): def object_as_mapping(self, value: T_NamedTuple) -> Mapping[str, Any]: if is_record(value): return as_dict_for_new(value) # Value is always a NamedTuple, we just can't express that in the type of T_NamedTuple. return value._asdict() # type: ignore @cached_property def constructor_param_names(self) -> Sequence[str]: # pyright: ignore[reportIncompatibleMethodOverride] if has_generated_new(self.klass): return list(get_record_annotations(self.klass).keys()) names = [] for name, parameter in signature(self.klass.__new__).parameters.items(): if parameter.kind is Parameter.VAR_POSITIONAL: check.failed("Can not use positional args capture on serdes object.") elif parameter.kind is Parameter.VAR_KEYWORD: names.extend( check.not_none( self.kwargs_fields, "Must specify kwargs_fields when using kwarg capture in __new__.", ) ) else: names.append(name) return names # Alias for clarity-- see note on `T_NamedTuple` for the relationship between `NamedTuple` and # `@record`-decorated classes. RecordSerializer = NamedTupleSerializer T_Dataclass = TypeVar("T_Dataclass", bound="DataclassInstance", default="DataclassInstance")
target:     NamedTupleSerializer

language:   python
repo:       fastai__fastai
path:       fastai/data/core.py
class_span: { "start": 8293, "end": 12395 }
source:
class ____(GetAttr): "Basic wrapper around several `DataLoader`s." _default='train' def __init__(self, *loaders, # `DataLoader` objects to wrap path:str|Path='.', # Path to store export objects device=None # Device to put `DataLoaders` ): self.loaders,self.path = list(loaders),Path(path) if device is not None and (loaders!=() and hasattr(loaders[0],'to')): self.device = device def __getitem__(self, i): return self.loaders[i] def __len__(self): return len(self.loaders) def new_empty(self): loaders = [dl.new(dl.dataset.new_empty()) for dl in self.loaders] return type(self)(*loaders, path=self.path, device=self.device) def _set(i, self, v): self.loaders[i] = v train ,valid = add_props(lambda i,x: x[i], _set) train_ds,valid_ds = add_props(lambda i,x: x[i].dataset) @property def device(self): return self._device @device.setter def device(self, d # Device to put `DataLoaders` ): for dl in self.loaders: dl.to(d) self._device = d def to(self, device # Device to put `DataLoaders` ): self.device = device return self def _add_tfms(self, tfms, event, dl_idx): "Adds `tfms` to `event` on `dl`" if(isinstance(dl_idx,str)): dl_idx = 0 if(dl_idx=='train') else 1 dl_tfms = getattr(self[dl_idx], event) apply(dl_tfms.add, tfms) def add_tfms(self, tfms, # List of `Transform`(s) or `Pipeline` to apply event, # When to run `Transform`. Events mentioned in `TfmdDL` loaders=None # List of `DataLoader` objects to add `tfms` to ): "Adds `tfms` to `events` on `loaders`" if(loaders is None): loaders=range(len(self.loaders)) if not is_listy(loaders): loaders = listify(loaders) for loader in loaders: self._add_tfms(tfms,event,loader) def cuda(self): return self.to(device=default_device()) def cpu(self): return self.to(device=torch.device('cpu')) @classmethod def from_dsets(cls, *ds, # `Datasets` object(s) path:str|Path='.', # Path to put in `DataLoaders` bs:int=64, # Size of batch device=None, # Device to put `DataLoaders` dl_type=TfmdDL, # Type of `DataLoader` **kwargs ): default = (True,) + (False,) * (len(ds)-1) defaults = {'shuffle': default, 'drop_last': default} tfms = {k:tuple(Pipeline(kwargs[k]) for i in range_of(ds)) for k in _batch_tfms if k in kwargs} kwargs = merge(defaults, {k: tuplify(v, match=ds) for k,v in kwargs.items() if k not in _batch_tfms}, tfms) kwargs = [{k: v[i] for k,v in kwargs.items()} for i in range_of(ds)] return cls(*[dl_type(d, bs=bs, **k) for d,k in zip(ds, kwargs)], path=path, device=device) @classmethod def from_dblock(cls, dblock, # `DataBlock` object source, # Source of data. Can be `Path` to files path:str|Path='.', # Path to put in `DataLoaders` bs:int=64, # Size of batch val_bs:int=None, # Size of batch for validation `DataLoader` shuffle:bool=True, # Whether to shuffle data device=None, # Device to put `DataLoaders` **kwargs ): return dblock.dataloaders(source, path=path, bs=bs, val_bs=val_bs, shuffle=shuffle, device=device, **kwargs) _docs=dict(__getitem__="Retrieve `DataLoader` at `i` (`0` is training, `1` is validation)", train="Training `DataLoader`", valid="Validation `DataLoader`", train_ds="Training `Dataset`", valid_ds="Validation `Dataset`", to="Use `device`", add_tfms="Add `tfms` to `loaders` for `event", cuda="Use accelerator if available", cpu="Use the cpu", new_empty="Create a new empty version of `self` with the same transforms", from_dblock="Create a dataloaders from a given `dblock`") # %% ../../nbs/03_data.core.ipynb 51
target:     DataLoaders

language:   python
repo:       getsentry__sentry
path:       tests/sentry/api/endpoints/test_system_options.py
class_span: { "start": 213, "end": 5344 }
source:
class ____(APITestCase): url = reverse("sentry-api-0-system-options") def test_without_superuser(self) -> None: self.login_as(user=self.user, superuser=False) response = self.client.get(self.url) assert response.status_code == 403 def test_simple(self) -> None: self.login_as(user=self.user, superuser=True) response = self.client.get(self.url) assert response.status_code == 200 assert "system.secret-key" in response.data assert "system.url-prefix" in response.data assert "system.admin-email" in response.data def test_redacted_secret(self) -> None: self.login_as(user=self.user, superuser=True) response = self.client.get(self.url) assert response.status_code == 200 assert response.data["github-login.client-secret"]["value"] == "[redacted]" def test_bad_query(self) -> None: self.login_as(user=self.user, superuser=True) response = self.client.get(self.url, {"query": "nonsense"}) assert response.status_code == 400 assert "nonsense" in response.data def test_required(self) -> None: self.login_as(user=self.user, superuser=True) response = self.client.get(self.url, {"query": "is:required"}) assert response.status_code == 200 assert "system.url-prefix" in response.data def test_not_logged_in(self) -> None: response = self.client.get(self.url) assert response.status_code == 401 response = self.client.put(self.url) assert response.status_code == 401 def test_disabled_smtp(self) -> None: self.login_as(user=self.user, superuser=True) with self.options({"mail.backend": "smtp"}): response = self.client.get(self.url) assert response.status_code == 200 assert response.data["mail.host"]["field"]["disabled"] is False assert response.data["mail.host"]["field"]["disabledReason"] is None with self.options({"mail.backend": "dummy"}): response = self.client.get(self.url) assert response.status_code == 200 assert response.data["mail.host"]["field"]["disabled"] is True assert response.data["mail.host"]["field"]["disabledReason"] == "smtpDisabled" def test_put_user_access_forbidden(self) -> None: self.login_as(user=self.user, superuser=False) response = self.client.put(self.url, {"auth.allow-registration": 1}) assert response.status_code == 403 def test_put_self_hosted_superuser_access_allowed(self) -> None: with override_settings(SENTRY_SELF_HOSTED=True): self.login_as(user=self.user, superuser=True) response = self.client.put(self.url, {"auth.allow-registration": 1}) assert response.status_code == 200 def test_put_int_for_boolean(self) -> None: self.login_as(user=self.user, superuser=True) self.add_user_permission(self.user, "options.admin") response = self.client.put(self.url, {"auth.allow-registration": 1}) assert response.status_code == 200 def test_put_unknown_option(self) -> None: self.login_as(user=self.user, superuser=True) self.add_user_permission(self.user, "options.admin") response = self.client.put(self.url, {"xxx": "lol"}) assert response.status_code == 400 assert response.data["error"] == "unknown_option" def test_put_hardwired_option(self) -> None: with override_options({"system.url-prefix": "cheese"}): self.login_as(user=self.user, superuser=True) self.add_user_permission(self.user, "options.admin") response = self.client.put(self.url, {"system.url-prefix": "bread"}) assert response.status_code == 400 assert response.data["error"] == "immutable_option" def test_allowed_option_without_permission(self) -> None: self.login_as(user=self.user, superuser=True) response = self.client.put(self.url, {"system.admin-email": "new_admin@example.com"}) assert response.status_code == 200 assert 
options.get("system.admin-email") == "new_admin@example.com" def test_put_simple(self) -> None: self.login_as(user=self.user, superuser=True) self.add_user_permission(self.user, "options.admin") assert options.get("mail.host") != "lolcalhost" response = self.client.put(self.url, {"mail.host": "lolcalhost"}) assert response.status_code == 200 assert options.get("mail.host") == "lolcalhost" def test_update_channel(self) -> None: assert options.get_last_update_channel("auth.allow-registration") is None self.login_as(user=self.user, superuser=True) self.add_user_permission(self.user, "options.admin") response = self.client.put(self.url, {"auth.allow-registration": 1}) assert response.status_code == 200 assert ( options.get_last_update_channel("auth.allow-registration") == options.UpdateChannel.APPLICATION )
target:     SystemOptionsTest

language:   python
repo:       python__mypy
path:       mypy/moduleinspect.py
class_span: { "start": 234, "end": 1306 }
source:
class ____: # Note that all __init__ args must have default values def __init__( self, name: str = "", file: str | None = None, path: list[str] | None = None, all: list[str] | None = None, is_c_module: bool = False, subpackages: list[str] | None = None, ) -> None: self.name = name # __name__ attribute self.file = file # __file__ attribute self.path = path # __path__ attribute self.all = all # __all__ attribute self.is_c_module = is_c_module self.subpackages = subpackages or [] def is_c_module(module: ModuleType) -> bool: if module.__dict__.get("__file__") is None: # Could be a namespace package. These must be handled through # introspection, since there is no source file. return True return os.path.splitext(module.__dict__["__file__"])[-1] in [".so", ".pyd", ".dll"] def is_pyc_only(file: str | None) -> bool: return bool(file and file.endswith(".pyc") and not os.path.exists(file[:-1]))
target:     ModuleProperties

language:   python
repo:       dagster-io__dagster
path:       python_modules/dagster/dagster_tests/storage_tests/test_compute_log_manager.py
class_span: { "start": 6244, "end": 6574 }
source:
class ____(TestComputeLogManager): __test__ = True @pytest.fixture(name="compute_log_manager") def compute_log_manager(self): # pyright: ignore[reportIncompatibleMethodOverride] with tempfile.TemporaryDirectory() as tmpdir_path: return LocalComputeLogManager(tmpdir_path)
target:     TestLocalComputeLogManager

language:   python
repo:       anthropics__anthropic-sdk-python
path:       src/anthropic/types/beta/beta_web_search_tool_result_block_param.py
class_span: { "start": 454, "end": 803 }
source:
class ____(TypedDict, total=False): content: Required[BetaWebSearchToolResultBlockParamContentParam] tool_use_id: Required[str] type: Required[Literal["web_search_tool_result"]] cache_control: Optional[BetaCacheControlEphemeralParam] """Create a cache control breakpoint at this content block."""
target:     BetaWebSearchToolResultBlockParam

language:   python
repo:       pytorch__pytorch
path:       torch/_dynamo/variables/functions.py
class_span: { "start": 84482, "end": 85421 }
source:
class ____(UserFunctionVariable): def call_function( self, tx: "InstructionTranslator", args: Sequence[VariableTracker], kwargs: dict[str, VariableTracker], ) -> VariableTracker: if not kwargs and len(args) == 1: def wraps(fn: Any) -> VariableTracker: if isinstance(fn, variables.NestedUserFunctionVariable): return fn.clone(wrapped_fn=args[0]) unimplemented( gb_type="functools.wraps", context=f"{fn}", explanation="`torch.compile` can't trace `functools.wraps` on functions defined outside the compile region", hints=[ *graph_break_hints.SUPPORTABLE, ], ) return variables.LambdaVariable(wraps) return super().call_function(tx, args, kwargs)
target:     FunctoolsWrapsVariable

language:   python
repo:       PrefectHQ__prefect
path:       src/prefect/_experimental/sla/objects.py
class_span: { "start": 1995, "end": 2491 }
source:
class ____(ServiceLevelAgreement): """An SLA that triggers when a flow run does not start within the specified window. For example, if you schedule the deployment to run every day at 2:00pm and you pass within=timedelta(minutes=10) to this SLA, if a run hasn't started by 2:10pm the SLA violation will be recorded. """ within: timedelta = Field( default=..., description="The amount of time before a flow run is considered in violation.", )
target:     LatenessSla

language:   python
repo:       numba__numba
path:       numba/core/typing/context.py
class_span: { "start": 5428, "end": 6332 }
source:
class ____(object): """ A compile-time call frame """ def __init__(self, target, typeinfer, func_id, args): self.typeinfer = typeinfer self.func_id = func_id self.args = args self.target = target self._inferred_retty = set() def __repr__(self): return "CallFrame({}, {})".format(self.func_id, self.args) def add_return_type(self, return_type): """Add *return_type* to the list of inferred return-types. If there are too many, raise `TypingError`. """ # The maximum limit is picked arbitrarily. # Don't think that this needs to be user configurable. RETTY_LIMIT = 16 self._inferred_retty.add(return_type) if len(self._inferred_retty) >= RETTY_LIMIT: m = "Return type of recursive function does not converge" raise errors.TypingError(m)
target:     CallFrame

language:   python
repo:       ray-project__ray
path:       python/ray/tune/tests/execution/utils.py
class_span: { "start": 3558, "end": 4916 }
source:
class ____(Trial): def __init__(self, *args, **kwargs): kwargs.setdefault("storage", mock_storage_context()) super().__init__(*args, **kwargs) def get_trainable_cls(self): return self.trainable_name def create_placement_group_factory(self): self.placement_group_factory = self._default_placement_group_factory def set_ray_actor(self, ray_actor): pass def create_execution_test_objects( max_pending_trials: int = 8, resources: Optional[Dict[str, float]] = None, reuse_actors: bool = True, tune_controller_cls: Type[TuneController] = TuneController, **kwargs, ): os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = str(max_pending_trials) resources = resources or {"CPU": 4} storage = kwargs.pop("storage", mock_storage_context()) tune_controller = tune_controller_cls( reuse_actors=reuse_actors, storage=storage, **kwargs, ) resource_manager = BudgetResourceManager(total_resources=resources) resource_updater = _FakeResourceUpdater(resource_manager) actor_manger = NoopActorManager(resource_manager) tune_controller._actor_manager = actor_manger tune_controller._class_cache = NoopClassCache() tune_controller._resource_updater = resource_updater return tune_controller, actor_manger, resource_manager
target:     TestingTrial

language:   python
repo:       jmcnamara__XlsxWriter
path:       xlsxwriter/test/comparison/test_chart_scatter04.py
class_span: { "start": 315, "end": 1462 }
source:
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_scatter04.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart( {"type": "scatter", "subtype": "smooth_with_markers"} ) chart.axis_ids = [54011008, 45706240] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series( {"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5"} ) chart.add_series( {"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$C$1:$C$5"} ) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
target:     TestCompareXLSXFiles

language:   python
repo:       apache__airflow
path:       providers/apache/kafka/tests/unit/apache/kafka/operators/test_produce.py
class_span: { "start": 1444, "end": 2834 }
source:
class ____: """ Test ConsumeFromTopic """ @pytest.fixture(autouse=True) def setup_connections(self, create_connection_without_db): create_connection_without_db( Connection( conn_id="kafka_d", conn_type="kafka", extra=json.dumps( { "socket.timeout.ms": 10, "message.timeout.ms": 10, "bootstrap.servers": "localhost:9092", "group.id": "test_group", } ), ) ) def test_operator_string(self): operator = ProduceToTopicOperator( kafka_config_id="kafka_d", topic="test_1", producer_function="unit.apache.kafka.operators.test_produce._simple_producer", producer_function_args=(b"test", b"test"), task_id="test", synchronous=False, ) operator.execute(context={}) def test_operator_callable(self): operator = ProduceToTopicOperator( kafka_config_id="kafka_d", topic="test_1", producer_function=_simple_producer, producer_function_args=(b"test", b"test"), task_id="test", synchronous=False, ) operator.execute(context={})
target:     TestProduceToTopic

language:   python
repo:       django__django
path:       tests/migrations/test_migrations/0001_initial.py
class_span: { "start": 43, "end": 1019 }
source:
class ____(migrations.Migration): initial = True operations = [ migrations.CreateModel( "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=255)), ("slug", models.SlugField(null=True)), ("age", models.IntegerField(default=0)), ("silly_field", models.BooleanField(default=False)), ], ), migrations.CreateModel( "Tribble", [ ("id", models.AutoField(primary_key=True)), ("fluffy", models.BooleanField(default=True)), ], ), migrations.AddField( model_name="tribble", name="bool", field=models.BooleanField(default=False), ), migrations.AlterUniqueTogether( name="author", unique_together={("name", "slug")}, ), ]
target:     Migration

language:   python
repo:       huggingface__transformers
path:       tests/models/phimoe/test_modeling_phimoe.py
class_span: { "start": 3832, "end": 7536 }
source:
class ____(unittest.TestCase): model = None @classmethod def get_model(cls): if cls.model is None: cls.model = PhimoeForCausalLM.from_pretrained( "microsoft/Phi-3.5-MoE-instruct", dtype="auto", device_map="auto" ) return cls.model @classmethod def tearDownClass(cls): del cls.model cleanup(torch_device, gc_collect=True) def setUp(self): cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) def test_model_phimoe_instruct_logits(self): input_ids = {"input_ids": torch.tensor([[1212, 318, 281, 1672]], dtype=torch.long, device=torch_device)} model = self.get_model() model.eval() with torch.no_grad(): output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor( [ [-3.4844, -2.4531, -1.1719, 0.6055, -0.4922, -0.1001, 0.8086, -0.2422, 0.3477, -1.0078], [-0.9766, 0.1631, -0.5508, 2.3594, 0.7031, 3.1719, 0.4141, 0.2305, 0.6055, -2.1250], ] ).to(device=torch_device, dtype=output.dtype) # fmt: skip torch.testing.assert_close(output[0, :2, :10], EXPECTED_OUTPUT, rtol=1e-4, atol=1e-4) def test_phimoe_instruct_generation(self): model = self.get_model() tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-MoE-instruct") messages = [ { "role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.", }, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, ] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt") outputs = model.generate(inputs, max_new_tokens=30) output_text = tokenizer.batch_decode(outputs) EXPECTED_OUTPUT = [ "<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits are both delicious and nutritious fruits that can be combined in various ways to create", ] self.assertListEqual(output_text, EXPECTED_OUTPUT) def test_phimoe_instruct_with_static_cache(self): model = self.get_model() tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-MoE-instruct") messages = [ { "role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.", }, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, ] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to( torch_device ) response_tokens = PhimoeMiniWithStaticCache.generate(model, inputs, max_seq_len=30) output_text = tokenizer.batch_decode(torch.tensor([response_tokens], dtype=torch.long, device=torch_device)) EXPECTED_OUTPUT = [ "<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> C" ] self.assertListEqual(output_text, EXPECTED_OUTPUT)
target:     PhimoeIntegrationTest

language:   python
repo:       Pylons__pyramid
path:       tests/test_renderers.py
class_span: { "start": 22742, "end": 24040 }
source:
class ____(unittest.TestCase): def setUp(self): self.config = testing.setUp() def tearDown(self): testing.tearDown() def _callFUT(self, renderer_name, **kw): from pyramid.renderers import get_renderer return get_renderer(renderer_name, **kw) def test_it_no_package(self): renderer = self.config.testing_add_renderer('tests:abc/def.pt') result = self._callFUT('abc/def.pt') self.assertEqual(result, renderer) def test_it_with_package(self): import tests renderer = self.config.testing_add_renderer('tests:abc/def.pt') result = self._callFUT('abc/def.pt', package=tests) self.assertEqual(result, renderer) def test_it_with_registry(self): renderer = self.config.testing_add_renderer('tests:abc/def.pt') result = self._callFUT('abc/def.pt', registry=self.config.registry) self.assertEqual(result, renderer) def test_it_with_isolated_registry(self): from pyramid.config import Configurator isolated_config = Configurator() renderer = isolated_config.testing_add_renderer('tests:abc/def.pt') result = self._callFUT('abc/def.pt', registry=isolated_config.registry) self.assertEqual(result, renderer)
target:     Test_get_renderer

language:   python
repo:       pytorch__pytorch
path:       torch/utils/_pytree.py
class_span: { "start": 37889, "end": 47418 }
source:
class ____: type: Any _context: Context _children: list[Self] num_nodes: int = dataclasses.field(init=False) num_leaves: int = dataclasses.field(init=False) num_children: int = dataclasses.field(init=False) def __init__( self, type: Any, context: Context, # keep for backward compatibility children_specs: list[Self], # keep for backward compatibility ) -> None: object.__setattr__(self, "type", type) object.__setattr__(self, "_context", context) object.__setattr__(self, "_children", children_specs) self.__post_init__() def __post_init__(self) -> None: if self.type is None: assert self._context is None assert len(self._children) == 0 num_nodes = 1 num_leaves = 1 num_children = 0 else: num_nodes = 1 num_leaves = 0 for child in self._children: num_nodes += child.num_nodes num_leaves += child.num_leaves num_children = len(self._children) object.__setattr__(self, "num_nodes", num_nodes) object.__setattr__(self, "num_leaves", num_leaves) object.__setattr__(self, "num_children", num_children) def __repr__(self, indent: int = 0) -> str: repr_prefix: str = f"TreeSpec({self.type.__name__}, {self._context}, [" children_specs_str: str = "" if self.num_children > 0: indent += 2 children_specs_str += self._children[0].__repr__(indent) children_specs_str += "," if self.num_children > 1 else "" children_specs_str += ",".join( [ "\n" + " " * indent + child.__repr__(indent) for child in self._children[1:] ] ) repr_suffix: str = f"{children_specs_str}])" return repr_prefix + repr_suffix def __eq__(self, other: PyTree) -> bool: if self is other: return True elif other.__class__ is self.__class__: if str(self.type) != str(other.type): return False if self._context != other._context: return False elif self._children != other._children: return False return True return NotImplemented @property def context(self) -> Context: return self._context @property @deprecated( "`treespec.children_specs` is deprecated. 
" "Use `treespec.child(index)` to access a single child, " "or `treespec.children()` to get all children.", category=FutureWarning, ) def children_specs(self) -> list[Self]: return self._children def is_leaf(self) -> bool: return self.num_nodes == 1 and self.num_leaves == 1 def children(self) -> list[Self]: return self._children.copy() def child(self, index: int) -> Self: return self._children[index] def flatten_up_to(self, tree: PyTree) -> list[PyTree]: def helper(treespec: TreeSpec, node: PyTree, subtrees: list[PyTree]) -> None: if treespec.is_leaf(): subtrees.append(node) return node_type = _get_node_type(node) if treespec.type not in BUILTIN_TYPES: # Always require custom node types to match exactly if node_type != treespec.type: raise ValueError( f"Type mismatch; " f"expected {treespec.type!r}, but got {node_type!r}.", ) flatten_fn = SUPPORTED_NODES[node_type].flatten_fn children, context = flatten_fn(node) if len(children) != treespec.num_children: raise ValueError( f"Node arity mismatch; " f"expected {treespec.num_children}, but got {len(children)}.", ) if context != treespec._context: raise ValueError( f"Node context mismatch for custom node type {treespec.type!r}.", ) else: # For builtin dictionary types, we allow some flexibility # Otherwise, we require exact matches both_standard_dict = ( treespec.type in STANDARD_DICT_TYPES and node_type in STANDARD_DICT_TYPES ) if not both_standard_dict and node_type != treespec.type: raise ValueError( f"Node type mismatch; " f"expected {treespec.type!r}, but got {node_type!r}.", ) if len(node) != treespec.num_children: raise ValueError( f"Node arity mismatch; " f"expected {treespec.num_children}, but got {len(node)}.", ) if both_standard_dict: # dictionary types are compatible with each other dict_context = ( treespec._context if treespec.type is not defaultdict # ignore mismatch of `default_factory` for defaultdict else treespec._context[1] ) expected_keys = dict_context got_key_set = set(node) expected_key_set = set(expected_keys) if got_key_set != expected_key_set: missing_keys = expected_key_set.difference(got_key_set) extra_keys = got_key_set.difference(expected_key_set) message = "" if missing_keys: message += f"; missing key(s): {missing_keys}" if extra_keys: message += f"; extra key(s): {extra_keys}" raise ValueError(f"Node keys mismatch{message}.") children = [node[key] for key in expected_keys] else: # node_type is treespec.type flatten_fn = SUPPORTED_NODES[node_type].flatten_fn children, context = flatten_fn(node) if ( node_type is not deque # ignore mismatch of `maxlen` for deque ) and context != treespec._context: raise ValueError( f"Node context mismatch for node type {treespec.type!r}; " f"expected {treespec._context!r}, but got {context!r}.", # namedtuple type mismatch ) for subtree, subspec in zip(children, treespec._children, strict=True): helper(subspec, subtree, subtrees) subtrees: list[PyTree] = [] helper(self, tree, subtrees) return subtrees def unflatten(self, leaves: Iterable[Any]) -> PyTree: if not isinstance(leaves, (list, tuple)): leaves = list(leaves) if len(leaves) != self.num_leaves: raise ValueError( f"treespec.unflatten(leaves): `leaves` has length {len(leaves)} " f"but the spec refers to a pytree that holds {self.num_leaves} " f"items ({self}).", ) if self.is_leaf(): return leaves[0] unflatten_fn = SUPPORTED_NODES[self.type].unflatten_fn # Recursively unflatten the children start = 0 end = 0 child_pytrees = [] for child_spec in self._children: end += child_spec.num_leaves 
child_pytrees.append(child_spec.unflatten(leaves[start:end])) start = end return unflatten_fn(child_pytrees, self._context) def __hash__(self) -> int: node_type = self.type if node_type is defaultdict: default_factory, dict_context = self._context hashable_context = (default_factory, tuple(dict_context)) elif node_type in (dict, OrderedDict): hashable_context = tuple(self._context) elif node_type is None or node_type in BUILTIN_TYPES: hashable_context = self._context elif isinstance(self._context, ConstantNode): hashable_context = self._context.value else: # The context for user-defined node types might not be hashable. # Ignore it for hashing. # This does not break the correctness that equal objects imply the # same hash. This might increase the hash collision rate, but we # don't care about that. hashable_context = None return hash((node_type, hashable_context, tuple(self._children))) PyTreeSpec: TypeAlias = TreeSpec # NOTE: subclassing a dataclass is subtle. In order to enable reasoning about # this class with `dataclasses.fields`, etc., while having a simplified # constructor that takes no argument, we wrap with `dataclass(init=True, ...)` # again, with fields that have `init=False`. @deprecated( "`isinstance(treespec, LeafSpec)` is deprecated, " "use `isinstance(treespec, TreeSpec) and treespec.is_leaf()` instead.", category=FutureWarning, ) @dataclasses.dataclass(init=True, frozen=True, eq=False, repr=False)
target:     TreeSpec

language:   python
repo:       readthedocs__readthedocs.org
path:       readthedocs/builds/storage.py
class_span: { "start": 574, "end": 5780 }
source:
class ____: """ A mixin for Storage classes needed to write build artifacts. This adds and modifies some functionality to Django's File Storage API. By default, classes mixing this in will now overwrite files by default instead of finding an available name. This mixin also adds convenience methods to copy and delete entire directories. See: https://docs.djangoproject.com/en/1.11/ref/files/storage """ # Root path of the nginx internal redirect # that will serve files from this storage. internal_redirect_root_path = "proxito" @staticmethod def _dirpath(path): """ Make the path to end with `/`. It may just be Azure, but for listdir to work correctly, this is needed. """ path = str(path) if not path.endswith("/"): path += "/" return path def get_available_name(self, name, max_length=None): """ Overrides Django's storage to always return the passed name (overwrite). By default, Django will not overwrite files even if the same name is specified. This changes that functionality so that the default is to use the same name and overwrite rather than modify the path to not clobber files. """ return get_available_overwrite_name(name, max_length=max_length) def delete_directory(self, path): """ Delete all files under a certain path from storage. Many storage backends (S3, Azure storage) don't care about "directories". The directory effectively doesn't exist if there are no files in it. However, in these backends, there is no "rmdir" operation so you have to recursively delete all files. :param path: the path to the directory to remove """ if path in ("", "/"): raise SuspiciousFileOperation("Deleting all storage cannot be right") log.debug("Deleting path from media storage", path=path) folders, files = self.listdir(self._dirpath(path)) for folder_name in folders: if folder_name: # Recursively delete the subdirectory self.delete_directory(self.join(path, folder_name)) for filename in files: if filename: self.delete(self.join(path, filename)) def copy_directory(self, source, destination): """ Copy a directory recursively to storage. :param source: the source path on the local disk :param destination: the destination path in storage """ log.debug( "Copying source directory to media storage", source=source, destination=destination, ) source = Path(source) self._check_suspicious_path(source) for filepath in source.iterdir(): sub_destination = self.join(destination, filepath.name) # Don't follow symlinks when uploading to storage. if filepath.is_symlink(): log.info( "Skipping symlink upload.", path_resolved=str(filepath.resolve()), ) continue if filepath.is_dir(): # Recursively copy the subdirectory self.copy_directory(filepath, sub_destination) elif filepath.is_file(): with safe_open(filepath, "rb") as fd: self.save(sub_destination, fd) def _check_suspicious_path(self, path): """Check that the given path isn't a symlink or outside the doc root.""" path = Path(path) resolved_path = path.resolve() if path.is_symlink(): msg = "Suspicious operation over a symbolic link." log.error(msg, path=str(path), resolved_path=str(resolved_path)) raise SuspiciousFileOperation(msg) docroot = Path(settings.DOCROOT).absolute() if not path.is_relative_to(docroot): msg = "Suspicious operation outside the docroot directory." 
log.error(msg, path=str(path), resolved_path=str(resolved_path)) raise SuspiciousFileOperation(msg) @cached_property def _rclone(self): raise NotImplementedError def rclone_sync_directory(self, source, destination): """Sync a directory recursively to storage using rclone sync.""" if destination in ("", "/"): raise SuspiciousFileOperation("Syncing all storage cannot be right") self._check_suspicious_path(source) return self._rclone.sync(source, destination) def join(self, directory, filepath): return safe_join(directory, filepath) def walk(self, top): if top in ("", "/"): raise SuspiciousFileOperation("Iterating all storage cannot be right") log.debug("Walking path in media storage", path=top) folders, files = self.listdir(self._dirpath(top)) yield top, folders, files for folder_name in folders: if folder_name: # Recursively walk the subdirectory yield from self.walk(self.join(top, folder_name))
BuildMediaStorageMixin
python
HypothesisWorks__hypothesis
hypothesis-python/tests/conjecture/test_provider.py
{ "start": 9033, "end": 9419 }
class ____(PrimitiveProvider): def draw_integer(self, *args, **constraints): return 1 def draw_boolean(self, *args, **constraints): return True def draw_float(self, *args, **constraints): return 1.0 def draw_bytes(self, *args, **constraints): return b"" def draw_string(self, *args, **constraints): return ""
TrivialProvider
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/generic3.py
{ "start": 352, "end": 708 }
class ____(Iterable[_T2], Generic[_T1, _T2]): def __init__(self, a: _T1, b: _T2): pass def foo(self, a: _T1, b: _T2) -> _T2: return b def __iter__(self) -> Iterator[int]: ... a: Foo[int, str] = Foo(2, "") b: str = a.foo(4, "") # This should generate an error because a class shouldn't # derive from Generic more than once.
Foo
python
getsentry__sentry
src/sentry/integrations/slack/requests/options_load.py
{ "start": 277, "end": 1969 }
class ____(SlackRequest): """ An Options Load request sent from Slack. """ @property def group_id(self) -> int: if self.data.get("container", {}).get("is_app_unfurl"): return int( orjson.loads( self.data["app_unfurl"]["blocks"][0]["block_id"], )["issue"] ) return int(orjson.loads(self.data["message"]["blocks"][0]["block_id"])["issue"]) @property def substring(self) -> str: return str(self.data.get("value")) def _validate_data(self) -> None: super()._validate_data() if "payload" not in self.request.data: raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST) try: self._data = orjson.loads(self.data["payload"]) except (KeyError, IndexError, TypeError, ValueError): raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST) if self.data.get("type") not in VALID_PAYLOAD_TYPES: raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST) if "value" not in self.data: raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST) def _log_request(self) -> None: self._info("slack.options_load") def get_logging_data(self, group: Group | None = None) -> dict[str, Any]: logging_data: dict[str, Any] = {**self.logging_data, "response_url": self.response_url} if group: logging_data.update( { "group_id": group.id, "organization_id": group.organization.id, } ) return logging_data
SlackOptionsLoadRequest
python
django__django
tests/mutually_referential/models.py
{ "start": 401, "end": 606 }
class ____(models.Model): name = models.CharField(max_length=100) # You can also explicitly specify the related app. parent = models.ForeignKey("mutually_referential.Parent", models.CASCADE)
Child
python
tiangolo__fastapi
tests/test_security_api_key_cookie_description.py
{ "start": 250, "end": 2172 }
class ____(BaseModel): username: str def get_current_user(oauth_header: str = Security(api_key)): user = User(username=oauth_header) return user @app.get("/users/me") def read_current_user(current_user: User = Depends(get_current_user)): return current_user def test_security_api_key(): client = TestClient(app, cookies={"key": "secret"}) response = client.get("/users/me") assert response.status_code == 200, response.text assert response.json() == {"username": "secret"} def test_security_api_key_no_key(): client = TestClient(app) response = client.get("/users/me") assert response.status_code == 401, response.text assert response.json() == {"detail": "Not authenticated"} assert response.headers["WWW-Authenticate"] == "APIKey" def test_openapi_schema(): client = TestClient(app) response = client.get("/openapi.json") assert response.status_code == 200, response.text assert response.json() == { "openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": { "/users/me": { "get": { "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, "summary": "Read Current User", "operationId": "read_current_user_users_me_get", "security": [{"APIKeyCookie": []}], } } }, "components": { "securitySchemes": { "APIKeyCookie": { "type": "apiKey", "name": "key", "in": "cookie", "description": "An API Cookie Key", } } }, }
User
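The class_span for this row starts after the application and security-scheme setup, so that code is not shown. A rough reconstruction of the missing prelude, inferred from the securitySchemes block asserted in test_openapi_schema rather than quoted from the file:

from fastapi import FastAPI
from fastapi.security import APIKeyCookie

app = FastAPI()

# Cookie name and description are taken from the OpenAPI assertion in the tests;
# Security(api_key) in get_current_user then reads the "key" cookie value.
api_key = APIKeyCookie(name="key", description="An API Cookie Key")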
python
google__jax
jax/_src/pallas/fuser/block_spec.py
{ "start": 20077, "end": 20139 }
class ____(enum.Enum): REGULAR = 0 SCALAR_PREFETCH = 1
Usage
python
pypa__pip
src/pip/_vendor/rich/console.py
{ "start": 13783, "end": 16535 }
class ____: """Takes a group of renderables and returns a renderable object that renders the group. Args: renderables (Iterable[RenderableType]): An iterable of renderable objects. fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True. """ def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None: self._renderables = renderables self.fit = fit self._render: Optional[List[RenderableType]] = None @property def renderables(self) -> List["RenderableType"]: if self._render is None: self._render = list(self._renderables) return self._render def __rich_measure__( self, console: "Console", options: "ConsoleOptions" ) -> "Measurement": if self.fit: return measure_renderables(console, options, self.renderables) else: return Measurement(options.max_width, options.max_width) def __rich_console__( self, console: "Console", options: "ConsoleOptions" ) -> RenderResult: yield from self.renderables def group(fit: bool = True) -> Callable[..., Callable[..., Group]]: """A decorator that turns an iterable of renderables in to a group. Args: fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True. """ def decorator( method: Callable[..., Iterable[RenderableType]], ) -> Callable[..., Group]: """Convert a method that returns an iterable of renderables in to a Group.""" @wraps(method) def _replace(*args: Any, **kwargs: Any) -> Group: renderables = method(*args, **kwargs) return Group(*renderables, fit=fit) return _replace return decorator def _is_jupyter() -> bool: # pragma: no cover """Check if we're running in a Jupyter notebook.""" try: get_ipython # type: ignore[name-defined] except NameError: return False ipython = get_ipython() # type: ignore[name-defined] shell = ipython.__class__.__name__ if ( "google.colab" in str(ipython.__class__) or os.getenv("DATABRICKS_RUNTIME_VERSION") or shell == "ZMQInteractiveShell" ): return True # Jupyter notebook or qtconsole elif shell == "TerminalInteractiveShell": return False # Terminal running IPython else: return False # Other type (?) COLOR_SYSTEMS = { "standard": ColorSystem.STANDARD, "256": ColorSystem.EIGHT_BIT, "truecolor": ColorSystem.TRUECOLOR, "windows": ColorSystem.WINDOWS, } _COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()} @dataclass
Group
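Group collects several renderables so they render and measure as one unit, which is what lets a single Panel wrap heterogeneous content. A small usage sketch against the standalone rich package, which exposes the same Console/Group/group API as this vendored copy:

from rich.console import Console, Group, group
from rich.panel import Panel

console = Console()

# Group several renderables so one Panel can frame all of them together.
body = Group("first line", Panel("an inner panel"), "last line")
console.print(Panel(body, title="grouped"))

# The @group() decorator turns a generator of renderables into a Group.
@group()
def rows():
    for i in range(3):
        yield f"row {i}"

console.print(Panel(rows(), title="from a generator"))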
python
pytorch__pytorch
tools/linter/adapters/testowners_linter.py
{ "start": 854, "end": 4996 }
class ____(NamedTuple): path: str | None line: int | None char: int | None code: str severity: LintSeverity name: str original: str | None replacement: str | None description: str | None def get_pytorch_labels() -> Any: url = "https://ossci-metrics.s3.amazonaws.com/pytorch_labels.json" try: labels = urlopen(url).read().decode("utf-8") except urllib.error.URLError: # This is an FB-only hack, if the json isn't available we may # need to use a forwarding proxy to get out proxy_url = "http://fwdproxy:8080" proxy_handler = urllib.request.ProxyHandler( {"http": proxy_url, "https": proxy_url} ) context = urllib.request.build_opener(proxy_handler) labels = context.open(url).read().decode("utf-8") return json.loads(labels) PYTORCH_LABELS = get_pytorch_labels() # Team/owner labels usually start with "module: " or "oncall: ", but the following are acceptable exceptions ACCEPTABLE_OWNER_LABELS = ["NNC", "high priority"] OWNERS_PREFIX = "# Owner(s): " GLOB_EXCEPTIONS = ["**/test/run_test.py"] def check_labels( labels: list[str], filename: str, line_number: int ) -> list[LintMessage]: lint_messages = [] for label in labels: if label not in PYTORCH_LABELS: lint_messages.append( LintMessage( path=filename, line=line_number, char=None, code=LINTER_CODE, severity=LintSeverity.ERROR, name="[invalid-label]", original=None, replacement=None, description=( f"{label} is not a PyTorch label " "(please choose from https://github.com/pytorch/pytorch/labels)" ), ) ) if label.startswith(("module:", "oncall:")) or label in ACCEPTABLE_OWNER_LABELS: continue lint_messages.append( LintMessage( path=filename, line=line_number, char=None, code=LINTER_CODE, severity=LintSeverity.ERROR, name="[invalid-owner]", original=None, replacement=None, description=( f"{label} is not an acceptable owner " "(please update to another label or edit ACCEPTABLE_OWNERS_LABELS " "in tools/linters/adapters/testowners_linter.py)" ), ) ) return lint_messages def check_file(filename: str) -> list[LintMessage]: lint_messages = [] has_ownership_info = False with open(filename) as f: for idx, line in enumerate(f): if not line.startswith(OWNERS_PREFIX): continue has_ownership_info = True labels = json.loads(line[len(OWNERS_PREFIX) :]) lint_messages.extend(check_labels(labels, filename, idx + 1)) if has_ownership_info is False: lint_messages.append( LintMessage( path=filename, line=None, char=None, code=LINTER_CODE, severity=LintSeverity.ERROR, name="[no-owner-info]", original=None, replacement=None, description="Missing a comment header with ownership information.", ) ) return lint_messages def main() -> None: parser = argparse.ArgumentParser( description="test ownership linter", fromfile_prefix_chars="@", ) parser.add_argument( "filenames", nargs="+", help="paths to lint", ) args = parser.parse_args() lint_messages = [] for filename in args.filenames: lint_messages.extend(check_file(filename)) for lint_message in lint_messages: print(json.dumps(lint_message._asdict()), flush=True) if __name__ == "__main__": main()
LintMessage
python
neetcode-gh__leetcode
python/0072-edit-distance.py
{ "start": 0, "end": 650 }
class ____: def minDistance(self, word1: str, word2: str) -> int: dp = [[float("inf")] * (len(word2) + 1) for i in range(len(word1) + 1)] for j in range(len(word2) + 1): dp[len(word1)][j] = len(word2) - j for i in range(len(word1) + 1): dp[i][len(word2)] = len(word1) - i for i in range(len(word1) - 1, -1, -1): for j in range(len(word2) - 1, -1, -1): if word1[i] == word2[j]: dp[i][j] = dp[i + 1][j + 1] else: dp[i][j] = 1 + min(dp[i + 1][j], dp[i][j + 1], dp[i + 1][j + 1]) return dp[0][0]
Solution
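A quick sanity check of the bottom-up DP above on the classic example, assuming the Solution class has been loaded as ordinary (re-indented) Python:

sol = Solution()

# "horse" -> "rorse" (replace 'h') -> "rose" (delete 'r') -> "ros" (delete 'e')
assert sol.minDistance("horse", "ros") == 3
assert sol.minDistance("", "abc") == 3       # only insertions remain
assert sol.minDistance("same", "same") == 0  # nothing to edit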
python
joke2k__faker
faker/providers/currency/ru_RU/__init__.py
{ "start": 46, "end": 6141 }
class ____(CurrencyProvider): # Format: (code, name) # See currency names in Russian: https://ru.wikipedia.org/wiki/Список_существующих_валют#Валюты currencies = ( ("AED", "Дирхам ОАЭ"), ("AFN", "Афгани"), ("ALL", "Лек"), ("AMD", "Армянский драм"), ("ANG", "Нидерландский антильский гульден"), ("AOA", "Кванза"), ("ARS", "Аргентинское песо"), ("AUD", "Австралийский доллар"), ("AWG", "Арубанский флорин"), ("AZN", "Азербайджанский манат"), ("BAM", "Конвертируемая марка Боснии и Герцеговины"), ("BBD", "Барбадосский доллар"), ("BDT", "Така"), ("BGN", "Болгарский лев"), ("BHD", "Бахрейнский динар"), ("BIF", "Бурундийский франк"), ("BMD", "Бермудский доллар"), ("BND", "Брунейский доллар"), ("BOB", "Боливиано"), ("BRL", "Бразильский реал"), ("BSD", "Багамский доллар"), ("BTN", "Нгултрум"), ("BWP", "Пула"), ("BYR", "Белорусский рубль"), ("BZD", "Белизский доллар"), ("CAD", "Канадский доллар"), ("CDF", "Конголезский франк"), ("CHF", "Швейцарский франк"), ("CLP", "Чилийское песо"), ("CNY", "Юань"), ("COP", "Колумбийское песо"), ("CRC", "Коста-риканский колон"), ("CUC", "Кубанское конвертируемое песо"), ("CUP", "Кубанское песо"), ("CVE", "Эскудо Кабо-Верде"), ("CZK", "Чешская крона"), ("DJF", "Франк Джибути"), ("DKK", "Датская крона"), ("DOP", "Доминиканское песо"), ("DZD", "Алжирский динар"), ("EGP", "Египетский фунт"), ("ERN", "Накфа"), ("ETB", "Эфиопский быр"), ("EUR", "Евро"), ("FJD", "Доллар Фиджи"), ("FKP", "Фунт Фолклендских островов"), ("GBP", "Фунт стерлингов"), ("GEL", "Лари"), ("GGP", "Гернсийский фунт"), ("GHS", "Ганский седи"), ("GIP", "Гибралтарский фунт"), ("GMD", "Даласи"), ("GNF", "Гвинейский франк"), ("GTQ", "Кетсаль"), ("GYD", "Гайанский доллар"), ("HKD", "Гонконгский доллар"), ("HNL", "Лемпира"), ("HRK", "Хорватская куна"), ("HTG", "Гурд"), ("HUF", "Форинт"), ("IDR", "Индонезийская рупия"), ("ILS", "Новый израильский шекель"), ("NIS", "Новый израильский шекель"), ("IMP", "Фунт острова Мэн"), ("INR", "Индийская рупия"), ("IQD", "Иракский динар"), ("IRR", "Иранский риал"), ("ISK", "Исландская крона"), ("JEP", "Джерсийский фунт"), ("JMD", "Ямайский доллар"), ("JOD", "Иорданский динар"), ("JPY", "Иена"), ("KES", "Кенийский шиллинг"), ("KGS", "Сом"), ("KHR", "Риель"), ("KMF", "Франк Комор"), ("KPW", "Северокорейская вона"), ("KRW", "Южнокорейская вона"), ("KWD", "Кувейтский динар"), ("KYD", "Доллар Островов Кайман"), ("KZT", "Тенге"), ("LAK", "Кип"), ("LBP", "Ливийский фунт"), ("LKR", "Шри-ланкийская рупия"), ("LRD", "Либерийский доллар"), ("LSL", "Лоти"), ("LTL", "Литовский лит"), ("LYD", "Ливийский динар"), ("MAD", "Марокканский дирхам"), ("MDL", "Молдавский лей"), ("MGA", "Малагасийский ариари"), ("MKD", "Денар"), ("MMK", "Кьят"), ("MNT", "Тугрик"), ("MOP", "Патака"), ("MRO", "Угия"), ("MUR", "Маврикийская рупия"), ("MVR", "Рувия"), ("MWK", "Квача"), ("MXN", "Мексиканское песо"), ("MYR", "Малайзийский ринггит"), ("MZN", "Мозамбикский метикал"), ("NAD", "Доллар Намибии"), ("NGN", "Найра"), ("NIO", "Кордоба"), ("NOK", "Норвежская крона"), ("NPR", "Непальская рупия"), ("NZD", "Новозеландский доллар"), ("OMR", "Оманский риал"), ("PAB", "Бальбоа"), ("PEN", "Соль"), ("PGK", "Кина"), ("PHP", "Филиппинское песо"), ("PKR", "Пакистанская рупия"), ("PLN", "Злотый"), ("PYG", "Гуарани"), ("QAR", "Катарский риал"), ("RON", "Румынский лей"), ("RSD", "Сербский динар"), ("RUB", "Российский рубль"), ("RWF", "Франк Руанды"), ("SAR", "Саудовский риял"), ("SBD", "Доллар Соломоновых Островов"), ("SCR", "Сейшельская рупия"), ("SDG", "Суданский фунт"), ("SEK", "Шведская крона"), ("SGD", "Сингапурский 
доллар"), ("SHP", "Фунт Святой Елены"), ("SLL", "Леоне"), ("SOS", "Сомалийский шиллинг"), ("SPL", "Луиджино"), ("SRD", "Суринамский доллар"), ("STD", "Добра"), ("SVC", "Сальвадорский колон"), ("SYP", "Сирийский фунт"), ("SZL", "Лилангени"), ("THB", "Бат"), ("TJS", "Сомони"), ("TMT", "Новый туркменский манат"), ("TND", "Тунисский динар"), ("TOP", "Паанга"), ("TRY", "Турецкая лира"), ("TTD", "Доллар Тринидада и Тобаго"), ("TVD", "Доллар Тувалу"), ("TWD", "Новый тайваньский доллар"), ("TZS", "Танзанийский шиллинг"), ("UAH", "Гривна"), ("UGX", "Угандийский шиллинг"), ("USD", "Доллар США"), ("UYU", "Уругвайское песо"), ("UZS", "Узбекский сум"), ("VEF", "Суверенный боливар"), ("VND", "Донг"), ("VUV", "Вату"), ("WST", "Тала"), ("XAF", "Франк КФА ВЕАС"), ("XCD", "Восточно-карибский доллар"), ("XDR", "СДР"), ("XOF", "Франк КФА ВСЕАО"), ("XPF", "Франк КФП"), ("YER", "Йеменский риал"), ("ZAR", "Рэнд"), ("ZMW", "Замбийская квача"), ("ZWD", "Доллар Зимбабве"), ) price_formats = ["#,##", "%#,##", "%##,##", "% ###,##", "%# ###,##"] def pricetag(self) -> str: return ( self.numerify(self.random_element(self.price_formats)) + "\N{NO-BREAK SPACE}\N{CYRILLIC SMALL LETTER ER}." )
Provider
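pricetag() numerifies one of the price_formats and appends a non-breaking space plus "р.". A short usage sketch through the public Faker API; the exact numbers depend on the seed, so only the shape of the output is stated:

from faker import Faker

Faker.seed(0)          # make repeated runs reproducible
fake = Faker("ru_RU")  # pulls in the currency provider shown above

print(fake.pricetag())        # a formatted price string ending in "р."
code, name = fake.currency()  # a random (code, name) pair from the table above
print(code, name)
print(fake.currency_name())   # just the Russian currency name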
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_tool_search_tool_search_result_block_param.py
{ "start": 342, "end": 546 }
class ____(TypedDict, total=False): tool_references: Required[Iterable[BetaToolReferenceBlockParam]] type: Required[Literal["tool_search_tool_search_result"]]
BetaToolSearchToolSearchResultBlockParam
python
gevent__gevent
src/greentest/3.10/test_smtpd.py
{ "start": 31731, "end": 33987 }
class ____(unittest.TestCase): def setUp(self): smtpd.socket = asyncore.socket = mock_socket self.old_debugstream = smtpd.DEBUGSTREAM self.debug = smtpd.DEBUGSTREAM = io.StringIO() self.server = DummyServer((socket_helper.HOST, 0), ('b', 0), decode_data=True) conn, addr = self.server.accept() # Set DATA size limit to 32 bytes for easy testing self.channel = smtpd.SMTPChannel(self.server, conn, addr, 32, decode_data=True) def tearDown(self): asyncore.close_all() asyncore.socket = smtpd.socket = socket smtpd.DEBUGSTREAM = self.old_debugstream def write_line(self, line): self.channel.socket.queue_recv(line) self.channel.handle_read() def test_data_limit_dialog(self): self.write_line(b'HELO example') self.write_line(b'MAIL From:eggs@example') self.assertEqual(self.channel.socket.last, b'250 OK\r\n') self.write_line(b'RCPT To:spam@example') self.assertEqual(self.channel.socket.last, b'250 OK\r\n') self.write_line(b'DATA') self.assertEqual(self.channel.socket.last, b'354 End data with <CR><LF>.<CR><LF>\r\n') self.write_line(b'data\r\nmore\r\n.') self.assertEqual(self.channel.socket.last, b'250 OK\r\n') self.assertEqual(self.server.messages, [(('peer-address', 'peer-port'), 'eggs@example', ['spam@example'], 'data\nmore')]) def test_data_limit_dialog_too_much_data(self): self.write_line(b'HELO example') self.write_line(b'MAIL From:eggs@example') self.assertEqual(self.channel.socket.last, b'250 OK\r\n') self.write_line(b'RCPT To:spam@example') self.assertEqual(self.channel.socket.last, b'250 OK\r\n') self.write_line(b'DATA') self.assertEqual(self.channel.socket.last, b'354 End data with <CR><LF>.<CR><LF>\r\n') self.write_line(b'This message is longer than 32 bytes\r\n.') self.assertEqual(self.channel.socket.last, b'552 Error: Too much mail data\r\n')
SMTPDChannelWithDataSizeLimitTest
python
huggingface__transformers
tests/models/convnext/test_modeling_convnext.py
{ "start": 1408, "end": 5648 }
class ____: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_labels = num_labels self.initializer_range = initializer_range self.out_features = out_features self.out_indices = out_indices self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ConvNextConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = ConvNextModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): model = ConvNextForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, pixel_values, labels): model = ConvNextBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) # verify backbone works with out_features=None config.out_features = None model = ConvNextBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch
ConvNextModelTester
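The tester builds a deliberately tiny ConvNext so tests stay fast. A hedged sketch of the same shape check outside the test harness, using only public transformers/torch APIs and randomly initialized weights (no downloads); the config values mirror the tester's defaults:

import torch
from transformers import ConvNextConfig, ConvNextModel

config = ConvNextConfig(num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2])
model = ConvNextModel(config).eval()

pixel_values = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    out = model(pixel_values)

# Last hidden state is (batch, hidden_sizes[-1], H // 32, W // 32), as asserted above.
print(out.last_hidden_state.shape)  # torch.Size([1, 40, 1, 1])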
python
pallets__werkzeug
src/werkzeug/routing/converters.py
{ "start": 156, "end": 340 }
class ____(ValueError): """Validation error. If a rule converter raises this exception the rule does not match the current URL and the next URL is tried. """
ValidationError
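ValidationError is what a custom URL converter raises from to_python to say "this rule does not match". A minimal sketch with werkzeug's public routing API; the FourDigitYearConverter name is made up for illustration:

from werkzeug.exceptions import NotFound
from werkzeug.routing import BaseConverter, Map, Rule, ValidationError

class FourDigitYearConverter(BaseConverter):
    regex = r"\d+"

    def to_python(self, value):
        if len(value) != 4:
            # Tells the router this rule does not match; matching moves on,
            # and with no other candidate rule the lookup ends in a 404.
            raise ValidationError()
        return int(value)

url_map = Map(
    [Rule("/archive/<year:y>", endpoint="archive")],
    converters={"year": FourDigitYearConverter},
)
adapter = url_map.bind("example.com")

print(adapter.match("/archive/2024"))  # ('archive', {'y': 2024})
try:
    adapter.match("/archive/24")       # the only rule rejects it
except NotFound:
    print("no rule matched /archive/24")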
python
getsentry__sentry
src/sentry/backup/helpers.py
{ "start": 1303, "end": 1631 }
class ____(Enum): """ Used to identify the "side" in backup operations which perform comparisons between two sets of exports JSONs. The "left" side is usually the older of the two states (ie, "left" is roughly synonymous with "before", and "right" with "after"). """ left = 1 right = 2 T = TypeVar("T")
Side
python
airbytehq__airbyte
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py
{ "start": 7820, "end": 8148 }
class ____(BaseModel): class Config: extra = Extra.forbid name: str = Field(..., description="The secret name in the secret store") fileName: Optional[str] = Field( None, description="The name of the file to which the secret value would be persisted", ) secretStore: SecretStore
Secret
python
walkccc__LeetCode
solutions/2895. Minimum Processing Time/2895.py
{ "start": 0, "end": 272 }
class ____: def minProcessingTime( self, processorTime: list[int], tasks: list[int], ) -> int: return max(time + task for (time, task) in zip( sorted(processorTime), sorted(tasks)[::-4]))
Solution
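A hypothetical driver for the row above, checked against the published example for this problem (each processor runs exactly four tasks; pairing the largest tasks with the earliest-available processor gives the optimum):

sol = Solution()

# Processors free at 8 and 10. The four largest tasks {8, 7, 5, 4} go to the
# processor free at 8 (finishes at 8 + 8 = 16); the rest go to the other
# (finishes at 10 + 3 = 13), so the answer is 16.
assert sol.minProcessingTime([8, 10], [2, 2, 3, 1, 8, 7, 4, 5]) == 16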
python
django__django
tests/postgres_tests/test_ranges.py
{ "start": 3301, "end": 6564 }
class ____(PostgreSQLTestCase): def test_all_fields(self): now = timezone.now() instance = RangesModel( ints=NumericRange(0, 10), bigints=NumericRange(10, 20), decimals=NumericRange(20, 30), timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now), dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()), ) instance.save() loaded = RangesModel.objects.get() self.assertEqual(instance.ints, loaded.ints) self.assertEqual(instance.bigints, loaded.bigints) self.assertEqual(instance.decimals, loaded.decimals) self.assertEqual(instance.timestamps, loaded.timestamps) self.assertEqual(instance.dates, loaded.dates) def test_range_object(self): r = NumericRange(0, 10) instance = RangesModel(ints=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.ints) def test_tuple(self): instance = RangesModel(ints=(0, 10)) instance.save() loaded = RangesModel.objects.get() self.assertEqual(NumericRange(0, 10), loaded.ints) def test_tuple_range_with_default_bounds(self): range_ = (timezone.now(), timezone.now() + datetime.timedelta(hours=1)) RangesModel.objects.create(timestamps_closed_bounds=range_, timestamps=range_) loaded = RangesModel.objects.get() self.assertEqual( loaded.timestamps_closed_bounds, DateTimeTZRange(range_[0], range_[1], "[]"), ) self.assertEqual( loaded.timestamps, DateTimeTZRange(range_[0], range_[1], "[)"), ) def test_range_object_boundaries(self): r = NumericRange(0, 10, "[]") instance = RangesModel(decimals=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.decimals) self.assertIn(10, loaded.decimals) def test_range_object_boundaries_range_with_default_bounds(self): range_ = DateTimeTZRange( timezone.now(), timezone.now() + datetime.timedelta(hours=1), bounds="()", ) RangesModel.objects.create(timestamps_closed_bounds=range_) loaded = RangesModel.objects.get() self.assertEqual(loaded.timestamps_closed_bounds, range_) def test_unbounded(self): r = NumericRange(None, None, "()") instance = RangesModel(decimals=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.decimals) def test_empty(self): r = NumericRange(empty=True) instance = RangesModel(ints=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.ints) def test_null(self): instance = RangesModel(ints=None) instance.save() loaded = RangesModel.objects.get() self.assertIsNone(loaded.ints) def test_model_set_on_base_field(self): instance = RangesModel() field = instance._meta.get_field("ints") self.assertEqual(field.model, RangesModel) self.assertEqual(field.base_field.model, RangesModel)
TestSaveLoad
python
google__jax
jax/_src/lax/lax.py
{ "start": 94799, "end": 236225 }
class ____(): """Describes ragged, group, and dot dimensions for ragged dot general. Args: dot_dimension_numbers: a tuple of tuples of sequences of ints of the form `((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims))`. lhs_ragged_dimensions: a sequence of ints indicating the 'lhs' ragged dimensions. rhs_group_dimensions: a sequence of ints indicating the 'rhs' group dimensions. """ dot_dimension_numbers: DotDimensionNumbers lhs_ragged_dimensions: Sequence[int] rhs_group_dimensions: Sequence[int] def __init__( self, dot_dimension_numbers, lhs_ragged_dimensions, rhs_group_dimensions ): super().__setattr__( 'dot_dimension_numbers', tuple(tuple(map(tuple, t)) for t in dot_dimension_numbers), ) super().__setattr__('lhs_ragged_dimensions', tuple(lhs_ragged_dimensions)) super().__setattr__('rhs_group_dimensions', tuple(rhs_group_dimensions)) def _from_maybe_ragged( dot_dimension_numbers: RaggedDotDimensionNumbers | DotDimensionNumbers, ) -> DotDimensionNumbers: return ( dot_dimension_numbers.dot_dimension_numbers if isinstance(dot_dimension_numbers, RaggedDotDimensionNumbers) else dot_dimension_numbers ) # RaggedDotDimensionNumbers that specify the simple case (i.e., lax.ragged_dot.) _BASIC_RAGGED_DOT_DIMENSION_NUMBERS = RaggedDotDimensionNumbers( dot_dimension_numbers=(([1], [1]), ([], [])), lhs_ragged_dimensions=[0], rhs_group_dimensions=[0], ) def ragged_dot_general( lhs: Array, rhs: Array, group_sizes: Array, ragged_dot_dimension_numbers: RaggedDotDimensionNumbers, precision: PrecisionLike = None, preferred_element_type: DTypeLike | None = None, group_offset: Array | None = None, ) -> Array: """Ragged matrix multiplication. Ragged dot takes three arrays---``lhs``, ``rhs``, and ``group_sizes``---and a ``ragged_dot_dimension_numbers`` argument. Like `dot_general`, ``lhs`` and ``rhs`` are allowed arbitrary batch and contracting dimensions. Additionally, ``lhs`` is required to have one ragged dimension, and ``rhs`` may have at most one group dimension. Let `g` be the number of groups in the lhs ragged dimension. Ragged dot has three modes, depending on the kind of the lhs ragged dimension: 1. ``[b...,m...,k...], [g,b...,k...,n...], [b...,x...,g] -> [b...,m...,n...]``. Here the ragged dimension is a non-contracting dimension (``m``) of ``lhs``, and ``x...`` are the lhs non-contracting dims outer to the ragged dim. 2. ``[b...,m...,k...], [b...,k...,n...], [b...,x...,g] -> [g,b...,m...,n...]``. Here the ragged dimension is a contracting dimension (``k``) of ``lhs`` and ``rhs``, and `x...` are the lhs contracting dims outer to the ragged dim. 3. ``[b...,m...,k...], [b...,k...,n...], [x...,g] -> [b...,m...,n...]``. Here the ragged dimension is a batch dimension (``b``) of ``lhs`` and ``rhs``, and ``x...`` are the lhs batch dims outer to the ragged dim. If ``group_sizes`` is passed-in with shape ``[g]``, it is broadcasted according to the rules above. Args: lhs: an array rhs: an array group_sizes: an array with integer element type ragged_dot_dimension_numbers: a ``RaggedDotDimensionNumbers`` object to specify the dot dimension numbers, lhs ragged dimension, and rhs group dimension. precision: Optional. Consistent with precision argument for :func:`jax.lax.dot`. preferred_element_type: Optional. Consistent with precision argument for :func:`jax.lax.dot`. group_offset: Optional. (1,) shaped array that indicates the group in group_sizes to start computing from. If not specified, defaults to [0]. 
Results: An array whose shape is the same as that produced by `dot_general`, with an extra leading dimension of size `g` in the case where the lhs ragged dimension is a contracting dimension. """ lhs, rhs, group_sizes = core.standard_insert_pvary(lhs, rhs, group_sizes) return ragged_dot_general_p.bind( lhs, rhs, group_sizes, ragged_dot_dimension_numbers=ragged_dot_dimension_numbers, precision=canonicalize_precision(precision), preferred_element_type=preferred_element_type, group_offset=group_offset, ) def broadcast(operand: ArrayLike, sizes: Sequence[int], *, out_sharding=None ) -> Array: """Broadcasts an array, adding new leading dimensions Args: operand: an array sizes: a sequence of integers, giving the sizes of new leading dimensions to add to the front of the array. Returns: An array containing the result. See Also: jax.lax.broadcast_in_dim : add new dimensions at any location in the array shape. """ if len(sizes) == 0 and out_sharding is None: return asarray(operand) dims = tuple(range(len(sizes), len(sizes) + np.ndim(operand))) return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims, out_sharding=out_sharding) def broadcast_in_dim(operand: ArrayLike, shape: Shape, broadcast_dimensions: Sequence[int], *, out_sharding=None ) -> Array: """Wraps XLA's `BroadcastInDim <https://www.openxla.org/xla/operation_semantics#broadcastindim>`_ operator. Args: operand: an array shape: the shape of the target array broadcast_dimensions: to which dimension in the target shape each dimension of the operand shape corresponds to. That is, dimension i of the operand becomes dimension broadcast_dimensions[i] of the result. Returns: An array containing the result. See Also: jax.lax.broadcast : simpler interface to add new leading dimensions. """ # TODO(dfm): Re-write this as a "reshard" when only the sharding changes. out_sharding = canonicalize_sharding(out_sharding, 'broadcast_in_dim') if (np.ndim(operand) == len(shape) and not len(broadcast_dimensions) and isinstance(operand, Array) and out_sharding is None): return operand if config.dynamic_shapes.value: # We must gate this behavior under a flag because otherwise the errors # raised are different (and have worse source provenance information). dyn_shape, static_shape = _extract_tracers_dyn_shape(shape) else: dyn_shape, static_shape = [], shape # type: ignore return broadcast_in_dim_p.bind( operand, *dyn_shape, shape=tuple(static_shape), broadcast_dimensions=tuple(broadcast_dimensions), sharding=out_sharding) def broadcast_to_rank(x: ArrayLike, rank: int) -> Array: """Adds leading dimensions of ``1`` to give ``x`` rank ``rank``.""" ndim = np.ndim(x) if ndim == rank: return asarray(x) return broadcast(x, (1,) * (rank - ndim)) def reshape(operand: ArrayLike, new_sizes: Shape, dimensions: Sequence[int] | None = None, *, out_sharding: NamedSharding | P | None = None) -> Array: """Wraps XLA's `Reshape <https://www.openxla.org/xla/operation_semantics#reshape>`_ operator. For inserting/removing dimensions of size 1, prefer using ``lax.squeeze`` / ``lax.expand_dims``. These preserve information about axis identity that may be useful for advanced transformation rules. Args: operand: array to be reshaped. new_sizes: sequence of integers specifying the resulting shape. The size of the final array must match the size of the input. dimensions: optional sequence of integers specifying the permutation order of the input shape. If specified, the length must match ``operand.shape``. Returns: out: reshaped array. 
Examples: Simple reshaping from one to two dimensions: >>> x = jnp.arange(6) >>> y = reshape(x, (2, 3)) >>> y Array([[0, 1, 2], [3, 4, 5]], dtype=int32) Reshaping back to one dimension: >>> reshape(y, (6,)) Array([0, 1, 2, 3, 4, 5], dtype=int32) Reshaping to one dimension with permutation of dimensions: >>> reshape(y, (6,), (1, 0)) Array([0, 3, 1, 4, 2, 5], dtype=int32) """ new_sizes = canonicalize_shape(new_sizes) # TODO new_sizes = tuple(new_sizes) same_shape = core.definitely_equal_shape(np.shape(operand), new_sizes) if dimensions is None: same_dims = True dims = None else: dims = api_util._ensure_index_tuple(dimensions) same_dims = tuple(dims) == tuple(range(np.ndim(operand))) out_sharding = canonicalize_sharding(out_sharding, 'reshape') same_sharding = (out_sharding is None or core.typeof(operand).sharding == out_sharding) if (np.shape(operand) and same_shape and same_dims and same_sharding and isinstance(operand, Array)): return operand else: dyn_shape, static_new_sizes = _extract_tracers_dyn_shape(new_sizes) return reshape_p.bind( operand, *dyn_shape, new_sizes=tuple(static_new_sizes), dimensions=None if dims is None or same_dims else dims, sharding=out_sharding) def pad(operand: ArrayLike, padding_value: ArrayLike, padding_config: Sequence[tuple[int, int, int]]) -> Array: """Applies low, high, and/or interior padding to an array. Wraps XLA's `Pad <https://www.openxla.org/xla/operation_semantics#pad>`_ operator. Args: operand: an array to be padded. padding_value: the value to be inserted as padding. Must have the same dtype as ``operand``. padding_config: a sequence of ``(low, high, interior)`` tuples of integers, giving the amount of low, high, and interior (dilation) padding to insert in each dimension. Negative values for ``low`` and ``high`` are allowed and remove elements from the edges of the array. Returns: The ``operand`` array with padding value ``padding_value`` inserted in each dimension according to the ``padding_config``. Examples: >>> from jax import lax >>> import jax.numpy as jnp Pad a 1-dimensional array with zeros, We'll specify two zeros in front and three at the end: >>> x = jnp.array([1, 2, 3, 4]) >>> lax.pad(x, 0, [(2, 3, 0)]) Array([0, 0, 1, 2, 3, 4, 0, 0, 0], dtype=int32) Pad a 1-dimensional array with *interior* zeros; i.e. insert a single zero between each value: >>> lax.pad(x, 0, [(0, 0, 1)]) Array([1, 0, 2, 0, 3, 0, 4], dtype=int32) Pad a 2-dimensional array with the value ``-1`` at front and end, with a pad size of 2 in each dimension: >>> x = jnp.array([[1, 2, 3], ... [4, 5, 6]]) >>> lax.pad(x, -1, [(2, 2, 0), (2, 2, 0)]) Array([[-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, 1, 2, 3, -1, -1], [-1, -1, 4, 5, 6, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], dtype=int32) Use negative padding to remove elements from the edges of an array: >>> x = jnp.array([1, 2, 3, 4, 5], dtype=jnp.int32) >>> lax.pad(x, 0, [(-1, -2, 0)]) Array([2, 3], dtype=int32) """ operand, padding_value = core.standard_insert_pvary(operand, padding_value) return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config)) def rev(operand: ArrayLike, dimensions: Sequence[int]) -> Array: """Wraps XLA's `Rev <https://www.openxla.org/xla/operation_semantics#rev_reverse>`_ operator. """ return rev_p.bind(operand, dimensions=tuple(dimensions)) def select(pred: ArrayLike, on_true: ArrayLike, on_false: ArrayLike) -> Array: """Selects between two branches based on a boolean predicate. 
Wraps XLA's `Select <https://www.openxla.org/xla/operation_semantics#select>`_ operator. In general :func:`~jax.lax.select` leads to evaluation of both branches, although the compiler may elide computations if possible. For a similar function that usually evaluates only a single branch, see :func:`~jax.lax.cond`. Args: pred: boolean array on_true: array containing entries to return where ``pred`` is True. Must have the same shape as ``pred``, and the same shape and dtype as ``on_false``. on_false: array containing entries to return where ``pred`` is False. Must have the same shape as ``pred``, and the same shape and dtype as ``on_true``. Returns: result: array with same shape and dtype as ``on_true`` and ``on_false``. """ # Caution! The select_n_p primitive has the *opposite* order of arguments to # select(). This is because it implements `select_n`. pred, on_false, on_true = core.standard_insert_pvary( pred, on_false, on_true) return select_n_p.bind(pred, on_false, on_true) def select_n(which: ArrayLike, *cases: ArrayLike) -> Array: """Selects array values from multiple cases. Generalizes XLA's `Select <https://www.openxla.org/xla/operation_semantics#select>`_ operator. Unlike XLA's version, the operator is variadic and can select from many cases using an integer `pred`. Args: which: determines which case should be returned. Must be an array containing either a boolean or integer values. May either be a scalar or have shape matching ``cases``. For each array element, the value of ``which`` determines which of ``cases`` is taken. ``which`` must be in the range ``[0 .. len(cases))``; for values outside that range the behavior is implementation-defined. *cases: a non-empty list of array cases. All must have equal dtypes and equal shapes. Returns: An array with shape and dtype equal to the cases, whose values are chosen according to ``which``. """ if len(cases) == 0: raise ValueError("select_n() must have at least one case") which, *cases = core.standard_insert_pvary(which, *cases) return select_n_p.bind(which, *cases) def transpose(operand: ArrayLike, permutation: Sequence[int] | np.ndarray) -> Array: """Wraps XLA's `Transpose <https://www.openxla.org/xla/operation_semantics#transpose>`_ operator. """ permutation = tuple(operator.index(d) for d in permutation) if permutation == tuple(range(np.ndim(operand))) and isinstance(operand, Array): return operand else: return transpose_p.bind(operand, permutation=permutation) def argmin(operand: ArrayLike, axis: int, index_dtype: DTypeLike) -> Array: """Computes the index of the minimum element along ``axis``.""" index_dtype = dtypes.check_and_canonicalize_user_dtype(index_dtype, 'argmin') return argmin_p.bind(operand, axes=(axis,), index_dtype=index_dtype) def argmax(operand: ArrayLike, axis: int, index_dtype: DTypeLike) -> Array: """Computes the index of the maximum element along ``axis``.""" index_dtype = dtypes.check_and_canonicalize_user_dtype(index_dtype, 'argmax') return argmax_p.bind(operand, axes=(axis,), index_dtype=index_dtype) def reduce(operands: Any, init_values: Any, computation: Callable[[Any, Any], Any], dimensions: Sequence[int], out_sharding: NamedSharding | P | None = None) -> Any: """Wraps XLA's `Reduce <https://www.openxla.org/xla/operation_semantics#reduce>`_ operator. ``init_values`` and ``computation`` together must form a `monoid <https://en.wikipedia.org/wiki/Monoid>`_ for correctness. That is ``init_values`` must be an identity of ``computation``, and ``computation`` must be associative. 
XLA may exploit both of these properties during code generation; if either is violated the result is undefined. """ flat_operands, operand_tree = tree_util.tree_flatten(operands) comp_debug = api_util.debug_info("reduce comp", computation, (init_values, init_values), {}) flat_init_values, init_value_tree = tree_util.tree_flatten(init_values) if operand_tree != init_value_tree: raise ValueError('Operands must have the same tree structure as init_values:' f' {operand_tree} vs. {init_value_tree}') if len(flat_operands) != len(flat_init_values): raise ValueError('Must have same total number of operands as init_values: ' f' {len(flat_operands)} vs. {len(flat_init_values)}') monoid_reducer = _get_monoid_reducer(computation, flat_init_values) if monoid_reducer: # monoid reducers bypass the weak_type_rule, so we set it explicitly. weak_type = (dtypes.is_weakly_typed(*flat_operands) and dtypes.is_weakly_typed(*flat_init_values)) if out_sharding is not None and monoid_reducer is not reduce_sum: raise NotImplementedError out_sharding_dict = ({'out_sharding': out_sharding} if out_sharding is not None else {}) out = monoid_reducer(*flat_operands, dimensions, **out_sharding_dict) return _convert_element_type(out, weak_type=weak_type) else: flat_init_avals = safe_map(core.get_aval, flat_init_values) closed_jaxpr, out_tree = _variadic_reduction_jaxpr( computation, comp_debug, tuple(flat_init_avals), init_value_tree) flat_operands = core.standard_insert_pvary(*flat_operands) flat_init_values = core.standard_insert_pvary(*flat_init_values) out = reduce_p.bind(*flat_operands, *flat_init_values, computation=computation, jaxpr=closed_jaxpr, dimensions=tuple(dimensions)) return tree_util.tree_unflatten(out_tree, out) @cache() def _reduction_jaxpr(computation: Callable, aval: core.AbstractValue): def comp(x, y): result = computation(x, y) if not (isinstance(result, core.Tracer) or core.valid_jaxtype(result)): raise ValueError( f"Invalid return type from reduction function: {type(result)}\n" f"Reduction functions should only return an array.\n" f"Full return value: {result}") return (result,) comp_wrapped = lu.wrap_init( comp, debug_info=api_util.debug_info("reduction_jaxpr", computation, (aval, aval), {})) jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(comp_wrapped, (aval, aval)) if any(isinstance(c, core.Tracer) for c in consts): raise NotImplementedError( "Reduction computations can't close over Tracers. Please open an issue " "at https://github.com/jax-ml/jax.") return jaxpr, tuple(consts) @cache() def _variadic_reduction_jaxpr(computation: Callable[[Any, Any], Any], debug_info: core.DebugInfo, flat_avals, aval_tree: tree_util.PyTreeDef): avals = tree_util.tree_unflatten(aval_tree, flat_avals) flat_in_avals, in_tree = tree_util.tree_flatten((avals, avals)) comp = lu.wrap_init(computation, debug_info=debug_info) flat_comp, out_tree = api_util.flatten_fun_nokwargs(comp, in_tree) jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_comp, tuple(flat_in_avals)) if any(isinstance(c, core.Tracer) for c in consts): raise NotImplementedError( "Reduction computations can't close over Tracers. 
Please open an issue " "at https://github.com/jax-ml/jax.") return core.ClosedJaxpr(jaxpr, consts), out_tree() def _get_monoid_reducer(monoid_op: Callable, xs: Sequence[Array]) -> Callable | None: if len(xs) != 1: return None x, = xs aval = core.get_aval(x) dtype = _dtype(x) if core.is_concrete(x) and aval.shape == (): val = core.to_concrete_value(x) # allow bitwise reductions for boolean and integer types _is_intlike = dtype == np.bool_ or dtypes.issubdtype(dtype, np.integer) if monoid_op is add: return reduce_sum if np.equal(val, 0) else None elif monoid_op is mul: return reduce_prod if np.equal(val, 1) else None elif monoid_op is bitwise_or and _is_intlike: return reduce_or if np.equal(val, _get_bitwise_or_identity(dtype)) else None elif monoid_op is bitwise_and and _is_intlike: return reduce_and if np.equal(val, _get_bitwise_and_identity(dtype)) else None elif monoid_op is bitwise_xor and _is_intlike: return reduce_xor if np.equal(val, _get_bitwise_or_identity(dtype)) else None elif monoid_op is max: return reduce_max if np.equal(val, _get_max_identity(dtype)) else None elif monoid_op is min: return reduce_min if np.equal(val, _get_min_identity(dtype)) else None return None def _get_bitwise_and_identity(dtype: DTypeLike) -> np.ndarray: return np.array(-1).astype(dtype) def _get_bitwise_or_identity(dtype: DTypeLike) -> np.ndarray: return np.array(0, dtype) def _get_sum_identity(dtype: DTypeLike) -> np.ndarray: return np.array(0, dtype) def _get_prod_identity(dtype: DTypeLike) -> np.ndarray: return np.array(1, dtype) def _get_max_identity(dtype: DTypeLike) -> np.ndarray: if dtypes.issubdtype(dtype, np.inexact): return np.array(-np.inf if dtypes.supports_inf(dtype) else dtypes.finfo(dtype).min, dtype=dtype) elif dtypes.issubdtype(dtype, np.integer): return np.array(dtypes.iinfo(dtype).min, dtype) elif dtypes.issubdtype(dtype, np.bool_): return np.array(False, np.bool_) else: raise ValueError(f"Unsupported dtype for max: {dtype}") def _get_min_identity(dtype: DTypeLike) -> np.ndarray: if dtypes.issubdtype(dtype, np.inexact): return np.array(np.inf if dtypes.supports_inf(dtype) else dtypes.finfo(dtype).max, dtype=dtype) elif dtypes.issubdtype(dtype, np.integer): return np.array(dtypes.iinfo(dtype).max, dtype) elif dtypes.issubdtype(dtype, np.bool_): return np.array(True, np.bool_) else: raise ValueError(f"Unsupported dtype for min: {dtype}") def reduce_sum(operand: ArrayLike, axes: Sequence[int], *, out_sharding=None) -> Array: """Compute the sum of elements over one or more array axes. Args: operand: array over which to sum. Must have numerical dtype. axes: sequence of zero or more unique integers specifying the axes over which to sum. Each entry must satisfy ``0 <= axis < operand.ndim``. Returns: An array of the same dtype as ``operand``, with shape corresponding to the dimensions of ``operand.shape`` with ``axes`` removed. Notes: Unlike :func:`jax.numpy.sum`, :func:`jax.lax.reduce_sum` does not upcast narrow-width types for accumulation, so sums of 8-bit or 16-bit types may be subject to rounding errors. See also: - :func:`jax.numpy.sum`: more flexible NumPy-style summation API, built around :func:`jax.lax.reduce_sum`. - Other low-level :mod:`jax.lax` reduction operators: :func:`jax.lax.reduce_prod`, :func:`jax.lax.reduce_max`, :func:`jax.lax.reduce_min`, :func:`jax.lax.reduce_and`, :func:`jax.lax.reduce_or`, :func:`jax.lax.reduce_xor`. 
""" out_sharding = canonicalize_sharding(out_sharding, 'reduce_sum') return reduce_sum_p.bind(operand, axes=tuple(axes), out_sharding=out_sharding) def reduce_prod(operand: ArrayLike, axes: Sequence[int]) -> Array: """Compute the product of elements over one or more array axes. Args: operand: array over which to sum. Must have numerical dtype. axes: sequence of zero or more unique integers specifying the axes over which to sum. Each entry must satisfy ``0 <= axis < operand.ndim``. Returns: An array of the same dtype as ``operand``, with shape corresponding to the dimensions of ``operand.shape`` with ``axes`` removed. Notes: Unlike :func:`jax.numpy.prod`, :func:`jax.lax.reduce_prod` does not upcast narrow-width types for accumulation, so products of 8-bit or 16-bit types may be subject to rounding errors. See also: - :func:`jax.numpy.prod`: more flexible NumPy-style product API, built around :func:`jax.lax.reduce_prod`. - Other low-level :mod:`jax.lax` reduction operators: :func:`jax.lax.reduce_sum`, :func:`jax.lax.reduce_max`, :func:`jax.lax.reduce_min`, :func:`jax.lax.reduce_and`, :func:`jax.lax.reduce_or`, :func:`jax.lax.reduce_xor`. """ return reduce_prod_p.bind(operand, axes=tuple(axes)) def reduce_max(operand: ArrayLike, axes: Sequence[int]) -> Array: """Compute the maximum of elements over one or more array axes. Args: operand: array over which to compute maximum. axes: sequence of zero or more unique integers specifying the axes over which to reduce. Each entry must satisfy ``0 <= axis < operand.ndim``. Returns: An array of the same dtype as ``operand``, with shape corresponding to the dimensions of ``operand.shape`` with ``axes`` removed. See also: - :func:`jax.numpy.max`: more flexible NumPy-style max-reduction API, built around :func:`jax.lax.reduce_max`. - Other low-level :mod:`jax.lax` reduction operators: :func:`jax.lax.reduce_sum`, :func:`jax.lax.reduce_prod`, :func:`jax.lax.reduce_min`, :func:`jax.lax.reduce_and`, :func:`jax.lax.reduce_or`, :func:`jax.lax.reduce_xor`. """ return reduce_max_p.bind(operand, axes=tuple(axes)) def reduce_min(operand: ArrayLike, axes: Sequence[int]) -> Array: """Compute the minimum of elements over one or more array axes. Args: operand: array over which to compute minimum. axes: sequence of zero or more unique integers specifying the axes over which to reduce. Each entry must satisfy ``0 <= axis < operand.ndim``. Returns: An array of the same dtype as ``operand``, with shape corresponding to the dimensions of ``operand.shape`` with ``axes`` removed. See also: - :func:`jax.numpy.min`: more flexible NumPy-style min-reduction API, built around :func:`jax.lax.reduce_min`. - Other low-level :mod:`jax.lax` reduction operators: :func:`jax.lax.reduce_sum`, :func:`jax.lax.reduce_prod`, :func:`jax.lax.reduce_max`, :func:`jax.lax.reduce_and`, :func:`jax.lax.reduce_or`, :func:`jax.lax.reduce_xor`. """ return reduce_min_p.bind(operand, axes=tuple(axes)) def reduce_or(operand: ArrayLike, axes: Sequence[int]) -> Array: """Compute the bitwise OR of elements over one or more array axes. Args: operand: array over which to compute the reduction. Must have boolean or integer dtype. axes: sequence of zero or more unique integers specifying the axes over which to reduce. Each entry must satisfy ``0 <= axis < operand.ndim``. Returns: An array of the same dtype as ``operand``, with shape corresponding to the dimensions of ``operand.shape`` with ``axes`` removed. 
See also: - :func:`jax.numpy.bitwise_or.reduce`: more flexible NumPy-style logical reduction API, built around :func:`jax.lax.reduce_or`. - Other low-level :mod:`jax.lax` reduction operators: :func:`jax.lax.reduce_sum`, :func:`jax.lax.reduce_prod`, :func:`jax.lax.reduce_max`, :func:`jax.lax.reduce_min`, :func:`jax.lax.reduce_and`, :func:`jax.lax.reduce_xor`. """ return reduce_or_p.bind(operand, axes=tuple(axes)) def reduce_and(operand: ArrayLike, axes: Sequence[int]) -> Array: """Compute the bitwise AND of elements over one or more array axes. Args: operand: array over which to compute the reduction. Must have boolean or integer dtype. axes: sequence of zero or more unique integers specifying the axes over which to reduce. Each entry must satisfy ``0 <= axis < operand.ndim``. Returns: An array of the same dtype as ``operand``, with shape corresponding to the dimensions of ``operand.shape`` with ``axes`` removed. See also: - :func:`jax.numpy.bitwise_and.reduce`: more flexible NumPy-style logical reduction API, built around :func:`jax.lax.reduce_and`. - Other low-level :mod:`jax.lax` reduction operators: :func:`jax.lax.reduce_sum`, :func:`jax.lax.reduce_prod`, :func:`jax.lax.reduce_max`, :func:`jax.lax.reduce_min`, :func:`jax.lax.reduce_or`, :func:`jax.lax.reduce_xor`. """ return reduce_and_p.bind(operand, axes=tuple(axes)) def reduce_xor(operand: ArrayLike, axes: Sequence[int]) -> Array: """Compute the bitwise XOR of elements over one or more array axes. Args: operand: array over which to compute the reduction. Must have boolean or integer dtype. axes: sequence of zero or more unique integers specifying the axes over which to reduce. Each entry must satisfy ``0 <= axis < operand.ndim``. Returns: An array of the same dtype as ``operand``, with shape corresponding to the dimensions of ``operand.shape`` with ``axes`` removed. See also: - :func:`jax.numpy.bitwise_xor.reduce`: more flexible NumPy-style logical reduction API, built around :func:`jax.lax.reduce_xor`. - Other low-level :mod:`jax.lax` reduction operators: :func:`jax.lax.reduce_sum`, :func:`jax.lax.reduce_prod`, :func:`jax.lax.reduce_max`, :func:`jax.lax.reduce_min`, :func:`jax.lax.reduce_and`, :func:`jax.lax.reduce_or`. """ return reduce_xor_p.bind(operand, axes=tuple(axes)) @overload def sort(operand: Array, dimension: int = -1, is_stable: bool = True, num_keys: int = 1) -> Array: ... @overload def sort(operand: Sequence[Array], dimension: int = -1, is_stable: bool = True, num_keys: int = 1) -> tuple[Array, ...]: ... def sort(operand: Array | Sequence[Array], dimension: int = -1, is_stable: bool = True, num_keys: int = 1) -> Array | tuple[Array, ...]: """Wraps XLA's `Sort <https://www.openxla.org/xla/operation_semantics#sort>`_ operator. For floating point inputs, -0.0 and 0.0 are treated as equivalent, and NaN values are sorted to the end of the array. For complex inputs, the sort order is lexicographic over the real and imaginary parts, with the real part primary. Args: operand : Array or sequence of arrays dimension : integer dimension along which to sort. Default: -1. is_stable : boolean specifying whether to use a stable sort. Default: True. num_keys : number of operands to treat as sort keys. Default: 1. For num_keys > 1, the sort order will be determined lexicographically using the first `num_keys` arrays, with the first key being primary. The remaining operands will be returned with the same permutation. Returns: operand : sorted version of the input or inputs. 
""" if isinstance(operand, Sequence): if len(operand) == 0: raise TypeError("Sort requires at least one operand") if not (1 <= num_keys <= len(operand)): raise ValueError(f"{num_keys=} must be between 1 and {len(operand)=}") dimension = canonicalize_axis(dimension, len(operand[0].shape)) operand = core.standard_insert_pvary(*operand) return tuple(sort_p.bind(*operand, dimension=dimension, is_stable=is_stable, num_keys=num_keys)) else: if num_keys != 1: raise ValueError(f"{num_keys=} must equal 1 for a single operand.") dimension = canonicalize_axis(dimension, len(operand.shape)) return sort_p.bind(operand, dimension=dimension, is_stable=is_stable, num_keys=1)[0] def sort_key_val(keys: Array, values: ArrayLike, dimension: int = -1, is_stable: bool = True) -> tuple[Array, Array]: """Sorts ``keys`` along ``dimension`` and applies the same permutation to ``values``.""" dimension = canonicalize_axis(dimension, len(keys.shape)) k, v = sort_p.bind(keys, values, dimension=dimension, is_stable=is_stable, num_keys=1) return k, v def top_k(operand: ArrayLike, k: int, *, axis: int = -1) -> tuple[Array, Array]: """Returns top ``k`` values and their indices along the specified axis of ``operand``. Args: operand: N-dimensional array of non-complex type. k: integer specifying the number of top entries. axis: optional integer specifying the axis along which to compute the top ``k`` entries. Default is -1, indicating the last axis. Returns: A tuple ``(values, indices)`` where - ``values`` is an array containing the top k values along the last axis. - ``indices`` is an array containing the indices corresponding to values. ``values[..., i, ...]`` is the ``i``-th largest entry in ``operand`` along the specified axis, and its index is ``indices[..., i, ...]``. If two elements are equal, the lower-index element appears first. See also: - :func:`jax.lax.approx_max_k` - :func:`jax.lax.approx_min_k` Examples: Find the largest three values, and their indices, within an array: >>> x = jnp.array([9., 3., 6., 4., 10.]) >>> values, indices = jax.lax.top_k(x, 3) >>> values Array([10., 9., 6.], dtype=float32) >>> indices Array([4, 0, 2], dtype=int32) """ if core.is_constant_dim(k): k = int(k) if k < 0: raise ValueError(f"k argument to top_k must be nonnegative, got {k}") axis = canonicalize_axis(axis, np.ndim(operand)) return top_k_p.bind(operand, k=k, axis=axis) def tie_in(x: Any, y: T) -> T: """Deprecated. Ignores ``x`` and returns ``y``.""" return y def full(shape: Shape, fill_value: ArrayLike, dtype: DTypeLike | None = None, *, sharding: Sharding | None = None) -> Array: """Returns an array of `shape` filled with `fill_value`. Args: shape: sequence of integers, describing the shape of the output array. fill_value: the value to fill the new array with. dtype: the type of the output array, or `None`. If not `None`, `fill_value` will be cast to `dtype`. sharding: an optional sharding specification for the resulting array, note, sharding will currently be ignored in jitted mode, this might change in the future. """ shape = canonicalize_shape(shape) if np.shape(fill_value): msg = "full must be called with scalar fill_value, got fill_value.shape {}." 
raise TypeError(msg.format(np.shape(fill_value))) if dtype is None: weak_type = dtypes.is_weakly_typed(fill_value) fill_dtype = _dtype(fill_value) else: if dtypes.issubdtype(dtype, dtypes.extended): return dtype._rules.full(shape, fill_value, dtype) # type: ignore[union-attr] weak_type = False fill_dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "full") fill_value = _convert_element_type(fill_value, fill_dtype, weak_type) if (sharding is not None and not isinstance(sharding, PmapSharding) and isinstance(fill_value, array.ArrayImpl) and sharding._is_concrete): broadcast_shape = sharding.shard_shape(shape) shard = broadcast(fill_value, broadcast_shape) shard = shard.addressable_data(0) return array.make_array_from_callback( shape, sharding, lambda _: shard, dtype=fill_dtype) if sharding is not None and not sharding._is_concrete: return broadcast(fill_value, shape, out_sharding=sharding) else: return broadcast(fill_value, shape) def zeros_like_shaped_array(aval: ShapedArray) -> Array: assert isinstance(aval, ShapedArray) if dtypes.issubdtype(aval.dtype, dtypes.extended): scalar_zero = aval.dtype._rules.zero(aval.dtype) elif aval.dtype == dtypes.float0: scalar_zero = np.zeros((), dtype=aval.dtype) else: scalar_zero = _convert_element_type(0, aval.dtype, aval.weak_type) out = broadcast(scalar_zero, aval.shape, out_sharding=aval.sharding) return core.pvary(out, tuple(aval.vma)) ad_util.aval_zeros_likers[ShapedArray] = zeros_like_shaped_array def zeros_like_abstract_ref(aval: state.AbstractRef) -> core.Ref: val = ad_util.zeros_like_aval(aval.inner_aval) return core.new_ref(val) # TODO(dougalm): this is nonsense but it's here because in places like # custom_vjp we assume that all arguments have tangent spaces. We could have # a distinct NotATangentType value instead. ad_util.aval_zeros_likers[state.AbstractRef] = zeros_like_abstract_ref # type: ignore def iota(dtype: DTypeLike, size: int) -> Array: """Wraps XLA's `Iota <https://www.openxla.org/xla/operation_semantics#iota>`_ operator. 
""" return broadcasted_iota(dtype, (size,), 0) def broadcasted_iota(dtype: DTypeLike, shape: Shape, dimension: int, *, out_sharding=None) -> Array: """Convenience wrapper around ``iota``.""" dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "broadcasted_iota") shape = canonicalize_shape(shape) dynamic_shape = [d for d in shape if isinstance(d, core.Tracer)] static_shape = [None if isinstance(d, core.Tracer) else d for d in shape] dimension = core.concrete_or_error( int, dimension, "dimension argument of lax.broadcasted_iota") out_sharding = canonicalize_sharding(out_sharding, 'broadcasted_iota') return iota_p.bind(*dynamic_shape, dtype=dtype, shape=tuple(static_shape), dimension=dimension, sharding=out_sharding) def _eye(dtype: DTypeLike, shape: Shape, offset: DimSize = 0) -> Array: """Like numpy.eye, create a 2D array with ones on a diagonal.""" offset = _clip_int_to_valid_range(offset, np.int32, "argument `offset` of jax.numpy.eye") dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "eye") bool_eye = eq(add(broadcasted_iota(np.int32, shape, 0), np.int32(offset)), broadcasted_iota(np.int32, shape, 1)) return convert_element_type_p.bind(bool_eye, new_dtype=dtype, weak_type=False, sharding=None) def _delta(dtype: DTypeLike, shape: Shape, axes: Sequence[int]) -> Array: """This utility function exists for creating Kronecker delta arrays.""" axes = map(int, axes) dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "delta") base_shape = tuple(np.take(shape, axes)) iotas = [broadcasted_iota(np.uint32, base_shape, i) for i in range(len(base_shape))] eyes = [eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])] result = convert_element_type_p.bind( _reduce(operator.and_, eyes), new_dtype=dtype, weak_type=False, sharding=None) return broadcast_in_dim(result, shape, axes) def _tri(dtype: DTypeLike, shape: Shape, offset: DimSize) -> Array: """Like numpy.tri, create a 2D array with ones below a diagonal.""" offset = asarray(core.dimension_as_value(offset)) if not dtypes.issubdtype(offset, np.integer): raise TypeError(f"offset must be an integer, got {offset!r}") shape_dtype = lax_utils.int_dtype_for_shape(shape, signed=True) if ( np.iinfo(offset.dtype).min < np.iinfo(shape_dtype).min or np.iinfo(offset.dtype).max > np.iinfo(shape_dtype).max ): shape_dtype = np.dtype(np.int64) dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "tri") bool_tri = ge(add(broadcasted_iota(shape_dtype, shape, 0), offset.astype(shape_dtype)), broadcasted_iota(shape_dtype, shape, 1)) return convert_element_type_p.bind(bool_tri, new_dtype=dtype, weak_type=False, sharding=None) def stop_gradient(x: T) -> T: """Stops gradient computation. Operationally ``stop_gradient`` is the identity function, that is, it returns argument `x` unchanged. However, ``stop_gradient`` prevents the flow of gradients during forward or reverse-mode automatic differentiation. If there are multiple nested gradient computations, ``stop_gradient`` stops gradients for all of them. For some discussion of where this is useful, refer to :ref:`stopping-gradients`. Args: x: array or pytree of arrays Returns: input value is returned unchanged, but within autodiff will be treated as a constant. Examples: Consider a simple function that returns the square of the input value: >>> def f1(x): ... 
return x ** 2 >>> x = jnp.float32(3.0) >>> f1(x) Array(9.0, dtype=float32) >>> jax.grad(f1)(x) Array(6.0, dtype=float32) The same function with ``stop_gradient`` around ``x`` will be equivalent under normal evaluation, but return a zero gradient because ``x`` is effectively treated as a constant: >>> def f2(x): ... return jax.lax.stop_gradient(x) ** 2 >>> f2(x) Array(9.0, dtype=float32) >>> jax.grad(f2)(x) Array(0.0, dtype=float32) This is used in a number of places within the JAX codebase; for example :func:`jax.nn.softmax` internally normalizes the input by its maximum value, and this maximum value is wrapped in ``stop_gradient`` for efficiency. Refer to :ref:`stopping-gradients` for more discussion of the applicability of ``stop_gradient``. """ def stop(x): # only bind primitive on inexact dtypes, to avoid some staging if dtypes.issubdtype(core.get_aval(x).dtype, dtypes.extended): return x elif (dtypes.issubdtype(_dtype(x), np.floating) or dtypes.issubdtype(_dtype(x), np.complexfloating)): # break abstractions to support legacy leaked tracer use cases if isinstance(x, ad.JVPTracer): return stop(x.primal) return ad_util.stop_gradient_p.bind(x) else: return x return tree_util.tree_map(stop, x) def reduce_precision(operand: float | ArrayLike, exponent_bits: int, mantissa_bits: int) -> Array: """Wraps XLA's `ReducePrecision <https://www.openxla.org/xla/operation_semantics#reduceprecision>`_ operator. """ exponent_bits = core.concrete_or_error( operator.index, exponent_bits, "exponent_bits argument of lax.reduce_precision") mantissa_bits = core.concrete_or_error( operator.index, mantissa_bits, "mantissa_bits argument of lax.reduce_precision") return reduce_precision_p.bind(operand, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits) def squeeze(array: ArrayLike, dimensions: Sequence[int]) -> Array: """Squeeze any number of size 1 dimensions from an array.""" ndim = np.ndim(array) dimensions = tuple(sorted(canonicalize_axis(i, ndim) for i in dimensions)) if not dimensions and isinstance(array, Array): return array return squeeze_p.bind(array, dimensions=dimensions) def expand_dims(array: ArrayLike, dimensions: Sequence[int]) -> Array: """Insert any number of size 1 dimensions into an array.""" if len(set(dimensions)) != len(dimensions): raise ValueError(f'repeated axis in lax.expand_dims: {dimensions}') ndim_out = np.ndim(array) + len(dimensions) dims = [canonicalize_axis(i, ndim_out) for i in dimensions] if len(set(dims)) != len(dims): # check again after canonicalizing raise ValueError(f'repeated axis in lax.expand_dims: {dims}') dims_set = frozenset(dims) result_shape = list(np.shape(array)) for i in sorted(dims_set): result_shape.insert(i, 1) broadcast_dims = [i for i in range(ndim_out) if i not in dims_set] return broadcast_in_dim(array, result_shape, broadcast_dims) ### convenience wrappers around traceables def full_like(x: ArrayLike | DuckTypedArray, fill_value: ArrayLike, dtype: DTypeLike | None = None, shape: Shape | None = None, sharding: Sharding | None = None) -> Array: """Create a full array like np.full based on the example array `x`. Args: x: example array-like, used for shape and dtype information. fill_value: a scalar value to fill the entries of the output array. dtype: optional, a dtype parameter for the output ndarray. shape: optional, a shape parameter for the output ndarray. sharding: an optional sharding specification for the resulting array. If not specified, the output will have the same sharding as the input, with a few exceptions/limitations in particular: 1. 
Sharding is not available during tracing, thus this will rely on jit. 2. If x is weakly typed or uncommitted, will use default sharding. 3. Shape is not None and is different from x.shape, default will be used. Returns: An ndarray with the same shape as `x` with its entries set equal to `fill_value`, similar to the output of np.full. """ fill_shape = np.shape(x) if shape is None else canonicalize_shape(shape) # type: ignore[arg-type] weak_type = dtype is None and dtypes.is_weakly_typed(x) dtype = _dtype(dtype) if dtype is not None else _dtype(x) if dtypes.issubdtype(dtype, dtypes.extended): return dtype._rules.full(fill_shape, fill_value, dtype) # type: ignore[union-attr] if sharding is None and shape is None and isinstance(x, core.Tracer): sharding = x.aval.sharding else: # If `x` has a sharding but no `_committed` attribute # (in case of ShapeDtypeStruct), default it to True. use_x_sharding = ( sharding is None # Tracer have special logic in handling sharding and even # though hasattr(x, 'sharding') returns False, it is very slow. # This bypasses the check. and not isinstance(x, core.Tracer) and hasattr(x, 'sharding') and x.sharding is not None and (x.sharding._is_concrete or not get_concrete_mesh().empty) and getattr(x, '_committed', True) and not weak_type and fill_shape == np.shape(x) # type: ignore[arg-type] ) if use_x_sharding: sharding = x.sharding # type: ignore val = full(fill_shape, _convert_element_type(fill_value, dtype, weak_type), sharding=sharding) if config._check_vma.value: # TODO(yashkatariya): Maybe use `shaped_abstractify` here instead of # `typeof` because `x` can be anything that implements the # `DuckTypedArray` protocol. val = core.pvary(val, tuple(core.typeof(x).vma)) return val def collapse(operand: Array, start_dimension: int, stop_dimension: int | None = None) -> Array: """Collapses dimensions of an array into a single dimension. For example, if ``operand`` is an array with shape ``[2, 3, 4]``, ``collapse(operand, 0, 2).shape == [6, 4]``. The elements of the collapsed dimension are laid out major-to-minor, i.e., with the lowest-numbered dimension as the slowest varying dimension. Args: operand: an input array. start_dimension: the start of the dimensions to collapse (inclusive). stop_dimension: the end of the dimensions to collapse (exclusive). Pass None to collapse all the dimensions after start. Returns: An array where dimensions ``[start_dimension, stop_dimension)`` have been collapsed (raveled) into a single dimension. 
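
  Examples:
    Collapse the first two dimensions of a ``(2, 3, 4)`` array, as described
    above:

    >>> x = jnp.arange(24).reshape(2, 3, 4)
    >>> jax.lax.collapse(x, 0, 2).shape
    (6, 4)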
""" lo, hi, _ = slice(start_dimension, stop_dimension).indices(len(operand.shape)) if hi < lo: raise ValueError(f"Invalid dimension range passed to collapse: {operand.shape}" f"[{start_dimension}:{stop_dimension}]") size = math.prod(operand.shape[lo:hi]) new_shape = operand.shape[:lo] + (size,) + operand.shape[hi:] return reshape(operand, new_shape) def batch_matmul(lhs: Array, rhs: Array, precision: PrecisionLike = None) -> Array: """Batch matrix multiplication.""" if _min(lhs.ndim, rhs.ndim) < 2: raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}' .format(lhs.ndim, rhs.ndim)) if lhs.ndim != rhs.ndim: raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}' .format(lhs.ndim, rhs.ndim)) lhs_contract = (lhs.ndim - 1,) rhs_contract = (rhs.ndim - 2,) batch = tuple(range(lhs.ndim - 2)) return dot_general(lhs, rhs, ((lhs_contract, rhs_contract), (batch, batch)), precision=precision) # These functions also exist in the XLA client library, but we treat them # as non-primitive to maintain a smaller set of autodiff primitives. def square(x: ArrayLike) -> Array: r"""Elementwise square: :math:`x^2`.""" return square_p.bind(x) def reciprocal(x: ArrayLike) -> Array: r"""Elementwise reciprocal: :math:`1 \over x`.""" return integer_pow(x, -1) @export def tan(x: ArrayLike, accuracy=None) -> Array: r"""Elementwise tangent: :math:`\mathrm{tan}(x)`. This function lowers directly to the `stablehlo.tangent`_ operation. Args: x: input array. Must have floating-point or complex type. accuracy: Optional `lax.Tolerance` or `lax.AccuracyMode` object that selects the implementation of the op based on the requested accuracy. If the implementation cannot satisfy the requested tolerance, the compiler will return an error. If mode is specified and there are no multiple implementations available, the default implementation will be used. Returns: Array of the same shape and dtype as ``x`` containing the element-wise tangent. See also: - :func:`jax.lax.cos`: elementwise cosine. - :func:`jax.lax.sin`: elementwise sine. - :func:`jax.lax.atan`: elementwise arc tangent. - :func:`jax.lax.atan2`: elementwise 2-term arc tangent. .. _stablehlo.tangent: https://openxla.org/stablehlo/spec#tangent """ return tan_p.bind(x, accuracy=accuracy) @export def asin(x: ArrayLike) -> Array: r"""Elementwise arc sine: :math:`\mathrm{asin}(x)`. This function lowers directly to the ``chlo.asin`` operation. Args: x: input array. Must have floating-point or complex type. Returns: Array of the same shape and dtype as ``x`` containing the element-wise arc sine. See also: - :func:`jax.lax.sin`: elementwise sine. - :func:`jax.lax.acos`: elementwise arc cosine. - :func:`jax.lax.atan`: elementwise arc tangent. """ return asin_p.bind(x) @export def acos(x: ArrayLike) -> Array: r"""Elementwise arc cosine: :math:`\mathrm{acos}(x)`. This function lowers directly to the ``chlo.acos`` operation. Args: x: input array. Must have floating-point or complex type. Returns: Array of the same shape and dtype as ``x`` containing the element-wise arc cosine. See also: - :func:`jax.lax.cos`: elementwise cosine. - :func:`jax.lax.asin`: elementwise arc sine. - :func:`jax.lax.atan`: elementwise arc tangent. """ return acos_p.bind(x) @export def atan(x: ArrayLike) -> Array: r"""Elementwise arc tangent: :math:`\mathrm{atan}(x)`. This function lowers directly to the ``chlo.atan`` operation. Args: x: input array. Must have floating-point or complex type. 
Returns: Array of the same shape and dtype as ``x`` containing the element-wise arc tangent. See also: - :func:`jax.lax.tan`: elementwise tangent. - :func:`jax.lax.acos`: elementwise arc cosine. - :func:`jax.lax.asin`: elementwise arc sine. - :func:`jax.lax.atan2`: elementwise 2-term arc tangent. """ return atan_p.bind(x) @export def sinh(x: ArrayLike) -> Array: r"""Elementwise hyperbolic sine: :math:`\mathrm{sinh}(x)`. This function lowers directly to the ``chlo.sinh`` operation. Args: x: input array. Must have floating-point or complex type. Returns: Array of the same shape and dtype as ``x`` containing the element-wise hyperbolic sine. See also: - :func:`jax.lax.asinh`: elementwise inverse hyperbolic sine. - :func:`jax.lax.cosh`: elementwise hyperbolic cosine. - :func:`jax.lax.tanh`: elementwise hyperbolic tangent. """ return sinh_p.bind(x) @export def cosh(x: ArrayLike) -> Array: r"""Elementwise hyperbolic cosine: :math:`\mathrm{cosh}(x)`. This function lowers directly to the ``chlo.cosh`` operation. Args: x: input array. Must have floating-point or complex type. Returns: Array of the same shape and dtype as ``x`` containing the element-wise hyperbolic cosine. See also: - :func:`jax.lax.acosh`: elementwise inverse hyperbolic cosine. - :func:`jax.lax.sinh`: elementwise hyperbolic sine. - :func:`jax.lax.tanh`: elementwise hyperbolic tangent. """ return cosh_p.bind(x) @export def asinh(x: ArrayLike) -> Array: r"""Elementwise inverse hyperbolic sine: :math:`\mathrm{asinh}(x)`. This function lowers directly to the ``chlo.asinh`` operation. Args: x: input array. Must have floating-point or complex type. Returns: Array of the same shape and dtype as ``x`` containing the element-wise inverse hyperbolic sine. See also: - :func:`jax.lax.acosh`: elementwise inverse hyperbolic cosine. - :func:`jax.lax.atanh`: elementwise inverse hyperbolic tangent. - :func:`jax.lax.sinh`: elementwise hyperbolic sine. """ return asinh_p.bind(x) @export def acosh(x: ArrayLike) -> Array: r"""Elementwise inverse hyperbolic cosine: :math:`\mathrm{acosh}(x)`. This function lowers directly to the ``chlo.acosh`` operation. Args: x: input array. Must have floating-point or complex type. Returns: Array of the same shape and dtype as ``x`` containing the element-wise inverse hyperbolic cosine. See also: - :func:`jax.lax.asinh`: elementwise inverse hyperbolic sine. - :func:`jax.lax.atanh`: elementwise inverse hyperbolic tangent. - :func:`jax.lax.cosh`: elementwise hyperbolic cosine. """ return acosh_p.bind(x) @export def atanh(x: ArrayLike) -> Array: r"""Elementwise inverse hyperbolic tangent: :math:`\mathrm{atanh}(x)`. This function lowers directly to the ``chlo.atanh`` operation. Args: x: input array. Must have floating-point or complex type. Returns: Array of the same shape and dtype as ``x`` containing the element-wise inverse hyperbolic tangent. See also: - :func:`jax.lax.acosh`: elementwise inverse hyperbolic cosine. - :func:`jax.lax.asinh`: elementwise inverse hyperbolic sine. - :func:`jax.lax.tanh`: elementwise hyperbolic tangent. 
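
  Examples:
    On its real domain ``(-1, 1)``, ``atanh`` inverts :func:`jax.lax.tanh`:

    >>> x = jnp.array([-0.5, 0.0, 0.5])
    >>> jnp.allclose(jax.lax.tanh(jax.lax.atanh(x)), x)
    Array(True, dtype=bool)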
""" return atanh_p.bind(x) # Add some methods to ShapedArray that rely on lax primitives ShapedArray.broadcast = core.aval_method(broadcast) ShapedArray.transpose = core.aval_method(transpose) # clobbered by lax_numpy ShapedArray.reshape = core.aval_method(reshape) # clobbered by lax_numpy def _iter(tracer): if tracer.ndim == 0: raise TypeError("iteration over a 0-d array") # same as numpy error else: n = int(tracer.shape[0]) if any(isinstance(d, core.Tracer) for d in tracer.shape): return (slicing.dynamic_index_in_dim(tracer, i, keepdims=False) for i in range(n)) else: return (slicing.index_in_dim(tracer, i, keepdims=False) for i in range(n)) ShapedArray._iter = staticmethod(_iter) core.DShapedArray._iter = staticmethod(_iter) def zeros_like_array(x: ArrayLike) -> Array: return full_like(x, 0) def _add_arrays(x, y): if (isinstance(a := core.get_aval(x), ShapedArray) and dtypes.issubdtype(a.dtype, dtypes.extended)): return dtype._rules.add(dtype, x, y) # pytype: disable=attribute-error return add(x, y) for t in itertools.chain( dtypes.python_scalar_types, array_types, [array.ArrayImpl], literals.typed_scalar_types): ad_util.raw_jaxval_adders[t] = _add_arrays ### primitives _fixed_dtype = \ lambda dtype: lambda *args, **kwargs: np.dtype(dtype) _complex_basetype = lambda dtype, **kwargs: np.abs(np.zeros((), dtype)).dtype _strip_weak_type = lambda *args, **_: False def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, supports_narrow_ints=True, **kwargs): if aval.dtype == dtypes.float0: raise TypeError( f"Called {name} with a float0 array. " "float0s do not support any operations by design, because they " "are not compatible with non-trivial vector spaces. No implicit dtype " "conversion is done. You can use np.zeros_like(arr, dtype=np.float) " "to cast a float0 array to a regular zeros array. \n" "If you didn't expect to get a float0 you might have accidentally " "taken a gradient with respect to an integer argument.") if not any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes): msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.' typename = dtype_to_string(aval.dtype) accepted_typenames = (t.__name__ for t in accepted_dtypes) raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames))) if (not supports_narrow_ints) and aval.dtype in [dtypes.uint2, dtypes.int2, dtypes.uint4, dtypes.int4]: raise TypeError(f'{name} does not accept dtype {dtype_to_string(aval.dtype)}.' ' Support for narrow-width integers is platform-dependent' ' and limited to a few specific operations, e.g. 
basic' ' arithmetic and type casting.') return result_dtype(aval.dtype, **kwargs) def unop_reduced_rule(out_s, aval, **kwargs): return out_s.update(spec=out_s.spec.update(reduced=aval.sharding.spec.reduced)) def unop(result_dtype, accepted_dtypes, name, supports_narrow_ints=True): dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name, supports_narrow_ints=supports_narrow_ints) prim = standard_primitive(_attrgetter('shape'), dtype_rule, name, sharding_rule=_attrgetter('sharding'), vma_rule=_attrgetter('vma'), reduced_rule=unop_reduced_rule) batching.defvectorized(prim) pe.def_trivial_padding(prim) return prim standard_unop = partial(unop, _identity) _attrgetter = lambda name: lambda x, **kwargs: getattr(x, name) def naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, require_same=True, allow_extended_dtype=False, **kwargs): assert len(avals) == len(accepted_dtypes), (avals, accepted_dtypes) for i, aval in enumerate(avals): if allow_extended_dtype and isinstance(aval.dtype, dtypes.ExtendedDType): continue types = accepted_dtypes[i] if not any(dtypes.issubdtype(aval.dtype, t) for t in types): if aval.dtype == dtypes.float0: raise TypeError( f"Called {name} with a float0 at position {i}. " "float0s do not support any operations by design, because they " "are not compatible with non-trivial vector spaces. No implicit dtype " "conversion is done. You can use np.zeros_like(arr, dtype=np.float) " "to cast a float0 array to a regular zeros array. \n" "If you didn't expect to get a float0 you might have accidentally " "taken a gradient with respect to an integer argument.") else: msg = ('{} does not accept dtype {} at position {}. ' 'Accepted dtypes at position {} are subtypes of {}.') typename = dtype_to_string(aval.dtype) typenames = ', '.join(t.__name__ for t in types) raise TypeError(msg.format(name, typename, i, i, typenames)) if require_same: check_same_dtypes(name, *avals) return result_dtype(*avals, **kwargs) def broadcasting_shape_rule(name, *avals): shapes = [aval.shape for aval in avals if aval.shape] if not shapes: return () return _try_broadcast_shapes(*shapes, name=name) def broadcasting_sharding_rule(name, *avals): mesh = None for a in avals: if a.sharding is not None and not a.sharding.mesh.empty: if mesh is not None and mesh != a.sharding.mesh: raise core.ShardingTypeError( f'Mesh for all inputs should be equal. Got one mesh: {mesh} and' f' another mesh: {a.sharding.mesh}') mesh = a.sharding.mesh mesh = get_abstract_mesh() if mesh is None else mesh shapes = [aval.shape for aval in avals if aval.shape] if not shapes: return NamedSharding(mesh, P()) if len({len(shape) for shape in shapes}) != 1: msg = '{}: arrays must have same number of dimensions, got {}.' raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes))))) specs = [a.sharding.spec for a in avals if a.shape] result_specs = [None] * len(shapes[0]) for i, (ss, ds) in enumerate(zip(zip(*specs), zip(*shapes))): if all(ss[0] == s for s in ss[1:]): # if all dimension shardings are same, the resulting dimension sharding is # the same. 
result_specs[i] = ss[0] else: non_trivial_s = [s for s, d in zip(ss, ds) if not (core.definitely_equal(d, 1) and s is None)] if not non_trivial_s: result_specs[i] = None elif all(non_trivial_s[0] == s for s in non_trivial_s[1:]): result_specs[i] = non_trivial_s[0] else: for s in ss: if result_specs[i] is None and s is not None: result_specs[i] = s elif (result_specs[i] is not None and s is not None and result_specs[i] != s): raise core.ShardingTypeError( f'{name} got incompatible shardings for broadcasting: ' f'{", ".join(map(str, map(tuple, specs)))}.') return NamedSharding(mesh, P(*result_specs)) def nary_reduced_rule(out_s, *avals, **params): non_empty_avals = [a for a in avals if a.shape] specs = [a.sharding.spec for a in non_empty_avals] reduced_spec = {s.reduced for s in specs if s.reduced} if len(reduced_spec) > 1: raise core.ShardingTypeError( 'All inputs should be reduced across the same mesh axes. Got specs:' f' {reduced_spec}') reduced_s, = reduced_spec if reduced_spec else (frozenset(),) if reduced_s: for a in non_empty_avals: s = a.sharding.spec flat_spec = flatten_spec(s) if a.sharding.replicated_axes & reduced_s: raise core.ShardingTypeError( 'Inputs cannot be replicated on the same axes that another input' f' is reduced on. Got input spec: {s} and reduced spec: {reduced_s}') if frozenset(flat_spec) & reduced_s: raise core.ShardingTypeError( 'Inputs cannot be sharded on the same axes that another input is' ' reduced on. Reshard the input which is reduced to be sharded on' ' the mesh axes it is reduced on via `jax.sharding.reshard(inp,' f' jax.P(...))`. Got input spec: {s} and reduced spec: {reduced_s}') return out_s.update(spec=out_s.spec.update(reduced=reduced_s)) def naryop(result_dtype, accepted_dtypes, name, allow_extended_dtype=False, require_same_dtypes=True, unreduced_rule=None, reduced_rule=None): dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name, allow_extended_dtype=allow_extended_dtype, require_same=require_same_dtypes) shape_rule = partial(broadcasting_shape_rule, name) sharding_rule = partial(broadcasting_sharding_rule, name) prim = standard_primitive( shape_rule, dtype_rule, name, sharding_rule=sharding_rule, vma_rule=partial(core.standard_vma_rule, name), unreduced_rule=unreduced_rule, reduced_rule=nary_reduced_rule) batching.defbroadcasting(prim) pe.def_trivial_padding(prim) return prim standard_naryop = partial(naryop, input_dtype) # Like autograd.numpy.numpy_vjps.unbroadcast, this utility handles transposition # involving linear primitives with implicit broadcasting. 
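# For example, if a scalar primal was implicitly broadcast against a (3,)-shaped
# operand, the incoming (3,)-shaped cotangent must be summed back down to a
# scalar so that it matches the primal's aval; roughly (illustrative sketch):
#
#   ct_for_scalar = reduce_sum(ct, [0])   # shape (3,) -> ()
#
# _unbroadcast below performs this reduce_sum/reshape (and sharding) bookkeeping.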
def _unbroadcast(aval, x): if not isinstance(aval, (core.DShapedArray, ShapedArray)): raise TypeError("transpose with implicit broadcasting of unshaped values") x_shape = np.shape(x) if (core.definitely_equal_shape(aval.shape, x_shape) and aval.sharding == core.typeof(x).sharding): return x assert not aval.shape or len(x_shape) == len(aval.shape) if not aval.shape: return reduce_sum(x, list(range(len(x_shape)))) else: dims = [i for i, (a, b) in enumerate(zip(x_shape, aval.shape)) if not core.definitely_equal(a, b)] if config.enable_checks.value: assert all(aval.shape[i] == 1 for i in dims) x = reduce_sum(x, dims) if dims else x return reshape(x, aval.shape, out_sharding=aval.to_cotangent_aval().sharding) def _maybe_broadcast(target_shape, x): x_shape = np.shape(x) if core.definitely_equal_shape(x_shape, target_shape): return x elif not x_shape: return broadcast_in_dim(x, target_shape, ()) else: dims = [i for i, (a, b) in enumerate(zip(x_shape, target_shape)) if core.definitely_equal(a, b)] squeeze_shape = [x_shape[i] for i in dims] return broadcast_in_dim(reshape(x, squeeze_shape), target_shape, dims) def broadcast_hlo( aval_out: core.ShapedArray, avals: Sequence[core.ShapedArray], args: Sequence[ir.Value]) -> Sequence[ir.Value]: """Broadcasts HLO values with broadcast-compatible shapes to the same shape. """ out = [] for aval, arg in zip(avals, args): if aval.shape != aval_out.shape: assert len(aval.shape) <= len(aval_out.shape), (aval, aval_out) dims = mlir.dense_int_array( list(range(len(aval_out.shape) - len(aval.shape), len(aval_out.shape)))) if any(isinstance(d, ir.Value) for d in aval_out.shape): arg = hlo.dynamic_broadcast_in_dim( mlir.aval_to_ir_type(aval_out), arg, mlir.shape_tensor(aval_out.shape), dims) else: arg = hlo.broadcast_in_dim( mlir.aval_to_ir_type(aval.update(shape=aval_out.shape)), arg, dims) out.append(arg) return out def _nary_lower_hlo( op: Callable, ctx, *args: ir.Value, accuracy=None, **params ) -> Sequence[ir.Value]: """Lowers an elementwise operator to its MLIR equivalent. 
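
  The operands are first broadcast to the output aval's shape and sharding via
  ``mlir.multi_broadcast_in_dim``; when ``accuracy`` is given, it is attached
  to the resulting op as a ``result_accuracy`` attribute.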
""" del params avals_in, (aval_out,) = ctx.avals_in, ctx.avals_out args = mlir.multi_broadcast_in_dim(ctx, args, avals_in, aval_out.shape, aval_out.sharding) out = op(*args) if accuracy: out = op(*args, result_accuracy=accuracy_attr(accuracy)) return [mlir.lower_with_sharding_in_types(ctx, out, aval_out)] def _unary_with_accuracy_pp_rule(eqn, context, settings): params = dict(eqn.params) if 'accuracy' in params and params['accuracy'] is None: del params['accuracy'] return core._pp_eqn(eqn.replace(params=params), context, settings) _float = {np.floating} _complex = {np.complexfloating} _complex_elem_types = {np.float32, np.float64} _int = {np.integer} _bool = {np.bool_} _signedint = {np.signedinteger} _num = _int | _float | _complex _any = _int | _float | _complex | _bool _bool_or_int = _int | _bool _ordered = _int | _float | _bool neg_p = standard_unop(_num, 'neg') ad.deflinear2(neg_p, lambda t, operand: [neg(t)]) mlir.register_lowering(neg_p, partial(_nary_lower_hlo, hlo.negate)) sign_p = standard_unop(_num, 'sign') ad.defjvp_zero(sign_p) def _sign_lower_hlo(ctx, x): x_aval, = ctx.avals_in if dtypes.issubdtype(x_aval.dtype, np.unsignedinteger): return [hlo.select( mlir.compare_hlo(x, mlir.full_like_aval(ctx, 0, x_aval), 'EQ', 'UNSIGNED'), mlir.full_like_aval(ctx, 0, x_aval), mlir.full_like_aval(ctx, 1, x_aval))] return [hlo.sign(x)] mlir.register_lowering(sign_p, _sign_lower_hlo) nextafter_p = standard_naryop([_float, _float], 'nextafter') mlir.register_lowering(nextafter_p, partial(_nary_lower_hlo, chlo.next_after)) floor_p = standard_unop(_float, 'floor') ad.defjvp_zero(floor_p) mlir.register_lowering(floor_p, partial(_nary_lower_hlo, hlo.floor)) ceil_p = standard_unop(_float, 'ceil') ad.defjvp_zero(ceil_p) mlir.register_lowering(ceil_p, partial(_nary_lower_hlo, hlo.ceil)) round_p = standard_unop(_float, 'round') ad.defjvp_zero(round_p) def _round_lower(ctx, x, *, rounding_method): if rounding_method is RoundingMethod.AWAY_FROM_ZERO: return [hlo.round_nearest_afz(x)] else: assert rounding_method is RoundingMethod.TO_NEAREST_EVEN return [hlo.round_nearest_even(x)] mlir.register_lowering(round_p, _round_lower) is_finite_p = unop(_fixed_dtype(np.bool_), _float, 'is_finite') ad.defjvp_zero(is_finite_p) mlir.register_lowering(is_finite_p, partial(_nary_lower_hlo, hlo.is_finite)) exp_p = standard_unop(_float | _complex, 'exp') ad.defjvp2(exp_p, lambda g, ans, x, **kwargs: mul(g, ans)) mlir.register_lowering(exp_p, partial(_nary_lower_hlo, hlo.exponential)) batching.ragged_prop_rules[exp_p] = batching.ragged_mask_elementwise_rule core.pp_eqn_rules[exp_p] = _unary_with_accuracy_pp_rule exp2_p = standard_unop(_float | _complex, 'exp2') ad.defjvp2( exp2_p, lambda g, ans, x, **kwargs: mul(log(_const(x, 2)), mul(g, ans)) ) def _exp2_lower(ctx, x, accuracy): x_aval, = ctx.avals_in log2 = mlir.ir_constant(np.array(np.log(2), x_aval.dtype)) log2 = mlir.broadcast_in_dim(ctx, log2, x_aval, broadcast_dimensions=()) return [ hlo.exponential( hlo.multiply(log2, x), result_accuracy=accuracy_attr(accuracy) ) ] mlir.register_lowering(exp2_p, _exp2_lower) core.pp_eqn_rules[exp2_p] = _unary_with_accuracy_pp_rule log_p = standard_unop(_float | _complex, 'log') ad.defjvp(log_p, lambda g, x, **kwargs: div(g, x)) mlir.register_lowering(log_p, partial(_nary_lower_hlo, hlo.log)) core.pp_eqn_rules[log_p] = _unary_with_accuracy_pp_rule expm1_p = standard_unop(_float | _complex, 'expm1') ad.defjvp2( expm1_p, lambda g, ans, x, accuracy: ( mul(g, exp(x, accuracy=accuracy)) if accuracy is AccuracyMode.HIGHEST else mul(g, 
add(ans, _one(ans))) ), ) mlir.register_lowering(expm1_p, partial(_nary_lower_hlo, hlo.exponential_minus_one)) core.pp_eqn_rules[expm1_p] = _unary_with_accuracy_pp_rule log1p_p = standard_unop(_float | _complex, 'log1p') ad.defjvp(log1p_p, lambda g, x, **kwargs: div(g, add(x, _one(x)))) mlir.register_lowering(log1p_p, partial(_nary_lower_hlo, hlo.log_plus_one)) core.pp_eqn_rules[log1p_p] = _unary_with_accuracy_pp_rule tanh_p = standard_unop(_float | _complex, 'tanh') ad.defjvp2( tanh_p, lambda g, ans, x, accuracy: mul(g, mul(_const(x, 4), mul(logistic(mul(_const(x, 2), x), accuracy=accuracy), logistic(mul(_const(x, -2), x), accuracy=accuracy)), ), ) if accuracy is AccuracyMode.HIGHEST else mul(add(g, mul(g, ans)), sub(_one(x), ans)), ) mlir.register_lowering(tanh_p, partial(_nary_lower_hlo, hlo.tanh)) core.pp_eqn_rules[tanh_p] = _unary_with_accuracy_pp_rule logistic_p = standard_unop(_float | _complex, 'logistic') ad.defjvp2( logistic_p, lambda g, ans, x, accuracy: mul(g, mul(ans, logistic(neg(x)))) if accuracy is AccuracyMode.HIGHEST else mul(g, mul(ans, sub(_one(ans), ans))), ) # TODO(phawkins): switch to LogisticOp lowering; debug numerical problems. # mlir.register_lowering(logistic_p, partial(_nary_lower_hlo, hlo.logistic)) def logistic_impl(x, accuracy): del accuracy one = _const(x, 1) return div(one, add(one, exp(neg(x)))) mlir.register_lowering(logistic_p, mlir.lower_fun(logistic_impl, multiple_results=False)) core.pp_eqn_rules[logistic_p] = _unary_with_accuracy_pp_rule def _sin_complex(x): # use expm1 instead of exp to avoid cancellation when abs(x) is small # relies on the quality of real-valued expm1, sin, cos # sin(x) = complex(sin(real(x)) * cosh(imag(x)), cos(real(x)) * sinh(imag(x))) # 2 * sinh(x) = exp(x) - 1 - (exp(-x) - 1) = expm1(x) - expm1(-x) # 2 * cosh(x) = exp(x) - 1 + (exp(-x) - 1) + 2 = expm1(x) + expm1(-x) + 2 a, b = real(x), imag(x) a_is_zero = eq(a, _const(a, 0)) two = _const(a, 2) sn, cs = sin(a), cos(a) e1m, e2m = expm1(b), expm1(neg(b)) snh, csh = div(sub(e1m, e2m), two), div(add(add(e1m, e2m), two), two) re, im = mul(sn, csh), mul(cs, snh) # avoid nan value when real(x) is zero and abs(x) is so large that abs(expm1(x)) is inf return select(a_is_zero, complex(_const(a, 0), im), complex(re, im)) def _sin_lowering(ctx, x, accuracy): if dtypes.issubdtype(ctx.avals_in[0].dtype, np.complexfloating): sine = mlir.lower_fun(_sin_complex, multiple_results=False) return sine(ctx, x) return _nary_lower_hlo(hlo.sine, ctx, x, accuracy=accuracy) def _sin_lin(nzs, x, accuracy): nz, = nzs return (sin_p.bind(x, accuracy=accuracy), nz, cos(x), lambda cos_x, t: mul(t, cos_x)) sin_p = standard_unop(_float | _complex, 'sin') ad.defjvp(sin_p, lambda g, x, accuracy: mul(g, cos(x, accuracy=accuracy))) ad.primitive_linearizations[sin_p] = _sin_lin mlir.register_lowering(sin_p, _sin_lowering) core.pp_eqn_rules[sin_p] = _unary_with_accuracy_pp_rule batching.ragged_prop_rules[sin_p] = batching.ragged_mask_elementwise_rule def _cos_complex(x): # cos(x) = complex(cos(real(x)) * cosh(imag(x)), -sin(real(x)) * sinh(imag(x))) # see also _sin_complex a, b = real(x), imag(x) a_is_zero = eq(a, _const(a, 0)) two = _const(a, 2) sn, cs = sin(a), cos(a) e1m, e2m = expm1(b), expm1(neg(b)) snh, csh = div(sub(e1m, e2m), two), div(add(add(e1m, e2m), two), two) re, im = mul(cs, csh), mul(neg(sn), snh) return select(a_is_zero, complex(re, _const(a, 0)), complex(re, im)) def _cos_lowering(ctx, x, accuracy): if dtypes.issubdtype(ctx.avals_in[0].dtype, np.complexfloating): cosine = 
mlir.lower_fun(_cos_complex, multiple_results=False) return cosine(ctx, x) return _nary_lower_hlo(hlo.cosine, ctx, x, accuracy=accuracy) cos_p = standard_unop(_float | _complex, 'cos') ad.defjvp( cos_p, lambda g, x, accuracy: neg(mul(g, sin(x, accuracy=accuracy))) ) mlir.register_lowering(cos_p, _cos_lowering) core.pp_eqn_rules[cos_p] = _unary_with_accuracy_pp_rule tan_p = standard_unop(_float | _complex, 'tan') ad.defjvp2(tan_p, lambda g, ans, x, **kwargs: mul(g, add(_const(x, 1), square(ans)))) mlir.register_lowering(tan_p, partial(_nary_lower_hlo, hlo.tan)) core.pp_eqn_rules[tan_p] = _unary_with_accuracy_pp_rule asin_p = standard_unop(_float | _complex, 'asin') ad.defjvp(asin_p, lambda g, x: mul(g, rsqrt(sub(_const(x, 1), square(x))))) mlir.register_lowering(asin_p, partial(_nary_lower_hlo, chlo.asin)) acos_p = standard_unop(_float | _complex, 'acos') ad.defjvp(acos_p, lambda g, x: mul(g, neg(rsqrt(sub(_const(x, 1), square(x)))))) mlir.register_lowering(acos_p, partial(_nary_lower_hlo, chlo.acos)) def atan_impl(x): return atan2(x, _const(x, 1)) atan_p = standard_unop(_float | _complex, 'atan') ad.defjvp(atan_p, lambda g, x: div(g, add(_const(x, 1), square(x)))) mlir.register_lowering(atan_p, partial(_nary_lower_hlo, chlo.atan)) atan2_p = standard_naryop([_float | _complex, _float | _complex], 'atan2') ad.defjvp(atan2_p, lambda g, x, y: mul(g, div(y, add(square(x), square(y)))), lambda g, x, y: mul(g, div(neg(x), add(square(x), square(y))))) mlir.register_lowering(atan2_p, partial(_nary_lower_hlo, hlo.atan2)) sinh_p = standard_unop(_float | _complex, 'sinh') ad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x))) mlir.register_lowering(sinh_p, partial(_nary_lower_hlo, chlo.sinh)) cosh_p = standard_unop(_float | _complex, 'cosh') ad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x))) mlir.register_lowering(cosh_p, partial(_nary_lower_hlo, chlo.cosh)) asinh_p = standard_unop(_float | _complex, 'asinh') ad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(add(square(x), _one(x))))) mlir.register_lowering(asinh_p, partial(_nary_lower_hlo, chlo.asinh)) acosh_p = standard_unop(_float | _complex, 'acosh') ad.defjvp(acosh_p, # We use x^2-1 rather than (x+1)(x-1). The latter is more accurate # for x near zero, but the function domain is x>=1. 
lambda g, x: mul(g, rsqrt(sub(square(x), _one(x))))) mlir.register_lowering(acosh_p, partial(_nary_lower_hlo, chlo.acosh)) atanh_p = standard_unop(_float | _complex, 'atanh') ad.defjvp(atanh_p, lambda g, x: mul(reciprocal(add(_one(x), x)), div(g, sub(_one(x), x)))) mlir.register_lowering(atanh_p, partial(_nary_lower_hlo, chlo.atanh)) real_p = unop(_complex_basetype, _complex, 'real') ad.deflinear2(real_p, lambda t, _: [complex(t, np.zeros((), _dtype(t)))]) mlir.register_lowering(real_p, partial(_nary_lower_hlo, hlo.real)) imag_p = unop(_complex_basetype, _complex, 'imag') ad.deflinear2(imag_p, lambda t, _: [complex(np.zeros((), _dtype(t)), neg(t))]) mlir.register_lowering(imag_p, partial(_nary_lower_hlo, hlo.imag)) def _complex_transpose_rule(t, x, y): assert ad.is_undefined_primal(x) or ad.is_undefined_primal(y) if ad.is_undefined_primal(x) and ad.is_undefined_primal(y): if type(t) is ad_util.Zero: return [ad_util.Zero(x.aval), ad_util.Zero(y.aval)] else: return [_unbroadcast(x.aval, real(t)), _unbroadcast(y.aval, imag(neg(t)))] elif ad.is_undefined_primal(x): if type(t) is ad_util.Zero: return [ad_util.Zero(x.aval), None] else: return [_unbroadcast(x.aval, real(t)), None] else: if type(t) is ad_util.Zero: return [None, ad_util.Zero(y.aval)] else: return [None, _unbroadcast(y.aval, imag(neg(t)))] def _complex_dtype(dtype, *args, **kwargs): return (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types], 'complex') ad.deflinear2(complex_p, _complex_transpose_rule) mlir.register_lowering(complex_p, partial(_nary_lower_hlo, hlo.complex)) conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj') def _conj_impl(x, **kw): if dtypes.issubdtype(x.dtype, np.complexfloating): return complex(real(x), -imag(x)) else: return complex(x, _zeros(x)) mlir.register_lowering(conj_p, mlir.lower_fun(_conj_impl, multiple_results=False)) def _conj_transpose_rule(t, x, *, input_dtype): assert ad.is_undefined_primal(x) if type(t) is ad_util.Zero: return [ad_util.Zero(x.aval)] elif dtypes.issubdtype(input_dtype, np.complexfloating): return [conj(t)] else: return [real(t)] ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p) ad.primitive_transposes[conj_p] = _conj_transpose_rule abs_p = unop(_complex_basetype, _signedint | _float | _complex, 'abs', supports_narrow_ints=False) mlir.register_lowering(abs_p, partial(_nary_lower_hlo, hlo.abs)) def _abs_jvp_rule(g, ans, x): if _iscomplex(x): return _maybe_real(mul(g, div(_maybe_conj(x), _replace_zero(convert_element_type(ans, _dtype(x)))))) else: return select(ge(x, _zero(x)), g, neg(g)) ad.defjvp2(abs_p, _abs_jvp_rule) _maybe_conj = lambda x: conj(x) if _iscomplex(x) else x _maybe_real = lambda x: real(x) if _iscomplex(x) else x sqrt_p = standard_unop(_float | _complex, 'sqrt') ad.defjvp2(sqrt_p, lambda g, ans, x, **kwargs: mul(g, div(_const(x, 0.5), ans))) mlir.register_lowering(sqrt_p, partial(_nary_lower_hlo, hlo.sqrt)) core.pp_eqn_rules[sqrt_p] = _unary_with_accuracy_pp_rule rsqrt_p = standard_unop(_float | _complex, 'rsqrt') ad.defjvp2( rsqrt_p, lambda g, ans, x, **kwargs: mul(g, mul(_const(x, -0.5), div(ans, x))), ) mlir.register_lowering(rsqrt_p, partial(_nary_lower_hlo, hlo.rsqrt)) core.pp_eqn_rules[rsqrt_p] = _unary_with_accuracy_pp_rule cbrt_p = standard_unop(_float, 'cbrt') ad.defjvp2( cbrt_p, lambda g, ans, x, **kwargs: mul( g, mul(_const(x, 1 / 3), integer_pow(ans, -2)) ), ) mlir.register_lowering(cbrt_p, partial(_nary_lower_hlo, hlo.cbrt)) 
core.pp_eqn_rules[cbrt_p] = _unary_with_accuracy_pp_rule square_p = standard_unop(_int | _float | _complex, 'square') def _square_lower_hlo(ctx, x): if dtypes.issubdtype(ctx.avals_in[0].dtype, np.integer): return [hlo.multiply(x, x)] return [chlo.square(x)] ad.defjvp2(square_p, lambda g, ans, x: mul(g, mul(_const(x, 2), x))) mlir.register_lowering(square_p, _square_lower_hlo) def _pow_dtype_rule(x, y): if (dtypes.issubdtype(x.dtype, np.inexact) and dtypes.issubdtype(y.dtype, np.integer)): return x.dtype if x.dtype == y.dtype: return x.dtype raise TypeError("the first argument to pow must have an inexact dtype (float " "or complex), and the second argument must have an inexact or" " integer dtype, and two inexact dtypes must match, but got " f"{x.dtype} and {y.dtype} respectively.") pow_p = naryop(_pow_dtype_rule, [_float | _complex, _int | _float | _complex], 'pow', require_same_dtypes=False) def _pow_jvp_lhs(g, ans, x, y): y_dtype = dtypes.dtype(y) result_dtype = dtypes.result_type(x, y) if result_dtype == bool: result_dtype = 'int32' x = convert_element_type(x, result_dtype) y = convert_element_type(y, result_dtype) if dtypes.issubdtype(y_dtype, np.integer): if x.shape != y.shape: shape = broadcast_shapes(x.shape, y.shape) x = _maybe_broadcast(shape, x) y = _maybe_broadcast(shape, y) jac = select(eq(y, _const(y, 0)), _zeros(y), mul(_replace_zero(y), pow(x, sub(y, _ones(y))))) else: jac = mul(y, pow(x, sub(y, _ones(y)))) return mul(g, jac) def _pow_jvp_rhs(g, ans, x, y): y_dtype = dtypes.dtype(y) assert dtypes.issubdtype(y_dtype, np.inexact) return convert_element_type(mul(g, mul(log(_replace_zero(x)), ans)), y_dtype) ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs) def _pow_lower(ctx, x, y): x_aval, y_aval = ctx.avals_in if x_aval.dtype != y_aval.dtype: out_aval, = ctx.avals_out y_aval = y_aval.update(dtype=out_aval.dtype) y = hlo.convert(mlir.aval_to_ir_type(y_aval), y) ctx = ctx.replace(avals_in=[x_aval, y_aval]) return _nary_lower_hlo(hlo.power, ctx, x, y) mlir.register_lowering(pow_p, _pow_lower) def _integer_pow_dtype_rule(x, *, y): dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x) if y < 0 and dtypes.issubdtype(dtype, np.integer): raise TypeError("Integers cannot be raised to negative powers, got " f"integer_pow({x}, {y})") return dtype def _integer_pow_jvp(g, x, *, y): return _zeros(g) if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1))) integer_pow_p = standard_primitive( _attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow', sharding_rule=_attrgetter('sharding'), vma_rule=_attrgetter('vma')) batching.defvectorized(integer_pow_p) ad.defjvp(integer_pow_p, _integer_pow_jvp) pe.def_trivial_padding(integer_pow_p) def _integer_pow(x, *, y): # This should be kept in sync with the jax2tf translation rule. if y == 0: return full_like(x, 1) is_reciprocal = y < 0 if is_reciprocal: y = -y acc = None while y > 0: if y & 1: acc = x if acc is None else mul(acc, x) y >>= 1 if y > 0: # We don't call square because it calls integer_pow. x = mul(x, x) return div(full_like(acc, 1), acc) if is_reciprocal else acc def _integer_pow_lowering(ctx, x, *, y): # These cases are subsumed by the general case, but it's faster to emit these # common cases directly. 
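  # For instance, y == 3 lowers to two hlo.multiply ops (x*x*x) rather than
  # going through the exponentiation-by-squaring helper `_integer_pow` above.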
if y == 1: out = x elif y == 2: out = hlo.multiply(x, x) elif y == 3: out = hlo.multiply(hlo.multiply(x, x), x) elif y == -1: out = hlo.divide(mlir.full_like_aval(ctx, 1, ctx.avals_in[0]), x) else: lowering = mlir.lower_fun(_integer_pow, multiple_results=False) out, = lowering(ctx, x, y=y) aval_out, = ctx.avals_out return [mlir.lower_with_sharding_in_types(ctx, out, aval_out)] mlir.register_lowering(integer_pow_p, _integer_pow_lowering) _replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x) not_p = standard_unop(_bool_or_int, 'not') ad.defjvp_zero(not_p) mlir.register_lowering(not_p, partial(_nary_lower_hlo, hlo.not_)) and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and') ad.defjvp_zero(and_p) mlir.register_lowering(and_p, partial(_nary_lower_hlo, hlo.and_)) or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or') ad.defjvp_zero(or_p) mlir.register_lowering(or_p, partial(_nary_lower_hlo, hlo.or_)) xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor') ad.defjvp_zero(xor_p) mlir.register_lowering(xor_p, partial(_nary_lower_hlo, hlo.xor)) population_count_p = standard_unop(_int, 'population_count') mlir.register_lowering(population_count_p, partial(_nary_lower_hlo, hlo.popcnt)) clz_p = standard_unop(_int, 'clz') mlir.register_lowering(clz_p, partial(_nary_lower_hlo, hlo.count_leading_zeros)) def _add_jvp(primals, tangents): x, y = primals xdot, ydot = tangents primal_out = add(x, y) if type(xdot) is type(ydot) is ad_util.Zero: return primal_out, ad_util.Zero.from_primal_value(primal_out) if type(xdot) is ad_util.Zero: return primal_out, _maybe_broadcast(primal_out.shape, ydot) elif type(ydot) is ad_util.Zero: return primal_out, _maybe_broadcast(primal_out.shape, xdot) else: return primal_out, add(xdot, ydot) def _add_transpose(t, x, y): # Morally the following assertion is true, but because we instantiate zeros in # some places (e.g. in custom_jvp) it may not always hold. For example, see # api_test.py's CustomJVPTest.test_jaxpr_zeros. # assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y) x_aval = x.aval if ad.is_undefined_primal(x) else core.get_aval(x) y_aval = y.aval if ad.is_undefined_primal(y) else core.get_aval(y) if type(t) is ad_util.Zero: return [ad_util.Zero(x_aval), ad_util.Zero(y_aval)] else: return [_unbroadcast(x_aval, t), _unbroadcast(y_aval, t)] def _add_unreduced_rule(out_sharding, x, y): x_ur, y_ur = x.sharding.spec.unreduced, y.sharding.spec.unreduced if x_ur and y_ur: if x_ur != y_ur: raise core.ShardingTypeError( 'lhs and rhs to `add` must be unreduced along the same mesh axes. ' f'Got lhs={x_ur}, rhs={y_ur}') res_unreduced = x_ur elif x_ur or y_ur: if x_ur and not y_ur: lhs_str, rhs_str = 'lhs', 'rhs' else: assert not x_ur and y_ur lhs_str, rhs_str = 'rhs', 'lhs' raise core.ShardingTypeError( f'{lhs_str} is unreduced while {rhs_str} is not. `add` operation does' ' not allow this because there will be implicit communication. 
Please' f' reduce {lhs_str} via `reshard` before calling `add`.') else: res_unreduced = frozenset() return out_sharding.update(spec=out_sharding.spec.update(unreduced=res_unreduced)) add_p: Primitive = naryop(input_dtype, [_num, _num], 'add', unreduced_rule=_add_unreduced_rule) ad.primitive_jvps[add_p] = _add_jvp ad.primitive_transposes[add_p] = _add_transpose mlir.register_lowering(add_p, partial(_nary_lower_hlo, hlo.add)) batching.ragged_prop_rules[add_p] = batching.ragged_mask_elementwise_rule def _sub_jvp(primals, tangents): x, y = primals xdot, ydot = tangents primal_out = sub(x, y) if type(xdot) is type(ydot) is ad_util.Zero: return primal_out, ad_util.Zero.from_primal_value(primal_out) if type(xdot) is ad_util.Zero: return primal_out, _maybe_broadcast(primal_out.shape, neg(ydot)) elif type(ydot) is ad_util.Zero: return primal_out, _maybe_broadcast(primal_out.shape, xdot) else: return primal_out, sub(xdot, ydot) def _sub_transpose(t, x, y): # Morally the following assertion is true, but see the comment in add_p's # transpose rule. # assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y) x_aval = x.aval if ad.is_undefined_primal(x) else core.get_aval(x) y_aval = y.aval if ad.is_undefined_primal(y) else core.get_aval(y) if type(t) is ad_util.Zero: return [ad_util.Zero(x_aval), ad_util.Zero(y_aval)] else: return [_unbroadcast(x_aval, t), _unbroadcast(y_aval, neg(t))] sub_p = standard_naryop([_num, _num], 'sub') ad.primitive_jvps[sub_p] = _sub_jvp ad.primitive_transposes[sub_p] = _sub_transpose mlir.register_lowering(sub_p, partial(_nary_lower_hlo, hlo.subtract)) batching.ragged_prop_rules[sub_p] = batching.ragged_mask_elementwise_rule def _mul_unreduced_rule(out_sharding, x, y): x_ur, y_ur = x.sharding.spec.unreduced, y.sharding.spec.unreduced if x_ur and y_ur: raise core.ShardingTypeError( 'lhs and rhs to `mul` cannot be unreduced since mul is bilinear. ' f'Got lhs={x_ur}, rhs={y_ur}') elif x_ur and not y_ur: if x_ur != y.sharding.spec.reduced: raise core.ShardingTypeError( 'RHS should be reduced along the same axes LHS is unreduced on. Got' f' lhs={x} and rhs={y}') out_unreduced = x_ur elif not x_ur and y_ur: if x.sharding.spec.reduced != y_ur: raise core.ShardingTypeError( 'LHS should be reduced along the same axes RHS is unreduced on. Got' f' lhs={x} and rhs={y}') out_unreduced = y_ur else: assert not x_ur and not y_ur out_unreduced = frozenset() if out_unreduced: assert out_sharding.spec.reduced == out_unreduced out_reduced = frozenset() # if both are equal, set difference is empty. 
else: out_reduced = out_sharding.spec.reduced return out_sharding.update(spec=out_sharding.spec.update( unreduced=out_unreduced, reduced=out_reduced)) mul_p = standard_naryop([_num, _num], 'mul', unreduced_rule=_mul_unreduced_rule) ad.defjvp(mul_p, lambda xdot, x, y: mul(xdot, y), lambda ydot, x, y: mul(x, ydot)) ad.defbilinear(mul_p, lambda ct, x, y: _unbroadcast(x.aval, mul(ct, y)), lambda ct, x, y: _unbroadcast(y.aval, mul(x, ct))) mlir.register_lowering(mul_p, partial(_nary_lower_hlo, hlo.multiply)) batching.ragged_prop_rules[mul_p] = batching.ragged_mask_elementwise_rule def _div_transpose_rule(cotangent, x, y): assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y) if type(cotangent) is ad_util.Zero: return [ad_util.Zero(x.aval), None] else: return [_unbroadcast(x.aval, div(cotangent, y)), None] div_p = standard_naryop([_num, _num], 'div') ad.defjvp(div_p, lambda g, x, y: div(g, y), lambda g, x, y: mul(mul(neg(g), x), integer_pow(y, -2))) ad.primitive_transposes[div_p] = _div_transpose_rule mlir.register_lowering(div_p, partial(_nary_lower_hlo, hlo.divide)) batching.ragged_prop_rules[div_p] = batching.ragged_mask_elementwise_rule rem_p = standard_naryop([_int | _float, _int | _float], 'rem') ad.defjvp( rem_p, lambda g, x, y: _maybe_broadcast(broadcast_shapes(np.shape(x), np.shape(y)), g), lambda g, x, y: mul(neg(g), mul(sign(div(x, y)), floor(abs(div(x, y)))))) mlir.register_lowering(rem_p, partial(_nary_lower_hlo, hlo.remainder)) def _minmax_complex_lowering(x, y, *, lax_cmp_pick_x): result_shape = broadcast_shapes(np.shape(x), np.shape(y)) x = _maybe_broadcast(result_shape, x) y = _maybe_broadcast(result_shape, y) rx = real(x) ry = real(y) pick_x = select(eq(rx, ry), lax_cmp_pick_x(imag(x), imag(y)), lax_cmp_pick_x(rx, ry)) return select(pick_x, x, y) max_p: core.Primitive = standard_naryop([_any, _any], 'max') ad.defjvp2(max_p, lambda g, ans, x, y: mul(g, _balanced_eq(x, ans, y)), lambda g, ans, x, y: mul(g, _balanced_eq(y, ans, x))) mlir.register_lowering(max_p, partial(_nary_lower_hlo, mlir.max_hlo)) batching.ragged_prop_rules[max_p] = batching.ragged_mask_elementwise_rule min_p: core.Primitive = standard_naryop([_any, _any], 'min') ad.defjvp2(min_p, lambda g, ans, x, y: mul(g, _balanced_eq(x, ans, y)), lambda g, ans, x, y: mul(g, _balanced_eq(y, ans, x))) mlir.register_lowering(min_p, partial(_nary_lower_hlo, mlir.min_hlo)) batching.ragged_prop_rules[min_p] = batching.ragged_mask_elementwise_rule shift_left_p = standard_naryop([_int, _int], 'shift_left') ad.defjvp_zero(shift_left_p) mlir.register_lowering(shift_left_p, partial(_nary_lower_hlo, hlo.shift_left)) shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic') ad.defjvp_zero(shift_right_arithmetic_p) mlir.register_lowering(shift_right_arithmetic_p, partial(_nary_lower_hlo, hlo.shift_right_arithmetic)) shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical') ad.defjvp_zero(shift_right_logical_p) mlir.register_lowering(shift_right_logical_p, partial(_nary_lower_hlo, hlo.shift_right_logical)) def _opaque_comparison_hlo(direction, reduction_op, identity, ctx, avals_in, aval_out, x, y): aval_x, aval_y = avals_in base_aval_x = core.physical_aval(aval_x) base_aval_y = core.physical_aval(aval_y) base_aval_out = core.ShapedArray(base_aval_x.shape, aval_out.dtype) reduce_axes = tuple(range(aval_out.ndim, base_aval_out.ndim)) res, = mlir.delegate_lowering( ctx, partial(_compare_lower_hlo, direction, False), x, y, avals_in=[base_aval_x, base_aval_y], avals_out=[base_aval_out]) 
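  # `res` holds the elementwise comparison over the physical representation;
  # the reduction below collapses the trailing representation axes so that each
  # logical (extended-dtype) element yields a single boolean.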
return mlir.delegate_lowering( ctx, partial(_unary_reduce_lower, reduction_op, identity, axes=reduce_axes), res, avals_in=[base_aval_out], avals_out=[aval_out]) _opaque_eq_hlo = partial( _opaque_comparison_hlo, 'EQ', hlo.AndOp, _get_bitwise_and_identity) _opaque_ne_hlo = partial( _opaque_comparison_hlo, 'NE', hlo.OrOp, _get_bitwise_or_identity) def _compare_lower_hlo_opaque(direction: str, ctx, avals_in, aval_out, x, y): broadcast_avals_in = tuple( core.ShapedArray(aval_out.shape, aval.dtype) for aval in avals_in) if direction == 'EQ': return _opaque_eq_hlo(ctx, broadcast_avals_in, aval_out, x, y) elif direction == 'NE': return _opaque_ne_hlo(ctx, broadcast_avals_in, aval_out, x, y) else: raise NotImplementedError( f"HLO comparison {direction} for extended dtype {avals_in[0].dtype}") def _compare_lower_hlo(direction: str, total_order: bool, ctx, x, y): avals_in, (aval_out,) = ctx.avals_in, ctx.avals_out x_dtype = avals_in[0].dtype x, y = mlir.multi_broadcast_in_dim(ctx, (x, y), avals_in, aval_out.shape, aval_out.sharding) if dtypes.issubdtype(x_dtype, dtypes.extended): assert not total_order return _compare_lower_hlo_opaque(direction, ctx, avals_in, aval_out, x, y) if dtypes.issubdtype(x_dtype, np.inexact): compare_type = "TOTALORDER" if total_order else "FLOAT" elif dtypes.issubdtype(x_dtype, np.signedinteger): compare_type = "SIGNED" else: compare_type = "UNSIGNED" return [mlir.compare_hlo(x, y, direction, compare_type)] eq_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'eq', allow_extended_dtype=True) ad.defjvp_zero(eq_p) mlir.register_lowering(eq_p, partial(_compare_lower_hlo, "EQ", False)) batching.ragged_prop_rules[eq_p] = batching.ragged_mask_elementwise_rule ne_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ne', allow_extended_dtype=True) ad.defjvp_zero(ne_p) mlir.register_lowering(ne_p, partial(_compare_lower_hlo, "NE", False)) ge_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'ge') ad.defjvp_zero(ge_p) mlir.register_lowering(ge_p, partial(_compare_lower_hlo, "GE", False)) gt_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'gt') ad.defjvp_zero(gt_p) mlir.register_lowering(gt_p, partial(_compare_lower_hlo, "GT", False)) le_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'le') ad.defjvp_zero(le_p) mlir.register_lowering(le_p, partial(_compare_lower_hlo, "LE", False)) lt_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'lt') ad.defjvp_zero(lt_p) mlir.register_lowering(lt_p, partial(_compare_lower_hlo, "LT", False)) batching.ragged_prop_rules[lt_p] = batching.ragged_mask_elementwise_rule eq_to_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'eq_to') ad.defjvp_zero(eq_to_p) mlir.register_lowering(eq_to_p, partial(_compare_lower_hlo, "EQ", True)) le_to_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'le_to') ad.defjvp_zero(le_to_p) mlir.register_lowering(le_to_p, partial(_compare_lower_hlo, "LE", True)) lt_to_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'lt_to') ad.defjvp_zero(lt_to_p) mlir.register_lowering(lt_to_p, partial(_compare_lower_hlo, "LT", True)) def _convert_element_type_shape_rule(operand, *, new_dtype, weak_type, sharding): return operand.shape def _convert_element_type_sharding_rule(operand, *, new_dtype, weak_type, sharding): if sharding is None: return operand.sharding if sharding._is_concrete: if isinstance(sharding, NamedSharding): return NamedSharding(sharding.mesh.abstract_mesh, sharding.spec) else: return core.get_cur_mesh_sharding() return sharding def _convert_element_type_unreduced_rule(out_s, 
operand, *, new_dtype, weak_type, sharding): return out_s.update(spec=out_s.spec.update( unreduced=operand.sharding.spec.unreduced)) def _convert_element_type_reduced_rule(out_s, operand, *, new_dtype, weak_type, sharding): return out_s.update(spec=out_s.spec.update( reduced=operand.sharding.spec.reduced)) def _convert_element_type_dtype_rule(operand, *, new_dtype, weak_type, sharding): return new_dtype def _convert_element_type_weak_type_rule(operand, *, new_dtype, weak_type, sharding): return weak_type def _convert_element_type_transpose_rule(ct, operand, *, new_dtype, weak_type, sharding): assert ad.is_undefined_primal(operand) old_dtype = operand.aval.dtype old_weak_type = dtypes.is_weakly_typed(operand) if type(ct) is ad_util.Zero: return [ad_util.Zero(operand.aval)] elif core.primal_dtype_to_tangent_dtype(old_dtype) == dtypes.float0: return [ad_util.Zero(operand.aval.update(dtype=dtypes.float0, weak_type=False))] else: out = convert_element_type_p.bind( ct, new_dtype=old_dtype, weak_type=old_weak_type, sharding=operand.aval.to_cotangent_aval().sharding) return [out] def _convert_element_type_jvp_rule(tangent, primal_result, operand, *, new_dtype, weak_type, sharding): new_tangent_dtype = core.primal_dtype_to_tangent_dtype(new_dtype) if new_tangent_dtype == dtypes.float0: return ad_util.Zero.from_primal_value(primal_result) else: return convert_element_type_p.bind(tangent, new_dtype=new_tangent_dtype, weak_type=weak_type, sharding=sharding) _foldable_types = { literals.TypedNdArray, np.ndarray, *dtypes.python_scalar_types, *literals.typed_scalar_types, } def _convert_elt_type_folding_rule(consts, params, out_avals): # We constant-fold convert_element_types applied to constants if those # constants are Python builtin numeric types or numpy.ndarrays (so as not # to perform any device operations when constant-folding) and if the output # type can be faithfully represented by a Python builtin numeric type or # numpy.ndarray. If those conditions are met, we output a numpy.ndarray # constant if the output type is not weak, and if the output type is weak then # we output a Python builtin numeric type. 
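  # For example, converting the Python scalar 3 to float32 folds to a concrete
  # numpy-backed value at trace time instead of staging out a
  # convert_element_type op.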
# TODO(mattjj): allow constant-folding CPU-backed JAX arrays c, = consts out_aval, = out_avals new_dtype = params['new_dtype'] if (type(c) in _foldable_types and isinstance(out_aval, ShapedArray) and not np.shape(c) and not dtypes.issubdtype(new_dtype, dtypes.extended)): out = np.asarray(c) if (dtypes.issubdtype(out.dtype, np.complexfloating) and not dtypes.issubdtype(new_dtype, np.complexfloating)): out = out.real out = out.astype(new_dtype) return [literals.TypedNdArray(out, weak_type=out_aval.weak_type)] return None def _convert_elt_type_fwd_rule(eqn): t, = eqn.invars aval = t.aval if (aval.dtype == eqn.params['new_dtype'] and aval.weak_type == eqn.params['weak_type'] and not dtypes.issubdtype(aval.dtype, dtypes.extended) and (eqn.params['sharding'] is None or eqn.params['sharding'] == aval.sharding)): return [0], None else: return [None], eqn def _convert_elt_type_pp_rule(eqn, context, settings): params = dict(eqn.params) if params['sharding'] is None: del params['sharding'] # don't show trivial case return core._pp_eqn(eqn.replace(params=params), context, settings) convert_element_type_p = standard_primitive( _convert_element_type_shape_rule, _convert_element_type_dtype_rule, 'convert_element_type', weak_type_rule=_convert_element_type_weak_type_rule, sharding_rule=_convert_element_type_sharding_rule, vma_rule=partial(core.standard_vma_rule, 'convert_element_type'), unreduced_rule=_convert_element_type_unreduced_rule, reduced_rule=_convert_element_type_reduced_rule) # TODO(dougalm): I'm overriding bind_with_trace here because that's the closest thing to # the old "custom bind" but it might not be the best way to do this. def _convert_element_type_bind_with_trace(trace, args, params): sharding = params['sharding'] operand = core.Primitive.bind_with_trace(convert_element_type_p, trace, args, params) if sharding is not None and sharding._is_concrete: with core.set_current_trace(trace): operand = pjit.with_sharding_constraint(operand, sharding) return operand convert_element_type_p.def_bind_with_trace(_convert_element_type_bind_with_trace) convert_element_type_p.def_impl(partial(dispatch.apply_primitive, convert_element_type_p)) ad.defjvp2(convert_element_type_p, _convert_element_type_jvp_rule) ad.primitive_transposes[convert_element_type_p] = _convert_element_type_transpose_rule def _convert_element_type_batching_rule( axis_data, batched_args, batch_dims, *, new_dtype, weak_type, sharding): if sharding is not None: sharding = batching.get_sharding_for_vmap(axis_data, sharding, 0) new_params = dict(new_dtype=new_dtype, weak_type=weak_type, sharding=sharding) return convert_element_type_p.bind(*batched_args, **new_params), batch_dims[0] batching.fancy_primitive_batchers[convert_element_type_p] = _convert_element_type_batching_rule batching.skippable_batchers[convert_element_type_p] = lambda _: () pe.const_fold_rules[convert_element_type_p] = _convert_elt_type_folding_rule pe.forwarding_rules[convert_element_type_p] = _convert_elt_type_fwd_rule pe.def_trivial_padding(convert_element_type_p) core.pp_eqn_rules[convert_element_type_p] = _convert_elt_type_pp_rule batching.ragged_prop_rules[convert_element_type_p] = ( batching.ragged_mask_elementwise_rule ) def _real_dtype(dtype): return np.finfo(dtype).dtype def _convert_element_type_lower(ctx, operand, *, new_dtype, weak_type, sharding): aval_in, = ctx.avals_in aval_out, = ctx.avals_out if (dtypes.issubdtype(aval_in.dtype, np.complexfloating) and not dtypes.issubdtype(new_dtype, np.complexfloating)): operand = hlo.real(operand) aval_in = 
aval_in.update(dtype=_real_dtype(aval_in.dtype)) out = mlir.convert_hlo(ctx, operand, aval_in, aval_out) return [mlir.lower_with_sharding_in_types(ctx, out, aval_out)] mlir.register_lowering(convert_element_type_p, _convert_element_type_lower) def _to_edtype_abstract_eval(x, *, edtype): assert (isinstance(edtype, dtypes.ExtendedDType) and not isinstance(x.dtype, dtypes.ExtendedDType)) # For backward compatibility, if the edtype rules have a `convert_to` method, # use that rather than looking for an `allow_conversion: bool` attribute. if not isinstance(x, (ShapedArray, core.DShapedArray)): raise TypeError("can only convert to an extended dtype on an array type," f"but got {type(x)}") if convert_to := getattr(edtype._rules, 'convert_to', None): allow_conversion = convert_to(x.dtype, edtype) else: allow_conversion = edtype._rules.allow_conversion if not allow_conversion: raise ValueError( f"Cannot convert_element_type from {dtype_to_string(x.dtype)} " f"to {dtype_to_string(edtype)}") rep_aval = core.physical_element_aval(edtype) assert tuple(rep_aval.sharding.spec) == (None,) * rep_aval.ndim if x.dtype != rep_aval.dtype: raise ValueError( "can only convert to extended dtype from its representation dtype, " f"but tried to convert from {dtype_to_string(x.dtype)} to " f"{dtype_to_string(edtype)} which doesn't match the representation type " f"{dtype_to_string(rep_aval.dtype)}.") if x.ndim < rep_aval.ndim: raise ValueError( "can only convert to extended dtype from an array of its " f"representation type, but the extended dtype {dtype_to_string(edtype)}" f" has a representation shape {rep_aval.shape} (rank {rep_aval.ndim}) " f"while the given representation array has shape {x.shape} (rank " f"{x.ndim} < {rep_aval.ndim}).") n = x.ndim - rep_aval.ndim shape_prefix, shape_suffix = x.shape[:n], x.shape[n:] if shape_suffix != rep_aval.shape: raise ValueError( "can only convert to extended dtype from an array of its " f"representation type, but the extended dtype {dtype_to_string(edtype)}" f" has a representation shape {rep_aval.shape} while the given " f"representation array has shape {x.shape}, so the shape suffix " f"does not match: given {shape_suffix} but required {rep_aval.shape}.") if isinstance(x, ShapedArray): spec_prefix, spec_suffix = x.sharding.spec[:n], x.sharding.spec[n:] if tuple(spec_suffix) != (None,) * len(spec_suffix): raise ValueError( "can only convert to extended dtype from an array with trailing " "axes that are not explicitly sharded, but tried to convert from " f"{x.str_short(short_dtypes=True)} to an extended dtype with element " f"shape {rep_aval.shape}") return x.update(shape=shape_prefix, dtype=edtype, sharding=x.sharding.update(spec=spec_prefix)) elif isinstance(x, core.DShapedArray): return x.update(shape=shape_prefix, dtype=edtype) else: assert False # unreachable, see isinstance check above to_edtype_p = Primitive('to_edtype') to_edtype_p.def_impl(partial(dispatch.apply_primitive, to_edtype_p)) to_edtype_p.def_abstract_eval(_to_edtype_abstract_eval) ad.defjvp(to_edtype_p, lambda t, x, edtype: convert_element_type(t, core.primal_dtype_to_tangent_dtype(edtype))) ad.primitive_transposes[to_edtype_p] = \ lambda ct, x, edtype: [from_edtype_p.bind(ct, dtype=x.aval.dtype)] # type: ignore batching.defvectorized(to_edtype_p) mlir.register_lowering(to_edtype_p, lambda _, x, **__: [x]) def _from_edtype_abstract_eval(x, *, dtype): assert (isinstance(x.dtype, dtypes.ExtendedDType) and not isinstance(dtype, dtypes.ExtendedDType)) if not isinstance(x, (ShapedArray, core.DShapedArray)): 
raise TypeError("can only convert from an extended dtype on an array type," f"but got {type(x)}") if convert_from := getattr(x.dtype._rules, 'convert_from', None): allow_conversion = convert_from(x.dtype, dtype) else: allow_conversion = x.dtype._rules.allow_conversion if not allow_conversion: raise ValueError( f"Cannot convert_element_type from {dtype_to_string(x.dtype)} " f"to {dtype_to_string(dtype)}") rep_aval = core.physical_element_aval(x.dtype) assert tuple(rep_aval.sharding.spec) == (None,) * rep_aval.ndim if rep_aval.dtype != dtype: raise ValueError( "can only convert from extended dtype to its representation dtype, " f"but tried to convert from {dtype_to_string(x.dtype)} to " f"{dtype_to_string(dtype)} which doesn't match the representation type " f"{dtype_to_string(rep_aval.dtype)}.") if isinstance(x, ShapedArray): return x.update(shape=(*x.shape, *rep_aval.shape), dtype=dtype) elif isinstance(x, core.DShapedArray): if all(isinstance(d, int) for d in x.shape): return core.ShapedArray(shape=(*x.shape, *rep_aval.shape), dtype=dtype) else: raise NotImplementedError else: assert False # unreachable, see isinstance check above from_edtype_p = Primitive('from_edtype') from_edtype_p.def_impl(partial(dispatch.apply_primitive, from_edtype_p)) from_edtype_p.def_abstract_eval(_from_edtype_abstract_eval) ad.defjvp(from_edtype_p, lambda t, x, dtype: convert_element_type(t, core.primal_dtype_to_tangent_dtype(dtype))) ad.primitive_transposes[from_edtype_p] = \ lambda ct, x, dtype: [to_edtype_p.bind(ct, edtype=x.dtype)] batching.defvectorized(from_edtype_p) mlir.register_lowering(from_edtype_p, lambda _, x, **__: [x]) def _bitcast_convert_type_shape_rule(operand, *, new_dtype): old_dtype = operand.dtype old_nbits = dtypes.itemsize_bits(old_dtype) new_nbits = dtypes.itemsize_bits(new_dtype) if old_nbits == new_nbits: return operand.shape elif old_nbits > new_nbits: return (*operand.shape, old_nbits // new_nbits) else: dim_size = operand.shape[-1] if operand.shape else 1 if dim_size * old_nbits != new_nbits: raise ValueError( f"Attempting to convert array of shape {operand.shape} " f"from {old_dtype} of size {old_nbits} bits " f"to {new_dtype} of size {new_nbits}, bits " f"but {dim_size} * {old_nbits} != {new_nbits}") return operand.shape[:-1] def _bitcast_convert_type_sharding_rule(operand, *, new_dtype): old_dtype = operand.dtype old_nbits = dtypes.itemsize_bits(old_dtype) new_nbits = dtypes.itemsize_bits(new_dtype) if old_nbits == new_nbits: return operand.sharding elif old_nbits > new_nbits: return operand.sharding.update(spec=(*operand.sharding.spec, None)) else: return operand.sharding.update(spec=operand.sharding.spec[:-1]) def _bitcast_convert_type_dtype_rule(operand, *, new_dtype): old_dtype = operand.dtype if (dtypes.issubdtype(old_dtype, np.bool_) or dtypes.issubdtype(old_dtype, np.complexfloating) or dtypes.issubdtype(new_dtype, np.bool_) or dtypes.issubdtype(new_dtype, np.complexfloating)): if old_dtype != new_dtype: raise TypeError("lax.bitcast_convert_type does not support bool or complex values " "unless the operand and destination types match. " f"Got operand dtype={old_dtype}, {new_dtype=}. 
" "Consider using the arr.view() method instead.") return new_dtype bitcast_convert_type_p = standard_primitive( _bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule, 'bitcast_convert_type', weak_type_rule=_strip_weak_type, sharding_rule=_bitcast_convert_type_sharding_rule, vma_rule=partial(core.standard_vma_rule, 'bitcast_convert_type')) ad.defjvp_zero(bitcast_convert_type_p) batching.defvectorized(bitcast_convert_type_p) def _bitcast_convert_type_lower(ctx, operand, *, new_dtype): aval_out, = ctx.avals_out out = hlo.bitcast_convert(mlir.aval_to_ir_type(aval_out), operand) return [mlir.lower_with_sharding_in_types(ctx, out, aval_out)] mlir.register_lowering(bitcast_convert_type_p, _bitcast_convert_type_lower) def _validate_preferred_element_type(input_dtype, preferred_element_type): if (dtypes.issubdtype(input_dtype, np.integer) and dtypes.issubdtype(preferred_element_type, np.floating)): # Special-case integer->float multiply. This is allowed, and also allows # different signedness between input and output. pass else: allowed_types = (np.integer, np.floating, np.complexfloating) if any(dtypes.issubdtype(input_dtype, t) and not dtypes.issubdtype(preferred_element_type, t) for t in allowed_types): raise TypeError("Input type is incompatible with " "`preferred_element_type`. The compatible combinations " "of (input_type, preferred_element_type) are " "(integral, integral), (integral, floating), " "(floating, floating), (complex, complex.") if (dtypes.issubdtype(input_dtype, np.signedinteger) and not dtypes.issubdtype(preferred_element_type, np.signedinteger)): raise TypeError("`preferred_element_type` must have the same signedness " "as the original type.") input_bitwidth = np.dtype(input_dtype).itemsize preferred_bitwidth = np.dtype(preferred_element_type).itemsize if preferred_bitwidth < input_bitwidth: raise TypeError("`preferred_element_type` must not be narrower than the " "original type.") def _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision, preferred_element_type: DTypeLike | None, out_sharding): if out_sharding is not None and not isinstance(out_sharding, NamedSharding): raise NotImplementedError (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = _from_maybe_ragged(dimension_numbers) if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, lhs.ndim)) for d in (lhs_contracting, lhs_batch)): msg = ("dot_general requires lhs dimension numbers to be nonnegative and " "less than the number of axes of the lhs value, got " f"lhs_batch of {lhs_batch} and lhs_contracting of {lhs_contracting} " f"for lhs of rank {lhs.ndim}") raise TypeError(msg) if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, rhs.ndim)) for d in (rhs_contracting, rhs_batch)): msg = ("dot_general requires rhs dimension numbers to be nonnegative and " "less than the number of axes of the rhs value, got " f"rhs_batch of {rhs_batch} and rhs_contracting of {rhs_contracting} " f"for rhs of rank {rhs.ndim}") raise TypeError(msg) if len(lhs_batch) != len(rhs_batch): msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch " "dimensions, got lhs_batch {} and rhs_batch {}.") raise TypeError(msg.format(lhs_batch, rhs_batch)) lhs_contracting_set, lhs_batch_set = set(lhs_contracting), set(lhs_batch) rhs_contracting_set, rhs_batch_set = set(rhs_contracting), set(rhs_batch) if len(lhs_batch_set) != len(lhs_batch): msg = ("dot_general requires lhs batch dimensions to be distinct, got " f"lhs_batch {lhs_batch}.") raise TypeError(msg) if len(rhs_batch_set) != 
len(rhs_batch): msg = ("dot_general requires rhs batch dimensions to be distinct, got " f"rhs_batch {rhs_batch}.") raise TypeError(msg) if len(lhs_contracting_set) != len(lhs_contracting): msg = ("dot_general requires lhs contracting dimensions to be distinct, " f"got lhs_contracting {lhs_contracting}.") raise TypeError(msg) if len(rhs_contracting_set) != len(rhs_contracting): msg = ("dot_general requires rhs contracting dimensions to be distinct, " f"got rhs_contracting {rhs_contracting}.") raise TypeError(msg) if lhs_contracting_set & lhs_batch_set: msg = ("dot_general requires lhs batch dimensions to be disjoint from " "contracting dimensions, got lhs_batch {} and lhs_contracting {}.") raise TypeError(msg.format(lhs_batch, lhs_contracting)) if rhs_contracting_set & rhs_batch_set: msg = ("dot_general requires rhs batch dimensions to be disjoint from " "contracting dimensions, got rhs_batch {} and rhs_contracting {}.") raise TypeError(msg.format(rhs_batch, rhs_contracting)) lhs_batch_shape = tuple(lhs.shape[i] for i in lhs_batch) rhs_batch_shape = tuple(rhs.shape[i] for i in rhs_batch) if not core.definitely_equal_shape(lhs_batch_shape, rhs_batch_shape): msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions " "to have the same shape, got {} and {}.") raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape)) lhs_contracting_shape = tuple(lhs.shape[i] for i in lhs_contracting) rhs_contracting_shape = tuple(rhs.shape[i] for i in rhs_contracting) if not core.definitely_equal_shape(lhs_contracting_shape, rhs_contracting_shape): msg = ("dot_general requires contracting dimensions to have the same " "shape, got {} and {}.") raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape)) return _dot_general_shape_computation(lhs.shape, rhs.shape, dimension_numbers) def _dot_general_shape_computation(lhs_shape, rhs_shape, dimension_numbers): (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = _from_maybe_ragged(dimension_numbers) batch_shape = tuple(lhs_shape[i] for i in lhs_batch) lhs_contract_or_batch = tuple(sorted(tuple(lhs_contracting) + tuple(lhs_batch))) lhs_tensored_shape = tuple_delete(lhs_shape, lhs_contract_or_batch) rhs_group = () if isinstance(dimension_numbers, RaggedDotDimensionNumbers): rhs_group = tuple(dimension_numbers.rhs_group_dimensions) # pytype: disable=attribute-error rhs_contract_or_batch_or_group = tuple( sorted(tuple(rhs_contracting) + tuple(rhs_batch) + rhs_group) ) rhs_tensored_shape = tuple_delete(rhs_shape, rhs_contract_or_batch_or_group) return batch_shape + lhs_tensored_shape + rhs_tensored_shape def _check_specs_match(lhs_spec, rhs_spec, msg): for l, r in zip(lhs_spec, rhs_spec): if l is not None and r is not None and l != r: raise core.ShardingTypeError(msg) def _dot_general_sharding_rule(lhs, rhs, *, dimension_numbers, precision, preferred_element_type: DTypeLike | None, out_sharding): if (not lhs.sharding.mesh.empty and not rhs.sharding.mesh.empty and lhs.sharding.mesh != rhs.sharding.mesh): raise core.ShardingTypeError( 'Mesh of both lhs and rhs should match. 
Got lhs:' f' {lhs.sharding.mesh} and rhs: {rhs.sharding.mesh}') if out_sharding is not None: assert isinstance(out_sharding, NamedSharding) return out_sharding (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers lhs_contracting_spec = tuple(lhs.sharding.spec[i] for i in lhs_contracting) rhs_contracting_spec = tuple(rhs.sharding.spec[i] for i in rhs_contracting) lhs_batch_spec = tuple(lhs.sharding.spec[i] for i in lhs_batch) rhs_batch_spec = tuple(rhs.sharding.spec[i] for i in rhs_batch) msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions " f"to have the consistent sharding, got {lhs_batch_spec} and " f"{rhs_batch_spec}.") _check_specs_match(lhs_batch_spec, rhs_batch_spec, msg) msg = ("dot_general requires contracting dimensions to have consistent " f"sharding, got {lhs_contracting_spec} and {rhs_contracting_spec}.") _check_specs_match(lhs_contracting_spec, rhs_contracting_spec, msg) for l, r in zip(lhs_contracting_spec, rhs_contracting_spec): if l is not None and r is not None: raise core.ShardingTypeError( 'Contracting dimensions are sharded and it is ambiguous how the' ' output should be sharded. Please specify the output sharding via' ' the `out_sharding` parameter.' f' Got {lhs_contracting_spec=} and {rhs_contracting_spec=}') if lhs.sharding.mesh.empty and not rhs.sharding.mesh.empty: mesh = rhs.sharding.mesh else: mesh = lhs.sharding.mesh return _dot_general_sharding_computation( lhs.sharding.spec, rhs.sharding.spec, dimension_numbers, mesh) def _dot_general_sharding_computation(lhs_spec, rhs_spec, dimension_numbers, mesh): (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers batch_spec = tuple(lhs_spec[i] for i in lhs_batch) lhs_contract_or_batch = tuple(sorted(tuple(lhs_contracting) + tuple(lhs_batch))) lhs_tensored_spec = tuple_delete(lhs_spec, lhs_contract_or_batch) rhs_contract_or_batch = tuple(sorted(tuple(rhs_contracting) + tuple(rhs_batch))) rhs_tensored_spec = tuple_delete(rhs_spec, rhs_contract_or_batch) return NamedSharding(mesh, P(*(batch_spec + lhs_tensored_spec + rhs_tensored_spec))) def _dot_general_unreduced_rule(out_s, lhs, rhs, *, dimension_numbers, **kwargs): if lhs.sharding.spec.unreduced or rhs.sharding.spec.unreduced: raise core.ShardingTypeError( f'lhs or rhs passed to dot_general cannot be unreduced. Got {lhs=} and' f' {rhs=}') if out_s.spec.unreduced: (lhs_contracting, rhs_contracting), _ = dimension_numbers lhs_contracting_spec = tuple(lhs.sharding.spec[i] for i in lhs_contracting) rhs_contracting_spec = tuple(rhs.sharding.spec[i] for i in rhs_contracting) if lhs_contracting_spec != rhs_contracting_spec: raise core.ShardingTypeError( 'lhs and rhs contracting dims should be sharded identically when' ' out_sharding provided to dot_general mentions unreduced_axes.' f' Got {out_s=}, {lhs_contracting_spec=},' f' {rhs_contracting_spec=}') flat_spec = [s for s in flatten_spec(lhs_contracting_spec) if s is not None] if out_s.spec.unreduced != frozenset(flat_spec): raise core.ShardingTypeError( "out_sharding's unreduced axes should be equal to the contracting" f' specs. 
Got unreduced axes={out_s.spec.unreduced} and' f' contracting spec={lhs_contracting_spec}') return out_s def _dot_general_reduced_rule(out_s, lhs, rhs, *, dimension_numbers, **kwargs): return out_s def tuple_delete(tup, idx): idx_ = set(idx) return tuple(tup[i] for i in range(len(tup)) if i not in idx_) def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision, preferred_element_type: DTypeLike | None, out_sharding, name: str = 'lax.dot_general'): if out_sharding is not None and not isinstance(out_sharding, NamedSharding): raise NotImplementedError del dimension_numbers # unused # We're mostly matching XLA's logic here, namely in shape_inference.cc and # primitive_util.h's HigherPrecisionType, e.g. # https://github.com/openxla/xla/blob/ea3a841768d0dcf192e5820c9b25c34c73f2226a/xla/primitive_util.h#L329 def type_properties(dt): c = _real_dtype(dt) if dtypes.issubdtype(dt, np.complexfloating) else dt return (dtypes.issubdtype(dt, np.complexfloating), dtypes.finfo(c).maxexp if dtypes.issubdtype(c, np.floating) else -1, dtypes.finfo(c).nmant if dtypes.issubdtype(c, np.floating) else -1, _bit_width(c), not dtypes.issubdtype(c, np.unsignedinteger)) lhs_prop, rhs_prop = type_properties(lhs.dtype), type_properties(rhs.dtype) if lhs_prop > rhs_prop: result_dtype = lhs.dtype elif rhs_prop > lhs_prop: result_dtype = rhs.dtype else: if lhs.dtype != rhs.dtype: raise TypeError(f'{name} argument type error: {lhs.dtype}, {rhs.dtype}') result_dtype = lhs.dtype has_algorithm = isinstance(precision, (DotAlgorithm, DotAlgorithmPreset)) return _maybe_upcast(result_dtype, preferred_element_type, check_bit_width=not has_algorithm) def _bit_width(d): if dtypes.issubdtype(d, np.inexact): return dtypes.finfo(d).bits elif dtypes.issubdtype(d, np.integer): return dtypes.iinfo(d).bits elif d == np.dtype('bool'): return 1 else: assert False, d # should be unreachable, open an issue! 
def _maybe_upcast(result_dtype, preferred_element_type, check_bit_width): # replicates the logic in shape_inference.cc's MaybeUpcast if (preferred_element_type is None or result_dtype == preferred_element_type): return result_dtype if (check_bit_width and not dtypes.issubdtype(result_dtype, np.floating) and _bit_width(preferred_element_type) < _bit_width(result_dtype)): raise TypeError("`preferred_element_type` must not be narrower than the " "original type, got preferred_element_type of " f"{preferred_element_type} for result type of " f"{result_dtype}.") return preferred_element_type def _dot_general_transpose_lhs(g, x, y, *, dimension_numbers, precision, preferred_element_type: DTypeLike | None, out_sharding, swap_ans=False): (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers x_ndim = x.aval.ndim x_kept = remaining(range(x_ndim), x_contract, x_batch) y_kept = remaining(range(np.ndim(y)), y_contract, y_batch) if swap_ans: ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept) else: ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept) dims = ((ans_y, y_kept), (ans_batch, y_batch)) x_contract_sorted_by_y = list(np.take(x_contract, np.argsort(y_contract))) unsorted_axes = list(x_batch) + x_kept + x_contract_sorted_by_y out_axes = np.argsort(unsorted_axes) xs = x.aval.to_cotangent_aval().sharding inverse_spec = tuple(xs.spec[o] for o in unsorted_axes) ds = xs.update(spec=xs.spec.update(partitions=inverse_spec)) dot_general_out = dot_general(g, y, dims, precision=precision, preferred_element_type=preferred_element_type, out_sharding=ds) x_bar = transpose(dot_general_out, tuple(out_axes)) if x_bar.dtype != x.aval.dtype: x_bar = _convert_element_type(x_bar, x.aval.dtype, x.aval.weak_type) return x_bar def _dot_general_transpose_rhs(g, x, y, *, dimension_numbers, precision, preferred_element_type: DTypeLike | None, out_sharding): (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch)) return _dot_general_transpose_lhs( g, y, x, dimension_numbers=swapped_dimension_numbers, precision=precision, preferred_element_type=preferred_element_type, out_sharding=out_sharding, swap_ans=True) def _dot_batch_rule( unpack_args, unpack_dims, invoke_prim, axis_data, batched_args, batch_dims, *, dimension_numbers, out_sharding, precision, preferred_element_type: DTypeLike | None, **_, ): lhs, rhs = unpack_args(batched_args) lbd, rbd = unpack_dims(batch_dims) left_stack_dim = lbd.stacked_axis if type(lbd) is RaggedAxis else lbd right_stack_dim = rbd.stacked_axis if type(rbd) is RaggedAxis else rbd new_dimension_numbers, result_stack_dim = _dot_general_batch_dim_nums( (np.ndim(lhs), np.ndim(rhs)), (left_stack_dim, right_stack_dim), dimension_numbers) # TODO Should probably check that any ragged dimensions have corresponding # sizes, because otherwise the dot product is technically undefined. # # This masking is not strictly necessary for non-contraction dimensions; # we could micro-optimize here by avoiding computing that mask. 
if type(lbd) is RaggedAxis: lhs = batching.mask_ragged_axes(lhs, _get_sum_identity, lbd) lhs_shape = batching.bdim_as_shape(lbd, lhs.shape) else: lhs_shape = np.shape(lhs) if type(rbd) is RaggedAxis: rhs = batching.mask_ragged_axes(rhs, _get_sum_identity, rbd) rhs_shape = batching.bdim_as_shape(rbd, rhs.shape) else: rhs_shape = np.shape(rhs) result_batch_dim = batching.shape_as_bdim( result_stack_dim, _dot_general_shape_computation(lhs_shape, rhs_shape, new_dimension_numbers)) if out_sharding is not None: out_sharding = batching.get_sharding_for_vmap( axis_data, out_sharding, result_batch_dim) batched_out = invoke_prim( lhs, rhs, new_dimension_numbers, precision=precision, preferred_element_type=preferred_element_type, out_sharding=out_sharding, ) return batched_out, result_batch_dim def _dot_general_batch_dim_nums(ndims, batch_dims, dimension_numbers): # There are three kinds of dimensions in a dot_general: # - contraction dimensions appear in lhs and rhs but not the result # - batch dimensions appear in lhs, rhs, and result # - tensor product dimensions appear in the result and one of lhs or rhs # The dimensions of the result are ordered as # - Batch dimensions # - Q: In what order? The order of appearance in lhs, rhs, or # dimension_numbers? # - Tensor dimensions from the LHS # - Tensor dimensions from the RHS lhs_ndim, rhs_ndim = ndims # lbd and rbd are "batch" dimensions in the sense of dimensions being # vmapped, not to be confused with "batch" dimensions in the sense of # explicitly present dimensions that this dot_general is zipping together. lbd, rbd = batch_dims assert lbd is not None or rbd is not None (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = _from_maybe_ragged(dimension_numbers) is_ragged_dot = isinstance(dimension_numbers, RaggedDotDimensionNumbers) def bump_dims(dims, b): return tuple(np.add(dims, np.greater_equal(dims, b))) if type(lbd) is type(rbd) is int: # The vmapped dimensions become an additional batch dimension in the # batched dot_general, which we arbitrarily put first. lhs_batch = (lbd,) + bump_dims(lhs_batch, lbd) rhs_batch = (rbd,) + bump_dims(rhs_batch, rbd) lhs_contract = bump_dims(lhs_contract, lbd) rhs_contract = bump_dims(rhs_contract, rbd) result_batch_dim = 0 elif (type(lbd) is int and rbd is None): # The left vmapped dimension becomes an additional tensor dimension in the # batched dot_general. lhs_tensor = [d for d in range(lhs_ndim) if d not in lhs_batch and d not in lhs_contract] result_batch_dim = len(lhs_batch) + int(sum(np.less(lhs_tensor, lbd))) lhs_batch = bump_dims(lhs_batch, lbd) lhs_contract = bump_dims(lhs_contract, lbd) elif (type(rbd) is int and lbd is None): # The right vmapped dimension becomes an additional tensor dimension in the # batched dot_general. rhs_tensor = list( remaining( range(rhs_ndim), rhs_batch, rhs_contract, dimension_numbers.rhs_group_dimensions if is_ragged_dot else [], ) ) result_batch_dim = (lhs_ndim - len(lhs_contract) + int(sum(np.less(rhs_tensor, rbd)))) rhs_batch = bump_dims(rhs_batch, rbd) rhs_contract = bump_dims(rhs_contract, rbd) else: # We wouldn't be here if we didn't have at least one vmapped dimension. 
assert False new_dimension_numbers = ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)) if is_ragged_dot: new_dimension_numbers = RaggedDotDimensionNumbers( dot_dimension_numbers=new_dimension_numbers, lhs_ragged_dimensions=bump_dims( dimension_numbers.lhs_ragged_dimensions, lbd ), rhs_group_dimensions=bump_dims( dimension_numbers.rhs_group_dimensions, rbd ), ) return new_dimension_numbers, result_batch_dim def _dot_general_padding_rule(in_avals, out_avals, lhs, rhs, *, dimension_numbers, **params): lhs_aval, _ = in_avals (lhs_contract, _), _ = dimension_numbers padded_axes = [(i, lhs_aval.shape[i].val) for i in lhs_contract if isinstance(lhs_aval.shape[i], pe.BoundedAxisSize)] lhs_ = _replace_masked_values(lhs, 0, padded_axes) return [dot_general(lhs_, rhs, dimension_numbers=dimension_numbers, **params)] def _dot_general_pp_rule(eqn, context, settings) -> pp.Doc: # * suppress printing precision or preferred_element_type when None. # * print dimension_numbers as list-of-lists to be shorter. printed_params = {k: v for k, v in eqn.params.items() if v is not None} (lhs_cont, rhs_cont), (lhs_batch, rhs_batch) = eqn.params['dimension_numbers'] printed_params['dimension_numbers'] = ( (list(lhs_cont), list(rhs_cont)), (list(lhs_batch), list(rhs_batch))) return core._pp_eqn(eqn.replace(params=printed_params), context, settings) def _dot_general_ragged_prop_rule(eqn_params, invar_raggedness, outvars): assert len(invar_raggedness) == 2 assert len(outvars) == 1 invar_raggedness_lhs = invar_raggedness[0] invar_raggedness_rhs = invar_raggedness[1] dimension_numbers = eqn_params['dimension_numbers'] (lhs_contracting, rhs_contracting), (_, _) = dimension_numbers if not invar_raggedness_lhs and not invar_raggedness_rhs: # Both are dense - it is valid to reach here, because dense operations # are legal in code running under ragged prop. return invar_raggedness, [None] if not invar_raggedness_lhs or not invar_raggedness_rhs: # One ragged, one dense if not invar_raggedness_lhs: # left is dense, right is ragged _, ragged_axis_dim_rhs, _, _ = invar_raggedness_rhs if rhs_contracting != ragged_axis_dim_rhs: # Contraction is on a dense dimension, this is valid! return invar_raggedness, [None] if not invar_raggedness_rhs: # left is ragged, right is dense _, ragged_axis_dim_lhs, _, _ = invar_raggedness_lhs if lhs_contracting != ragged_axis_dim_lhs: # Contraction is on a dense dimension, this is valid! return invar_raggedness, [None] raise NotImplementedError('NYI - dense and ragged dim contraction') stacked_axis_lhs, ragged_axis_dim_lhs, _, _ = invar_raggedness_lhs stacked_axis_rhs, ragged_axis_dim_rhs, _, _ = invar_raggedness_rhs if stacked_axis_rhs != 0 or stacked_axis_lhs != 0: raise NotImplementedError( 'Dot general ragged prop for non 0 stacked axis, NYI' ) # We only support ragged k atm, that is, lhs is (m, ragged_k) and rhs is # (ragged_k, n), meaning the output is dense. if ragged_axis_dim_lhs != 2 or ragged_axis_dim_rhs != 1: raise NotImplementedError( 'Dot general ragged prop for non contraction raggedness, NYI' ) assert len(outvars) == 1 # TODO(mvoz): A constant on batching.* ? 
# Dense (m, n) - no jumble only atm return invar_raggedness, [None] dot_general_p = standard_primitive( _dot_general_shape_rule, _dot_general_dtype_rule, 'dot_general', sharding_rule=_dot_general_sharding_rule, vma_rule=partial(core.standard_vma_rule, 'dot_general'), unreduced_rule=_dot_general_unreduced_rule, reduced_rule=_dot_general_reduced_rule, ) def _dot_general_batch_unpack_args(batch_args): lhs, rhs = batch_args return (lhs, rhs) def _dot_general_batch_unpack_dims(batch_dims): lbd, rbd = batch_dims return (lbd, rbd) ad.defbilinear(dot_general_p, _dot_general_transpose_lhs, _dot_general_transpose_rhs) _dot_general_batch_rule = functools.partial( _dot_batch_rule, _dot_general_batch_unpack_args, _dot_general_batch_unpack_dims, dot_general, ) batching.fancy_primitive_batchers[dot_general_p] = _dot_general_batch_rule batching.skippable_batchers[dot_general_p] = lambda _: () pe.padding_rules[dot_general_p] = _dot_general_padding_rule core.pp_eqn_rules[dot_general_p] = _dot_general_pp_rule batching.ragged_prop_rules[dot_general_p] = _dot_general_ragged_prop_rule def _full_precision(precision: Precision) -> tuple[Precision, Precision]: if precision is None or isinstance(precision, (DotAlgorithm, DotAlgorithmPreset)): return (Precision.DEFAULT, Precision.DEFAULT) elif not isinstance(precision, tuple): return (precision, precision) else: return precision def precision_attr(precision: Precision) -> ir.ArrayAttr: return ir.ArrayAttr.get( [hlo.PrecisionAttr.get(str(p)) for p in _full_precision(precision)] ) def chlo_precision_attr(precision: Precision) -> ir.ArrayAttr: return ir.ArrayAttr.get( [chlo.PrecisionAttr.get(str(p)) for p in _full_precision(precision)] ) def dot_algorithm_attr(precision: CanonicalPrecision, lhs_dtype: DTypeLike, rhs_dtype: DTypeLike) -> hlo.DotAlgorithm | None: if not isinstance(precision, (DotAlgorithm, DotAlgorithmPreset)): return None return precision._convert_to_hlo_attr(lhs_dtype, rhs_dtype) def get_algorithm_compute_types( algorithm: DotAlgorithm | DotAlgorithmPreset, lhs_dtype: DTypeLike, rhs_dtype: DTypeLike, out_dtype: DTypeLike | None = None, ) -> tuple[DTypeLike | None, DTypeLike | None, DTypeLike | None]: if isinstance(algorithm, DotAlgorithm): return ( algorithm.lhs_precision_type, algorithm.rhs_precision_type, algorithm.accumulation_type, ) def maybe_convert_dtype(input_dtype, target_dtypes): if target_dtypes is None: return input_dtype if np.dtype(input_dtype) in map(np.dtype, target_dtypes): return input_dtype return target_dtypes[0] lhs_dtype = maybe_convert_dtype(lhs_dtype, algorithm.supported_lhs_types) rhs_dtype = maybe_convert_dtype(rhs_dtype, algorithm.supported_rhs_types) out_type = maybe_convert_dtype( out_dtype, algorithm.supported_output_types(lhs_dtype, rhs_dtype) ) return lhs_dtype, rhs_dtype, out_type def accuracy_attr(accuracy) -> hlo.ResultAccuracyAttr: if isinstance(accuracy, AccuracyMode): return hlo.ResultAccuracyAttr.get(0.0, 0.0, int(0), str(accuracy.name)) elif isinstance(accuracy, Tolerance): return hlo.ResultAccuracyAttr.get( atol=accuracy.atol, rtol=accuracy.rtol, ulps=accuracy.ulps, mode='TOLERANCE', ) def _handle_dot_precision(ctx, lhs, rhs, precision, platform): def _is_fp8_mixed_precision_matmul(_lhs_dtypes, _rhs_dtypes): fp8_dtypes = (dtypes.float8_e4m3fn, dtypes.float8_e5m2, dtypes.float8_e5m2fnuz, dtypes.float8_e4m3fnuz, dtypes.float8_e3m4, dtypes.float8_e4m3, dtypes.float8_e8m0fnu) return _lhs_dtypes in fp8_dtypes and _rhs_dtypes in fp8_dtypes # The *_ lets us reuse this for ragged_dot_general, which has group_sizes. 
lhs_aval, rhs_aval, *_ = ctx.avals_in lhs_dtype, rhs_dtype = lhs_aval.dtype, rhs_aval.dtype aval_out, = ctx.avals_out accumulation_aval = aval_out algorithm_kwarg = {} if isinstance(precision, (DotAlgorithm, DotAlgorithmPreset)): # The CPU backend silently ignores the algorithm spec, so we check here to # make sure that the selected algorithm is supported. We could be a little # bit more liberal here (any algorithm where the input and output types # match and all the other parameters have default values should work), but # it's probably sufficient to just check the presets here. if platform == "cpu" and precision not in { DotAlgorithmPreset.DEFAULT, DotAlgorithmPreset.F16_F16_F16, DotAlgorithmPreset.F32_F32_F32, DotAlgorithmPreset.F64_F64_F64, DotAlgorithmPreset.BF16_BF16_F32, DotAlgorithmPreset.BF16_BF16_F32_X3, DotAlgorithmPreset.BF16_BF16_F32_X6, }: raise ValueError( f"The precision '{precision}' is not supported by dot_general on CPU") # If an explicit algorithm was specified, we always cast the input types to # the correct types. def maybe_convert_dtype(operand, operand_aval, target_dtype): if target_dtype is None or operand_aval.dtype == target_dtype: return operand aval = core.ShapedArray(operand_aval.shape, target_dtype) return mlir.convert_hlo(ctx, operand, operand_aval, aval) lhs_dtype, rhs_dtype, accumulation_dtype = get_algorithm_compute_types( precision, lhs_dtype, rhs_dtype, aval_out.dtype) lhs = maybe_convert_dtype(lhs, lhs_aval, lhs_dtype) rhs = maybe_convert_dtype(rhs, rhs_aval, rhs_dtype) if accumulation_dtype is not None: accumulation_aval = core.ShapedArray(aval_out.shape, accumulation_dtype) if precision != DotAlgorithmPreset.DEFAULT: algorithm_kwarg = { "algorithm": dot_algorithm_attr(precision, lhs_dtype, rhs_dtype) } else: # TODO(b/...): JAX's dot_general primitive accepts the same input dtype # combinations that are accepted in XLA's shape_inference.cc (the canonical # reference for the HLO type system), but actually different XLA platforms # fail on codegen for different accepted cases. To handle those cases, we # insert ConvertOps on the input, in a platform-dependent way. if lhs_dtype != rhs_dtype: if platform == "tpu": handled = lambda dt: (dtypes.issubdtype(dt, np.floating) or dtypes.issubdtype(dt, np.integer)) if not (handled(lhs_dtype) and handled(rhs_dtype)): lhs = mlir.convert_hlo(ctx, lhs, lhs_aval, core.ShapedArray(lhs_aval.shape, aval_out.dtype)) rhs = mlir.convert_hlo(ctx, rhs, rhs_aval, core.ShapedArray(rhs_aval.shape, aval_out.dtype)) else: # cpu and gpu # Do not convert mixed fp8 types to output type. 
if not _is_fp8_mixed_precision_matmul(lhs_dtype, rhs_dtype): lhs = mlir.convert_hlo(ctx, lhs, lhs_aval, core.ShapedArray(lhs_aval.shape, aval_out.dtype)) rhs = mlir.convert_hlo(ctx, rhs, rhs_aval, core.ShapedArray(rhs_aval.shape, aval_out.dtype)) return lhs, rhs, accumulation_aval, algorithm_kwarg def _dot_general_lower(ctx, lhs, rhs, *, dimension_numbers, precision, preferred_element_type: np.dtype | None, out_sharding, platform: str = "default"): del preferred_element_type # Implied by the output aval lhs, rhs, accumulation_aval, algorithm_kwarg = _handle_dot_precision( ctx, lhs, rhs, precision, platform ) (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers dot_dnums = hlo.DotDimensionNumbers.get( lhs_batching_dimensions=list(lhs_batch), rhs_batching_dimensions=list(rhs_batch), lhs_contracting_dimensions=list(lhs_contracting), rhs_contracting_dimensions=list(rhs_contracting)) result = hlo.dot_general( mlir.aval_to_ir_type(accumulation_aval), lhs, rhs, dot_dnums, precision_config=precision_attr(precision), **algorithm_kwarg, ) aval_out, = ctx.avals_out result = mlir.lower_with_sharding_in_types(ctx, result, aval_out) if accumulation_aval.dtype != aval_out.dtype: result = mlir.convert_hlo(ctx, result, accumulation_aval, aval_out) return [result] mlir.register_lowering(dot_general_p, _dot_general_lower) for platform in ["cpu", "tpu"]: mlir.register_lowering(dot_general_p, partial(_dot_general_lower, platform=platform), platform=platform)
RaggedDotDimensionNumbers
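The lowering and shape-rule code in the record above ultimately backs the public jax.lax API. A minimal sketch, assuming nothing beyond jax being installed, of the dimension-number convention that _dot_general_shape_computation encodes (output axes are the batch dims, then the remaining lhs dims, then the remaining rhs dims) and of convert_element_type; the shapes and dtypes below are arbitrary illustration choices, not values from the record.

import jax.numpy as jnp
from jax import lax

lhs = jnp.ones((2, 3, 4))   # (batch, m, k)
rhs = jnp.ones((2, 4, 5))   # (batch, k, n)

# dimension_numbers = ((lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch));
# the result is laid out as batch dims, then remaining lhs dims, then remaining rhs dims.
out = lax.dot_general(lhs, rhs, dimension_numbers=(((2,), (1,)), ((0,), (0,))))
print(out.shape)            # (2, 3, 5)

# convert_element_type changes dtype while keeping the shape, matching the
# shape/dtype rules registered for convert_element_type_p in the snippet.
x = lax.convert_element_type(out, jnp.bfloat16)
print(x.dtype)              # bfloat16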
python
run-llama__llama_index
llama-index-core/llama_index/core/graph_stores/simple.py
{ "start": 390, "end": 2071 }
class ____(DataClassJsonMixin): """ Simple Graph Store Data container. Args: graph_dict (Optional[dict]): dict mapping subject to """ graph_dict: Dict[str, List[List[str]]] = field(default_factory=dict) def get_rel_map( self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30 ) -> Dict[str, List[List[str]]]: """Get subjects' rel map in max depth.""" if subjs is None: subjs = list(self.graph_dict.keys()) rel_map = {} for subj in subjs: rel_map[subj] = self._get_rel_map(subj, depth=depth, limit=limit) # TBD, truncate the rel_map in a spread way, now just truncate based # on iteration order rel_count = 0 return_map = {} for subj in rel_map: if rel_count + len(rel_map[subj]) > limit: return_map[subj] = rel_map[subj][: limit - rel_count] break else: return_map[subj] = rel_map[subj] rel_count += len(rel_map[subj]) return return_map def _get_rel_map( self, subj: str, depth: int = 2, limit: int = 30 ) -> List[List[str]]: """Get one subect's rel map in max depth.""" if depth == 0: return [] rel_map = [] rel_count = 0 if subj in self.graph_dict: for rel, obj in self.graph_dict[subj]: if rel_count >= limit: break rel_map.append([subj, rel, obj]) rel_map += self._get_rel_map(obj, depth=depth - 1) rel_count += 1 return rel_map
SimpleGraphStoreData
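The get_rel_map/_get_rel_map pair above performs a depth-limited walk over subject -> (relation, object) edges. A self-contained sketch of that traversal on a toy triplet store; the helper name and the toy graph are illustrative stand-ins, not part of llama_index.

# Minimal stand-in for the depth-limited rel-map walk shown above.
from typing import Dict, List

def rel_map(graph: Dict[str, List[List[str]]], subj: str, depth: int = 2) -> List[List[str]]:
    # Stop when the depth budget is spent or the subject has no outgoing edges.
    if depth == 0 or subj not in graph:
        return []
    triplets: List[List[str]] = []
    for rel, obj in graph[subj]:
        triplets.append([subj, rel, obj])
        triplets += rel_map(graph, obj, depth - 1)
    return triplets

toy = {"A": [["likes", "B"]], "B": [["knows", "C"]]}
print(rel_map(toy, "A"))  # [['A', 'likes', 'B'], ['B', 'knows', 'C']]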
python
getsentry__sentry
src/sentry/backup/crypto.py
{ "start": 7877, "end": 9547 }
class ____(Decryptor): """ Decrypt using a private key stored on the local machine. """ def __init__(self, fp: IO[bytes]): self.__key = fp.read() @classmethod def from_bytes(cls, b: bytes) -> LocalFileDecryptor: return cls(io.BytesIO(b)) def decrypt_data_encryption_key(self, unwrapped: UnwrappedEncryptedExportTarball) -> bytes: """ Decrypt the encrypted data encryption key used to encrypt the actual export JSON. """ # Compare the public and private key, to ensure that they are a match. private_key_pem = self.__key private_key = serialization.load_pem_private_key( private_key_pem, password=None, backend=default_backend(), ) generated_public_key_pem = private_key.public_key().public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo, ) if unwrapped.plain_public_key_pem != generated_public_key_pem: raise DecryptionError( "The public key does not match that generated by the `decrypt_with` private key." ) private_key = serialization.load_pem_private_key( private_key_pem, password=None, backend=default_backend(), ) return private_key.decrypt( # type: ignore[union-attr] unwrapped.encrypted_data_encryption_key, padding.OAEP( mgf=padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None, ), )
LocalFileDecryptor
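The decryptor above unwraps a data-encryption key with RSA-OAEP over SHA-256 via the cryptography package. A minimal round-trip sketch of that unwrap step using the same padding parameters; the key size and sample payload are arbitrary, and this is not Sentry's wrapping code.

# Round-trip sketch of RSA-OAEP key unwrapping with the `cryptography` package,
# mirroring the padding parameters used in the snippet above.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()

data_encryption_key = b"example symmetric key bytes"  # arbitrary sample payload
wrapped = public_key.encrypt(
    data_encryption_key,
    padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                 algorithm=hashes.SHA256(), label=None),
)
unwrapped = private_key.decrypt(
    wrapped,
    padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                 algorithm=hashes.SHA256(), label=None),
)
assert unwrapped == data_encryption_key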
python
ansible__ansible
test/units/plugins/action/test_action.py
{ "start": 32705, "end": 33334 }
class ____(unittest.TestCase): def test(self): data = {'ansible_playbook_python': '/usr/bin/python', 'ansible_python_interpreter': '/usr/bin/python', 'ansible_ssh_some_var': 'whatever', 'ansible_ssh_host_key_somehost': 'some key here', 'some_other_var': 'foo bar'} data = clean_facts(data) self.assertNotIn('ansible_playbook_python', data) self.assertNotIn('ansible_python_interpreter', data) self.assertIn('ansible_ssh_host_key_somehost', data) self.assertIn('some_other_var', data)
TestActionBaseCleanReturnedData
python
django__django
tests/model_inheritance/models.py
{ "start": 2203, "end": 2298 }
class ____(Restaurant): serves_gnocchi = models.BooleanField(default=False)
ItalianRestaurant
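The record above is Django multi-table inheritance: each child row is backed by a parent row, so child instances are also reachable through the parent model's manager. A sketch of the same pattern with hypothetical stand-in models; it assumes a configured Django project/app, and the model and field names are invented for illustration rather than taken from Django's test models.

# Hypothetical stand-in models illustrating multi-table inheritance; assumes
# these live inside an installed Django app with settings configured.
from django.db import models

class Eatery(models.Model):
    name = models.CharField(max_length=100)

class GnocchiHouse(Eatery):
    serves_gnocchi = models.BooleanField(default=False)

# Creating a child also creates the parent row, so it is visible via the
# parent manager:
#   GnocchiHouse.objects.create(name="Trattoria", serves_gnocchi=True)
#   Eatery.objects.filter(name="Trattoria").exists()  # True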
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/dataclassTransform2.py
{ "start": 821, "end": 1081 }
class ____(ModelBase, frozen=True): id: int = model_field() name: str = model_field() name2: str = model_field(alias="other_name", default="None") # This should generate an error because a non-frozen class cannot # derive from a frozen one.
Customer1
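The checker sample above leans on dataclass-style frozen semantics, which is also why its trailing comment flags a non-frozen subclass of a frozen base as an error. A standard-library sketch of the runtime behaviour that "frozen" implies, using plain dataclasses only rather than the model_field/ModelBase machinery from the sample.

# Plain-dataclass sketch of what "frozen" means at runtime.
from dataclasses import FrozenInstanceError, dataclass

@dataclass(frozen=True)
class Point:
    x: int
    y: int

p = Point(1, 2)
try:
    p.x = 3  # frozen instances reject attribute assignment
except FrozenInstanceError:
    print("assignment rejected, as expected for a frozen dataclass")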
python
scipy__scipy
scipy/optimize/tests/test__basinhopping.py
{ "start": 1136, "end": 1693 }
class ____(RandomDisplacement): """use a copy of displace, but have it set a special parameter to make sure it's actually being used.""" def __init__(self): self.been_called = False super().__init__() def __call__(self, x): self.been_called = True return super().__call__(x) def myTakeStep2(x): """redo RandomDisplacement in function form without the attribute stepsize to make sure everything still works ok """ s = 0.5 x += np.random.uniform(-s, s, np.shape(x)) return x
MyTakeStep1
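Both steppers above are meant to be handed to scipy.optimize.basinhopping through its take_step argument. A short sketch wiring a function-form displacement, in the spirit of myTakeStep2, into a toy minimization; the objective, starting point, and step size are arbitrary illustration choices.

# Sketch of passing a custom displacement to basinhopping via take_step.
import numpy as np
from scipy.optimize import basinhopping

def take_step(x):
    s = 0.5
    return x + np.random.uniform(-s, s, np.shape(x))

def objective(x):
    return float(np.sum(x ** 2))

result = basinhopping(objective, x0=np.array([1.0, -1.0]),
                      niter=25, take_step=take_step)
print(result.x)  # close to the origin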
python
Netflix__metaflow
metaflow/client/core.py
{ "start": 37453, "end": 63842 }
class ____(MetaflowObject): """ A `Task` represents an execution of a `Step`. It contains all `DataArtifact` objects produced by the task as well as metadata related to execution. Note that the `@retry` decorator may cause multiple attempts of the task to be present. Usually you want the latest attempt, which is what instantiating a `Task` object returns by default. If you need to e.g. retrieve logs from a failed attempt, you can explicitly get information about a specific attempt by using the following syntax when creating a task: `Task('flow/run/step/task', attempt=<attempt>)` where `attempt=0` corresponds to the first attempt etc. Attributes ---------- metadata : List[Metadata] List of all metadata events associated with the task. metadata_dict : Dict[str, str] A condensed version of `metadata`: A dictionary where keys are names of metadata events and values the latest corresponding event. data : MetaflowData Container of all data artifacts produced by this task. Note that this call downloads all data locally, so it can be slower than accessing artifacts individually. See `MetaflowData` for more information. artifacts : MetaflowArtifacts Container of `DataArtifact` objects produced by this task. successful : bool True if the task completed successfully. finished : bool True if the task completed. exception : object Exception raised by this task if there was one. finished_at : datetime Time this task finished. runtime_name : str Runtime this task was executed on. stdout : str Standard output for the task execution. stderr : str Standard error output for the task execution. code : MetaflowCode Code package for this task (if present). See `MetaflowCode`. environment_info : Dict[str, str] Information about the execution environment. """ _NAME = "task" _PARENT_CLASS = "step" _CHILD_CLASS = "artifact" def _iter_filter(self, x): # exclude private data artifacts return x.id[0] != "_" def _get_matching_pathspecs(self, steps, metadata_key, metadata_pattern): """ Yield pathspecs of tasks from specified steps that match a given metadata pattern. Parameters ---------- steps : List[str] List of Step objects to search for tasks. metadata_key : str Metadata key to filter tasks on (e.g., 'foreach-execution-path'). metadata_pattern : str Regular expression pattern to match against the metadata value. Yields ------ str Pathspec of each task whose metadata value for the specified key matches the pattern. """ flow_id, run_id, _, _ = self.path_components for step in steps: task_pathspecs = self._metaflow.metadata.filter_tasks_by_metadata( flow_id, run_id, step, metadata_key, metadata_pattern ) for task_pathspec in task_pathspecs: yield task_pathspec @staticmethod def _get_previous_steps(graph_info, step_name): # Get the parent steps steps = [] for node_name, attributes in graph_info["steps"].items(): if step_name in attributes["next"]: steps.append(node_name) return steps @property def parent_task_pathspecs(self) -> Iterator[str]: """ Yields pathspecs of all parent tasks of the current task. 
Yields ------ str Pathspec of the parent task of the current task """ _, _, step_name, _ = self.path_components metadata_dict = self.metadata_dict graph_info = self["_graph_info"].data # Get the parent steps steps = self._get_previous_steps(graph_info, step_name) node_type = graph_info["steps"][step_name]["type"] metadata_key = "foreach-execution-path" current_path = metadata_dict.get(metadata_key) if len(steps) > 1: # Static join - use exact path matching pattern = current_path or ".*" else: if not steps: return # No parent steps, yield nothing if not current_path: # Current task is not part of a foreach # Pattern: ".*" pattern = ".*" else: current_depth = len(current_path.split(",")) if node_type == "join": # Foreach join # (Current task, "A:10,B:13") and (Parent task, "A:10,B:13,C:21") # Pattern: "A:10,B:13,.*" pattern = f"{current_path},.*" else: # Foreach split or linear step # Pattern: "A:10,B:13" parent_step_type = graph_info["steps"][steps[0]]["type"] target_depth = current_depth if ( parent_step_type == "split-foreach" or parent_step_type == "split-parallel" ) and current_depth == 1: # (Current task, "A:10") and (Parent task, "") pattern = ".*" else: # (Current task, "A:10,B:13,C:21") and (Parent task, "A:10,B:13") # (Current task, "A:10,B:13") and (Parent task, "A:10,B:13") if ( parent_step_type == "split-foreach" or parent_step_type == "split-parallel" ): target_depth = current_depth - 1 pattern = ",".join(current_path.split(",")[:target_depth]) for pathspec in self._get_matching_pathspecs(steps, metadata_key, pattern): yield pathspec @property def child_task_pathspecs(self) -> Iterator[str]: """ Yields pathspecs of all child tasks of the current task. Yields ------ str Pathspec of the child task of the current task """ flow_id, run_id, step_name, _ = self.path_components metadata_dict = self.metadata_dict graph_info = self["_graph_info"].data # Get the child steps steps = graph_info["steps"][step_name]["next"] node_type = graph_info["steps"][step_name]["type"] metadata_key = "foreach-execution-path" current_path = metadata_dict.get(metadata_key) if len(steps) > 1: # Static split - use exact path matching pattern = current_path or ".*" else: if not steps: return # No child steps, yield nothing if not current_path: # Current task is not part of a foreach # Pattern: ".*" pattern = ".*" else: current_depth = len(current_path.split(",")) if node_type == "split-foreach" or node_type == "split-parallel": # Foreach split # (Current task, "A:10,B:13") and (Child task, "A:10,B:13,C:21") # Pattern: "A:10,B:13,.*" pattern = f"{current_path},.*" else: # Foreach join or linear step # Pattern: "A:10,B:13" child_step_type = graph_info["steps"][steps[0]]["type"] # We need to know if the child step is a foreach join or a static join child_step_prev_steps = self._get_previous_steps( graph_info, steps[0] ) if len(child_step_prev_steps) > 1: child_step_type = "static-join" target_depth = current_depth if child_step_type == "join" and current_depth == 1: # (Current task, "A:10") and (Child task, "") pattern = ".*" else: # (Current task, "A:10,B:13,C:21") and (Child task, "A:10,B:13") # (Current task, "A:10,B:13") and (Child task, "A:10,B:13") if child_step_type == "join": target_depth = current_depth - 1 pattern = ",".join(current_path.split(",")[:target_depth]) for pathspec in self._get_matching_pathspecs(steps, metadata_key, pattern): yield pathspec @property def parent_tasks(self) -> Iterator["Task"]: """ Yields all parent tasks of the current task if one exists. 
Yields ------ Task Parent task of the current task """ parent_task_pathspecs = self.parent_task_pathspecs for pathspec in parent_task_pathspecs: yield Task(pathspec=pathspec, _namespace_check=False) @property def child_tasks(self) -> Iterator["Task"]: """ Yields all child tasks of the current task if one exists. Yields ------ Task Child task of the current task """ for pathspec in self.child_task_pathspecs: yield Task(pathspec=pathspec, _namespace_check=False) @property def metadata(self) -> List[Metadata]: """ Metadata events produced by this task across all attempts of the task *except* if you selected a specific task attempt. Note that Metadata is different from tags. Returns ------- List[Metadata] Metadata produced by this task """ all_metadata = self._metaflow.metadata.get_object( self._NAME, "metadata", None, self._attempt, *self.path_components ) all_metadata = all_metadata if all_metadata else [] # For "clones" (ie: they have an origin-run-id AND a origin-task-id), we # copy a set of metadata from the original task. This is needed to make things # like logs work (which rely on having proper values for ds-root for example) origin_run_id = None origin_task_id = None result = [] existing_keys = [] for obj in all_metadata: result.append( Metadata( name=obj.get("field_name"), value=obj.get("value"), created_at=obj.get("ts_epoch"), type=obj.get("type"), task=self, ) ) existing_keys.append(obj.get("field_name")) if obj.get("field_name") == "origin-run-id": origin_run_id = obj.get("value") elif obj.get("field_name") == "origin-task-id": origin_task_id = obj.get("value") if origin_task_id: # This is a "cloned" task. We consider that it has the same # metadata as the last attempt of the cloned task. origin_obj_pathcomponents = self.path_components origin_obj_pathcomponents[1] = origin_run_id origin_obj_pathcomponents[3] = origin_task_id origin_task = Task( "/".join(origin_obj_pathcomponents), _namespace_check=False ) latest_metadata = { m.name: m for m in sorted(origin_task.metadata, key=lambda m: m.created_at) } # We point to ourselves in the Metadata object for v in latest_metadata.values(): if v.name in existing_keys: continue result.append( Metadata( name=v.name, value=v.value, created_at=v.created_at, type=v.type, task=self, ) ) return result @property def metadata_dict(self) -> Dict[str, str]: """ Dictionary mapping metadata names (keys) and their associated values. Note that unlike the metadata() method, this call will only return the latest metadata for a given name. For example, if a task executes multiple times (retries), the same metadata name will be generated multiple times (one for each execution of the task). The metadata() method returns all those metadata elements whereas this call will return the metadata associated with the latest execution of the task. Returns ------- Dict[str, str] Dictionary mapping metadata name with value """ # use the newest version of each key, hence sorting return { m.name: m.value for m in sorted(self.metadata, key=lambda m: m.created_at) } @property def index(self) -> Optional[int]: """ Returns the index of the innermost foreach loop if this task is run inside at least one foreach. The index is what distinguishes the various tasks inside a given step. This call returns None if this task was not run in a foreach loop. 
Returns ------- int, optional Index in the innermost loop for this task """ try: return self["_foreach_stack"].data[-1].index except (KeyError, IndexError): return None @property def data(self) -> MetaflowData: """ Returns a container of data artifacts produced by this task. You can access data produced by this task as follows: ``` print(task.data.my_var) ``` Returns ------- MetaflowData Container of all artifacts produced by this task """ return MetaflowData(self) @property def artifacts(self) -> MetaflowArtifacts: """ Returns a container of DataArtifacts produced by this task. You can access each DataArtifact by name like so: ``` print(task.artifacts.my_var) ``` This method differs from data() because it returns DataArtifact objects (which contain additional metadata) as opposed to just the data. Returns ------- MetaflowArtifacts Container of all DataArtifacts produced by this task """ arts = list(self) obj = namedtuple("MetaflowArtifacts", [art.id for art in arts]) return obj._make(arts) @property def successful(self) -> bool: """ Indicates whether or not the task completed successfully. This information is always about the latest task to have completed (in case of retries). Returns ------- bool True if the task completed successfully and False otherwise """ try: return self["_success"].data except KeyError: return False @property def finished(self) -> bool: """ Indicates whether or not the task completed. This information is always about the latest task to have completed (in case of retries). Returns ------- bool True if the task completed and False otherwise """ try: return self["_task_ok"].data except KeyError: return False @property def exception(self) -> Optional[Any]: """ Returns the exception that caused the task to fail, if any. This information is always about the latest task to have completed (in case of retries). If successful() returns False and finished() returns True, this method can help determine what went wrong. Returns ------- object Exception raised by the task or None if not applicable """ try: return self["_exception"].data except KeyError: return None @property def finished_at(self) -> Optional[datetime]: """ Returns the datetime object of when the task finished (successfully or not). This information is always about the latest task to have completed (in case of retries). This call will return None if the task is not finished. Returns ------- datetime Datetime of when the task finished """ try: return self["_task_ok"].created_at except KeyError: return None @property def runtime_name(self) -> Optional[str]: """ Returns the name of the runtime this task executed on. Returns ------- str Name of the runtime this task executed on """ for t in self._tags: if t.startswith("runtime:"): return t.split(":")[1] return None @property def stdout(self) -> str: """ Returns the full standard out of this task. If you specify a specific attempt for this task, it will return the standard out for that attempt. If you do not specify an attempt, this will return the current standard out for the latest *started* attempt of the task. In both cases, multiple calls to this method will return the most up-to-date log (so if an attempt is not done, each call will fetch the latest log). Returns ------- str Standard output of this task """ return self._load_log("stdout") @property def stdout_size(self) -> int: """ Returns the size of the stdout log of this task. 
Similar to `stdout`, the size returned is the latest size of the log (so for a running attempt, this value will increase as the task produces more output). Returns ------- int Size of the stdout log content (in bytes) """ return self._get_logsize("stdout") @property def stderr(self) -> str: """ Returns the full standard error of this task. If you specify a specific attempt for this task, it will return the standard error for that attempt. If you do not specify an attempt, this will return the current standard error for the latest *started* attempt. In both cases, multiple calls to this method will return the most up-to-date log (so if an attempt is not done, each call will fetch the latest log). Returns ------- str Standard error of this task """ return self._load_log("stderr") @property def stderr_size(self) -> int: """ Returns the size of the stderr log of this task. Similar to `stderr`, the size returned is the latest size of the log (so for a running attempt, this value will increase as the task produces more output). Returns ------- int Size of the stderr log content (in bytes) """ return self._get_logsize("stderr") @property def current_attempt(self) -> int: """ Get the relevant attempt for this Task. Returns the specific attempt used when initializing the instance, or the latest *started* attempt for the Task. Returns ------- int attempt id for this task object """ if self._attempt is not None: attempt = self._attempt else: # It is possible that a task fails before any metadata has been # recorded. In this case, we assume that we are executing the # first attempt. # # FIXME: Technically we are looking at the latest *recorded* attempt # here. It is possible that logs exists for a newer attempt that # just failed to record metadata. We could make this logic more robust # and guarantee that we always return the latest available log. attempt = int(self.metadata_dict.get("attempt", 0)) return attempt @cached_property def code(self) -> Optional[MetaflowCode]: """ Returns the MetaflowCode object for this task, if present. Not all tasks save their code so this call may return None in those cases. Returns ------- MetaflowCode Code package for this task """ code_package = self.metadata_dict.get("code-package") if code_package: return MetaflowCode(self.path_components[0], code_package) return None @cached_property def environment_info(self) -> Dict[str, Any]: """ Returns information about the environment that was used to execute this task. As an example, if the Conda environment is selected, this will return information about the dependencies that were used in the environment. This environment information is only available for tasks that have a code package. 
Returns ------- Dict Dictionary describing the environment """ my_code = self.code if not my_code: return None env_type = my_code.info["environment_type"] if not env_type: return None env = [m for m in ENVIRONMENTS + [MetaflowEnvironment] if m.TYPE == env_type][0] meta_dict = self.metadata_dict return env.get_client_info(self.path_components[0], meta_dict) def _load_log(self, stream): meta_dict = self.metadata_dict log_location = meta_dict.get("log_location_%s" % stream) if log_location: return self._load_log_legacy(log_location, stream) else: return "".join( line + "\n" for _, line in self.loglines(stream, meta_dict=meta_dict) ) def _get_logsize(self, stream): meta_dict = self.metadata_dict log_location = meta_dict.get("log_location_%s" % stream) if log_location: return self._legacy_log_size(log_location, stream) else: return self._log_size(stream, meta_dict) def loglines( self, stream: str, as_unicode: bool = True, meta_dict: Optional[Dict[str, Any]] = None, ) -> Iterator[Tuple[datetime, str]]: """ Return an iterator over (utc_timestamp, logline) tuples. Parameters ---------- stream : str Either 'stdout' or 'stderr'. as_unicode : bool, default: True If as_unicode=False, each logline is returned as a byte object. Otherwise, it is returned as a (unicode) string. Yields ------ Tuple[datetime, str] Tuple of timestamp, logline pairs. """ from metaflow.mflog.mflog import merge_logs global filecache if meta_dict is None: meta_dict = self.metadata_dict ds_type = meta_dict.get("ds-type") ds_root = meta_dict.get("ds-root") if ds_type is None or ds_root is None: yield None, "" return if filecache is None: filecache = FileCache() attempt = self.current_attempt logs = filecache.get_logs_stream( ds_type, ds_root, stream, attempt, *self.path_components ) for line in merge_logs([blob for _, blob in logs]): msg = to_unicode(line.msg) if as_unicode else line.msg yield line.utc_tstamp, msg def _load_log_legacy(self, log_location, logtype, as_unicode=True): # this function is used to load pre-mflog style logfiles global filecache log_info = json.loads(log_location) location = log_info["location"] ds_type = log_info["ds_type"] attempt = log_info["attempt"] if filecache is None: filecache = FileCache() ret_val = filecache.get_log_legacy( ds_type, location, logtype, int(attempt), *self.path_components ) if as_unicode and (ret_val is not None): return ret_val.decode(encoding="utf8") else: return ret_val def _legacy_log_size(self, log_location, logtype): global filecache log_info = json.loads(log_location) location = log_info["location"] ds_type = log_info["ds_type"] attempt = log_info["attempt"] if filecache is None: filecache = FileCache() return filecache.get_legacy_log_size( ds_type, location, logtype, int(attempt), *self.path_components ) def _log_size(self, stream, meta_dict): global filecache ds_type = meta_dict.get("ds-type") ds_root = meta_dict.get("ds-root") if ds_type is None or ds_root is None: return 0 if filecache is None: filecache = FileCache() attempt = self.current_attempt return filecache.get_log_size( ds_type, ds_root, stream, attempt, *self.path_components ) def __iter__(self) -> Iterator[DataArtifact]: """ Iterate over all children DataArtifact of this Task Yields ------ DataArtifact A DataArtifact in this Step """ for d in super(Task, self).__iter__(): yield d def __getitem__(self, name: str) -> DataArtifact: """ Returns the DataArtifact object with the artifact name 'name' Parameters ---------- name : str Data artifact name Returns ------- DataArtifact DataArtifact for this artifact name 
in this task Raises ------ KeyError If the name does not identify a valid DataArtifact object """ return super(Task, self).__getitem__(name) def __getstate__(self): return super(Task, self).__getstate__() def __setstate__(self, state): super(Task, self).__setstate__(state)
Task
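A minimal usage sketch of the client-side Task API shown above, assuming Metaflow is installed and that a run with the placeholder pathspec "HelloFlow/12/start/345" exists; the flow name, ids, and the artifact name my_var are illustrative, not part of the snippet.

from metaflow import Task

# Pathspec format is "FlowName/run_id/step_name/task_id" (placeholder values here).
task = Task("HelloFlow/12/start/345")

if task.finished:
    print("successful:", task.successful)      # properties, not method calls
    print("finished at:", task.finished_at)
    print("stdout size:", task.stdout_size, "bytes")
    print(task.stdout)
    print(task.data.my_var)                    # 'my_var' is a hypothetical artifact name
else:
    print("exception:", task.exception)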
python
huggingface__transformers
src/transformers/models/visual_bert/modeling_visual_bert.py
{ "start": 16820, "end": 17625 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->VisualBert
VisualBertPredictionHeadTransform
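A hedged sketch of what this transform does at runtime: a Linear projection, the configured activation, then LayerNorm, all preserving the (batch, seq_len, hidden_size) shape. It assumes the transformers package with a default VisualBertConfig (hidden_size 768).

import torch
from transformers import VisualBertConfig
from transformers.models.visual_bert.modeling_visual_bert import (
    VisualBertPredictionHeadTransform,
)

config = VisualBertConfig()                      # hidden_size defaults to 768
transform = VisualBertPredictionHeadTransform(config)

hidden = torch.randn(2, 5, config.hidden_size)   # (batch, seq_len, hidden_size)
out = transform(hidden)
print(out.shape)                                 # torch.Size([2, 5, 768]), shape preserved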
python
PrefectHQ__prefect
src/prefect/client/schemas/schedules.py
{ "start": 1149, "end": 3855 }
class ____(PrefectBaseModel):
    """
    A schedule formed by adding `interval` increments to an `anchor_date`. If no
    `anchor_date` is supplied, the current UTC time is used. If a timezone-naive
    datetime is provided for `anchor_date`, it is assumed to be in the schedule's
    timezone (or UTC). Even if supplied with an IANA timezone, anchor dates are
    always stored as UTC offsets, so a `timezone` can be provided to determine
    localization behaviors like DST boundary handling. If none is provided it
    will be inferred from the anchor date.

    NOTE: If the `IntervalSchedule` `anchor_date` or `timezone` is provided in a
    DST-observing timezone, then the schedule will adjust itself appropriately.
    Intervals greater than 24 hours will follow DST conventions, while intervals
    of less than 24 hours will follow UTC intervals. For example, an hourly
    schedule will fire every UTC hour, even across DST boundaries. When clocks
    are set back, this will result in two runs that *appear* to both be scheduled
    for 1am local time, even though they are an hour apart in UTC time. For
    longer intervals, like a daily schedule, the interval schedule will adjust
    for DST boundaries so that the clock-hour remains constant. This means that a
    daily schedule that always fires at 9am will observe DST and continue to fire
    at 9am in the local time zone.

    Args:
        interval (datetime.timedelta): an interval to schedule on
        anchor_date (DateTime, optional): an anchor date to schedule increments against;
            if not provided, the current timestamp will be used
        timezone (str, optional): a valid timezone string
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    interval: datetime.timedelta = Field(gt=datetime.timedelta(0))
    anchor_date: Annotated[DateTime, AfterValidator(default_anchor_date)] = Field(  # pyright: ignore[reportAssignmentType] DateTime is split into two types depending on Python version
        default_factory=lambda: now("UTC"),
        examples=["2020-01-01T00:00:00Z"],
    )
    timezone: Optional[str] = Field(default=None, examples=["America/New_York"])

    @model_validator(mode="after")
    def validate_timezone(self):
        self.timezone = default_timezone(self.timezone, self.model_dump())
        return self

    if TYPE_CHECKING:
        # The model accepts str or datetime values for `anchor_date`
        def __init__(
            self,
            /,
            interval: datetime.timedelta,
            anchor_date: Optional[Union[DateTime, datetime.datetime, str]] = None,
            timezone: Optional[str] = None,
        ) -> None: ...
IntervalSchedule
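A brief construction sketch for the schedule above, assuming Prefect is installed; it only exercises the fields shown (interval, anchor_date, timezone).

import datetime
from prefect.client.schemas.schedules import IntervalSchedule

# Fire every hour, anchored at a fixed UTC instant, localized to New York time.
schedule = IntervalSchedule(
    interval=datetime.timedelta(hours=1),
    anchor_date=datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc),
    timezone="America/New_York",
)
print(schedule.interval, schedule.timezone)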
python
pytorch__pytorch
torchgen/model.py
{ "start": 14088, "end": 41921 }
class ____: # The namespace for this operator. For example, if we have "at::add" # then the namespace would be "at". This enables ops to be registered # through the same DSL with a custom namespace. If not specified, the # default namespace would be "at". namespace: str # The function schema of the operator in question. This schema # has been parsed; see FunctionSchema for more about its structure. # (This type is quoted as we are forward referencing a type # defined later in the file. I opted for this ordering of the # classes for expository clarity.) func: FunctionSchema # Whether or not to generate mutable tensor arguments like regular # ones use_const_ref_for_mutable_tensors: bool # Whether or not to omit automatic generation of a DeviceGuard device_guard: bool # How to emit automatic generation of device check device_check: DeviceCheckType # What python module to put the function in python_module: str | None # TODO: figure out what this does category_override: str | None # If no variants are specified in native_functions.yaml, this is # assumed to be {'function'}. variants: set[Variant] # Whether or not we should skip generating registrations for # this kernel. This is a bit of a double-edged sword, as manual # registrations don't participate in codegen-based selective build! manual_kernel_registration: bool # Whether or not to skip generating TensorMethod/Functions bindings # for this kernel. Technically, this doesn't actually skip generating # the binding; instead, the binding gets generated to __dispatch_{funcname} # so you can make use of the normal binding if you need it. manual_cpp_binding: bool # The location in the YAML file were this native function entry was # defined. This is for conveniently reporting error messages! loc: Location # A list of operators that are expected to be auto-generated for this NativeFunction. # Note: This list isn't actually directly used by the codegen to generate anything. # Instead, the codegen figures out what operators to generate purely based off of # function schema, and uses the autogen declarations to error check. # We expect every NativeFunction that gets auto-generated be explicitly called out # in native_functions.yaml autogen: list[OperatorName] # If non-empty, this kernel is subject to ufunc codegen. # Sorted by ufunc_key ufunc_inner_loop: dict[UfuncKey, UfuncInnerLoop] # Whether or not this out functions is a "structured kernel". Structured # kernels are defined a little differently from normal kernels; in # particular, their shape checking logic is defined separately from # the kernel. Only out functions can be structured; other functions # delegate to the out function using the structured_delegate keyword. # Every structured kernel must have at least an out and a functional # variant. structured: bool # Whether or not this non-out function is a structured kernel, defined # in terms of the out kernel referenced by the string here. structured_delegate: OperatorName | None # Only valid for structured kernels. Specifies alternative of what # to inherit from when defining the meta class for the structured # operator. This will usually be TensorIteratorBase. This also # changes the semantics of set_output to call the parent class. structured_inherits: str | None # Structured kernels can declare elements as "precomputed". These elements # are returned by the meta function in one struct and passed to the impl # function in lieu of certain kernel arguments that these precomputed # elements supersede. 
Information about the names and types of these # precomputed elements and how they correspond to kernel arguments is stored # in this member, if applicable. precomputed: Precompute | None # Argument names whose default should be excluded from the C++ interface. # Intended for resolving overload ambiguities between signatures. cpp_no_default_args: set[str] # Note [Abstract ATen methods] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # An abstract ATen method is one whose dispatch differs between # types. These are implemented in derived types (with a # standard (throwing) definition in Type). A concrete ATen # method is one which has the same dispatch for all types; # we just implement it in the base Type. This is exposed # in Declarations.yaml via a field named 'abstract'. is_abstract: bool # Whether or not the NativeFunction contains a backend-agnostic kernel has_composite_implicit_autograd_kernel: bool has_composite_implicit_autograd_nested_tensor_kernel: bool has_composite_explicit_autograd_kernel: bool has_composite_explicit_autograd_non_functional_kernel: bool # Tags are used to describe semantic information about (groups of) operators, # That aren't easily inferable directly from the operator's schema. tags: set[str] # NB: The benefit of defining a dataclass is that we automatically get # a constructor defined for all the fields we specify. No need # to explicitly write it out. # We parse both the NativeFunction + backend-specific information about it, which it stored in a corresponding BackendIndex. @staticmethod def from_yaml( ei: dict[str, object], loc: Location, valid_tags: set[str], ignore_keys: set[DispatchKey] | None = None, ) -> tuple[NativeFunction, dict[DispatchKey, dict[OperatorName, BackendMetadata]]]: """ Parse a NativeFunction from a dictionary as directly parsed from native_functions.yaml """ e = ei.copy() funcs = e.pop("func") assert isinstance(funcs, str), f"not a str: {funcs}" # only support one level of namespace. 
E.g., aten::add namespace_helper = NamespaceHelper.from_namespaced_entity( namespaced_entity=funcs, max_level=1 ) namespace = namespace_helper.get_cpp_namespace(default="aten") func = FunctionSchema.parse(namespace_helper.entity_name) cpp_no_default_args_list = e.pop("cpp_no_default_args", []) assert isinstance(cpp_no_default_args_list, list) cpp_no_default_args = set(cpp_no_default_args_list) use_const_ref_for_mutable_tensors = e.pop( "use_const_ref_for_mutable_tensors", False ) assert isinstance(use_const_ref_for_mutable_tensors, bool) if use_const_ref_for_mutable_tensors: assert not func.arguments.out, ( "see https://github.com/pytorch/pytorch/issues/145522" ) variants_s = e.pop("variants", "function") assert isinstance(variants_s, str) variants: set[Variant] = set() for v in variants_s.split(", "): if v == "function": variants.add(Variant.function) elif v == "method": variants.add(Variant.method) else: raise AssertionError(f"illegal variant {v}") manual_kernel_registration = e.pop("manual_kernel_registration", False) assert isinstance(manual_kernel_registration, bool), ( f"not a bool: {manual_kernel_registration}" ) manual_cpp_binding = e.pop("manual_cpp_binding", False) assert isinstance(manual_cpp_binding, bool), f"not a bool: {manual_cpp_binding}" device_guard = e.pop("device_guard", True) assert isinstance(device_guard, bool), f"not a bool: {device_guard}" device_check_s = e.pop("device_check", None) assert device_check_s is None or isinstance(device_check_s, str), ( f"not a str: {device_check_s}" ) assert ( device_check_s is None or device_check_s in DeviceCheckType.__members__ ), f"illegal device_check: {device_check_s}" device_check: DeviceCheckType if device_check_s is None: device_check = DeviceCheckType.ExactSame else: device_check = DeviceCheckType[device_check_s] structured = e.pop("structured", False) assert isinstance(structured, bool), f"not a bool: {structured}" structured_delegate_s = e.pop("structured_delegate", None) assert structured_delegate_s is None or isinstance( structured_delegate_s, str ), f"not a str: {structured_delegate_s}" assert structured_delegate_s is None or "::" not in structured_delegate_s, ( "namespace is not supported in structured delegate," " using the same namespace as the native function" ) structured_delegate: OperatorName | None = None if structured_delegate_s is not None: structured_delegate = OperatorName.parse(structured_delegate_s) structured_inherits = e.pop("structured_inherits", None) assert structured_inherits is None or isinstance(structured_inherits, str), ( f"not a str: {structured_inherits}" ) assert structured_inherits is None or "::" not in structured_inherits, ( "namespace is not supported in structured inherits," " using the same namespace as the native function" ) python_module = e.pop("python_module", None) assert python_module is None or isinstance(python_module, str), ( f"not a str: {python_module}" ) assert python_module is None or Variant.method not in variants, ( "functions in modules cannot be methods" ) category_override = e.pop("category_override", None) assert category_override is None or isinstance(category_override, str), ( f"not a str: {category_override}" ) precomputed_dict = e.pop("precomputed", None) assert precomputed_dict is None or structured is True precomputed = Precompute.parse(precomputed_dict) if precomputed_dict else None tags_inp = e.pop("tags", []) if isinstance(tags_inp, str): tags_inp = [tags_inp] assert isinstance(tags_inp, list) # All aten ops generated by torchgen receive the pt2_compliant 
tag. if namespace == "aten" and "pt2_compliant_tag" in valid_tags: tags_inp.append("pt2_compliant_tag") tags: set[str] = set() for t in tags_inp: assert len(valid_tags) > 0 # TODO: verify that the tag is valid and has an entry in tags.yaml if t in valid_tags: tags.add(t) else: raise AssertionError(f"illegal tag {t}") from torchgen.api import cpp raw_dispatch = e.pop("dispatch", None) assert raw_dispatch is None or isinstance(raw_dispatch, dict), e dispatch: dict[DispatchKey, BackendMetadata] = {} num_dispatch_keys: int = 0 if raw_dispatch is not None: assert not manual_kernel_registration, ( "cannot specify both manual_kernel_registration and dispatch; with " "manual registration, dispatch has no effect!" ) redundant_composite_implicit_autograd = False for ks, v in raw_dispatch.items(): if ks == "__line__": continue # not worth tracking line numbers for dispatch entries assert isinstance(ks, str), ( f"illegal dispatch key '{ks}' in {raw_dispatch}" ) assert isinstance(v, str), ( f"illegal dispatch value '{v}' in {raw_dispatch}" ) for k in ks.split(","): dispatch_key = DispatchKey.parse(k.strip()) num_dispatch_keys += 1 if ignore_keys and dispatch_key in ignore_keys: continue assert dispatch_key in dispatch_keys, ( f"Dispatch key {dispatch_key} of kernel {v} " "is not a supported dispatch key." ) # We only allow at most 3 levels of namespace for kernels. # We will append "native" to a custom kernel namespace. namespace_helper = NamespaceHelper.from_namespaced_entity( v, max_level=3 ) kernel_namespace = namespace_helper.get_cpp_namespace(default="at") # Why is 'structured' included? External backends (e.g. # XLA) opt into which ops are structured independently # of which in-tree ops are structured dispatch[dispatch_key] = BackendMetadata( kernel=namespace_helper.entity_name, structured=structured and is_structured_dispatch_key(dispatch_key), cpp_namespace=(kernel_namespace + "::native"), ) if ( dispatch_key is DispatchKey.CompositeImplicitAutograd and v == cpp.name(func) ): redundant_composite_implicit_autograd = True # We count the number of dispatch keys which have not been ignored to prevent a dispatch table # in which all backend keys are ignored but necessarily kept, remaining compositeimplicit, # from being treated as redundant. assert not ( num_dispatch_keys == 1 and redundant_composite_implicit_autograd ), ( "unnecessary dispatch table for this function; just delete the dispatch " "key entirely" ) # if a function is a structured delegate, deleting the dispatch # table is NOT semantics preserving assert ( structured_delegate or dispatch.keys() != {DispatchKey.CompositeImplicitAutograd} or dispatch[DispatchKey.CompositeImplicitAutograd].supports_symint() or num_dispatch_keys != 1 ), ( f"unexpected name for singleton CompositeImplicitAutograd dispatch entry: expected {cpp.name(func)} " f"but got {dispatch[DispatchKey.CompositeImplicitAutograd]}. Rename your implementation to the expected " "name, then delete the dispatch table" ) elif not structured and structured_delegate is None: name = str(func.name.name) assert not ( name.startswith("new_") or name.endswith("_like") # TODO: maybe it's better to test the return or ( func.arguments.tensor_options and not func.arguments.has_tensor_arg() ) ), ( f"expected {name} to have a CompositeExplicitAutograd " "dispatch entry, but there was no dispatch table. 
Factory functions " "should not have implicit dispatch as they should not be decomposed " "for __torch_dispatch__" ) dispatch[DispatchKey.CompositeImplicitAutograd] = BackendMetadata( cpp.name(func), structured=False, cpp_namespace=DEFAULT_KERNEL_NAMESPACE ) composites_in_dispatch = [ d for d in dispatch if d == DispatchKey.CompositeExplicitAutograd or d == DispatchKey.CompositeExplicitAutogradNonFunctional or d == DispatchKey.CompositeImplicitAutograd or d == DispatchKey.CompositeImplicitAutogradNestedTensor ] assert len(composites_in_dispatch) <= 1 or ( len(composites_in_dispatch) == 2 and ( DispatchKey.CompositeExplicitAutogradNonFunctional not in composites_in_dispatch ) and ( DispatchKey.CompositeImplicitAutogradNestedTensor in composites_in_dispatch ) ), ( "cannot specify more than one of CompositeExplicitAutograd, CompositeExplicitAutogradNonFunctional, " "or CompositeImplicitAutograd on a single kernel; each " "strictly subsumes the other. If you wanted to provide an explicit autograd " "implementation, specify CompositeExplicitAutograd; otherwise specify CompositeImplicitAutograd only" ) autogen_str = e.pop("autogen", "") assert isinstance(autogen_str, str) autogen = ( [] if autogen_str == "" else [OperatorName.parse(x) for x in autogen_str.split(", ")] ) raw_ufunc_inner_loop = e.pop("ufunc_inner_loop", {}) ufunc_inner_loop = {} if isinstance(raw_ufunc_inner_loop, str): ufunc_inner_loop[UfuncKey.Generic] = UfuncInnerLoop.parse( raw_ufunc_inner_loop, UfuncKey.Generic ) elif isinstance(raw_ufunc_inner_loop, dict): for k, vo in raw_ufunc_inner_loop.items(): if k == "__line__": continue assert isinstance(k, str), f"ufunc_inner_loop key is not a str: {k}" assert isinstance(vo, str), f"ufunc_inner_loop value is not a str: {v}" ufunc_key = UfuncKey.parse(k) ufunc_inner_loop[ufunc_key] = UfuncInnerLoop.parse(vo, ufunc_key) else: raise AssertionError( f"ufunc_inner_loop not str or dict: {raw_ufunc_inner_loop}" ) # Program the BackendIndex for the implicit dispatch entry from ufunc if ufunc_inner_loop: assert structured, "ufunc must be structured" # Delay import ufunc here to avoid circular import issue # See: https://github.com/pytorch/pytorch/issues/81294 import torchgen.api.ufunc as ufunc for dispatch_key in UFUNC_DISPATCH_KEYS: assert dispatch_key not in dispatch, ( f"ufunc should not have explicit dispatch entry for {dispatch_key}" ) dispatch[dispatch_key] = BackendMetadata( kernel=ufunc.schema_kernel_name(func, dispatch_key), structured=True, cpp_namespace=DEFAULT_KERNEL_NAMESPACE, ) if structured_delegate: # Structured functions MUST have a dispatch table is_abstract = True else: is_abstract = ( dispatch.keys() != {DispatchKey.CompositeImplicitAutograd} and dispatch.keys() != {DispatchKey.CompositeImplicitAutogradNestedTensor} and dispatch.keys() != { DispatchKey.CompositeImplicitAutograd, DispatchKey.CompositeImplicitAutogradNestedTensor, } ) has_composite_implicit_autograd_kernel = ( DispatchKey.CompositeImplicitAutograd in dispatch ) has_composite_implicit_autograd_nested_tensor_kernel = ( DispatchKey.CompositeImplicitAutogradNestedTensor in dispatch ) has_composite_explicit_autograd_kernel = ( DispatchKey.CompositeExplicitAutograd in dispatch ) has_composite_explicit_autograd_non_functional_kernel = ( DispatchKey.CompositeExplicitAutogradNonFunctional in dispatch ) # We aren't going to store dispatch metadata inline in NativeFunctions; # instead it is separately indexed by backend (so other backends can # add more dispatch entries after the fact). 
Reindex the individual # metadata by OperatorName! backend_metadata = {k: {func.name: v} for k, v in dispatch.items()} # don't care if it exists or not; make it easier to use this function # with other yaml parsers that aren't setting __line__ in the dict e.pop("__line__", None) assert not e, f"leftover entries: {e}" # Asserts that we can't do in post_init, because they rely on backend-specific info if structured_delegate is not None: for key in STRUCTURED_DISPATCH_KEYS: assert key not in dispatch, ( f"if structured_delegate, then must not have {key} in dispatch dictionary " "(it is delegated!)" ) return ( NativeFunction( func=func, use_const_ref_for_mutable_tensors=use_const_ref_for_mutable_tensors, variants=variants, structured=structured, structured_delegate=structured_delegate, structured_inherits=structured_inherits, precomputed=precomputed, autogen=autogen, ufunc_inner_loop=ufunc_inner_loop, manual_kernel_registration=manual_kernel_registration, manual_cpp_binding=manual_cpp_binding, python_module=python_module, category_override=category_override, device_guard=device_guard, device_check=device_check, loc=loc, cpp_no_default_args=cpp_no_default_args, is_abstract=is_abstract, has_composite_implicit_autograd_kernel=has_composite_implicit_autograd_kernel, has_composite_implicit_autograd_nested_tensor_kernel=has_composite_implicit_autograd_nested_tensor_kernel, has_composite_explicit_autograd_kernel=has_composite_explicit_autograd_kernel, has_composite_explicit_autograd_non_functional_kernel=has_composite_explicit_autograd_non_functional_kernel, tags=tags, namespace=namespace, ), backend_metadata, ) def validate_unstructured(self) -> None: # TODO: probably better to accumulate these errors and report them all # at once assert not self.structured, ( "This function is structured, but there was " "no valid functional variant of it." ) assert self.structured_delegate, ( "This function delegates to another structured out function, " "but no valid function was found (the delegate may not exist, or it has the wrong type)" ) # __post_init__ functions in dataclasses can be used to do extra # validation after construction. # # Notice that we don't do any type validation here. In fact, we # rely exclusively on mypy to check if you've done types correctly! # Validation is for nontrivial invariants that cannot be (conveniently) # encoded in the type system. def __post_init__(self) -> None: if self.func.arguments.out: assert self.variants == {Variant.function}, ( "Native functions with out arguments MUST " "be declared with only function variant; e.g., variants: function; " "otherwise you will tickle a Python argument binding bug " "(which usually manifests itself as the result variable being undefined.)" ) if self.structured: assert self.func.kind() == SchemaKind.out, ( "Put structured field on the out= " "variant of a function; did you mean structured_delegate?" ) assert self.device_guard, ( "device_guard: False is not respected by structured kernels" ) if self.structured_delegate: assert self.func.kind() != SchemaKind.out, ( "structured_delegate field not allowed " "on out= functions; did you mean structured?" 
) assert self.device_guard, ( "device_guard: False is not respected by structured kernels" ) # Technically, with the asserts above, this assert is impossible to # happen assert not (self.structured and self.structured_delegate), ( "Cannot have both structured and structured_delegate on function" ) defaulted_arguments = { a.name for a in self.func.schema_order_arguments() if a.default is not None } invalid_args = set.difference(self.cpp_no_default_args, defaulted_arguments) assert len(invalid_args) == 0, f"Invalid cpp_no_default_args: {invalid_args}" if self.structured_inherits is not None: assert self.structured, ( "structured_inherits must also imply structured: True" ) if str(self.func.name).startswith("_foreach"): assert self.device_check == DeviceCheckType.NoCheck, ( "foreach kernels fall back to slow path when tensor are on different devices, " "device_check not allowed to be enabled" ) # NB: if your function accidentally has rand/dropout/... in its name # but is not actually random, feel free to amend this to special case if ( "rand" in str(self.func.name) or ( ( "dropout" in str(self.func.name) or any( "dropout" in arg.name for arg in self.func.arguments.flat_all ) ) # Backwards of dropout is typically deterministic and "backward" not in str(self.func.name) and str(self.func.name.name) not in ["_cudnn_init_dropout_state"] ) or self.func.arguments.has_generator_arg() ): assert "nondeterministic_seeded" in self.tags, str(self.func.name) @property def has_composite_kernel(self) -> bool: return ( self.has_composite_implicit_autograd_kernel or self.has_composite_explicit_autograd_kernel or self.has_composite_explicit_autograd_non_functional_kernel ) or ( self.has_composite_implicit_autograd_kernel and self.has_composite_implicit_autograd_nested_tensor_kernel ) @property def is_view_op(self) -> bool: rets = self.func.returns is_non_mutating_view = len(rets) > 0 and any( r.annotation is not None and not r.annotation.is_write for r in rets ) # See Note [resize_ in Functionalization] for more dtails is_inplace_view = ( "inplace_view" in self.tags and str(self.func.name) != "resize_" and str(self.func.name) != "resize_as_" ) is_wildcard_view = any( inp.annotation is not None and "*" in inp.annotation.alias_set_after for inp in self.func.schema_order_arguments() ) return is_non_mutating_view or is_inplace_view or is_wildcard_view @property def view_schema_kind(self) -> ViewSchemaKind: if self.is_view_op and self.func.name.name.inplace: assert "inplace_view" in self.tags return ViewSchemaKind.aliasing_inplace if self.is_view_op: return ViewSchemaKind.aliasing else: return ViewSchemaKind.non_aliasing @property def root_name(self) -> str: return self.func.name.name.base @property def part_of_structured_group(self) -> bool: return self.structured or self.structured_delegate is not None
NativeFunction
python
getsentry__sentry
src/sentry/hybridcloud/outbox/category.py
{ "start": 8878, "end": 12575 }
class ____(IntEnum): ORGANIZATION_SCOPE = scope_categories( 0, { OutboxCategory.ORGANIZATION_MEMBER_UPDATE, OutboxCategory.MARK_INVALID_SSO, OutboxCategory.RESET_IDP_FLAGS, OutboxCategory.ORGANIZATION_UPDATE, OutboxCategory.PROJECT_UPDATE, OutboxCategory.ORGANIZATION_INTEGRATION_UPDATE, OutboxCategory.SEND_SIGNAL, OutboxCategory.ORGAUTHTOKEN_UPDATE_USED, OutboxCategory.POST_ORGANIZATION_PROVISION, OutboxCategory.DISABLE_AUTH_PROVIDER, OutboxCategory.ORGANIZATION_MAPPING_CUSTOMER_ID_UPDATE, OutboxCategory.TEAM_UPDATE, OutboxCategory.AUTH_PROVIDER_UPDATE, OutboxCategory.ORGANIZATION_MEMBER_TEAM_UPDATE, OutboxCategory.API_KEY_UPDATE, OutboxCategory.ORGANIZATION_SLUG_RESERVATION_UPDATE, OutboxCategory.ORG_AUTH_TOKEN_UPDATE, OutboxCategory.PARTNER_ACCOUNT_UPDATE, OutboxCategory.UNUSED_FOUR, OutboxCategory.ISSUE_COMMENT_UPDATE, OutboxCategory.SEND_VERCEL_INVOICE, OutboxCategory.FTC_CONSENT, }, ) USER_SCOPE = scope_categories( 1, { OutboxCategory.USER_UPDATE, OutboxCategory.API_TOKEN_UPDATE, OutboxCategory.UNUSED_ONE, OutboxCategory.UNUSED_TWO, OutboxCategory.UNUSUED_THREE, OutboxCategory.AUTH_IDENTITY_UPDATE, }, ) # Webhook scope is no longer in use WEBHOOK_SCOPE = scope_categories(2, {OutboxCategory.WEBHOOK_PROXY}) AUDIT_LOG_SCOPE = scope_categories(3, {OutboxCategory.AUDIT_LOG_EVENT}) USER_IP_SCOPE = scope_categories( 4, { OutboxCategory.USER_IP_EVENT, }, ) INTEGRATION_SCOPE = scope_categories( 5, {OutboxCategory.INTEGRATION_UPDATE, OutboxCategory.EXTERNAL_ACTOR_UPDATE}, ) APP_SCOPE = scope_categories( 6, { OutboxCategory.API_APPLICATION_UPDATE, OutboxCategory.SENTRY_APP_INSTALLATION_UPDATE, OutboxCategory.SENTRY_APP_UPDATE, OutboxCategory.SERVICE_HOOK_UPDATE, OutboxCategory.SENTRY_APP_DELETE, OutboxCategory.SENTRY_APP_INSTALLATION_DELETE, }, ) # No longer in use TEAM_SCOPE = scope_categories(7, set()) PROVISION_SCOPE = scope_categories( 8, { OutboxCategory.PROVISION_ORGANIZATION, }, ) SUBSCRIPTION_SCOPE = scope_categories(9, {OutboxCategory.SUBSCRIPTION_UPDATE}) # relocation scope is no longer in use. RELOCATION_SCOPE = scope_categories( 10, {OutboxCategory.RELOCATION_EXPORT_REQUEST, OutboxCategory.RELOCATION_EXPORT_REPLY} ) def __str__(self) -> str: return self.name @classmethod def scope_has_category(cls, shard_scope: int, category: int) -> bool: return OutboxCategory(category) in _outbox_categories_for_scope[shard_scope] @classmethod def as_choices(cls) -> Sequence[tuple[int, int]]: return [(i.value, i.value) for i in cls] @staticmethod def get_tag_name(scope: OutboxScope) -> str: if scope == OutboxScope.ORGANIZATION_SCOPE: return "organization_id" if scope == OutboxScope.USER_SCOPE: return "user_id" if scope == OutboxScope.APP_SCOPE: return "app_id" return "shard_identifier" _missing_categories = set(OutboxCategory) - _used_categories assert ( not _missing_categories ), f"OutboxCategories {_missing_categories} not registered to an OutboxScope"
OutboxScope
python
doocs__leetcode
solution/0200-0299/0283.Move Zeroes/Solution.py
{ "start": 0, "end": 211 }
class ____:
    def moveZeroes(self, nums: List[int]) -> None:
        k = 0
        for i, x in enumerate(nums):
            if x:
                nums[k], nums[i] = nums[i], nums[k]
                k += 1
Solution
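A quick illustration of the in-place two-pointer behavior above, with the masked class name filled in as Solution per the target field:

nums = [0, 1, 0, 3, 12]
Solution().moveZeroes(nums)
print(nums)  # [1, 3, 12, 0, 0]: non-zero order preserved, zeros pushed to the end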
python
falconry__falcon
tests/test_httperror.py
{ "start": 4453, "end": 4580 }
class ____:
    def on_get(self, req, resp):
        raise falcon.HTTPGone(description='Gone with the wind')
GoneResourceWithBody
python
weaviate__weaviate-python-client
weaviate/rbac/models.py
{ "start": 3801, "end": 4127 }
class ____(str, _Action, Enum):
    MANAGE = "manage_roles"  # backward compatibility, remove in a bit
    CREATE = "create_roles"
    READ = "read_roles"
    UPDATE = "update_roles"
    DELETE = "delete_roles"

    @staticmethod
    def values() -> List[str]:
        return [action.value for action in RolesAction]
RolesAction
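A short behavior sketch for the string-valued enum above, assuming the weaviate-client package (the class lives at weaviate.rbac.models):

from weaviate.rbac.models import RolesAction

print(RolesAction.READ.value)              # "read_roles"
print(RolesAction.READ == "read_roles")    # True: the str mixin makes members compare equal to their values
print(RolesAction.values())                # all five action strings defined above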
python
apache__airflow
airflow-core/tests/unit/cli/commands/test_plugins_command.py
{ "start": 1769, "end": 1886 }
class ____(AirflowPlugin):
    name = "test-plugin-cli"
    global_operator_extra_links = [AirflowNewLink()]
TestPlugin
python
google__jax
jax/_src/hijax.py
{ "start": 5299, "end": 6954 }
class ____(MutableHiType):
  has_qdd = True

  # forwarded to value
  get = core.aval_method(box_get)
  set = core.aval_method(box_set)

  # aval interface: hashability and str_short
  def __hash__(self):
    return hash(BoxTy)

  def __eq__(self, other):
    return isinstance(other, BoxTy)

  def str_short(self, short_dtypes=False, **_) -> str:  # type: ignore
    return 'BoxTy'

  # mutable interface
  def lo_ty_qdd(self, box_state):
    return [lo_ty for t in box_state.leaf_avals for lo_ty in t.lo_ty()]

  def new_from_loval(self, box_state: BoxTypeState, *lo_vals) -> Box:  # type: ignore
    lo_vals_ = iter(lo_vals)
    hi_vals = [hi_ty.raise_val(*it.islice(lo_vals_, len(hi_ty.lo_ty())))  # type: ignore
               for hi_ty in box_state.leaf_avals]
    assert next(lo_vals_, None) is None
    return Box._new(tree_unflatten(box_state.treedef, hi_vals))  # will be mutated

  def read_loval(self, box_state: BoxTypeState, box) -> list:  # type: ignore
    leaf_vals, treedef = tree_flatten(box_get(box))
    assert treedef == box_state.treedef
    return [lo_val for hi_ty, hi_val in zip(box_state.leaf_avals, leaf_vals)
            for lo_val in hi_ty.lower_val(hi_val)]  # type: ignore

  def update_from_loval(self, box_state: BoxTypeState, box, *lo_vals) -> None:  # type: ignore
    lo_vals_ = iter(lo_vals)
    hi_vals = [hi_ty.raise_val(*it.islice(lo_vals_, len(hi_ty.lo_ty())))  # type: ignore
               for hi_ty in box_state.leaf_avals]
    assert next(lo_vals_, None) is None
    box_set(box, tree_unflatten(box_state.treedef, hi_vals))

  def to_tangent_aval(self):
    return BoxTy()

  # Override isinstance checks under tracing
BoxTy
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructor12.py
{ "start": 216, "end": 499 }
class ____(Generic[T]):
    def return_from_variable(self) -> "ClassA[T]":
        value = ClassA[T]()
        reveal_type(value, expected_text="ClassA[T@ClassA]")
        return value


x = ClassA[int]()
v1 = x.return_from_variable()
reveal_type(v1, expected_text="ClassA[int]")
ClassA
python
GoogleCloudPlatform__python-docs-samples
appengine/standard/ndb/properties/snippets.py
{ "start": 1506, "end": 1629 }
class ____(ndb.Model):
    name = ndb.StringProperty()
    addresses = ndb.StructuredProperty(Address, repeated=True)
Contact
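A hypothetical write path for the model above on the legacy App Engine ndb runtime; the Address fields (type, street) are assumptions based on the classic StructuredProperty example and are not shown in this snippet.

guido = Contact(
    name="Guido",
    addresses=[
        Address(type="home", street="Spear St"),   # assumed Address fields
        Address(type="work", street="Spear St"),
    ],
)
key = guido.put()   # persists the Contact with its embedded Address values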
python
openai__openai-python
src/openai/resources/moderations.py
{ "start": 894, "end": 3835 }
class ____(SyncAPIResource): @cached_property def with_raw_response(self) -> ModerationsWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers """ return ModerationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ModerationsWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/openai/openai-python#with_streaming_response """ return ModerationsWithStreamingResponse(self) def create( self, *, input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]], model: Union[str, ModerationModel] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModerationCreateResponse: """Classifies if text and/or image inputs are potentially harmful. Learn more in the [moderation guide](https://platform.openai.com/docs/guides/moderation). Args: input: Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models. model: The content moderation model you would like to use. Learn more in [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about available models [here](https://platform.openai.com/docs/models#moderation). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._post( "/moderations", body=maybe_transform( { "input": input, "model": model, }, moderation_create_params.ModerationCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModerationCreateResponse, )
Moderations
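A hedged usage sketch of the create method above; it assumes OPENAI_API_KEY is set in the environment, and the model name is an assumption (model can also be omitted to use the API default).

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

resp = client.moderations.create(
    model="omni-moderation-latest",   # assumed model name; omit to use the default
    input="I want to hurt someone.",
)
result = resp.results[0]
print(result.flagged)
print({name: hit for name, hit in result.categories.model_dump().items() if hit})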
python
pandas-dev__pandas
asv_bench/benchmarks/join_merge.py
{ "start": 6441, "end": 7158 }
class ____:
    # outer join of non-unique
    # GH 6329
    def setup(self):
        date_index = date_range("01-Jan-2013", "23-Jan-2013", freq="min")
        daily_dates = date_index.to_period("D").to_timestamp("s", "s")
        self.fracofday = date_index.values - daily_dates.values
        self.fracofday = self.fracofday.astype("timedelta64[ns]")
        self.fracofday = self.fracofday.astype(np.float64) / 86_400_000_000_000
        self.fracofday = Series(self.fracofday, daily_dates)
        index = date_range(date_index.min(), date_index.max(), freq="D")
        self.temp = Series(1.0, index)[self.fracofday.index]

    def time_join_non_unique_equal(self):
        self.fracofday * self.temp
JoinNonUnique
python
lxml__lxml
src/lxml/html/tests/test_html5parser.py
{ "start": 1739, "end": 3096 }
class ____(unittest.TestCase):
    def call_it(self, *args, **kwargs):
        if html5lib is None:
            raise unittest.SkipTest("html5lib is not installed")
        from lxml.html.html5parser import document_fromstring
        return document_fromstring(*args, **kwargs)

    def test_basic(self):
        parser = DummyParser(doc=DummyElementTree(root='dummy root'))
        elem = self.call_it(b'dummy input', parser=parser)
        self.assertEqual(elem, 'dummy root')
        self.assertEqual(parser.parse_args, (b'dummy input',))
        self.assertEqual(parser.parse_kwargs, {'useChardet': True})

    def test_guess_charset_not_used_for_unicode(self):
        parser = DummyParser()
        elem = self.call_it(b''.decode('ascii'), parser=parser)
        self.assertEqual(parser.parse_kwargs, {})

    def test_guess_charset_arg_gets_passed_to_parser(self):
        parser = DummyParser()
        elem = self.call_it(b'', guess_charset='gc_arg', parser=parser)
        self.assertEqual(parser.parse_kwargs, {'useChardet': 'gc_arg'})

    def test_raises_type_error_on_nonstring_input(self):
        not_a_string = None
        self.assertRaises(TypeError, self.call_it, not_a_string)

    def test_integration(self):
        elem = self.call_it(XHTML_TEST_DOCUMENT)
        self.assertEqual(elem.tag, xhtml_tag('html'))
Test_document_fromstring
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 226077, "end": 228969 }
class ____(Request): """ Delete task configuration items :param task: Task ID :type task: str :param configuration: List of configuration itemss to delete :type configuration: Sequence[str] :param force: If set to True then both new and running task configuration can be deleted. Otherwise only the new task ones. Default is False :type force: bool """ _service = "tasks" _action = "delete_configuration" _version = "2.23" _schema = { "definitions": {}, "properties": { "configuration": { "description": "List of configuration itemss to delete", "items": {"type": "string"}, "type": "array", }, "force": { "description": ( "If set to True then both new and running task configuration can be deleted. Otherwise only the new" " task ones. Default is False" ), "type": "boolean", }, "task": {"description": "Task ID", "type": "string"}, }, "required": ["task", "configuration"], "type": "object", } def __init__(self, task, configuration, force=None, **kwargs): super(DeleteConfigurationRequest, self).__init__(**kwargs) self.task = task self.configuration = configuration self.force = force @schema_property("task") def task(self): return self._property_task @task.setter def task(self, value): if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("configuration") def configuration(self): return self._property_configuration @configuration.setter def configuration(self, value): if value is None: self._property_configuration = None return self.assert_isinstance(value, "configuration", dict) self.assert_isinstance( value.keys(), "configuration_keys", six.string_types, is_array=True ) self.assert_isinstance( value.values(), "configuration_values", (ConfigurationItem, dict), is_array=True, ) value = dict( (k, ConfigurationItem(**v) if isinstance(v, dict) else v) for k, v in value.items() ) self._property_configuration = value @schema_property("force") def force(self): return self._property_force @force.setter def force(self, value): if value is None: self._property_force = None return self.assert_isinstance(value, "force", (bool,)) self._property_force = value
DeleteConfigurationRequest
python
PyCQA__pylint
tests/functional/u/unused/unused_argument.py
{ "start": 2946, "end": 3238 }
class ____(Ancestor):
    def set_thing(self, thing, *, other=None):
        """Subclass does not raise unused-argument"""
        self.thing = thing


# Test that Class with both `__init__` and `__new__` don't check
# on `__new__` for unused arguments
# pylint: disable=invalid-name
Descendant
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/publish/context.py
{ "start": 743, "end": 6839 }
class ____(ConnectorContext): def __init__( self, connector: ConnectorWithModifiedFiles, pre_release: bool, spec_cache_gcs_credentials: Secret, spec_cache_bucket_name: str, metadata_service_gcs_credentials: Secret, metadata_bucket_name: str, docker_hub_username: Secret, docker_hub_password: Secret, ci_gcp_credentials: Secret, slack_webhook: str, ci_report_bucket: str, report_output_prefix: str, is_local: bool, git_branch: str, git_revision: str, diffed_branch: str, git_repo_url: str, python_registry_url: str, python_registry_check_url: str, rollout_mode: RolloutMode, gha_workflow_run_url: Optional[str] = None, dagger_logs_url: Optional[str] = None, pipeline_start_timestamp: Optional[int] = None, ci_context: Optional[str] = None, pull_request: Optional[PullRequest.PullRequest] = None, s3_build_cache_access_key_id: Optional[Secret] = None, s3_build_cache_secret_key: Optional[Secret] = None, use_local_cdk: bool = False, use_cdk_ref: Optional[str] = None, python_registry_token: Optional[Secret] = None, ci_github_access_token: Optional[Secret] = None, ) -> None: self.pre_release = pre_release self.spec_cache_bucket_name = spec_cache_bucket_name self.metadata_bucket_name = metadata_bucket_name self.spec_cache_gcs_credentials = spec_cache_gcs_credentials self.metadata_service_gcs_credentials = metadata_service_gcs_credentials self.python_registry_token = python_registry_token self.python_registry_url = python_registry_url self.python_registry_check_url = python_registry_check_url self.rollout_mode = rollout_mode pipeline_name = f"{rollout_mode.value} {connector.technical_name}" pipeline_name = pipeline_name + " (pre-release)" if pre_release else pipeline_name if (use_local_cdk or use_cdk_ref) and not self.pre_release: raise click.UsageError("Publishing with CDK overrides is only supported for pre-release publishing.") super().__init__( pipeline_name=pipeline_name, connector=connector, report_output_prefix=report_output_prefix, ci_report_bucket=ci_report_bucket, is_local=is_local, git_branch=git_branch, git_revision=git_revision, diffed_branch=diffed_branch, git_repo_url=git_repo_url, gha_workflow_run_url=gha_workflow_run_url, dagger_logs_url=dagger_logs_url, pipeline_start_timestamp=pipeline_start_timestamp, ci_context=ci_context, slack_webhook=slack_webhook, ci_gcp_credentials=ci_gcp_credentials, should_save_report=True, use_local_cdk=use_local_cdk, use_cdk_ref=use_cdk_ref, docker_hub_username=docker_hub_username, docker_hub_password=docker_hub_password, s3_build_cache_access_key_id=s3_build_cache_access_key_id, s3_build_cache_secret_key=s3_build_cache_secret_key, ci_github_access_token=ci_github_access_token, ) # Reassigning current class required instance attribute # Which are optional in the super class # for type checking self.docker_hub_username: Secret = docker_hub_username self.docker_hub_password: Secret = docker_hub_password self.ci_gcp_credentials: Secret = ci_gcp_credentials @property def pre_release_suffix(self) -> str: return self.git_revision[:10] @property def docker_image_tag(self) -> str: # get the docker image tag from the parent class metadata_tag = super().docker_image_tag if self.pre_release: return f"{metadata_tag}-dev.{self.pre_release_suffix}" else: return metadata_tag @property def should_send_slack_message(self) -> bool: should_send = super().should_send_slack_message if not should_send: return False if self.pre_release: return False return True def get_slack_channels(self) -> List[str]: if self.state in [ContextState.FAILURE, ContextState.ERROR]: return 
[PUBLISH_UPDATES_SLACK_CHANNEL, PUBLISH_FAILURE_SLACK_CHANNEL] else: return [PUBLISH_UPDATES_SLACK_CHANNEL] def create_slack_message(self) -> str: docker_hub_url = f"https://hub.docker.com/r/{self.connector.metadata['dockerRepository']}/tags" message = f"*{self.rollout_mode.value} <{docker_hub_url}|{self.docker_image}>*\n" if self.is_ci: message += f"🤖 <{self.gha_workflow_run_url}|GitHub Action workflow>\n" else: message += "🧑‍💻 Local run\n" message += f"*Connector:* {self.connector.technical_name}\n" message += f"*Version:* {self.connector.version}\n" branch_url = f"{AIRBYTE_GITHUB_REPO_URL_PREFIX}/tree/{self.git_branch}" message += f"*Branch:* <{branch_url}|{self.git_branch}>\n" commit_url = f"{AIRBYTE_GITHUB_REPO_URL_PREFIX}/commit/{self.git_revision}" message += f"*Commit:* <{commit_url}|{self.git_revision[:10]}>\n" if self.state in [ContextState.INITIALIZED, ContextState.RUNNING]: message += "🟠" if self.state is ContextState.SUCCESSFUL: message += "🟢" if self.state in [ContextState.FAILURE, ContextState.ERROR]: message += "🔴" message += f" {self.state.value['description']}\n" if self.state is ContextState.SUCCESSFUL: assert self.report is not None, "Report should be set when state is successful" message += f"⏲️ Run duration: {format_duration(self.report.run_duration)}\n" if self.state is ContextState.FAILURE: message += "\ncc. <!subteam^S08SQDL2RS9>" return message
PublishConnectorContext
python
tiangolo__fastapi
fastapi/params.py
{ "start": 24596, "end": 27724 }
class ____(Form): # type: ignore[misc] def __init__( self, default: Any = Undefined, *, default_factory: Union[Callable[[], Any], None] = _Unset, annotation: Optional[Any] = None, media_type: str = "multipart/form-data", alias: Optional[str] = None, alias_priority: Union[int, None] = _Unset, # TODO: update when deprecating Pydantic v1, import these types # validation_alias: str | AliasPath | AliasChoices | None validation_alias: Union[str, None] = None, serialization_alias: Union[str, None] = None, title: Optional[str] = None, description: Optional[str] = None, gt: Optional[float] = None, ge: Optional[float] = None, lt: Optional[float] = None, le: Optional[float] = None, min_length: Optional[int] = None, max_length: Optional[int] = None, pattern: Optional[str] = None, regex: Annotated[ Optional[str], deprecated( "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." ), ] = None, discriminator: Union[str, None] = None, strict: Union[bool, None] = _Unset, multiple_of: Union[float, None] = _Unset, allow_inf_nan: Union[bool, None] = _Unset, max_digits: Union[int, None] = _Unset, decimal_places: Union[int, None] = _Unset, examples: Optional[List[Any]] = None, example: Annotated[ Optional[Any], deprecated( "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " "although still supported. Use examples instead." ), ] = _Unset, openapi_examples: Optional[Dict[str, Example]] = None, deprecated: Union[deprecated, str, bool, None] = None, include_in_schema: bool = True, json_schema_extra: Union[Dict[str, Any], None] = None, **extra: Any, ): super().__init__( default=default, default_factory=default_factory, annotation=annotation, media_type=media_type, alias=alias, alias_priority=alias_priority, validation_alias=validation_alias, serialization_alias=serialization_alias, title=title, description=description, gt=gt, ge=ge, lt=lt, le=le, min_length=min_length, max_length=max_length, pattern=pattern, regex=regex, discriminator=discriminator, strict=strict, multiple_of=multiple_of, allow_inf_nan=allow_inf_nan, max_digits=max_digits, decimal_places=decimal_places, deprecated=deprecated, example=example, examples=examples, openapi_examples=openapi_examples, include_in_schema=include_in_schema, json_schema_extra=json_schema_extra, **extra, ) @dataclass(frozen=True)
File
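A short sketch of how this parameter class is typically used in an endpoint; the route path and field names are illustrative.

from typing import Annotated

from fastapi import FastAPI, File, UploadFile

app = FastAPI()


@app.post("/files/")
async def create_file(data: Annotated[bytes, File()], upload: UploadFile):
    # `data` arrives as raw bytes from a multipart/form-data field;
    # `upload` is a spooled file with filename and content-type metadata.
    return {"data_len": len(data), "filename": upload.filename}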
python
getsentry__sentry
tests/sentry/toolbar/views/test_login_success_view.py
{ "start": 185, "end": 1394 }
class ____(APITestCase):
    view_name = "sentry-toolbar-login-success"

    def setUp(self) -> None:
        super().setUp()
        self.url = reverse(self.view_name, args=(self.organization.slug, self.project.slug))
        # Note no login

    def test_get_requires_auth(self) -> None:
        """Unauthenticated requests should redirect to /auth/login."""
        res = self.client.get(self.url)
        assert res.status_code == 302
        assert reverse("sentry-login") in res["Location"]

    def test_get(self) -> None:
        self.login_as(self.user)
        res = self.client.get(self.url)
        assert res.status_code == 200
        self.assertTemplateUsed(res, TEMPLATE)

    @override_settings(CSP_REPORT_ONLY=False)
    def test_csp_script_src_nonce(self) -> None:
        self.login_as(self.user)
        res = self.client.get(self.url)
        assert _has_nonce(res)


def _has_nonce(response):
    content = response.content.decode("utf-8")
    # Middleware automatically injects the `nonce` attribute onto our <script>
    # tag; so if that attribute is there then we can assume the nonce header and
    # value are set correctly.
    return "<script nonce=" in content
LoginSuccessViewTest
python
sympy__sympy
sympy/matrices/exceptions.py
{ "start": 91, "end": 174 }
class ____(ValueError, MatrixError):
    """Wrong matrix shape"""
    pass
ShapeError
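A tiny illustration of when SymPy raises this exception, assuming a standard SymPy installation:

from sympy import Matrix
from sympy.matrices.exceptions import ShapeError

try:
    Matrix([[1, 2]]) + Matrix([[1], [2]])   # 1x2 + 2x1: incompatible shapes
except ShapeError as exc:
    print("caught:", exc)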
python
scrapy__scrapy
scrapy/pipelines/files.py
{ "start": 14283, "end": 25796 }
class ____(MediaPipeline): """Abstract pipeline that implement the file downloading This pipeline tries to minimize network transfers and file processing, doing stat of the files and determining if file is new, up-to-date or expired. ``new`` files are those that pipeline never processed and needs to be downloaded from supplier site the first time. ``uptodate`` files are the ones that the pipeline processed and are still valid files. ``expired`` files are those that pipeline already processed but the last modification was made long time ago, so a reprocessing is recommended to refresh it in case of change. """ MEDIA_NAME: str = "file" EXPIRES: int = 90 STORE_SCHEMES: dict[str, type[FilesStoreProtocol]] = { "": FSFilesStore, "file": FSFilesStore, "s3": S3FilesStore, "gs": GCSFilesStore, "ftp": FTPFilesStore, } DEFAULT_FILES_URLS_FIELD: str = "file_urls" DEFAULT_FILES_RESULT_FIELD: str = "files" def __init__( self, store_uri: str | PathLike[str], download_func: Callable[[Request, Spider], Response] | None = None, *, crawler: Crawler, ): if not (store_uri and (store_uri := _to_string(store_uri))): from scrapy.pipelines.images import ImagesPipeline # noqa: PLC0415 setting_name = ( "IMAGES_STORE" if isinstance(self, ImagesPipeline) else "FILES_STORE" ) raise NotConfigured( f"{setting_name} setting must be set to a valid path (not empty) " f"to enable {self.__class__.__name__}." ) settings = crawler.settings cls_name = "FilesPipeline" self.store: FilesStoreProtocol = self._get_store(store_uri) resolve = functools.partial( self._key_for_pipe, base_class_name=cls_name, settings=settings ) self.expires: int = settings.getint(resolve("FILES_EXPIRES"), self.EXPIRES) if not hasattr(self, "FILES_URLS_FIELD"): self.FILES_URLS_FIELD = self.DEFAULT_FILES_URLS_FIELD if not hasattr(self, "FILES_RESULT_FIELD"): self.FILES_RESULT_FIELD = self.DEFAULT_FILES_RESULT_FIELD self.files_urls_field: str = settings.get( resolve("FILES_URLS_FIELD"), self.FILES_URLS_FIELD ) self.files_result_field: str = settings.get( resolve("FILES_RESULT_FIELD"), self.FILES_RESULT_FIELD ) super().__init__(download_func=download_func, crawler=crawler) @classmethod def from_crawler(cls, crawler: Crawler) -> Self: settings = crawler.settings cls._update_stores(settings) store_uri = settings["FILES_STORE"] return cls(store_uri, crawler=crawler) @classmethod def _update_stores(cls, settings: BaseSettings) -> None: s3store: type[S3FilesStore] = cast( "type[S3FilesStore]", cls.STORE_SCHEMES["s3"] ) s3store.AWS_ACCESS_KEY_ID = settings["AWS_ACCESS_KEY_ID"] s3store.AWS_SECRET_ACCESS_KEY = settings["AWS_SECRET_ACCESS_KEY"] s3store.AWS_SESSION_TOKEN = settings["AWS_SESSION_TOKEN"] s3store.AWS_ENDPOINT_URL = settings["AWS_ENDPOINT_URL"] s3store.AWS_REGION_NAME = settings["AWS_REGION_NAME"] s3store.AWS_USE_SSL = settings["AWS_USE_SSL"] s3store.AWS_VERIFY = settings["AWS_VERIFY"] s3store.POLICY = settings["FILES_STORE_S3_ACL"] gcs_store: type[GCSFilesStore] = cast( "type[GCSFilesStore]", cls.STORE_SCHEMES["gs"] ) gcs_store.GCS_PROJECT_ID = settings["GCS_PROJECT_ID"] gcs_store.POLICY = settings["FILES_STORE_GCS_ACL"] or None ftp_store: type[FTPFilesStore] = cast( "type[FTPFilesStore]", cls.STORE_SCHEMES["ftp"] ) ftp_store.FTP_USERNAME = settings["FTP_USER"] ftp_store.FTP_PASSWORD = settings["FTP_PASSWORD"] ftp_store.USE_ACTIVE_MODE = settings.getbool("FEED_STORAGE_FTP_ACTIVE") def _get_store(self, uri: str) -> FilesStoreProtocol: # to support win32 paths like: C:\\some\dir scheme = "file" if Path(uri).is_absolute() else urlparse(uri).scheme store_cls 
= self.STORE_SCHEMES[scheme] return store_cls(uri) def media_to_download( self, request: Request, info: MediaPipeline.SpiderInfo, *, item: Any = None ) -> Deferred[FileInfo | None] | None: def _onsuccess(result: StatInfo) -> FileInfo | None: if not result: return None # returning None force download last_modified = result.get("last_modified", None) if not last_modified: return None # returning None force download age_seconds = time.time() - last_modified age_days = age_seconds / 60 / 60 / 24 if age_days > self.expires: return None # returning None force download referer = referer_str(request) logger.debug( "File (uptodate): Downloaded %(medianame)s from %(request)s " "referred in <%(referer)s>", {"medianame": self.MEDIA_NAME, "request": request, "referer": referer}, extra={"spider": info.spider}, ) self.inc_stats("uptodate") checksum = result.get("checksum", None) return { "url": request.url, "path": path, "checksum": checksum, "status": "uptodate", } path = self.file_path(request, info=info, item=item) # maybeDeferred() overloads don't seem to support a Union[_T, Deferred[_T]] return type dfd: Deferred[StatInfo] = maybeDeferred(self.store.stat_file, path, info) # type: ignore[call-overload] dfd2: Deferred[FileInfo | None] = dfd.addCallback(_onsuccess) dfd2.addErrback(lambda _: None) dfd2.addErrback( lambda f: logger.error( self.__class__.__name__ + ".store.stat_file", exc_info=failure_to_exc_info(f), extra={"spider": info.spider}, ) ) return dfd2 def media_failed( self, failure: Failure, request: Request, info: MediaPipeline.SpiderInfo ) -> NoReturn: if not isinstance(failure.value, IgnoreRequest): referer = referer_str(request) logger.warning( "File (unknown-error): Error downloading %(medianame)s from " "%(request)s referred in <%(referer)s>: %(exception)s", { "medianame": self.MEDIA_NAME, "request": request, "referer": referer, "exception": failure.value, }, extra={"spider": info.spider}, ) raise FileException def media_downloaded( self, response: Response, request: Request, info: MediaPipeline.SpiderInfo, *, item: Any = None, ) -> FileInfo: referer = referer_str(request) if response.status != 200: logger.warning( "File (code: %(status)s): Error downloading file from " "%(request)s referred in <%(referer)s>", {"status": response.status, "request": request, "referer": referer}, extra={"spider": info.spider}, ) raise FileException("download-error") if not response.body: logger.warning( "File (empty-content): Empty file from %(request)s referred " "in <%(referer)s>: no-content", {"request": request, "referer": referer}, extra={"spider": info.spider}, ) raise FileException("empty-content") status = "cached" if "cached" in response.flags else "downloaded" logger.debug( "File (%(status)s): Downloaded file from %(request)s referred in " "<%(referer)s>", {"status": status, "request": request, "referer": referer}, extra={"spider": info.spider}, ) self.inc_stats(status) try: path = self.file_path(request, response=response, info=info, item=item) checksum = self.file_downloaded(response, request, info, item=item) except FileException as exc: logger.warning( "File (error): Error processing file from %(request)s " "referred in <%(referer)s>: %(errormsg)s", {"request": request, "referer": referer, "errormsg": str(exc)}, extra={"spider": info.spider}, exc_info=True, ) raise except Exception as exc: logger.error( "File (unknown-error): Error processing file from %(request)s " "referred in <%(referer)s>", {"request": request, "referer": referer}, exc_info=True, extra={"spider": info.spider}, ) raise 
FileException(str(exc)) return { "url": request.url, "path": path, "checksum": checksum, "status": status, } def inc_stats(self, status: str) -> None: assert self.crawler.stats self.crawler.stats.inc_value("file_count") self.crawler.stats.inc_value(f"file_status_count/{status}") # Overridable Interface def get_media_requests( self, item: Any, info: MediaPipeline.SpiderInfo ) -> list[Request]: urls = ItemAdapter(item).get(self.files_urls_field, []) if not isinstance(urls, list): raise TypeError( f"{self.files_urls_field} must be a list of URLs, got {type(urls).__name__}. " ) return [Request(u, callback=NO_CALLBACK) for u in urls] def file_downloaded( self, response: Response, request: Request, info: MediaPipeline.SpiderInfo, *, item: Any = None, ) -> str: path = self.file_path(request, response=response, info=info, item=item) buf = BytesIO(response.body) checksum = _md5sum(buf) buf.seek(0) self.store.persist_file(path, buf, info) return checksum def item_completed( self, results: list[FileInfoOrError], item: Any, info: MediaPipeline.SpiderInfo ) -> Any: with suppress(KeyError): ItemAdapter(item)[self.files_result_field] = [x for ok, x in results if ok] return item def file_path( self, request: Request, response: Response | None = None, info: MediaPipeline.SpiderInfo | None = None, *, item: Any = None, ) -> str: media_guid = hashlib.sha1(to_bytes(request.url)).hexdigest() # noqa: S324 media_ext = Path(request.url).suffix # Handles empty and wild extensions by trying to guess the # mime type then extension or default to empty string otherwise if media_ext not in mimetypes.types_map: media_ext = "" media_type = mimetypes.guess_type(request.url)[0] if media_type: media_ext = cast("str", mimetypes.guess_extension(media_type)) return f"full/{media_guid}{media_ext}"
FilesPipeline
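A minimal usage sketch for the FilesPipeline entry above, assuming a standard Scrapy project. The store path, spider name, and CSS selector are hypothetical; the setting names and the file_urls/files item fields are the defaults referenced in the class itself.

# settings.py: enable the pipeline and point FILES_STORE at one of the STORE_SCHEMES above.
ITEM_PIPELINES = {"scrapy.pipelines.files.FilesPipeline": 1}
FILES_STORE = "/tmp/scrapy-files"      # hypothetical local path; "s3://bucket/files" or "gs://..." also resolve
FILES_EXPIRES = 90                     # days before a stored file counts as expired (matches EXPIRES above)

# spider.py: any yielded item carrying the default "file_urls" field triggers downloads,
# and the results are attached under "files" once item_completed() runs.
import scrapy

class PdfSpider(scrapy.Spider):
    name = "pdf-example"
    start_urls = ["https://example.com/reports"]

    def parse(self, response):
        hrefs = response.css("a.report::attr(href)").getall()
        yield {"file_urls": [response.urljoin(h) for h in hrefs]}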
python
google__pytype
pytype/tests/test_attributes1.py
{ "start": 151, "end": 8951 }
class ____(test_base.BaseTest): """Tests for strict attribute checking on None.""" def test_module_constant(self): errors = self.CheckWithErrors(""" x = None def f(): return x.upper() # attribute-error[e] """) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_class_constant(self): errors = self.CheckWithErrors(""" class Foo: x = None def f(self): return self.x.upper() # attribute-error[e] """) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_class_constant_error(self): errors = self.CheckWithErrors(""" x = None class Foo: x = x.upper() # attribute-error[e] """) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_multiple_paths(self): errors = self.CheckWithErrors(""" x = None def f(): z = None if __random__ else x y = z return y.upper() # attribute-error[e] """) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_late_initialization(self): ty, _ = self.InferWithErrors(""" class Foo: def __init__(self): self.x = None def f(self): return self.x.upper() # attribute-error def set_x(self): self.x = "" """) self.assertTypesMatchPytd( ty, """ from typing import Any, Optional class Foo: x = ... # type: Optional[str] def __init__(self) -> None: ... def f(self) -> Any: ... def set_x(self) -> None: ... """, ) def test_pyi_constant(self): self.options.tweak(strict_none_binding=False) with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ x = ... # type: None """, ) self.Check( """ import foo def f(): return foo.x.upper() """, pythonpath=[d.path], ) def test_pyi_attribute(self): self.options.tweak(strict_none_binding=False) with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ class Foo: x = ... # type: None """, ) self.Check( """ import foo def f(): return foo.Foo.x.upper() """, pythonpath=[d.path], ) def test_return_value(self): errors = self.CheckWithErrors(""" def f(): pass def g(): return f().upper() # attribute-error[e] """) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_method_return_value(self): errors = self.CheckWithErrors(""" class Foo: def f(self): pass def g(): return Foo().f().upper() # attribute-error[e] """) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_pyi_return_value(self): with test_utils.Tempdir() as d: d.create_file("foo.pyi", "def f() -> None: ...") errors = self.CheckWithErrors( """ import foo def g(): return foo.f().upper() # attribute-error[e] """, pythonpath=[d.path], ) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_pass_through_none(self): errors = self.CheckWithErrors(""" def f(x): return x def g(): return f(None).upper() # attribute-error[e] """) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_shadowed_local_origin(self): self.options.tweak(strict_none_binding=False) self.Check(""" x = None def f(): y = None y = "hello" return x if __random__ else y def g(): return f().upper() """) @test_base.skip("has_strict_none_origins can't tell if an origin is blocked.") def test_blocked_local_origin(self): self.Check(""" x = None def f(): v = __random__ if v: y = None return x if v else y def g(): return f().upper() """) def test_return_constant(self): self.options.tweak(strict_none_binding=False) self.Check(""" x = None def f(): return x def g(): return f().upper() """) def test_unpacked_none(self): errors = self.CheckWithErrors(""" _, a = 42, None b = a.upper() # attribute-error[e] """) self.assertErrorRegexes(errors, {"e": r"upper.*None"}) def test_function_default(self): errors = self.CheckWithErrors(""" class Foo: def __init__(self, v=None): 
v.upper() # attribute-error[e] def f(): Foo() """) self.assertErrorRegexes(errors, {"e": r"upper.*None.*traceback.*line 5"}) def test_keep_none_return(self): ty = self.Infer(""" def f(): pass """) self.assertTypesMatchPytd( ty, """ def f() -> None: ... """, ) def test_keep_none_yield(self): ty = self.Infer(""" def f(): yield None """) self.assertTypesMatchPytd( ty, """ from typing import Generator, Any def f() -> Generator[None, Any, None]: ... """, ) def test_keep_contained_none_return(self): ty = self.Infer(""" def f(): return [None] """) self.assertTypesMatchPytd( ty, """ from typing import List def f() -> List[None]: ... """, ) def test_discard_none_return(self): self.options.tweak(strict_none_binding=False) ty = self.Infer(""" x = None def f(): return x """) self.assertTypesMatchPytd( ty, """ from typing import Any x = ... # type: None def f() -> Any: ... """, ) def test_discard_none_yield(self): self.options.tweak(strict_none_binding=False) ty = self.Infer(""" x = None def f(): yield x """) self.assertTypesMatchPytd( ty, """ from typing import Any, Generator x = ... # type: None def f() -> Generator[Any, Any, None]: ... """, ) def test_discard_contained_none_return(self): ty = self.Infer(""" x = None def f(): return [x] """) self.assertTypesMatchPytd( ty, """ x: None def f() -> list[None]: ... """, ) def test_discard_attribute_none_return(self): self.options.tweak(strict_none_binding=False) ty = self.Infer(""" class Foo: x = None def f(): return Foo.x """) self.assertTypesMatchPytd( ty, """ from typing import Any class Foo: x = ... # type: None def f() -> Any: ... """, ) def test_getitem(self): errors = self.CheckWithErrors(""" def f(): x = None return x[0] # unsupported-operands[e] """) self.assertErrorRegexes(errors, {"e": r"item retrieval.*None.*int"}) def test_ignore_getitem(self): self.CheckWithErrors(""" x = None def f(): return x[0] # unsupported-operands """) def test_ignore_iter(self): self.CheckWithErrors(""" x = None def f(): return [y for y in x] # attribute-error """) def test_contains(self): errors = self.CheckWithErrors(""" def f(): x = None return 42 in x # unsupported-operands[e] """) self.assertErrorRegexes(errors, {"e": r"'in'.*None.*int"}) def test_ignore_contains(self): self.CheckWithErrors(""" x = None def f(): return 42 in x # unsupported-operands """) def test_property(self): self.Check(""" class Foo: def __init__(self): self._dofoo = __random__ @property def foo(self): return "hello" if self._dofoo else None foo = Foo() if foo.foo: foo.foo.upper() """) def test_isinstance(self): self.Check(""" class Foo: def f(self): instance = None if __random__ else {} if instance is not None: self.g(instance) def g(self, instance): if isinstance(instance, str): instance.upper() # line 10 """) def test_impossible_return_type(self): self.Check(""" from typing import Dict def f(): d = None # type: Dict[str, str] instance = d.get("hello") return instance if instance else "world" def g(): return f().upper() """) def test_no_return(self): self.Check(""" def f(): text_value = "hello" if __random__ else None if not text_value: missing_value() return text_value.strip() def missing_value(): raise ValueError() """)
TestStrictNone
python
django__django
tests/migrations/migrations_test_apps/unmigrated_app/models.py
{ "start": 31, "end": 243 }
class ____(models.Model): silly_field = models.BooleanField(default=False) silly_tribble = models.ForeignKey("migrations.Tribble", models.CASCADE) is_trouble = models.BooleanField(default=True)
SillyModel
python
Textualize__textual
src/textual/css/query.py
{ "start": 1054, "end": 1135 }
class ____(QueryError): """Query did not parse correctly."""
InvalidQueryFormat
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 566129, "end": 567704 }
class ____(sgqlc.types.Type): """A protection rule.""" __schema__ = github_schema __field_names__ = ("database_id", "reviewers", "timeout", "type") database_id = sgqlc.types.Field(Int, graphql_name="databaseId") """Identifies the primary key from the database.""" reviewers = sgqlc.types.Field( sgqlc.types.non_null("DeploymentReviewerConnection"), graphql_name="reviewers", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """The teams or users that can review the deployment Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """ timeout = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="timeout") """The timeout in minutes for this protection rule.""" type = sgqlc.types.Field(sgqlc.types.non_null(DeploymentProtectionRuleType), graphql_name="type") """The type of protection rule."""
DeploymentProtectionRule
python
mlflow__mlflow
mlflow/server/graphql/autogenerated_graphql_schema.py
{ "start": 7692, "end": 7805 }
class ____(graphene.InputObjectType): experiment_ids = graphene.List(graphene.String)
MlflowSearchDatasetsInput
python
ray-project__ray
python/ray/tune/stopper/stopper.py
{ "start": 97, "end": 1723 }
class ____(abc.ABC): """Base class for implementing a Tune experiment stopper. Allows users to implement experiment-level stopping via ``stop_all``. By default, this class does not stop any trials. Subclasses need to implement ``__call__`` and ``stop_all``. Examples: >>> import time >>> from ray import tune >>> from ray.tune import Stopper >>> >>> class TimeStopper(Stopper): ... def __init__(self): ... self._start = time.time() ... self._deadline = 2 # Stop all trials after 2 seconds ... ... def __call__(self, trial_id, result): ... return False ... ... def stop_all(self): ... return time.time() - self._start > self._deadline ... >>> def train_fn(config): ... for i in range(100): ... time.sleep(1) ... tune.report({"iter": i}) ... >>> tuner = tune.Tuner( ... train_fn, ... tune_config=tune.TuneConfig(num_samples=2), ... run_config=tune.RunConfig(stop=TimeStopper()), ... ) >>> print("[ignore]"); result_grid = tuner.fit() # doctest: +ELLIPSIS [ignore]... """ def __call__(self, trial_id: str, result: Dict[str, Any]) -> bool: """Returns true if the trial should be terminated given the result.""" raise NotImplementedError def stop_all(self) -> bool: """Returns true if the experiment should be terminated.""" raise NotImplementedError @PublicAPI
Stopper
python
ansible__ansible
test/integration/targets/ansible-doc/broken-docs/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py
{ "start": 662, "end": 881 }
class ____(BaseInventoryPlugin, Cacheable): NAME = 'testns.content_adj.statichost' def verify_file(self, path): pass def parse(self, inventory, loader, path, cache=None): pass
InventoryModule
python
PrefectHQ__prefect
src/integrations/prefect-aws/prefect_aws/observers/ecs.py
{ "start": 3050, "end": 4628 }
class ____: def __init__(self): self.ecs_client: "ECSClient | None" = None self._cache: LRUCache[str, dict[str, str]] = LRUCache(maxsize=100) async def read_tags(self, cluster_arn: str, task_arn: str) -> dict[str, str]: if not self.ecs_client: raise RuntimeError("ECS client not initialized for EcsTaskTagsReader") if task_arn in self._cache: return self._cache[task_arn] try: response = await self.ecs_client.describe_tasks( cluster=cluster_arn, tasks=[task_arn], include=["TAGS"], ) except Exception as e: print(f"Error reading tags for task {task_arn}: {e}") return {} if not (tasks := response.get("tasks", [])): return {} if len(tasks) == 0: return {} tags = { tag["key"]: tag["value"] for tag in tasks[0].get("tags", []) if "key" in tag and "value" in tag } self._cache[task_arn] = tags return tags async def __aenter__(self): self.ecs_client = ( await aiobotocore.session.get_session().create_client("ecs").__aenter__() ) return self async def __aexit__(self, *args: Any) -> None: if self.ecs_client: await self.ecs_client.__aexit__(*args) SQS_MEMORY = 10 SQS_CONSECUTIVE_FAILURES = 3 SQS_BACKOFF = 1 SQS_MAX_BACKOFF_ATTEMPTS = 5 OBSERVER_RESTART_BASE_DELAY = 30 OBSERVER_MAX_RESTART_ATTEMPTS = 5
EcsTaskTagsReader
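A hedged usage sketch for the EcsTaskTagsReader entry above: the class is an async context manager whose read_tags() call wraps ecs:DescribeTasks. The ARNs are placeholders and real AWS credentials would be needed for the call to succeed.

import asyncio

from prefect_aws.observers.ecs import EcsTaskTagsReader  # module path taken from the entry above

async def main() -> None:
    cluster_arn = "arn:aws:ecs:us-east-1:123456789012:cluster/example"   # placeholder
    task_arn = "arn:aws:ecs:us-east-1:123456789012:task/example/abcdef"  # placeholder
    async with EcsTaskTagsReader() as reader:
        tags = await reader.read_tags(cluster_arn, task_arn)
        print(tags)  # {} for an untagged task; repeated calls for the same ARN hit the LRU cache

asyncio.run(main())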
python
great-expectations__great_expectations
docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py
{ "start": 784, "end": 4806 }
class ____(QueryExpectation): # </snippet> # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py docstring"> """TODO: Add a docstring here""" # </snippet> # This is the id string of the Metric(s) used by this Expectation. # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py metric_dependencies"> metric_dependencies = ("METRIC NAME GOES HERE",) # </snippet> # This is the default, baked-in SQL Query for this QueryExpectation # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py sql_query"> query: str = """ SQL QUERY GOES HERE """ # </snippet> # This is a list of parameter names that can affect whether the Expectation evaluates to True or False # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py success_keys"> success_keys = ("query",) # </snippet> domain_keys = ("batch_id", "row_condition", "condition_parser") def validate_configuration( self, configuration: Optional[ExpectationConfiguration] = None ) -> None: """ Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that necessary configuration arguments have been provided for the validation of the expectation. Args: configuration (OPTIONAL[ExpectationConfiguration]): \ An optional Expectation Configuration entry that will be used to configure the expectation Returns: None. Raises InvalidExpectationConfigurationError if the config is not validated successfully """ super().validate_configuration(configuration) configuration = configuration or self.configuration # # Check other things in configuration.kwargs and raise Exceptions if needed # try: # assert ( # ... # ), "message" # assert ( # ... # ), "message" # except AssertionError as e: # raise InvalidExpectationConfigurationError(str(e)) # This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation. # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py _validate"> def _validate( self, configuration: ExpectationConfiguration, metrics: dict, runtime_configuration: Optional[dict] = None, execution_engine: ExecutionEngine = None, ) -> Union[ExpectationValidationResult, dict]: raise NotImplementedError # </snippet> # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py examples"> examples = [] # </snippet> # This dictionary contains metadata for display in the public gallery # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py library_metadata"> library_metadata = { "tags": [], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@your_name_here", # Don't forget to add your github handle here! ], } # </snippet> if __name__ == "__main__": # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/query_expectation_template.py print_diagnostic_checklist"> ExpectQueryToMatchSomeCriteria().print_diagnostic_checklist() # </snippet>
ExpectQueryToMatchSomeCriteria
python
django-extensions__django-extensions
tests/testapp/models.py
{ "start": 9428, "end": 9583 }
class ____(models.Model): a = models.IntegerField() j_field = JSONField() class Meta: app_label = "django_extensions"
JSONFieldTestModel
python
getsentry__sentry
tests/apidocs/endpoints/organizations/test_org_index.py
{ "start": 136, "end": 577 }
class ____(APIDocsTestCase): def setUp(self) -> None: self.create_organization(owner=self.user, name="Rowdy Tiger") self.url = reverse( "sentry-api-0-organizations", ) self.login_as(user=self.user) def test_get(self) -> None: response = self.client.get(self.url) request = RequestFactory().get(self.url) self.validate_schema(request, response)
OrganizationIndexDocs
python
PrefectHQ__prefect
tests/server/models/test_flows.py
{ "start": 3750, "end": 10151 }
class ____: @pytest.fixture async def flows(self, session): flow_1 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-1") ) flow_2 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-2") ) await session.commit() return [flow_1, flow_2] async def test_read_flows(self, flows, session): read_flows = await models.flows.read_flows(session=session) assert len(read_flows) == len(flows) async def test_read_flows_applies_limit(self, flows, session): read_flows = await models.flows.read_flows(session=session, limit=1) assert len(read_flows) == 1 async def test_read_flows_applies_offset(self, flows, session): read_flows = await models.flows.read_flows(session=session, offset=1) # note this test only works right now because flows are ordered by # name by default, when the actual ordering logic is implemented # this test case will need to be modified assert len(read_flows) == 1 assert read_flows[0].name == "my-flow-2" async def test_read_flows_returns_empty_list(self, session): read_flows = await models.flows.read_flows(session=session) assert len(read_flows) == 0 async def test_read_flows_filters_by_tags(self, session): flow_1 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-1", tags=["db", "blue"]), ) flow_2 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-2", tags=["db"]) ) flow_3 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-3") ) # exact tag match result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter( tags=schemas.filters.FlowFilterTags(all_=["db", "blue"]) ), ) assert {res.id for res in result} == {flow_1.id} # subset of tags match result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter( tags=schemas.filters.FlowFilterTags(all_=["db"]) ), ) assert {res.id for res in result} == {flow_1.id, flow_2.id} # is_null_ result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter( tags=schemas.filters.FlowFilterTags(is_null_=True) ), ) assert {res.id for res in result} == {flow_3.id} result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter( tags=schemas.filters.FlowFilterTags(is_null_=False) ), ) assert {res.id for res in result} == {flow_1.id, flow_2.id} async def test_flows_filters_by_name_any(self, session): flow_1 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-1", tags=["db", "blue"]), ) flow_2 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-2", tags=["db"]) ) flow_3 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-3") ) # filter based on flow names result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter( name=schemas.filters.FlowFilterName(any_=["my-flow-1"]) ), ) assert {res.id for res in result} == {flow_1.id} result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter( name=schemas.filters.FlowFilterName(any_=["my-flow-2", "my-flow-3"]) ), ) assert {res.id for res in result} == {flow_2.id, flow_3.id} async def test_read_flows_filters_by_ids_any(self, session): flow_1 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-1", tags=["db", "blue"]), ) flow_2 = await models.flows.create_flow( session=session, 
flow=schemas.core.Flow(name="my-flow-2", tags=["db"]) ) await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow-3") ) # filter based on flow ids result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter( id=schemas.filters.FlowFilterId(any_=[flow_1.id]) ), ) assert {res.id for res in result} == {flow_1.id} result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter( id=schemas.filters.FlowFilterId(any_=[flow_1.id, flow_2.id]) ), ) assert {res.id for res in result} == {flow_1.id, flow_2.id} async def test_read_flows_filters_by_deployment_criteria( self, flow, deployment, session ): result = await models.flows.read_flows( session=session, deployment_filter=schemas.filters.DeploymentFilter( id=dict(any_=[deployment.id]) ), ) assert {res.id for res in result} == {flow.id} result = await models.flows.read_flows( session=session, flow_filter=schemas.filters.FlowFilter(id=dict(any_=[flow.id])), deployment_filter=schemas.filters.DeploymentFilter(id=dict(any_=[uuid4()])), ) assert len(result) == 0 async def test_read_flows_applies_sort(self, flows, session): read_flows = await models.flows.read_flows( session=session, sort=schemas.sorting.FlowSort.NAME_ASC ) assert read_flows[0].name == "my-flow-1" read_flows_name_desc = await models.flows.read_flows( session=session, sort=schemas.sorting.FlowSort.NAME_DESC ) assert read_flows_name_desc[0].name == "my-flow-2"
TestReadFlows
python
huggingface__transformers
src/transformers/models/markuplm/modeling_markuplm.py
{ "start": 20765, "end": 25678 }
class ____(MarkupLMPreTrainedModel): # Copied from transformers.models.clap.modeling_clap.ClapTextModel.__init__ with ClapText->MarkupLM def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = MarkupLMEmbeddings(config) self.encoder = MarkupLMEncoder(config) self.pooler = MarkupLMPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, xpath_tags_seq: Optional[torch.LongTensor] = None, xpath_subs_seq: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: r""" xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*): Tag IDs for each token in the input sequence, padded up to config.max_depth. xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*): Subscript IDs for each token in the input sequence, padded up to config.max_depth. Examples: ```python >>> from transformers import AutoProcessor, MarkupLMModel >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base") >>> model = MarkupLMModel.from_pretrained("microsoft/markuplm-base") >>> html_string = "<html> <head> <title>Page Title</title> </head> </html>" >>> encoding = processor(html_string, return_tensors="pt") >>> outputs = model(**encoding) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 4, 768] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings( input_ids=input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, position_ids=position_ids, token_type_ids=token_type_ids, 
inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring
MarkupLMModel
python
MongoEngine__mongoengine
tests/fields/test_uuid_field.py
{ "start": 112, "end": 176 }
class ____(Document): api_key = UUIDField(binary=False)
Person
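For the UUIDField entry above, a small sketch of what binary=False means in practice. Building and serializing the document should not need a database; save() would additionally require mongoengine.connect().

import uuid

person = Person(api_key=uuid.uuid4())
print(person.api_key)               # the raw uuid.UUID held on the document
print(person.to_mongo().to_dict())  # {'api_key': '...'} serialized as a string because binary=False
# person.save() needs an active connection, e.g. mongoengine.connect("testdb").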
python
realpython__materials
python-getter-setter/point.py
{ "start": 0, "end": 251 }
class ____: def __init__(self, x, y): self.x = x self.y = y def __getattr__(self, name: str): return self.__dict__[f"_{name}"] def __setattr__(self, name, value): self.__dict__[f"_{name}"] = float(value)
Point
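For the Point entry above, a short sketch of how the __getattr__/__setattr__ pair behaves: every assignment is coerced to float and stored under a leading-underscore key, and plain attribute access falls through to __getattr__.

p = Point(2, 3)
print(p.x, p.y)    # 2.0 3.0, since the constructor's values were coerced to float
p.x = "4.5"        # strings that parse as floats are accepted too
print(p.x)         # 4.5
print(p.__dict__)  # {'_x': 4.5, '_y': 3.0}, only the underscored keys exist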
python
scrapy__scrapy
scrapy/core/downloader/handlers/s3.py
{ "start": 651, "end": 3821 }
class ____: def __init__( self, settings: BaseSettings, *, crawler: Crawler, aws_access_key_id: str | None = None, aws_secret_access_key: str | None = None, aws_session_token: str | None = None, httpdownloadhandler: type[HTTP11DownloadHandler] = HTTP11DownloadHandler, **kw: Any, ): if not is_botocore_available(): raise NotConfigured("missing botocore library") if not aws_access_key_id: aws_access_key_id = settings["AWS_ACCESS_KEY_ID"] if not aws_secret_access_key: aws_secret_access_key = settings["AWS_SECRET_ACCESS_KEY"] if not aws_session_token: aws_session_token = settings["AWS_SESSION_TOKEN"] # If no credentials could be found anywhere, # consider this an anonymous connection request by default; # unless 'anon' was set explicitly (True/False). anon = kw.get("anon") if anon is None and not aws_access_key_id and not aws_secret_access_key: kw["anon"] = True self.anon = kw.get("anon") self._signer = None import botocore.auth # noqa: PLC0415 import botocore.credentials # noqa: PLC0415 kw.pop("anon", None) if kw: raise TypeError(f"Unexpected keyword arguments: {kw}") if not self.anon: assert aws_access_key_id is not None assert aws_secret_access_key is not None SignerCls = botocore.auth.AUTH_TYPE_MAPS["s3"] # botocore.auth.BaseSigner doesn't have an __init__() with args, only subclasses do self._signer = SignerCls( # type: ignore[call-arg] botocore.credentials.Credentials( aws_access_key_id, aws_secret_access_key, aws_session_token ) ) _http_handler = build_from_crawler( httpdownloadhandler, crawler, ) self._download_http = _http_handler.download_request @classmethod def from_crawler(cls, crawler: Crawler, **kwargs: Any) -> Self: return cls(crawler.settings, crawler=crawler, **kwargs) def download_request(self, request: Request, spider: Spider) -> Deferred[Response]: p = urlparse_cached(request) scheme = "https" if request.meta.get("is_secure") else "http" bucket = p.hostname path = p.path + "?" + p.query if p.query else p.path url = f"{scheme}://{bucket}.s3.amazonaws.com{path}" if self.anon: request = request.replace(url=url) else: import botocore.awsrequest # noqa: PLC0415 awsrequest = botocore.awsrequest.AWSRequest( method=request.method, url=f"{scheme}://s3.amazonaws.com/{bucket}{path}", headers=request.headers.to_unicode_dict(), data=request.body, ) assert self._signer self._signer.add_auth(awsrequest) request = request.replace(url=url, headers=awsrequest.headers.items()) return self._download_http(request, spider)
S3DownloadHandler
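A sketch of how the S3DownloadHandler entry above is typically exercised, assuming the usual Scrapy wiring in which the s3 URL scheme maps to this handler. The credentials and bucket are placeholders; leaving both AWS keys unset makes the handler fall back to anonymous requests, as in its __init__.

# settings.py: placeholder credentials picked up by the handler's __init__ above.
AWS_ACCESS_KEY_ID = "AKIAEXAMPLE"
AWS_SECRET_ACCESS_KEY = "example-secret"

# spider.py: s3:// requests are rewritten to <bucket>.s3.amazonaws.com and signed before download.
import scrapy

class S3ObjectSpider(scrapy.Spider):
    name = "s3-object-example"
    start_urls = ["s3://my-bucket/exports/data.json"]  # hypothetical bucket and key

    def parse(self, response):
        yield {"url": response.url, "size": len(response.body)}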
python
apache__airflow
airflow-core/tests/unit/utils/test_memray_utils.py
{ "start": 1064, "end": 6182 }
class ____: """Test suite for enable_memray_trace decorator functionality.""" def setup_method(self): self.mock_function = Mock(return_value="test_result") self.mock_function.__name__ = "mock_function" # Set up memray module mock self.mock_memray_module = MagicMock() self.mock_tracker = MagicMock() self.mock_memray_module.Tracker.return_value = self.mock_tracker # Configure tracker as context manager self.mock_tracker.__enter__ = Mock(return_value=self.mock_tracker) self.mock_tracker.__exit__ = Mock(return_value=None) # Start patching memray module self.memray_patcher = patch.dict("sys.modules", {"memray": self.mock_memray_module}) self.memray_patcher.start() def teardown_method(self): self.memray_patcher.stop() @conf_vars({("profiling", "memray_trace_components"): "api,dag_processor"}) def test_memray_config(self): _memray_trace_components = conf.getenumlist( "profiling", "memray_trace_components", MemrayTraceComponents ) assert _memray_trace_components == [ MemrayTraceComponents.api, MemrayTraceComponents.dag_processor, ] def test_memray_not_used_when_default_trace_component(self): """ Verify that memray is not imported or used when memray_trace_components is default (blank). """ import builtins original_import = builtins.__import__ import_attempts = [] def track_imports(name, *args, **kwargs): import_attempts.append(name) return original_import(name, *args, **kwargs) with patch("builtins.__import__", side_effect=track_imports): decorated_function = enable_memray_trace(MemrayTraceComponents.scheduler)(self.mock_function) result = decorated_function("arg1", kwarg="value") assert "memray" not in import_attempts, "memray should not be imported when tracking is disabled" self.mock_memray_module.Tracker.assert_not_called() self.mock_tracker.__enter__.assert_not_called() self.mock_tracker.__exit__.assert_not_called() self.mock_function.assert_called_once_with("arg1", kwarg="value") assert result == "test_result" @conf_vars({("profiling", "memray_trace_components"): "scheduler,dag_processor"}) def test_memray_not_used_when_not_in_trace_component(self): """ Verify that memray is not imported or used when the component is not in memray_trace_components. """ import builtins original_import = builtins.__import__ import_attempts = [] def track_imports(name, *args, **kwargs): import_attempts.append(name) return original_import(name, *args, **kwargs) with patch("builtins.__import__", side_effect=track_imports): decorated_function = enable_memray_trace(MemrayTraceComponents.api)(self.mock_function) result = decorated_function("arg1", kwarg="value") assert "memray" not in import_attempts, "memray should not be imported when tracking is disabled" self.mock_memray_module.Tracker.assert_not_called() self.mock_tracker.__enter__.assert_not_called() self.mock_tracker.__exit__.assert_not_called() self.mock_function.assert_called_once_with("arg1", kwarg="value") assert result == "test_result" @conf_vars({("profiling", "memray_trace_components"): "scheduler,api,dag_processor"}) def test_memray_tracker_activated_when_enabled(self): """ Verify that memray.Tracker is properly used when tracking is enabled. 
""" decorated_function = enable_memray_trace(MemrayTraceComponents.scheduler)(self.mock_function) result = decorated_function("arg1", "arg2", kwarg1="value1") expected_profile_path = f"{AIRFLOW_HOME}/{MemrayTraceComponents.scheduler.value}_memory.bin" self.mock_memray_module.Tracker.assert_called_once_with(expected_profile_path) self.mock_tracker.__enter__.assert_called_once() self.mock_function.assert_called_once_with("arg1", "arg2", kwarg1="value1") self.mock_tracker.__exit__.assert_called_once() assert result == "test_result" @conf_vars({("profiling", "memray_trace_components"): "scheduler,api,dag_processor"}) def test_function_metadata_preserved_after_decoration(self): """ Verify that decorator preserves original function metadata. """ def sample_function(a: int, b: str = "default") -> str: """Sample function with metadata.""" return f"{a}-{b}" decorated_function = enable_memray_trace(MemrayTraceComponents.api)(sample_function) assert decorated_function.__name__ == "sample_function" assert decorated_function.__doc__ == "Sample function with metadata." if hasattr(sample_function, "__annotations__"): assert decorated_function.__annotations__ == sample_function.__annotations__
TestEnableMemrayTrackDecorator
python
getsentry__sentry
src/sentry/users/api/serializers/userip.py
{ "start": 544, "end": 1039 }
class ____(Serializer): def serialize( self, obj: UserIP, attrs: Mapping[str, Any], user: User | RpcUser | AnonymousUser, **kwargs: Any, ) -> UserIPSerializerResponse: return { "id": str(obj.id), "ipAddress": obj.ip_address, "countryCode": obj.country_code, "regionCode": obj.region_code, "lastSeen": obj.last_seen, "firstSeen": obj.first_seen, }
UserIPSerializer
python
realpython__materials
html-css-python/python_scripts/parse_image_links.py
{ "start": 96, "end": 452 }
class ____(HTMLParser): def handle_starttag(self, tag, attrs): for attr, val in attrs: if attr == "src" and tag == "img": print(f"Found Image: {val!r}") with open("gallery.html", mode="r", encoding="utf-8") as html_file: html_content = html_file.read() parser = ImageParser() parser.feed(html_content)
ImageParser
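For the ImageParser entry above, the same parser also works on an in-memory string, which makes the behaviour easy to check without a gallery.html file on disk:

parser = ImageParser()
parser.feed('<p>intro</p><img src="cat.png" alt="A cat"><img src="dog.jpg">')
# prints:
#   Found Image: 'cat.png'
#   Found Image: 'dog.jpg'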
python
wandb__wandb
wandb/vendor/pygments/lexers/installers.py
{ "start": 6882, "end": 9485 }
class ____(RegexLexer): """ For RPM ``.spec`` files. .. versionadded:: 1.6 """ name = 'RPMSpec' aliases = ['spec'] filenames = ['*.spec'] mimetypes = ['text/x-rpm-spec'] _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' 'post[a-z]*|trigger[a-z]*|files)') tokens = { 'root': [ (r'#.*\n', Comment), include('basic'), ], 'description': [ (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\n', Text), (r'.', Text), ], 'changelog': [ (r'\*.*\n', Generic.Subheading), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\n', Text), (r'.', Text), ], 'string': [ (r'"', String.Double, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), include('interpol'), (r'.', String.Double), ], 'basic': [ include('macro'), (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', bygroups(Generic.Heading, Punctuation, using(this))), (r'^%description', Name.Decorator, 'description'), (r'^%changelog', Name.Decorator, 'changelog'), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', Keyword), include('interpol'), (r"'.*?'", String.Single), (r'"', String.Double, 'string'), (r'.', Text), ], 'macro': [ (r'%define.*\n', Comment.Preproc), (r'%\{\!\?.*%define.*\}', Comment.Preproc), (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', bygroups(Comment.Preproc, Text)), ], 'interpol': [ (r'%\{?__[a-z_]+\}?', Name.Function), (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), (r'%\{\?\w+\}', Name.Variable), (r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), (r'%\{[a-zA-Z]\w+\}', Keyword.Constant), ] }
RPMSpecLexer
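The RPMSpecLexer entry above is wandb's vendored copy of the Pygments lexer; with the upstream pygments package the same class is driven through highlight(), roughly as in this sketch:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.installers import RPMSpecLexer  # upstream location of the vendored class above

spec_source = """\
Name: hello
Version: 1.0
Release: 1%{?dist}
Summary: Example package

%description
A tiny example spec file.
"""

print(highlight(spec_source, RPMSpecLexer(), TerminalFormatter()))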
python
ipython__ipython
IPython/core/magics/osm.py
{ "start": 927, "end": 30695 }
class ____(Magics): """Magics to interact with the underlying OS (shell-type functionality). """ cd_force_quiet = Bool(False, help="Force %cd magic to be quiet even if -q is not passed." ).tag(config=True) def __init__(self, shell=None, **kwargs): # Now define isexec in a cross platform manner. self.is_posix = False self.execre = None if os.name == 'posix': self.is_posix = True else: try: winext = os.environ['pathext'].replace(';','|').replace('.','') except KeyError: winext = 'exe|com|bat|py' try: self.execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE) except re.error: warn("Seems like your pathext environmental " "variable is malformed. Please check it to " "enable a proper handle of file extensions " "managed for your system") winext = 'exe|com|bat|py' self.execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE) # call up the chain super().__init__(shell=shell, **kwargs) def _isexec_POSIX(self, file): """ Test for executable on a POSIX system """ if os.access(file.path, os.X_OK): # will fail on maxOS if access is not X_OK return file.is_file() return False def _isexec_WIN(self, file): """ Test for executable file on non POSIX system """ return file.is_file() and self.execre.match(file.name) is not None def isexec(self, file): """ Test for executable file on non POSIX system """ if self.is_posix: return self._isexec_POSIX(file) else: return self._isexec_WIN(file) @skip_doctest @line_magic def alias(self, parameter_s=''): """Define an alias for a system command. '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd' Then, typing 'alias_name params' will execute the system command 'cmd params' (from your underlying operating system). Aliases have lower precedence than magic functions and Python normal variables, so if 'foo' is both a Python variable and an alias, the alias can not be executed until 'del foo' removes the Python variable. You can use the %l specifier in an alias definition to represent the whole line when the alias is called. For example:: In [2]: alias bracket echo "Input in brackets: <%l>" In [3]: bracket hello world Input in brackets: <hello world> You can also define aliases with parameters using %s specifiers (one per parameter):: In [1]: alias parts echo first %s second %s In [2]: %parts A B first A second B In [3]: %parts A Incorrect number of arguments: 2 expected. parts is an alias to: 'echo first %s second %s' Note that %l and %s are mutually exclusive. You can only use one or the other in your aliases. Aliases expand Python variables just like system calls using ! or !! do: all expressions prefixed with '$' get expanded. For details of the semantic rules, see PEP-215: https://peps.python.org/pep-0215/. This is the library used by IPython for variable expansion. If you want to access a true shell variable, an extra $ is necessary to prevent its expansion by IPython:: In [6]: alias show echo In [7]: PATH='A Python string' In [8]: show $PATH A Python string In [9]: show $$PATH /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:... You can use the alias facility to access all of $PATH. See the %rehashx function, which automatically creates aliases for the contents of your $PATH. If called with no parameters, %alias prints the current alias table for your system. For posix systems, the default aliases are 'cat', 'cp', 'mv', 'rm', 'rmdir', and 'mkdir', and other platform-specific aliases are added. For windows-based systems, the default aliases are 'copy', 'ddir', 'echo', 'ls', 'ldir', 'mkdir', 'ren', and 'rmdir'. 
You can see the definition of alias by adding a question mark in the end:: In [1]: cat? Repr: <alias cat for 'cat'>""" par = parameter_s.strip() if not par: aliases = sorted(self.shell.alias_manager.aliases) # stored = self.shell.db.get('stored_aliases', {} ) # for k, v in stored: # atab.append(k, v[0]) print("Total number of aliases:", len(aliases)) sys.stdout.flush() return aliases # Now try to define a new one try: alias,cmd = par.split(None, 1) except TypeError: print(oinspect.getdoc(self.alias)) return try: self.shell.alias_manager.define_alias(alias, cmd) except AliasError as e: print(e) # end magic_alias @line_magic def unalias(self, parameter_s=''): """Remove an alias""" aname = parameter_s.strip() try: self.shell.alias_manager.undefine_alias(aname) except ValueError as e: print(e) return stored = self.shell.db.get('stored_aliases', {} ) if aname in stored: print("Removing %stored alias",aname) del stored[aname] self.shell.db['stored_aliases'] = stored @line_magic def rehashx(self, parameter_s=''): """Update the alias table with all executable files in $PATH. rehashx explicitly checks that every entry in $PATH is a file with execute access (os.X_OK). Under Windows, it checks executability as a match against a '|'-separated string of extensions, stored in the IPython config variable win_exec_ext. This defaults to 'exe|com|bat'. This function also resets the root module cache of module completer, used on slow filesystems. """ from IPython.core.alias import InvalidAliasError # for the benefit of module completer in ipy_completers.py del self.shell.db['rootmodules_cache'] path = [os.path.abspath(os.path.expanduser(p)) for p in os.environ.get('PATH','').split(os.pathsep)] syscmdlist = [] savedir = os.getcwd() # Now walk the paths looking for executables to alias. try: # write the whole loop for posix/Windows so we don't have an if in # the innermost part if self.is_posix: for pdir in path: try: os.chdir(pdir) except OSError: continue # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist: dirlist = os.scandir(path=pdir) for ff in dirlist: if self.isexec(ff): fname = ff.name try: # Removes dots from the name since ipython # will assume names with dots to be python. if not self.shell.alias_manager.is_alias(fname): self.shell.alias_manager.define_alias( fname.replace('.',''), fname) except InvalidAliasError: pass else: syscmdlist.append(fname) else: no_alias = Alias.blacklist for pdir in path: try: os.chdir(pdir) except OSError: continue # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist: dirlist = os.scandir(pdir) for ff in dirlist: fname = ff.name base, ext = os.path.splitext(fname) if self.isexec(ff) and base.lower() not in no_alias: if ext.lower() == '.exe': fname = base try: # Removes dots from the name since ipython # will assume names with dots to be python. self.shell.alias_manager.define_alias( base.lower().replace('.',''), fname) except InvalidAliasError: pass syscmdlist.append(fname) self.shell.db['syscmdlist'] = syscmdlist finally: os.chdir(savedir) @skip_doctest @line_magic def pwd(self, parameter_s=''): """Return the current working directory path. Examples -------- :: In [9]: pwd Out[9]: '/home/tsuser/sprint/ipython' """ try: return os.getcwd() except FileNotFoundError as e: raise UsageError("CWD no longer exists - please use %cd to change directory.") from e @skip_doctest @line_magic def cd(self, parameter_s=''): """Change the current working directory. 
This command automatically maintains an internal list of directories you visit during your IPython session, in the variable ``_dh``. The command :magic:`%dhist` shows this history nicely formatted. You can also do ``cd -<tab>`` to see directory history conveniently. Usage: - ``cd 'dir'``: changes to directory 'dir'. - ``cd -``: changes to the last visited directory. - ``cd -<n>``: changes to the n-th directory in the directory history. - ``cd --foo``: change to directory that matches 'foo' in history - ``cd -b <bookmark_name>``: jump to a bookmark set by %bookmark - Hitting a tab key after ``cd -b`` allows you to tab-complete bookmark names. .. note:: ``cd <bookmark_name>`` is enough if there is no directory ``<bookmark_name>``, but a bookmark with the name exists. Options: -q Be quiet. Do not print the working directory after the cd command is executed. By default IPython's cd command does print this directory, since the default prompts do not display path information. .. note:: Note that ``!cd`` doesn't work for this purpose because the shell where ``!command`` runs is immediately discarded after executing 'command'. Examples -------- :: In [10]: cd parent/child /home/tsuser/parent/child """ try: oldcwd = os.getcwd() except FileNotFoundError: # Happens if the CWD has been deleted. oldcwd = None numcd = re.match(r'(-)(\d+)$',parameter_s) # jump in directory history by number if numcd: nn = int(numcd.group(2)) try: ps = self.shell.user_ns['_dh'][nn] except IndexError: print('The requested directory does not exist in history.') return else: opts = {} elif parameter_s.startswith('--'): ps = None fallback = None pat = parameter_s[2:] dh = self.shell.user_ns['_dh'] # first search only by basename (last component) for ent in reversed(dh): if pat in os.path.basename(ent) and os.path.isdir(ent): ps = ent break if fallback is None and pat in ent and os.path.isdir(ent): fallback = ent # if we have no last part match, pick the first full path match if ps is None: ps = fallback if ps is None: print("No matching entry in directory history") return else: opts = {} else: opts, ps = self.parse_options(parameter_s, 'qb', mode='string') # jump to previous if ps == '-': try: ps = self.shell.user_ns['_dh'][-2] except IndexError as e: raise UsageError('%cd -: No previous directory to change to.') from e # jump to bookmark if needed else: if not os.path.isdir(ps) or 'b' in opts: bkms = self.shell.db.get('bookmarks', {}) if ps in bkms: target = bkms[ps] print('(bookmark:%s) -> %s' % (ps, target)) ps = target else: if 'b' in opts: raise UsageError("Bookmark '%s' not found. " "Use '%%bookmark -l' to see your bookmarks." 
% ps) # at this point ps should point to the target dir if ps: try: os.chdir(os.path.expanduser(ps)) if hasattr(self.shell, 'term_title') and self.shell.term_title: set_term_title(self.shell.term_title_format.format(cwd=abbrev_cwd())) except OSError: print(sys.exc_info()[1]) else: cwd = pathlib.Path.cwd() dhist = self.shell.user_ns['_dh'] if oldcwd != cwd: dhist.append(cwd) self.shell.db['dhist'] = compress_dhist(dhist)[-100:] else: os.chdir(self.shell.home_dir) if hasattr(self.shell, 'term_title') and self.shell.term_title: set_term_title(self.shell.term_title_format.format(cwd="~")) cwd = pathlib.Path.cwd() dhist = self.shell.user_ns['_dh'] if oldcwd != cwd: dhist.append(cwd) self.shell.db["dhist"] = compress_dhist(dhist)[-100:] if "q" not in opts and not self.cd_force_quiet and self.shell.user_ns["_dh"]: print(self.shell.user_ns["_dh"][-1]) @line_magic def env(self, parameter_s=''): """Get, set, or list environment variables. Usage:\\ :``%env``: lists all environment variables/values :``%env var``: get value for var :``%env var val``: set value for var :``%env var=val``: set value for var :``%env var=$val``: set value for var, using python expansion if possible """ if parameter_s.strip(): split = '=' if '=' in parameter_s else ' ' bits = parameter_s.split(split) if len(bits) == 1: key = parameter_s.strip() if key in os.environ: return os.environ[key] else: err = "Environment does not have key: {0}".format(key) raise UsageError(err) if len(bits) > 1: return self.set_env(parameter_s) env = dict(os.environ) # hide likely secrets when printing the whole environment for key in list(env): if any(s in key.lower() for s in ('key', 'token', 'secret')): env[key] = '<hidden>' return env @line_magic def set_env(self, parameter_s): """Set environment variables. Assumptions are that either "val" is a name in the user namespace, or val is something that evaluates to a string. Usage:\\ :``%set_env var val``: set value for var :``%set_env var=val``: set value for var :``%set_env var=$val``: set value for var, using python expansion if possible """ split = '=' if '=' in parameter_s else ' ' bits = parameter_s.split(split, 1) if not parameter_s.strip() or len(bits)<2: raise UsageError("usage is 'set_env var=val'") var = bits[0].strip() val = bits[1].strip() if re.match(r'.*\s.*', var): # an environment variable with whitespace is almost certainly # not what the user intended. what's more likely is the wrong # split was chosen, ie for "set_env cmd_args A=B", we chose # '=' for the split and should have chosen ' '. to get around # this, users should just assign directly to os.environ or use # standard magic {var} expansion. err = "refusing to set env var with whitespace: '{0}'" err = err.format(val) raise UsageError(err) os.environ[var] = val print('env: {0}={1}'.format(var,val)) @line_magic def pushd(self, parameter_s=''): """Place the current dir on stack and change directory. Usage:\\ %pushd ['dirname'] """ dir_s = self.shell.dir_stack tgt = os.path.expanduser(parameter_s) cwd = os.getcwd().replace(self.shell.home_dir,'~') if tgt: self.cd(parameter_s) dir_s.insert(0,cwd) return self.shell.run_line_magic('dirs', '') @line_magic def popd(self, parameter_s=''): """Change to directory popped off the top of the stack. 
""" if not self.shell.dir_stack: raise UsageError("%popd on empty stack") top = self.shell.dir_stack.pop(0) self.cd(top) print("popd ->",top) @line_magic def dirs(self, parameter_s=''): """Return the current directory stack.""" return self.shell.dir_stack @line_magic def dhist(self, parameter_s=''): """Print your history of visited directories. %dhist -> print full history\\ %dhist n -> print last n entries only\\ %dhist n1 n2 -> print entries between n1 and n2 (n2 not included)\\ This history is automatically maintained by the %cd command, and always available as the global list variable _dh. You can use %cd -<n> to go to directory number <n>. Note that most of time, you should view directory history by entering cd -<TAB>. """ dh = self.shell.user_ns['_dh'] if parameter_s: try: args = map(int,parameter_s.split()) except: self.arg_err(self.dhist) return if len(args) == 1: ini,fin = max(len(dh)-(args[0]),0),len(dh) elif len(args) == 2: ini,fin = args fin = min(fin, len(dh)) else: self.arg_err(self.dhist) return else: ini,fin = 0,len(dh) print('Directory history (kept in _dh)') for i in range(ini, fin): print("%d: %s" % (i, dh[i])) @skip_doctest @line_magic def sc(self, parameter_s=''): """Shell capture - run shell command and capture output (DEPRECATED use !). DEPRECATED. Suboptimal, retained for backwards compatibility. You should use the form 'var = !command' instead. Example: "%sc -l myfiles = ls ~" should now be written as "myfiles = !ls ~" myfiles.s, myfiles.l and myfiles.n still apply as documented below. -- %sc [options] varname=command IPython will run the given command using commands.getoutput(), and will then update the user's interactive namespace with a variable called varname, containing the value of the call. Your command can contain shell wildcards, pipes, etc. The '=' sign in the syntax is mandatory, and the variable name you supply must follow Python's standard conventions for valid names. (A special format without variable name exists for internal use) Options: -l: list output. Split the output on newlines into a list before assigning it to the given variable. By default the output is stored as a single string. -v: verbose. Print the contents of the variable. In most cases you should not need to split as a list, because the returned value is a special type of string which can automatically provide its contents either as a list (split on newlines) or as a space-separated string. These are convenient, respectively, either for sequential processing or to be passed to a shell command. 
For example:: # Capture into variable a In [1]: sc a=ls *py # a is a string with embedded newlines In [2]: a Out[2]: 'setup.py\\nwin32_manual_post_install.py' # which can be seen as a list: In [3]: a.l Out[3]: ['setup.py', 'win32_manual_post_install.py'] # or as a whitespace-separated string: In [4]: a.s Out[4]: 'setup.py win32_manual_post_install.py' # a.s is useful to pass as a single command line: In [5]: !wc -l $a.s 146 setup.py 130 win32_manual_post_install.py 276 total # while the list form is useful to loop over: In [6]: for f in a.l: ...: !wc -l $f ...: 146 setup.py 130 win32_manual_post_install.py Similarly, the lists returned by the -l option are also special, in the sense that you can equally invoke the .s attribute on them to automatically get a whitespace-separated string from their contents:: In [7]: sc -l b=ls *py In [8]: b Out[8]: ['setup.py', 'win32_manual_post_install.py'] In [9]: b.s Out[9]: 'setup.py win32_manual_post_install.py' In summary, both the lists and strings used for output capture have the following special attributes:: .l (or .list) : value as list. .n (or .nlstr): value as newline-separated string. .s (or .spstr): value as space-separated string. """ opts,args = self.parse_options(parameter_s, 'lv') # Try to get a variable name and command to run try: # the variable name must be obtained from the parse_options # output, which uses shlex.split to strip options out. var,_ = args.split('=', 1) var = var.strip() # But the command has to be extracted from the original input # parameter_s, not on what parse_options returns, to avoid the # quote stripping which shlex.split performs on it. _,cmd = parameter_s.split('=', 1) except ValueError: var,cmd = '','' # If all looks ok, proceed split = 'l' in opts out = self.shell.getoutput(cmd, split=split) if 'v' in opts: print('%s ==\n%s' % (var, pformat(out))) if var: self.shell.user_ns.update({var:out}) else: return out @line_cell_magic def sx(self, line='', cell=None): """Shell execute - run shell command and capture output (!! is short-hand). %sx command IPython will run the given command using commands.getoutput(), and return the result formatted as a list (split on '\\n'). Since the output is _returned_, it will be stored in ipython's regular output cache Out[N] and in the '_N' automatic variables. Notes: 1) If an input line begins with '!!', then %sx is automatically invoked. That is, while:: !ls causes ipython to simply issue system('ls'), typing:: !!ls is a shorthand equivalent to:: %sx ls 2) %sx differs from %sc in that %sx automatically splits into a list, like '%sc -l'. The reason for this is to make it as easy as possible to process line-oriented shell output via further python commands. %sc is meant to provide much finer control, but requires more typing. 3) Just like %sc -l, this is a list with special attributes: :: .l (or .list) : value as list. .n (or .nlstr): value as newline-separated string. .s (or .spstr): value as whitespace-separated string. This is very useful when trying to use such lists as arguments to system commands.""" if cell is None: # line magic return self.shell.getoutput(line) else: opts,args = self.parse_options(line, '', 'out=') output = self.shell.getoutput(cell) out_name = opts.get('out', opts.get('o')) if out_name: self.shell.user_ns[out_name] = output else: return output system = line_cell_magic('system')(sx) bang = cell_magic('!')(sx) @line_magic def bookmark(self, parameter_s=''): """Manage IPython's bookmark system. 
%bookmark <name> - set bookmark to current dir %bookmark <name> <dir> - set bookmark to <dir> %bookmark -l - list all bookmarks %bookmark -d <name> - remove bookmark %bookmark -r - remove all bookmarks You can later on access a bookmarked folder with:: %cd -b <name> or simply '%cd <name>' if there is no directory called <name> AND there is such a bookmark defined. Your bookmarks persist through IPython sessions, but they are associated with each profile.""" opts,args = self.parse_options(parameter_s,'drl',mode='list') if len(args) > 2: raise UsageError("%bookmark: too many arguments") bkms = self.shell.db.get('bookmarks',{}) if 'd' in opts: try: todel = args[0] except IndexError as e: raise UsageError( "%bookmark -d: must provide a bookmark to delete") from e else: try: del bkms[todel] except KeyError as e: raise UsageError( "%%bookmark -d: Can't delete bookmark '%s'" % todel) from e elif 'r' in opts: bkms = {} elif 'l' in opts: bks = sorted(bkms) if bks: size = max(map(len, bks)) else: size = 0 fmt = '%-'+str(size)+'s -> %s' print('Current bookmarks:') for bk in bks: print(fmt % (bk, bkms[bk])) else: if not args: raise UsageError("%bookmark: You must specify the bookmark name") elif len(args)==1: bkms[args[0]] = os.getcwd() elif len(args)==2: bkms[args[0]] = args[1] self.shell.db['bookmarks'] = bkms @line_magic def pycat(self, parameter_s=''): """Show a syntax-highlighted file through a pager. This magic is similar to the cat utility, but it will assume the file to be Python source and will show it with syntax highlighting. This magic command can either take a local filename, an url, an history range (see %history) or a macro as argument. If no parameter is given, prints out history of current session up to this point. :: %pycat myscript.py %pycat 7-27 %pycat myMacro %pycat http://www.example.com/myscript.py """ try: cont = self.shell.find_user_code(parameter_s, skip_encoding_cookie=False) except (ValueError, IOError): print("Error: no such file, variable, URL, history range or macro") return page.page(self.shell.pycolorize(source_to_unicode(cont))) @magic_arguments.magic_arguments() @magic_arguments.argument( '-a', '--append', action='store_true', default=False, help='Append contents of the cell to an existing file. ' 'The file will be created if it does not exist.' ) @magic_arguments.argument( 'filename', type=str, help='file to write' ) @cell_magic def writefile(self, line, cell): """Write the contents of the cell to a file. The file will be overwritten unless the -a (--append) flag is specified. """ args = magic_arguments.parse_argstring(self.writefile, line) if re.match(r'^(\'.*\')|(".*")$', args.filename): filename = os.path.expanduser(args.filename[1:-1]) else: filename = os.path.expanduser(args.filename) if os.path.exists(filename): if args.append: print("Appending to %s" % filename) else: print("Overwriting %s" % filename) else: print("Writing %s" % filename) mode = 'a' if args.append else 'w' with io.open(filename, mode, encoding='utf-8') as f: f.write(cell)
OSMagics
python
joke2k__faker
tests/providers/test_geo.py
{ "start": 2571, "end": 3214 }
class ____(unittest.TestCase):
    def setUp(self):
        self.fake = Faker("sk_SK")
        Faker.seed(0)

    def test_location_on_land(self):
        loc = self.fake.location_on_land()
        assert isinstance(loc, tuple)
        assert len(loc) == 5
        assert Decimal(loc[0])  # Should be able to cast first two elements of tuple to Decimal
        assert Decimal(loc[1])
        assert isinstance(loc[2], str)  # Place is a string
        assert isinstance(loc[3], str)  # Country code is a string
        assert len(loc[3]) == 2  # Country code is two letters
        assert isinstance(loc[4], str)  # Timezone is a string
TestSkSk
python
gevent__gevent
src/greentest/3.14/test_smtplib.py
{ "start": 27703, "end": 28876 }
class ____(unittest.TestCase):

    def setUp(self):
        self.msg = EmailMessage()
        self.msg['From'] = 'Páolo <főo@bar.com>'
        self.smtp = smtplib.SMTP()
        self.smtp.ehlo = Mock(return_value=(200, 'OK'))
        self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()

    def testSendMessage(self):
        expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
        self.smtp.send_message(self.msg)
        self.smtp.send_message(self.msg)
        self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
                         expected_mail_options)
        self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
                         expected_mail_options)

    def testSendMessageWithMailOptions(self):
        mail_options = ['STARTTLS']
        expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
        self.smtp.send_message(self.msg, None, None, mail_options)
        self.assertEqual(mail_options, ['STARTTLS'])
        self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
                         expected_mail_options)


# test response of client to a non-successful HELO message
DefaultArgumentsTests
python
wandb__wandb
wandb/vendor/pygments/lexers/webmisc.py
{ "start": 776, "end": 1786 }
class ____(RegexLexer): """ Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks. See http://duelengine.org/. See http://jsonml.org/jbst/. .. versionadded:: 1.4 """ name = 'Duel' aliases = ['duel', 'jbst', 'jsonml+bst'] filenames = ['*.duel', '*.jbst'] mimetypes = ['text/x-duel', 'text/x-jbst'] flags = re.DOTALL tokens = { 'root': [ (r'(<%[@=#!:]?)(.*?)(%>)', bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)), (r'(<%\$)(.*?)(:)(.*?)(%>)', bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)), (r'(<%--)(.*?)(--%>)', bygroups(Name.Tag, Comment.Multiline, Name.Tag)), (r'(<script.*?>)(.*?)(</script>)', bygroups(using(HtmlLexer), using(JavascriptLexer), using(HtmlLexer))), (r'(.+?)(?=<)', using(HtmlLexer)), (r'.+', using(HtmlLexer)), ], }
DuelLexer
python
Textualize__rich
tests/test_inspect.py
{ "start": 1664, "end": 1758 }
class ____(Exception):
    def __str__(self) -> str:
        return "INSPECT ERROR"
InspectError
python
pallets__jinja
tests/test_ext.py
{ "start": 7132, "end": 10520 }
class ____:
    def test_extend_late(self):
        env = Environment()
        t = env.from_string('{% autoescape true %}{{ "<test>" }}{% endautoescape %}')
        assert t.render() == "&lt;test&gt;"

    def test_loop_controls(self):
        env = Environment(extensions=["jinja2.ext.loopcontrols"])

        tmpl = env.from_string(
            """
            {%- for item in [1, 2, 3, 4] %}
                {%- if item % 2 == 0 %}{% continue %}{% endif -%}
                {{ item }}
            {%- endfor %}"""
        )
        assert tmpl.render() == "13"

        tmpl = env.from_string(
            """
            {%- for item in [1, 2, 3, 4] %}
                {%- if item > 2 %}{% break %}{% endif -%}
                {{ item }}
            {%- endfor %}"""
        )
        assert tmpl.render() == "12"

    def test_do(self):
        env = Environment(extensions=["jinja2.ext.do"])
        tmpl = env.from_string(
            """
            {%- set items = [] %}
            {%- for char in "foo" %}
                {%- do items.append(loop.index0 ~ char) %}
            {%- endfor %}{{ items|join(', ') }}"""
        )
        assert tmpl.render() == "0f, 1o, 2o"

    def test_extension_nodes(self):
        env = Environment(extensions=[ExampleExtension])
        tmpl = env.from_string("{% test %}")
        assert tmpl.render() == "False|42|23|{}|None"

    def test_contextreference_node_passes_context(self):
        env = Environment(extensions=[ExampleExtension])
        tmpl = env.from_string('{% set test_var="test_content" %}{% test %}')
        assert tmpl.render() == "False|42|23|{}|test_content"

    def test_contextreference_node_can_pass_locals(self):
        env = Environment(extensions=[DerivedExampleExtension])
        tmpl = env.from_string(
            '{% for test_var in ["test_content"] %}{% test %}{% endfor %}'
        )
        assert tmpl.render() == "False|42|23|{}|test_content"

    def test_identifier(self):
        assert ExampleExtension.identifier == __name__ + ".ExampleExtension"

    def test_rebinding(self):
        original = Environment(extensions=[ExampleExtension])
        overlay = original.overlay()
        for env in original, overlay:
            for ext in env.extensions.values():
                assert ext.environment is env

    def test_preprocessor_extension(self):
        env = Environment(extensions=[PreprocessorExtension])
        tmpl = env.from_string("{[[TEST]]}")
        assert tmpl.render(foo=42) == "{(42)}"

    def test_streamfilter_extension(self):
        env = Environment(extensions=[StreamFilterExtension])
        env.globals["gettext"] = lambda x: x.upper()
        tmpl = env.from_string("Foo _(bar) Baz")
        out = tmpl.render()
        assert out == "Foo BAR Baz"

    def test_extension_ordering(self):
        class T1(Extension):
            priority = 1

        class T2(Extension):
            priority = 2

        env = Environment(extensions=[T1, T2])
        ext = list(env.iter_extensions())
        assert ext[0].__class__ is T1
        assert ext[1].__class__ is T2

    def test_debug(self):
        env = Environment(extensions=["jinja2.ext.debug"])
        t = env.from_string("Hello\n{% debug %}\nGoodbye")
        out = t.render()

        for value in ("context", "cycler", "filters", "abs", "tests", "!="):
            assert f"'{value}'" in out
TestExtensions
python
django__django
tests/auth_tests/test_forms.py
{ "start": 33414, "end": 36265 }
class ____(TestDataMixin, TestCase):
    def test_incorrect_password(self):
        user = User.objects.get(username="testclient")
        data = {
            "old_password": "test",
            "new_password1": "abc123",
            "new_password2": "abc123",
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form["old_password"].errors,
            [str(form.error_messages["password_incorrect"])],
        )

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username="testclient")
        data = {
            "old_password": "password",
            "new_password1": "abc123",
            "new_password2": "abc",
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form["new_password2"].errors,
            [str(form.error_messages["password_mismatch"])],
        )

    @mock.patch("django.contrib.auth.password_validation.password_changed")
    def test_success(self, password_changed):
        # The success case.
        user = User.objects.get(username="testclient")
        data = {
            "old_password": "password",
            "new_password1": "abc123",
            "new_password2": "abc123",
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        form.save()
        self.assertEqual(password_changed.call_count, 1)

    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username="testclient")
        self.assertEqual(
            list(PasswordChangeForm(user, {}).fields),
            ["old_password", "new_password1", "new_password2"],
        )

    def test_password_whitespace_not_stripped(self):
        user = User.objects.get(username="testclient")
        user.set_password(" oldpassword ")
        data = {
            "old_password": " oldpassword ",
            "new_password1": " pass ",
            "new_password2": " pass ",
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data["old_password"], data["old_password"])
        self.assertEqual(form.cleaned_data["new_password1"], data["new_password1"])
        self.assertEqual(form.cleaned_data["new_password2"], data["new_password2"])

    def test_html_autocomplete_attributes(self):
        user = User.objects.get(username="testclient")
        form = PasswordChangeForm(user)
        self.assertEqual(
            form.fields["old_password"].widget.attrs["autocomplete"], "current-password"
        )
PasswordChangeFormTest