Column       Type            Values / lengths
language     stringclasses   1 value
repo         stringclasses   346 values
path         stringlengths   6 to 201
class_span   dict
source       stringlengths   21 to 2.38M
target       stringlengths   1 to 96
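
Each record below fills these columns in order: the source field holds a class definition with its name masked as ____, and target holds the masked name. As an illustration only (values copied from the first record that follows, with the long source string truncated to its opening lines), a record could be represented as a Python dict along these lines:

# Illustrative sketch: keys mirror the schema columns above; the "source"
# value is abbreviated here, while real records store the full class text.
record = {
    "language": "python",
    "repo": "readthedocs__readthedocs.org",
    "path": "readthedocs/gold/forms.py",
    "class_span": {"start": 469, "end": 1848},
    "source": 'class ____(forms.Form):\n    """Gold users form to select projects to remove ads from."""\n    ...',
    "target": "GoldProjectForm",
}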

language: python
repo: readthedocs__readthedocs.org
path: readthedocs/gold/forms.py
class_span: { "start": 469, "end": 1848 }
class ____(forms.Form):

    """Gold users form to select projects to remove ads from."""

    project = forms.ChoiceField(
        required=True,
        help_text="Select a project.",
    )

    def __init__(self, active_user, *args, **kwargs):
        self.user = kwargs.pop("user", None)
        self.projects = kwargs.pop("projects", None)
        super().__init__(*args, **kwargs)
        self.fields["project"].choices = self.generate_choices(active_user)

    def generate_choices(self, active_user):
        queryset = Project.objects.filter(users=active_user)
        choices = ((proj.slug, str(proj)) for proj in queryset)
        return choices

    def clean_project(self):
        project_slug = self.cleaned_data.get("project", "")
        project_instance = Project.objects.filter(slug=project_slug)
        if not project_instance.exists():
            raise forms.ValidationError(_("No project found."))
        if project_instance.first() in self.projects:
            raise forms.ValidationError(_("This project is already Ad-Free."))
        return project_slug

    def clean(self):
        cleaned_data = super().clean()
        if self.projects.count() < self.user.num_supported_projects:
            return cleaned_data
        self.add_error(
            None,
            "You already have the max number of supported projects.",
        )
target: GoldProjectForm
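
Since each source masks the class name as ____ and target carries the name itself, the unmasked class can presumably be rebuilt by substituting one into the other. A minimal sketch, assuming the placeholder is always the literal string ____ and its first occurrence is the one in the class header:

def restore_class(record: dict) -> str:
    # Assumption: the class name is masked as the literal "____" and the
    # first occurrence is the one in the "class ____(...)" header.
    return record["source"].replace("____", record["target"], 1)

# For the record above, the result starts with:
#   class GoldProjectForm(forms.Form):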

language: python
repo: run-llama__llama_index
path: llama-index-core/llama_index/core/node_parser/relational/unstructured_element.py
class_span: { "start": 494, "end": 5469 }
class ____(BaseElementNodeParser): """ Unstructured element node parser. Splits a document into Text Nodes and Index Nodes corresponding to embedded objects (e.g. tables). """ partitioning_parameters: Optional[Dict[str, Any]] = Field( default={}, description="Extra dictionary representing parameters of the partitioning process.", ) def __init__( self, callback_manager: Optional[CallbackManager] = None, llm: Optional[Any] = None, summary_query_str: str = DEFAULT_SUMMARY_QUERY_STR, partitioning_parameters: Optional[Dict[str, Any]] = {}, ) -> None: """Initialize.""" try: import lxml # noqa # pants: no-infer-dep import unstructured # noqa # pants: no-infer-dep except ImportError: raise ImportError( "You must install the `unstructured` and `lxml` " "package to use this node parser." ) callback_manager = callback_manager or CallbackManager([]) return super().__init__( callback_manager=callback_manager, llm=llm, summary_query_str=summary_query_str, partitioning_parameters=partitioning_parameters, ) @classmethod def class_name(cls) -> str: return "UnstructuredElementNodeParser" def get_nodes_from_node(self, node: TextNode) -> List[BaseNode]: """Get nodes from node.""" elements = self.extract_elements( node.get_content(), table_filters=[self.filter_table] ) table_elements = self.get_table_elements(elements) # extract summaries over table elements self.extract_table_summaries(table_elements) # convert into nodes # will return a list of Nodes and Index Nodes nodes = self.get_nodes_from_elements( elements, node, ref_doc_text=node.get_content() ) source_document = node.source_node or node.as_related_node_info() for n in nodes: n.relationships[NodeRelationship.SOURCE] = source_document n.metadata.update(node.metadata) return nodes async def aget_nodes_from_node(self, node: TextNode) -> List[BaseNode]: """Get nodes from node.""" elements = self.extract_elements( node.get_content(), table_filters=[self.filter_table] ) table_elements = self.get_table_elements(elements) # extract summaries over table elements await self.aextract_table_summaries(table_elements) # convert into nodes # will return a list of Nodes and Index Nodes nodes = self.get_nodes_from_elements( elements, node, ref_doc_text=node.get_content() ) source_document = node.source_node or node.as_related_node_info() for n in nodes: n.relationships[NodeRelationship.SOURCE] = source_document n.metadata.update(node.metadata) return nodes def extract_elements( self, text: str, table_filters: Optional[List[Callable]] = None, **kwargs: Any ) -> List[Element]: """Extract elements from text.""" from unstructured.partition.html import partition_html # pants: no-infer-dep table_filters = table_filters or [] partitioning_parameters = self.partitioning_parameters or {} elements = partition_html(text=text, **partitioning_parameters) output_els = [] for idx, element in enumerate(elements): if "unstructured.documents.elements.Table" in str(type(element)): should_keep = all(tf(element) for tf in table_filters) if should_keep: table_df = html_to_df(str(element.metadata.text_as_html)) output_els.append( Element( id=f"id_{idx}", type="table", element=element, table=table_df, ) ) else: # if not a table, keep it as Text as we don't want to lose context from unstructured.documents.elements import Text new_element = Text(str(element)) output_els.append( Element(id=f"id_{idx}", type="text", element=new_element) ) else: output_els.append(Element(id=f"id_{idx}", type="text", element=element)) return output_els def filter_table(self, table_element: Any) -> bool: """Filter 
tables.""" table_df = html_to_df(table_element.metadata.text_as_html) # check if table_df is not None, has more than one row, and more than one column return table_df is not None and not table_df.empty and len(table_df.columns) > 1
target: UnstructuredElementNodeParser

language: python
repo: pytorch__pytorch
path: torch/testing/_internal/opinfo/core.py
class_span: { "start": 25633, "end": 65204 }
class ____: """Operator information and helper functions for acquiring it.""" # the string name of the function name: str # An optional reference function that accepts ndarrays (AKA "NumPy arrays"). # If given, the op will be compared with its reference on each of its sample inputs. ref: Optional[Callable] = None # the following metadata describes the operator, its variants, and its aliases, if any # iterable of aliases, e.g. ("absolute",) for torch.abs aliases: Iterable = None # additional string to include in the test name # this is useful when an op needs multiple OpInfos, # like divide does, often because it's really several # different ops behind the scenes variant_test_name: str = "" # the function variant of the operation, populated as torch.<name> if None op: Callable = None # allows the method variant of this operation to be specified as follows: # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name # - if None, then the OpInfo explicitly specifies is has no associated method # - if a Callable, then that callable should be the method associated with this operation method_variant: Callable = _NOTHING # allows the inplace variant of this operation to be specified as follows: # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name # - if None, then the OpInfo explicitly specifies is has no associated inplace variant # - if a Callable, then that callable should be the inplace variant associated with this operation inplace_variant: Callable = _NOTHING # allows the operator variant of this operation to be specified as follows: # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name # - if None, then the OpInfo explicitly specifies is has no associated operator # - if a Callable, then that callable should be the operator associated with this operation operator_variant: Callable = _NOTHING # allows the inplace operator variant of this operation to be specified as follows: # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name # - if None, then the OpInfo explicitly specifies is has no associated inplace operator # - if a Callable, then that callable should be the inplace operator associated with this operation inplace_operator_variant: Callable = _NOTHING # the following metadata are test directives for skipping or modifying tests # information about which tests to skip skips: tuple = () # decorators to apply to generated tests decorators: tuple = () # the following are pointers to functions to generate certain classes of inputs # function to generate sample inputs with strided layouts sample_inputs_func: Callable = None # function to generate a more thorough set of samples inputs with strided layouts reference_inputs_func: Callable = None # function to generate inputs that will throw errors error_inputs_func: Callable = None # function to generate sparse (coo, csr, csc, bsr, bsc) inputs that will throw errors error_inputs_sparse_func: Callable = None # function to generate sample inputs with sparse coo layouts sample_inputs_sparse_coo_func: Callable = None # function to generate sample inputs with sparse csr layouts sample_inputs_sparse_csr_func: Callable = None # function to generate sample inputs with sparse csc layouts sample_inputs_sparse_csc_func: Callable = None # function to generate sample inputs with sparse bsr layouts sample_inputs_sparse_bsr_func: Callable = None # function to generate sample inputs with sparse bsc layouts 
sample_inputs_sparse_bsc_func: Callable = None # the following metadata relates to dtype support and is tested for correctness in test_ops.py # dtypes this function works with on the CPU, # inherited by other device types that don't specify their own dtypes dtypes: _dispatch_dtypes = None # the following dtypesIf... options override the dtypes value on their respective device types # I.e. instead of writing multiple `dtypesIfCUDA`, `dtypesIfROCM`, etc one can simply define a dict # dtypesIf = { 'cuda': (torch.float, torch.double), 'rocm': (torch.half, torch.bfloat16) } dtypesIf: dict[str, _dispatch_dtypes] = field(default_factory=dict) def __getattribute__(self, name: str) -> Any: if name.startswith("dtypesIf") and name != "dtypesIf": # TODO: Warn if used dev_name = name.removeprefix("dtypesIf").lower() return self.dtypesIf.get(dev_name) return super().__getattribute__(name) def __setattr__(self, name: str, value: Any) -> None: # TODO: After migration, start adding warnings here if name.startswith("dtypesIf") and name != "dtypesIf": assert isinstance(value, (_dispatch_dtypes, type(None))) dev_name = name.removeprefix("dtypesIf").lower() self.dtypesIf[dev_name] = value return super().__setattr__(name, value) # dtypes this function is expected to work with on CUDA dtypesIfCUDA: _dispatch_dtypes = None # dtypes this function is expected to work with on ROCM dtypesIfROCM: _dispatch_dtypes = None dtypesIfHpu: _dispatch_dtypes = None # dtypes this function is expected to work with on XPU dtypesIfXPU: _dispatch_dtypes = None # backward dtypes this function is expected to work with backward_dtypes: _dispatch_dtypes = None # backward dtypes this function is expected to work with on CUDA backward_dtypesIfCUDA: _dispatch_dtypes = None # backward dtypes this function is expected to work with on ROCM backward_dtypesIfROCM: _dispatch_dtypes = None backward_dtypesIfHpu: _dispatch_dtypes = None # the following metadata describes the operators out= support # whether the op supports the out kwarg # defaults to True, if the op does not allow the out kwarg or # supports it incorrectly then test_out in test_ops.py should fail supports_out: bool = True # the following metadata relates to autograd support # whether the operation supports backward mode AD # if true, gradient correctness is tested in test_ops.py # using the op's sample inputs supports_autograd: bool = True # whether the op supports second order gradients # if true, gradgrad correctness is tested in test_ops.py # defaults to support_autograd's value # TODO: rename this to supports_bwgrad_bwgrad to be consistent with below supports_gradgrad: bool = None # whether the ops supports second order gradients via # forward-over-reverse. If True, forward-over-reverse gradgrad correctness # is tested. If False, test that forward grad is not implemented. # Defaults to False. supports_fwgrad_bwgrad: bool = False # whether the operation supports inplace autograd # if true, tested in test_ops.py # defaults to supports_autograd's value supports_inplace_autograd: bool = None # Whether the operation support forward mode AD # If the value is True, we check that the gradients are correct # If the value is False, we test that forward grad is not implemented supports_forward_ad: bool = False # Whether the operation has a varargs variant # (e.g. 
functions like ones, zeros, methods like view, permute) supports_varargs: bool = False # Whether the forward operation avoids materializing COW tensor inputs supports_cow_input_no_materialize_forward: bool = True # Whether the backward operation avoids materializing COW tensor inputs supports_cow_input_no_materialize_backward: bool = True # Whether to skip the backward part of the COW tensor input test skip_cow_input_backward: bool = False # If `supports_cow_input_no_materialize_forward == True`, this list contains # the arg indices or kwarg names of inputs that are expected to materialize allow_cow_input_materialize_forward: list[Union[int, str]] = None # If `supports_cow_input_no_materialize_backward == True`, this list contains # the arg indices or kwarg names of inputs that are expected to materialize allow_cow_input_materialize_backward: list[Union[int, str]] = None # wrapper function for gradcheck gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs) # whether to check batched grad when doing gradcheck # defaults to support_autograd's value check_batched_grad: bool = None # whether to check batched grad grad when doing gradgradcheck # default's to support_gradgrad's value check_batched_gradgrad: bool = None # whether to check batched forward grad when doing gradcheck # defaults to the value of `supports_forward_ad` check_batched_forward_grad: bool = None # whether to check batched forward grad when doing gradcheck # defaults to the value of `check_batched_forward_grad` check_inplace_batched_forward_grad: bool = None # tolerance for nondeterminism while performing gradcheck gradcheck_nondet_tol: float = 0.0 # Whether to use the fast implementation for gradcheck/gradgradcheck. # When set to None, defers to the default value provided by the wrapper # function around gradcheck (testing._internal.common_utils.gradcheck) gradcheck_fast_mode: bool = None # the following metadata relates to JIT support and is tested for correctness in test_ops.py # name of the corresponding aten:: operator aten_name: str = None # if this is a composite implicit autograd op, the decomposed op decomp_aten_name: Optional[str] = None # name of the corresponding aten:: operator for backwards aten_backward_name: Optional[str] = None # if a op's aten::node is expected to be symbolically autodiffed assert_autodiffed: bool = False # a list of strings with node names that are expected to be in a # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'], # default is populated to be ['aten::(name of Python operator)'] autodiff_nonfusible_nodes: list[str] = None # a list of strings with node names that are expected to be in FusionGroups # inside of DifferentiableGraphs when this operation is autodiffed. 
# Ex: ['aten::add', 'aten::mm'], defaults to an empty list # Note: currently no ops use fusible nodes autodiff_fusible_nodes: list[str] = None # the following metadata relates to sparse support and is used in test_sparse.py # whether the op supports sparse coo inputs, defaults to False # TODO: rename supports_sparse to supports_sparse_coo supports_sparse: bool = None # only run tracing tests supports_scripting: bool = True # if the operator can be traced supports_tracing: bool = True # the following metadata relates to sparse compressed support and # is used in test_sparse_csr.py and test_sparse.py # whether the op supports sparse csr inputs, defaults to False supports_sparse_csr: bool = None # whether the op supports sparse csc inputs, defaults to False supports_sparse_csc: bool = None # whether the op supports sparse bsr inputs, defaults to False supports_sparse_bsr: bool = None # whether the op supports sparse bsc inputs, defaults to False supports_sparse_bsc: bool = None # whether the op supports nested jagged inputs, defaults to False supports_njt: bool = None # whether the op promotes integer inputs to float promotes_int_to_float: bool = False # the following metadata relates to complex support and is checked in test_ops.py test_conjugated_samples: bool = True test_neg_view: bool = True # assert that jit shape analysis fully propagates shape assert_jit_shape_analysis: bool = False # the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py supports_expanded_weight: bool = False is_factory_function: bool = False skip_correctness_check_compile_vs_eager: bool = False def __post_init__(self): self._original_opinfo_args = asdict(self).copy() assert self.dtypes is not None, f"OpInfo for {self.name} has no dtypes!" # Validates the dtypes are generated from the dispatch-related functions for name, val in self.dtypesIf.items(): if val is not None: assert isinstance(val, _dispatch_dtypes) self.dtypesIf[name] = set(val) if self.aten_name is None: self.aten_name = self.name # Attribute to verify dynamic_dtypes are used. self.dynamic_dtypes = any( isinstance(dtypes, utils._dynamic_dispatch_dtypes) for dtypes in self.dtypesIf.values() ) if self.dynamic_dtypes: # Make sure `dtyesIfCUDA` is dynamic, if dynamic dispatch is used for CPU # This is because, below we set dtypesIfCUDA to dtypes if they are None. assert isinstance(self.dtypesIfCUDA, utils._dynamic_dispatch_dtypes), ( f"To use dynamic dtypes for operator {self.name}, " "acquire the dtypes dynamically for argument `dtypesIfCUDA`." "This is to ensure that CUDA dtypes are acquired correctly as they" "differ from CPU dtypes occasionally" ) self.dtypes = set(self.dtypes) # NOTE: backward dtypes must be acquired before forward dtypes # since they fallback to explicit (not implicit!) 
specifications of # forward dtypes self.backward_dtypesIfROCM = ( set(self.backward_dtypesIfROCM) if self.backward_dtypesIfROCM is not None else ( self.backward_dtypesIfCUDA if self.backward_dtypesIfCUDA is not None else self.backward_dtypes if self.backward_dtypes is not None else self.dtypesIfROCM if self.dtypesIfROCM is not None else self.dtypesIfCUDA if self.dtypesIfCUDA is not None else self.dtypes ) ) self.backward_dtypesIfCUDA = ( set(self.backward_dtypesIfCUDA) if self.backward_dtypesIfCUDA is not None else ( self.backward_dtypes if self.backward_dtypes is not None else self.dtypesIfCUDA if self.dtypesIfCUDA is not None else self.dtypes ) ) self.backward_dtypesIfHpu = ( set(self.backward_dtypesIfHpu) if self.backward_dtypesIfHpu is not None else ( self.backward_dtypes if self.backward_dtypes is not None else self.dtypes ) ) self.backward_dtypes = ( set(self.backward_dtypes) if self.backward_dtypes is not None else self.dtypes ) # Inherit from cpu for dev_type in ["cuda", "hpu"]: if self.dtypesIf.get(dev_type) is None: self.dtypesIf[dev_type] = self.dtypes # Inherit from CUDA for dev_type in ["rocm", "xpu"]: if self.dtypesIf.get(dev_type) is None: self.dtypesIf[dev_type] = self.dtypesIf["cuda"] # NOTE: if the op is unspecified it is assumed to be under the torch namespace if not self.op: self.op = _getattr_qual(torch, self.name) if self.method_variant is _NOTHING: self.method_variant = getattr(torch.Tensor, self.name, None) # attributes like real, imag are not callable if not callable(self.method_variant): self.method_variant = None if self.inplace_variant is _NOTHING: inplace_name = self.name + "_" self.inplace_variant = getattr(torch.Tensor, inplace_name, None) if self.operator_variant is _NOTHING: self.operator_variant = getattr(operator, self.name, None) if self.inplace_operator_variant is _NOTHING: # Note: operator.i<op> will use operator.<op> and assign the result to the lhs when no # __i<op>__ method is found. This results in the appearance of an inplace operator variant which # does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace # operator with a check that an inplace variant exists. 
if self.inplace_variant is not None: inplace_operator_name = "i" + self.name self.inplace_operator_variant = getattr( operator, inplace_operator_name, None ) else: self.inplace_operator_variant = None self.decorators = (*self.decorators, *self.skips) # Specifying sample inputs function without specifying the # corresponding layout support implies the layout support: if self.supports_sparse is None: self.supports_sparse = self.sample_inputs_sparse_coo_func is not None if self.sample_inputs_sparse_coo_func is None: self.sample_inputs_sparse_coo_func = self._sample_inputs_unspecified if self.supports_sparse_csr is None: self.supports_sparse_csr = self.sample_inputs_sparse_csr_func is not None if self.sample_inputs_sparse_csr_func is None: self.sample_inputs_sparse_csr_func = self._sample_inputs_unspecified if self.supports_sparse_csc is None: self.supports_sparse_csc = self.sample_inputs_sparse_csc_func is not None if self.sample_inputs_sparse_csc_func is None: self.sample_inputs_sparse_csc_func = self._sample_inputs_unspecified if self.supports_sparse_bsr is None: self.supports_sparse_bsr = self.sample_inputs_sparse_bsr_func is not None if self.sample_inputs_sparse_bsr_func is None: self.sample_inputs_sparse_bsr_func = self._sample_inputs_unspecified if self.supports_sparse_bsc is None: self.supports_sparse_bsc = self.sample_inputs_sparse_bsc_func is not None if self.sample_inputs_sparse_bsc_func is None: self.sample_inputs_sparse_bsc_func = self._sample_inputs_unspecified if self.supports_njt is None: self.supports_njt = False # We run the sampling functions without tracking the gradiends of the creation of inputs self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func) self.sample_inputs_sparse_coo_func = torch.no_grad()( self.sample_inputs_sparse_coo_func ) self.sample_inputs_sparse_csr_func = torch.no_grad()( self.sample_inputs_sparse_csr_func ) self.sample_inputs_sparse_csc_func = torch.no_grad()( self.sample_inputs_sparse_csc_func ) self.sample_inputs_sparse_bsr_func = torch.no_grad()( self.sample_inputs_sparse_bsr_func ) self.sample_inputs_sparse_bsc_func = torch.no_grad()( self.sample_inputs_sparse_bsc_func ) if self.reference_inputs_func is not None: self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func) if not self.autodiff_fusible_nodes: self.autodiff_fusible_nodes = [] if self.autodiff_nonfusible_nodes is None: self.autodiff_nonfusible_nodes = ["aten::" + self.name] # Autograd support # Autograd flags that depend on backward AD only # - If setting has been explicitly set, raise error if inconsistent if self.supports_gradgrad is None: self.supports_gradgrad = self.supports_autograd else: assert not (self.supports_gradgrad and not self.supports_autograd), ( "supports_gradgrad refines the part of autograd is supported, so it should " "not be set if supports_autograd is False" ) if self.check_batched_grad is None: self.check_batched_grad = self.supports_autograd or self.supports_forward_ad else: assert not ( self.check_batched_grad and not (self.supports_autograd or self.supports_forward_ad) ), ( "check_batched_grad refines the part of autograd that will be checked (by gradcheck), so " "it should not be set if supports_autograd is False" ) if self.check_batched_gradgrad is None: self.check_batched_gradgrad = self.supports_gradgrad else: assert not (self.check_batched_gradgrad and not self.supports_gradgrad), ( "check_batched_gradgrad refines the part of autograd that will be checked (by " "gradgradcheck), so it should not be set if either supports_gradgrad 
or supports_autograd " "is False." ) if self.check_batched_forward_grad is None: self.check_batched_forward_grad = self.supports_forward_ad else: assert not ( self.check_batched_forward_grad and not self.supports_forward_ad ), ( "check_batched_forward_grad should only be used when supports_forward_ad " "is True. It is used to disable the test in the specific cases " "where the op supports forward ad but fails to compute " "batched forward grad." ) if self.check_inplace_batched_forward_grad is None: self.check_inplace_batched_forward_grad = self.check_batched_forward_grad else: assert not ( self.check_inplace_batched_forward_grad and not self.check_batched_forward_grad ), ( "check_batched_forward_grad should only be used when check_batched_forward_grad " "is True. It is used to disable the test in the specific cases " "where the op supports batched forward grad but fails to compute batched forward " "grad for the inplace variant of the op." ) assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), ( "supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be " "True if backward ad is also checked, i.e., supports_forward_ad should be True.", self.name, ) # Autograd flags that depend on both forward AD and backward AD if self.supports_inplace_autograd is None: self.supports_inplace_autograd = ( self.supports_autograd or self.supports_forward_ad ) else: assert not ( self.supports_inplace_autograd and not self.supports_autograd and not self.supports_forward_ad ), ( "supports_inplace_autograd refines the part of autograd that is supported, so " "it should not be set if both supports_autograd and supports_forward_ad are False" ) if self.aliases is not None: self.aliases = tuple(AliasInfo(a) for a in self.aliases) # type: ignore[assignment] else: self.aliases = () def __call__(self, *args, **kwargs): """Calls the function variant of the operator.""" return self.op(*args, **kwargs) def __str__(self): return dataclass_repr(self) def get_op(self): """Returns the function variant of the operator, torch.<op_name>.""" return self.op def get_method(self): """Returns the method variant of the operator, torch.Tensor.<op_name>. Returns None if the operator has no method variant. """ return self.method_variant def get_inplace(self): """Returns the inplace variant of the operator, torch.Tensor.<op_name>_. Returns None if the operator has no inplace variant. """ return self.inplace_variant def get_operator(self): """Returns operator variant of the operator, e.g. operator.neg Returns None if the operator has no operator variant. """ return self.operator_variant def get_inplace_operator(self): """Returns the inplace operator variant of the operator, e.g operator.iadd Returns None if the operator has no inplace operator variant""" return self.inplace_operator_variant # Returns a tuple of callables: # (TestCase -> subtest context, TestCase -> skip / xfail context) # I'd love to combine these into one but I haven't figured out how to do it # in a way that works like it should, and I tried a LOT of things. def _maybe_skip_or_xfail(self, rules, device, sample, idx): def _subtest_fn(test_case, sample=sample.name, idx=idx): return test_case.subTest(sample=sample, idx=idx) if rules is None or len(rules) == 0: return (_subtest_fn, lambda _: contextlib.nullcontext()) # NB: match first rule only (order matters!) 
for rule in rules: if rule.sample_match_fn(device, sample): log.debug( "matched %s rule '%s': %s %s %s", rule.type, rule.name, self.full_name, device, sample, ) # Provide a context for the test case to run the sample input # through as a subtest AND handle skip / xfail for it as needed. return ( _subtest_fn, lambda test_case, rule=rule: rule.get_context(test_case), ) log.debug("matched no rules: %s %s %s", self.full_name, device, sample) return (_subtest_fn, lambda _: contextlib.nullcontext()) def _sample_callback_fn(self, use_subtests, device): # Get sample-specific skips / xfails. sample_skips_and_xfails = getattr( extract_test_fn(), "sample_skips_and_xfails", None ) if sample_skips_and_xfails is not None and not use_subtests: raise RuntimeError( """Sample-specific skips / xfails require use_subtests=True. Please pass this to the sample generation function and run the test logic within the returned contexts (NB: order matters!). For example: def test_foo(self, device, dtype, op): for sample, subtest_ctx, skip_xfail_ctx in op.sample_inputs(..., use_subtests=True): # these contexts handle running within subtests and skips / xfails with subtest_ctx(self), skip_xfail_ctx(self): # test logic here ...""" ) if not use_subtests: # use the default callback that returns the sample without a subtest context return None if USE_PYTEST: try: import pytest_subtests # noqa: F401 except ModuleNotFoundError: raise RuntimeError( "Encountered an OpInfo test with use_subtests=True and pytest-subtests is " "not installed. The feature will not work correctly within pytest without " "this package; please install it." ) from None def _f( sample, idx, self=self, device=device, sample_skips_and_xfails=sample_skips_and_xfails, use_subtests=use_subtests, ): # When subtests are enabled, also return a subtest context. This is required # for xfails / skips to work properly. return ( sample, *self._maybe_skip_or_xfail( sample_skips_and_xfails, device, sample, idx ), ) return _f def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs but with the tensor input or first tensor in a sequence input conjugated. """ set_seed = kwargs.pop("set_seed", True) use_subtests = kwargs.pop("use_subtests", False) samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) conj_samples = list(samples) def conjugate(tensor): _requires_grad = tensor.requires_grad tensor = tensor.conj() return tensor.requires_grad_(_requires_grad) for i, sample in enumerate(samples): sample = conj_samples[i] # Note: it is assumed that the input here is either a tensor or tensorlist if isinstance(sample.input, torch.Tensor): sample.input = conjugate(sample.input) else: sample.input[0] = conjugate(sample.input[0]) return TrackedInputIter( iter(conj_samples), "conjugate sample input", item_callback=self._sample_callback_fn(use_subtests, device), set_seed=set_seed, restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, ) def sample_inputs(self, device, dtype, requires_grad=False, **kwargs): """ Returns an iterable of SampleInputs. These samples should be sufficient to test the function works correctly with autograd, TorchScript, etc. 
""" set_seed = kwargs.pop("set_seed", True) use_subtests = kwargs.pop("use_subtests", False) samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) if kwargs.get("include_conjugated_inputs", False): conj_samples = self.conjugate_sample_inputs( device, dtype, requires_grad, **kwargs ) samples_list = list(samples) samples_list.extend(conj_samples) samples = tuple(samples_list) return TrackedInputIter( iter(samples), "sample input", item_callback=self._sample_callback_fn(use_subtests, device), set_seed=set_seed, restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, ) def reference_inputs(self, device, dtype, requires_grad=False, **kwargs): """ Returns an iterable of SampleInputs. Distinct from sample_inputs() above because this returns an expanded set of inputs when reference_inputs_func is defined. If undefined this returns the sample inputs. """ set_seed = kwargs.pop("set_seed", True) use_subtests = kwargs.pop("use_subtests", False) if self.reference_inputs_func is None: samples = self.sample_inputs_func( self, device, dtype, requires_grad, **kwargs ) return TrackedInputIter( iter(samples), "reference input", item_callback=self._sample_callback_fn(use_subtests, device), set_seed=set_seed, restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, ) if kwargs.get("include_conjugated_inputs", False): raise NotImplementedError references = self.reference_inputs_func( self, device, dtype, requires_grad, **kwargs ) return TrackedInputIter( iter(references), "reference input", item_callback=self._sample_callback_fn(use_subtests, device), set_seed=set_seed, restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, ) def error_inputs(self, device, **kwargs): """ Returns an iterable of ErrorInputs. """ set_seed = kwargs.pop("set_seed", True) use_subtests = kwargs.pop("use_subtests", False) errs = self.error_inputs_func(self, device, **kwargs) def _error_item_callback(e, i, use_subtests=use_subtests, device=device): cb = self._sample_callback_fn(use_subtests, device) # no rules to apply; just return the sample if cb is None: return e # adapt the callback call since ErrorInputs contain SampleInputs _, subtest_ctx = cb(e.sample_input, i) return (e, subtest_ctx) return TrackedInputIter( iter(errs), "error input", track_callback=lambda e: e.sample_input, item_callback=_error_item_callback, set_seed=set_seed, restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, ) def error_inputs_sparse(self, device, layout, **kwargs): """ Returns an iterable of ErrorInputs that contain sparse sample inputs with a specified layout. """ if not self.supports_sparse_layout(layout): raise unittest.SkipTest("unsupported sparse layout") return self.error_inputs_sparse_func(self, device, layout, **kwargs) def supports_sparse_layout(self, layout): """Return True if OpInfo supports the specified sparse layout.""" layout_name = str(layout).split(".")[-1] # map torch.sparse_coo to OpInfo.supports_sparse: layout_name = layout_name.replace("_coo", "") return getattr(self, f"supports_{layout_name}") def sample_inputs_sparse( self, layout, device, dtype, requires_grad=False, **kwargs ): """Returns an iterable of SampleInputs that contain inputs with a specified sparse layout. 
""" layout_name = str(layout).split(".")[-1] sample_inputs_mth = getattr(self, "sample_inputs_" + layout_name) def non_empty_sampler(op, generator): found_sample = False for sample in generator: found_sample = True yield sample if not found_sample: raise unittest.SkipTest("NO SAMPLES!") return non_empty_sampler( self, sample_inputs_mth(device, dtype, requires_grad=requires_grad, **kwargs), ) def _sample_inputs_unspecified(self, *args, **kwargs): """Raises an NotImplemented exception in a OpInfo instance creation that specifies supports_sparse(|_csr|_csc|_bsr|_bsc)=True without specifying the corresponding sample function as sample_inputs_sparse_(coo|csr|csc|bsr|bsc)_func. To avoid this, either define the corresponding sample function, or re-map unsupported samples to error inputs in an appropriate opinfo/definitions/sparse.py:_validate_sample_input_sparse_<op> function. """ raise NotImplementedError("no sample function specified") def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs that contain inputs with sparse coo layout. """ return self.sample_inputs_sparse_coo_func( self, device, dtype, requires_grad, **kwargs ) def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs that contain inputs with sparse csr layout. """ return self.sample_inputs_sparse_csr_func( self, device, dtype, requires_grad, **kwargs ) def sample_inputs_sparse_csc(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs that contain inputs with sparse csc layout. """ return self.sample_inputs_sparse_csc_func( self, device, dtype, requires_grad, **kwargs ) def sample_inputs_sparse_bsr(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs that contain inputs with sparse bsr layout. """ return self.sample_inputs_sparse_bsr_func( self, device, dtype, requires_grad, **kwargs ) def sample_inputs_sparse_bsc(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs that contain inputs with sparse bsc layout. 
""" return self.sample_inputs_sparse_bsc_func( self, device, dtype, requires_grad, **kwargs ) def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): """Returns the decorators targeting the given test.""" result = [] for decorator in self.decorators: if isinstance(decorator, DecorateInfo): if decorator.is_active( test_class, test_name, device, dtype, param_kwargs ): result.extend(decorator.decorators) else: result.append(decorator) return result def supported_dtypes(self, device_type): if device_type == "privateuse1": device_type = torch._C._get_privateuse1_backend_name() device_type = torch.device(device_type).type if device_type == "cuda" and TEST_WITH_ROCM: device_type = "rocm" result = self.dtypesIf.get(device_type, self.dtypes) if device_type == "mps": return result - {torch.float64, torch.cdouble} return result def supported_backward_dtypes(self, device_type): if not self.supports_autograd: return set() if device_type == "privateuse1": device_type = torch._C._get_privateuse1_backend_name() device_type = torch.device(device_type).type backward_dtypes = None if device_type == "cuda": backward_dtypes = ( self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA ) elif device_type == "hpu": backward_dtypes = self.backward_dtypesIfHpu elif device_type == "mps": backward_dtypes = self.backward_dtypes - {torch.double, torch.cdouble} else: backward_dtypes = self.backward_dtypes allowed_backward_dtypes = floating_and_complex_types_and( torch.bfloat16, torch.float16, torch.complex32 ) return set(allowed_backward_dtypes).intersection(backward_dtypes) def supports_dtype(self, dtype, device_type) -> bool: return dtype in self.supported_dtypes(device_type) @property def full_name(self): """Returns a full name that helps to uniquely identify this OpInfo.""" variant = "." + self.variant_test_name if self.variant_test_name else "" # example: "normal.in_place" where "normal" is the name and "in_place" is the variant return f"{self.name}{variant}" @property def formatted_name(self): """Returns a formatted full name for this OpInfo that can be used in test names.""" return self.full_name.replace(".", "_") # Represents a skip / xfail rule matching a particular set of tests. It allows granularity # at the device, dtype, op, and individual sample levels. This flexibility allows entire # bugs to be represented by a single rule, even if this corresponds with multiple conceptual # test cases across multiple ops. @dataclass
target: OpInfo

language: python
repo: apache__airflow
path: airflow-ctl/src/airflowctl/api/datamodels/generated.py
class_span: { "start": 14841, "end": 15463 }
class ____(BaseModel):
    """
    Serializer for External View Plugin responses.
    """

    model_config = ConfigDict(
        extra="allow",
    )
    name: Annotated[str, Field(title="Name")]
    icon: Annotated[str | None, Field(title="Icon")] = None
    icon_dark_mode: Annotated[str | None, Field(title="Icon Dark Mode")] = None
    url_route: Annotated[str | None, Field(title="Url Route")] = None
    category: Annotated[str | None, Field(title="Category")] = None
    href: Annotated[str, Field(title="Href")]
    destination: Annotated[Destination | None, Field(title="Destination")] = "nav"
target: ExternalViewResponse

language: python
repo: numpy__numpy
path: numpy/lib/tests/test_arraysetops.py
class_span: { "start": 23000, "end": 47678 }
class ____: def check_all(self, a, b, i1, i2, c, dt): base_msg = 'check {0} failed for type {1}' msg = base_msg.format('values', dt) v = unique(a) assert_array_equal(v, b, msg) assert type(v) == type(b) msg = base_msg.format('return_index', dt) v, j = unique(a, True, False, False) assert_array_equal(v, b, msg) assert_array_equal(j, i1, msg) assert type(v) == type(b) msg = base_msg.format('return_inverse', dt) v, j = unique(a, False, True, False) assert_array_equal(v, b, msg) assert_array_equal(j, i2, msg) assert type(v) == type(b) msg = base_msg.format('return_counts', dt) v, j = unique(a, False, False, True) assert_array_equal(v, b, msg) assert_array_equal(j, c, msg) assert type(v) == type(b) msg = base_msg.format('return_index and return_inverse', dt) v, j1, j2 = unique(a, True, True, False) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, i2, msg) assert type(v) == type(b) msg = base_msg.format('return_index and return_counts', dt) v, j1, j2 = unique(a, True, False, True) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, c, msg) assert type(v) == type(b) msg = base_msg.format('return_inverse and return_counts', dt) v, j1, j2 = unique(a, False, True, True) assert_array_equal(v, b, msg) assert_array_equal(j1, i2, msg) assert_array_equal(j2, c, msg) assert type(v) == type(b) msg = base_msg.format(('return_index, return_inverse ' 'and return_counts'), dt) v, j1, j2, j3 = unique(a, True, True, True) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, i2, msg) assert_array_equal(j3, c, msg) assert type(v) == type(b) def get_types(self): types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) types.append('datetime64[D]') types.append('timedelta64[D]') return types def test_unique_1d(self): a = [5, 7, 1, 2, 1, 5, 7] * 10 b = [1, 2, 5, 7] i1 = [2, 3, 0, 1] i2 = [2, 3, 0, 1, 0, 2, 3] * 10 c = np.multiply([2, 1, 2, 2], 10) # test for numeric arrays types = self.get_types() for dt in types: aa = np.array(a, dt) bb = np.array(b, dt) self.check_all(aa, bb, i1, i2, c, dt) # test for object arrays dt = 'O' aa = np.empty(len(a), dt) aa[:] = a bb = np.empty(len(b), dt) bb[:] = b self.check_all(aa, bb, i1, i2, c, dt) # test for structured arrays dt = [('', 'i'), ('', 'i')] aa = np.array(list(zip(a, a)), dt) bb = np.array(list(zip(b, b)), dt) self.check_all(aa, bb, i1, i2, c, dt) # test for ticket #2799 aa = [1. + 0.j, 1 - 1.j, 1] assert_array_equal( np.sort(np.unique(aa)), [1. - 1.j, 1.], ) # test for ticket #4785 a = [(1, 2), (1, 2), (2, 3)] unq = [1, 2, 3] inv = [[0, 1], [0, 1], [1, 2]] a1 = unique(a) assert_array_equal(a1, unq) a2, a2_inv = unique(a, return_inverse=True) assert_array_equal(a2, unq) assert_array_equal(a2_inv, inv) # test for chararrays with return_inverse (gh-5099) a = np.char.chararray(5) a[...] 
= '' a2, a2_inv = np.unique(a, return_inverse=True) assert_array_equal(a2_inv, np.zeros(5)) # test for ticket #9137 a = [] a1_idx = np.unique(a, return_index=True)[1] a2_inv = np.unique(a, return_inverse=True)[1] a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:] assert_equal(a1_idx.dtype, np.intp) assert_equal(a2_inv.dtype, np.intp) assert_equal(a3_idx.dtype, np.intp) assert_equal(a3_inv.dtype, np.intp) # test for ticket 2111 - float a = [2.0, np.nan, 1.0, np.nan] ua = [1.0, 2.0, np.nan] ua_idx = [2, 0, 1] ua_inv = [1, 2, 0, 2] ua_cnt = [1, 1, 2] # order of unique values is not guaranteed assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for ticket 2111 - complex a = [2.0 - 1j, np.nan, 1.0 + 1j, complex(0.0, np.nan), complex(1.0, np.nan)] ua = [1.0 + 1j, 2.0 - 1j, complex(0.0, np.nan)] ua_idx = [2, 0, 3] ua_inv = [1, 2, 0, 2, 2] ua_cnt = [1, 1, 3] # order of unique values is not guaranteed assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for ticket 2111 - datetime64 nat = np.datetime64('nat') a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] ua_idx = [2, 0, 1] ua_inv = [1, 2, 0, 2] ua_cnt = [1, 1, 2] assert_equal(np.unique(a), ua) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for ticket 2111 - timedelta nat = np.timedelta64('nat') a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] ua_idx = [2, 0, 1] ua_inv = [1, 2, 0, 2] ua_cnt = [1, 1, 2] assert_equal(np.unique(a), ua) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for gh-19300 all_nans = [np.nan] * 4 ua = [np.nan] ua_idx = [0] ua_inv = [0, 0, 0, 0] ua_cnt = [4] assert_equal(np.unique(all_nans), ua) assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) def test_unique_zero_sized(self): # test for zero-sized arrays types = self.get_types() types.extend('SU') for dt in types: a = np.array([], dt) b = np.array([], dt) i1 = np.array([], np.int64) i2 = np.array([], np.int64) c = np.array([], np.int64) self.check_all(a, b, i1, i2, c, dt) def test_unique_subclass(self): class Subclass(np.ndarray): pass i1 = [2, 3, 0, 1] i2 = [2, 3, 0, 1, 0, 2, 3] * 10 c = np.multiply([2, 1, 2, 2], 10) # test for numeric arrays types = self.get_types() for dt in types: a = np.array([5, 7, 1, 2, 1, 5, 7] * 10, dtype=dt) b = np.array([1, 2, 5, 7], dtype=dt) aa = Subclass(a.shape, dtype=dt, buffer=a) bb = Subclass(b.shape, dtype=dt, buffer=b) self.check_all(aa, bb, i1, i2, c, dt) def test_unique_byte_string_hash_based(self): # test for byte string arrays arr = ['apple', 'banana', 'apple', 'cherry', 'date', 'banana', 'fig', 'grape'] unq_sorted = ['apple', 'banana', 'cherry', 'date', 
'fig', 'grape'] a1 = unique(arr, sorted=False) # the result varies depending on the impl of std::unordered_set, # so we check them by sorting assert_array_equal(sorted(a1.tolist()), unq_sorted) def test_unique_unicode_string_hash_based(self): # test for unicode string arrays arr = [ 'café', 'cafe', 'café', 'naïve', 'naive', 'résumé', 'naïve', 'resume', 'résumé', ] unq_sorted = ['cafe', 'café', 'naive', 'naïve', 'resume', 'résumé'] a1 = unique(arr, sorted=False) # the result varies depending on the impl of std::unordered_set, # so we check them by sorting assert_array_equal(sorted(a1.tolist()), unq_sorted) def test_unique_vstring_hash_based_equal_nan(self): # test for unicode and nullable string arrays (equal_nan=True) a = np.array([ # short strings 'straße', None, 'strasse', 'straße', None, 'niño', 'nino', 'élève', 'eleve', 'niño', 'élève', # medium strings 'b' * 20, 'ß' * 30, None, 'é' * 30, 'e' * 20, 'ß' * 30, 'n' * 30, 'ñ' * 20, None, 'e' * 20, 'ñ' * 20, # long strings 'b' * 300, 'ß' * 400, None, 'é' * 400, 'e' * 300, 'ß' * 400, 'n' * 400, 'ñ' * 300, None, 'e' * 300, 'ñ' * 300, ], dtype=StringDType(na_object=None) ) unq_sorted_wo_none = [ 'b' * 20, 'b' * 300, 'e' * 20, 'e' * 300, 'eleve', 'nino', 'niño', 'n' * 30, 'n' * 400, 'strasse', 'straße', 'ß' * 30, 'ß' * 400, 'élève', 'é' * 30, 'é' * 400, 'ñ' * 20, 'ñ' * 300, ] a1 = unique(a, sorted=False, equal_nan=True) # the result varies depending on the impl of std::unordered_set, # so we check them by sorting # a1 should have exactly one None count_none = sum(x is None for x in a1) assert_equal(count_none, 1) a1_wo_none = sorted(x for x in a1 if x is not None) assert_array_equal(a1_wo_none, unq_sorted_wo_none) def test_unique_vstring_hash_based_not_equal_nan(self): # test for unicode and nullable string arrays (equal_nan=False) a = np.array([ # short strings 'straße', None, 'strasse', 'straße', None, 'niño', 'nino', 'élève', 'eleve', 'niño', 'élève', # medium strings 'b' * 20, 'ß' * 30, None, 'é' * 30, 'e' * 20, 'ß' * 30, 'n' * 30, 'ñ' * 20, None, 'e' * 20, 'ñ' * 20, # long strings 'b' * 300, 'ß' * 400, None, 'é' * 400, 'e' * 300, 'ß' * 400, 'n' * 400, 'ñ' * 300, None, 'e' * 300, 'ñ' * 300, ], dtype=StringDType(na_object=None) ) unq_sorted_wo_none = [ 'b' * 20, 'b' * 300, 'e' * 20, 'e' * 300, 'eleve', 'nino', 'niño', 'n' * 30, 'n' * 400, 'strasse', 'straße', 'ß' * 30, 'ß' * 400, 'élève', 'é' * 30, 'é' * 400, 'ñ' * 20, 'ñ' * 300, ] a1 = unique(a, sorted=False, equal_nan=False) # the result varies depending on the impl of std::unordered_set, # so we check them by sorting # a1 should have exactly one None count_none = sum(x is None for x in a1) assert_equal(count_none, 6) a1_wo_none = sorted(x for x in a1 if x is not None) assert_array_equal(a1_wo_none, unq_sorted_wo_none) def test_unique_vstring_errors(self): a = np.array( [ 'apple', 'banana', 'apple', None, 'cherry', 'date', 'banana', 'fig', None, 'grape', ] * 2, dtype=StringDType(na_object=None) ) assert_raises(ValueError, unique, a, equal_nan=False) @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"]) def test_unsupported_hash_based(self, arg): """These currently never use the hash-based solution. However, it seems easier to just allow it. When the hash-based solution is added, this test should fail and be replaced with something more comprehensive. 
""" a = np.array([1, 5, 2, 3, 4, 8, 199, 1, 3, 5]) res_not_sorted = np.unique([1, 1], sorted=False, **{arg: True}) res_sorted = np.unique([1, 1], sorted=True, **{arg: True}) # The following should fail without first sorting `res_not_sorted`. for arr, expected in zip(res_not_sorted, res_sorted): assert_array_equal(arr, expected) def test_unique_axis_errors(self): assert_raises(TypeError, self._run_axis_tests, object) assert_raises(TypeError, self._run_axis_tests, [('a', int), ('b', object)]) assert_raises(AxisError, unique, np.arange(10), axis=2) assert_raises(AxisError, unique, np.arange(10), axis=-2) def test_unique_axis_list(self): msg = "Unique failed on list of lists" inp = [[0, 1, 0], [0, 1, 0]] inp_arr = np.asarray(inp) assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) def test_unique_axis(self): types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) types.append('datetime64[D]') types.append('timedelta64[D]') types.append([('a', int), ('b', int)]) types.append([('a', int), ('b', float)]) for dtype in types: self._run_axis_tests(dtype) msg = 'Non-bitwise-equal booleans test failed' data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) result = np.array([[False, True], [True, True]], dtype=bool) assert_array_equal(unique(data, axis=0), result, msg) msg = 'Negative zero equality test failed' data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) result = np.array([[-0.0, 0.0]]) assert_array_equal(unique(data, axis=0), result, msg) @pytest.mark.parametrize("axis", [0, -1]) def test_unique_1d_with_axis(self, axis): x = np.array([4, 3, 2, 3, 2, 1, 2, 2]) uniq = unique(x, axis=axis) assert_array_equal(uniq, [1, 2, 3, 4]) @pytest.mark.parametrize("axis", [None, 0, -1]) def test_unique_inverse_with_axis(self, axis): x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) uniq, inv = unique(x, return_inverse=True, axis=axis) assert_equal(inv.ndim, x.ndim if axis is None else 1) assert_array_equal(x, np.take(uniq, inv, axis=axis)) def test_unique_axis_zeros(self): # issue 15559 single_zero = np.empty(shape=(2, 0), dtype=np.int8) uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True, return_inverse=True, return_counts=True) # there's 1 element of shape (0,) along axis 0 assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(1, 0))) assert_array_equal(idx, np.array([0])) assert_array_equal(inv, np.array([0, 0])) assert_array_equal(cnt, np.array([2])) # there's 0 elements of shape (2,) along axis 1 uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True, return_inverse=True, return_counts=True) assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(2, 0))) assert_array_equal(idx, np.array([])) assert_array_equal(inv, np.array([])) assert_array_equal(cnt, np.array([])) # test a "complicated" shape shape = (0, 2, 0, 3, 0, 4, 0) multiple_zeros = np.empty(shape=shape) for axis in range(len(shape)): expected_shape = list(shape) if shape[axis] == 0: expected_shape[axis] = 0 else: expected_shape[axis] = 1 assert_array_equal(unique(multiple_zeros, axis=axis), np.empty(shape=expected_shape)) def test_unique_masked(self): # issue 8664 x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], dtype='uint8') y = np.ma.masked_equal(x, 0) v = np.unique(y) v2, i, c = np.unique(y, return_index=True, return_counts=True) msg = 'Unique returned different results when asked 
for index' assert_array_equal(v.data, v2.data, msg) assert_array_equal(v.mask, v2.mask, msg) def test_unique_sort_order_with_axis(self): # These tests fail if sorting along axis is done by treating subarrays # as unsigned byte strings. See gh-10495. fmt = "sort order incorrect for integer type '%s'" for dt in 'bhilq': a = np.array([[-1], [0]], dt) b = np.unique(a, axis=0) assert_array_equal(a, b, fmt % dt) def _run_axis_tests(self, dtype): data = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0]]).astype(dtype) msg = 'Unique with 1d array and axis=0 failed' result = np.array([0, 1]) assert_array_equal(unique(data), result.astype(dtype), msg) msg = 'Unique with 2d array and axis=0 failed' result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) msg = 'Unique with 2d array and axis=1 failed' result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) msg = 'Unique with 3d array and axis=2 failed' data3d = np.array([[[1, 1], [1, 0]], [[0, 1], [0, 0]]]).astype(dtype) result = np.take(data3d, [1, 0], axis=2) assert_array_equal(unique(data3d, axis=2), result, msg) uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, return_inverse=True, return_counts=True) msg = "Unique's return_index=True failed with axis=0" assert_array_equal(data[idx], uniq, msg) msg = "Unique's return_inverse=True failed with axis=0" assert_array_equal(np.take(uniq, inv, axis=0), data) msg = "Unique's return_counts=True failed with axis=0" assert_array_equal(cnt, np.array([2, 2]), msg) uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, return_inverse=True, return_counts=True) msg = "Unique's return_index=True failed with axis=1" assert_array_equal(data[:, idx], uniq) msg = "Unique's return_inverse=True failed with axis=1" assert_array_equal(np.take(uniq, inv, axis=1), data) msg = "Unique's return_counts=True failed with axis=1" assert_array_equal(cnt, np.array([2, 1, 1]), msg) def test_unique_nanequals(self): # issue 20326 a = np.array([1, 1, np.nan, np.nan, np.nan]) unq = np.unique(a) not_unq = np.unique(a, equal_nan=False) assert_array_equal(unq, np.array([1, np.nan])) assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) def test_unique_array_api_functions(self): arr = np.array( [ np.nan, 1.0, 0.0, 4.0, -np.nan, -0.0, 1.0, 3.0, 4.0, np.nan, 5.0, -0.0, 1.0, -np.nan, 0.0, ], ) for res_unique_array_api, res_unique in [ ( np.unique_values(arr), np.unique(arr, equal_nan=False) ), ( np.unique_counts(arr), np.unique(arr, return_counts=True, equal_nan=False) ), ( np.unique_inverse(arr), np.unique(arr, return_inverse=True, equal_nan=False) ), ( np.unique_all(arr), np.unique( arr, return_index=True, return_inverse=True, return_counts=True, equal_nan=False ) ) ]: assert len(res_unique_array_api) == len(res_unique) if not isinstance(res_unique_array_api, tuple): res_unique_array_api = (res_unique_array_api,) if not isinstance(res_unique, tuple): res_unique = (res_unique,) for actual, expected in zip(res_unique_array_api, res_unique): # Order of output is not guaranteed assert_equal(np.sort(actual), np.sort(expected)) def test_unique_inverse_shape(self): # Regression test for https://github.com/numpy/numpy/issues/25552 arr = np.array([[1, 2, 3], [2, 3, 1]]) expected_values, expected_inverse = np.unique(arr, return_inverse=True) expected_inverse = expected_inverse.reshape(arr.shape) for func in np.unique_inverse, np.unique_all: result = func(arr) 
assert_array_equal(expected_values, result.values) assert_array_equal(expected_inverse, result.inverse_indices) assert_array_equal(arr, result.values[result.inverse_indices]) @pytest.mark.parametrize( 'data', [[[1, 1, 1], [1, 1, 1]], [1, 3, 2], 1], ) @pytest.mark.parametrize('transpose', [False, True]) @pytest.mark.parametrize('dtype', [np.int32, np.float64]) def test_unique_with_matrix(self, data, transpose, dtype): mat = np.matrix(data).astype(dtype) if transpose: mat = mat.T u = np.unique(mat) expected = np.unique(np.asarray(mat)) assert_array_equal(u, expected, strict=True) def test_unique_axis0_equal_nan_on_1d_array(self): # Test Issue #29336 arr1d = np.array([np.nan, 0, 0, np.nan]) expected = np.array([0., np.nan]) result = np.unique(arr1d, axis=0, equal_nan=True) assert_array_equal(result, expected) def test_unique_axis_minus1_eq_on_1d_array(self): arr1d = np.array([np.nan, 0, 0, np.nan]) expected = np.array([0., np.nan]) result = np.unique(arr1d, axis=-1, equal_nan=True) assert_array_equal(result, expected) def test_unique_axis_float_raises_typeerror(self): arr1d = np.array([np.nan, 0, 0, np.nan]) with pytest.raises(TypeError, match="integer argument expected"): np.unique(arr1d, axis=0.0, equal_nan=False) @pytest.mark.parametrize('dt', [np.dtype('F'), np.dtype('D')]) @pytest.mark.parametrize('values', [[complex(0.0, -1), complex(-0.0, -1), 0], [-200, complex(-200, -0.0), -1], [-25, 3, -5j, complex(-25, -0.0), 3j]]) def test_unique_complex_signed_zeros(self, dt, values): z = np.array(values, dtype=dt) u = np.unique(z) assert len(u) == len(values) - 1
TestUnique
python
getsentry__sentry
src/sentry/workflow_engine/handlers/condition/new_high_priority_issue_handler.py
{ "start": 454, "end": 859 }
class ____(DataConditionHandler[WorkflowEventData]): group = DataConditionHandler.Group.WORKFLOW_TRIGGER comparison_json_schema = {"type": "boolean"} @staticmethod def evaluate_value(event_data: WorkflowEventData, comparison: Any) -> bool: is_new = is_new_event(event_data) return is_new and event_data.group.priority == PriorityLevel.HIGH
NewHighPriorityIssueConditionHandler
python
ray-project__ray
release/ray_release/exception.py
{ "start": 14, "end": 947 }
class ____(enum.Enum): # If you change these, also change the `retry` section # in `build_pipeline.py` and the `reason()` function in `run_e2e.sh` SUCCESS = 0 # Do not set/return this manually UNCAUGHT = 1 # Do not set/return this manually UNSPECIFIED = 2 UNKNOWN = 3 # Hard infra errors (non-retryable) CLI_ERROR = 10 CONFIG_ERROR = 11 SETUP_ERROR = 12 CLUSTER_RESOURCE_ERROR = 13 CLUSTER_ENV_BUILD_ERROR = 14 CLUSTER_STARTUP_ERROR = 15 LOCAL_ENV_SETUP_ERROR = 16 REMOTE_ENV_SETUP_ERROR = 17 FETCH_RESULT_ERROR = 18 ANYSCALE_ERROR = 19 # Infra timeouts (retryable) RAY_WHEELS_TIMEOUT = 30 CLUSTER_ENV_BUILD_TIMEOUT = 31 CLUSTER_STARTUP_TIMEOUT = 32 CLUSTER_WAIT_TIMEOUT = 33 # Command errors - these are considered application errors COMMAND_ERROR = 40 COMMAND_ALERT = 41 COMMAND_TIMEOUT = 42 PREPARE_ERROR = 43
ExitCode
python
django__django
tests/backends/models.py
{ "start": 172, "end": 372 }
class ____(models.Model): root = models.IntegerField() square = models.PositiveIntegerField(db_default=9) def __str__(self): return "%s ** 2 == %s" % (self.root, self.square)
Square
python
dagster-io__dagster
python_modules/dagster/dagster_tests/declarative_automation_tests/scenario_utils/base_scenario.py
{ "start": 2483, "end": 3842 }
class ____(NamedTuple): asset_key: str rule_evaluations: Sequence[tuple[AutoMaterializeRuleEvaluation, Optional[Iterable[str]]]] num_requested: int = 0 num_skipped: int = 0 num_discarded: int = 0 @staticmethod def empty(asset_key: str) -> "AssetEvaluationSpec": return AssetEvaluationSpec( asset_key=asset_key, rule_evaluations=[], num_requested=0, num_skipped=0, num_discarded=0, ) @staticmethod def from_single_rule( asset_key: str, rule: AutoMaterializeRule, evaluation_data: Optional[AutoMaterializeRuleEvaluationData] = None, ) -> "AssetEvaluationSpec": return AssetEvaluationSpec( asset_key=asset_key, rule_evaluations=[ ( AutoMaterializeRuleEvaluation( rule_snapshot=rule.to_snapshot(), evaluation_data=evaluation_data ), None, ) ], num_requested=1 if rule.decision_type == AutoMaterializeDecisionType.MATERIALIZE else 0, num_skipped=1 if rule.decision_type == AutoMaterializeDecisionType.SKIP else 0, num_discarded=1 if rule.decision_type == AutoMaterializeDecisionType.DISCARD else 0, )
AssetEvaluationSpec
python
pypa__pip
src/pip/_vendor/urllib3/connectionpool.py
{ "start": 33197, "end": 40408 }
class ____(HTTPConnectionPool): """ Same as :class:`.HTTPConnectionPool`, but HTTPS. :class:`.HTTPSConnection` uses one of ``assert_fingerprint``, ``assert_hostname`` and ``host`` in this order to verify connections. If ``assert_hostname`` is False, no verification is done. The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl` is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket. """ scheme = "https" ConnectionCls = HTTPSConnection def __init__( self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, key_password=None, ca_certs=None, ssl_version=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None, **conn_kw ): HTTPConnectionPool.__init__( self, host, port, strict, timeout, maxsize, block, headers, retries, _proxy, _proxy_headers, **conn_kw ) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.key_password = key_password self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint def _prepare_conn(self, conn): """ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` and establish the tunnel if proxy is used. """ if isinstance(conn, VerifiedHTTPSConnection): conn.set_cert( key_file=self.key_file, key_password=self.key_password, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, ) conn.ssl_version = self.ssl_version return conn def _prepare_proxy(self, conn): """ Establishes a tunnel connection through HTTP CONNECT. Tunnel connection is established early because otherwise httplib would improperly set Host: header to proxy's IP:port. """ conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) if self.proxy.scheme == "https": conn.tls_in_tls_required = True conn.connect() def _new_conn(self): """ Return a fresh :class:`http.client.HTTPSConnection`. """ self.num_connections += 1 log.debug( "Starting new HTTPS connection (%d): %s:%s", self.num_connections, self.host, self.port or "443", ) if not self.ConnectionCls or self.ConnectionCls is DummyConnection: raise SSLError( "Can't connect to HTTPS URL because the SSL module is not available." ) actual_host = self.host actual_port = self.port if self.proxy is not None: actual_host = self.proxy.host actual_port = self.proxy.port conn = self.ConnectionCls( host=actual_host, port=actual_port, timeout=self.timeout.connect_timeout, strict=self.strict, cert_file=self.cert_file, key_file=self.key_file, key_password=self.key_password, **self.conn_kw ) return self._prepare_conn(conn) def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. if not getattr(conn, "sock", None): # AppEngine might not have `.sock` conn.connect() if not conn.is_verified: warnings.warn( ( "Unverified HTTPS request is being made to host '%s'. " "Adding certificate verification is strongly advised. 
See: " "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#ssl-warnings" % conn.host ), InsecureRequestWarning, ) if getattr(conn, "proxy_is_verified", None) is False: warnings.warn( ( "Unverified HTTPS connection done to an HTTPS proxy. " "Adding certificate verification is strongly advised. See: " "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#ssl-warnings" ), InsecureRequestWarning, ) def connection_from_url(url, **kw): """ Given a url, return an :class:`.ConnectionPool` instance of its host. This is a shortcut for not having to parse out the scheme, host, and port of the url before creating an :class:`.ConnectionPool` instance. :param url: Absolute URL string that must include the scheme. Port is optional. :param \\**kw: Passes additional parameters to the constructor of the appropriate :class:`.ConnectionPool`. Useful for specifying things like timeout, maxsize, headers, etc. Example:: >>> conn = connection_from_url('http://google.com/') >>> r = conn.request('GET', '/') """ scheme, host, port = get_host(url) port = port or port_by_scheme.get(scheme, 80) if scheme == "https": return HTTPSConnectionPool(host, port=port, **kw) else: return HTTPConnectionPool(host, port=port, **kw) def _normalize_host(host, scheme): """ Normalize hosts for comparisons and use with sockets. """ host = normalize_host(host, scheme) # httplib doesn't like it when we include brackets in IPv6 addresses # Specifically, if we include brackets but also pass the port then # httplib crazily doubles up the square brackets on the Host header. # Instead, we need to make sure we never pass ``None`` as the port. # However, for backward compatibility reasons we can't actually # *assert* that. See http://bugs.python.org/issue28539 if host.startswith("[") and host.endswith("]"): host = host[1:-1] return host def _close_pool_connections(pool): """Drains a queue of connections and closes each one.""" try: while True: conn = pool.get(block=False) if conn: conn.close() except queue.Empty: pass # Done.
HTTPSConnectionPool
python
vyperlang__vyper
vyper/exceptions.py
{ "start": 6605, "end": 6659 }
class ____(_BaseVyperException): pass
VyperException
python
spack__spack
lib/spack/spack/util/unparse/unparser.py
{ "start": 3625, "end": 45307 }
class ____(NodeVisitor): """Methods in this class recursively traverse an AST and output source code for the abstract syntax; original formatting is disregarded.""" def __init__(self, py_ver_consistent=False, _avoid_backslashes=False): self._source = [] self._precedences = {} self._type_ignores = {} self._indent = 0 self._in_try_star = False self._py_ver_consistent = py_ver_consistent self._avoid_backslashes = _avoid_backslashes def interleave(self, inter, f, seq): """Call f on each item in seq, calling inter() in between.""" seq = iter(seq) try: f(next(seq)) except StopIteration: pass else: for x in seq: inter() f(x) def items_view(self, traverser, items): """Traverse and separate the given *items* with a comma and append it to the buffer. If *items* is a single item sequence, a trailing comma will be added.""" if len(items) == 1: traverser(items[0]) self.write(",") else: self.interleave(lambda: self.write(", "), traverser, items) def maybe_newline(self): """Adds a newline if it isn't the start of generated source""" if self._source: self.write("\n") def fill(self, text=""): """Indent a piece of text and append it, according to the current indentation level""" self.maybe_newline() self.write(" " * self._indent + text) def write(self, *text): """Add new source parts""" self._source.extend(text) @contextmanager def buffered(self, buffer=None): if buffer is None: buffer = [] original_source = self._source self._source = buffer yield buffer self._source = original_source @contextmanager def block(self, *, extra=None): """A context manager for preparing the source for blocks. It adds the character':', increases the indentation on enter and decreases the indentation on exit. If *extra* is given, it will be directly appended after the colon character. """ self.write(":") if extra: self.write(extra) self._indent += 1 yield self._indent -= 1 @contextmanager def delimit(self, start, end): """A context manager for preparing the source for expressions. It adds *start* to the buffer and enters, after exit it adds *end*.""" self.write(start) yield self.write(end) def delimit_if(self, start, end, condition): if condition: return self.delimit(start, end) else: return nullcontext() def require_parens(self, precedence, node): """Shortcut to adding precedence related parens""" return self.delimit_if("(", ")", self.get_precedence(node) > precedence) def get_precedence(self, node): return self._precedences.get(node, _Precedence.TEST) def set_precedence(self, precedence, *nodes): for node in nodes: self._precedences[node] = precedence def get_raw_docstring(self, node): """If a docstring node is found in the body of the *node* parameter, return that docstring node, None otherwise. Logic mirrored from ``_PyAST_GetDocString``.""" if ( not isinstance(node, (ast.AsyncFunctionDef, ast.FunctionDef, ast.ClassDef, ast.Module)) or len(node.body) < 1 ): return None node = node.body[0] if not isinstance(node, ast.Expr): return None node = node.value if _is_str_literal(node): return node def get_type_comment(self, node): # Python 3.8 introduced type_comment # (enabled on compile(... 
ast.PyCF_TYPE_COMMENTS)) comment = self._type_ignores.get(node.lineno) or getattr(node, "type_comment", None) if comment is not None: return f" # type: {comment}" def traverse(self, node): if isinstance(node, list): for item in node: self.traverse(item) else: super().visit(node) # Note: as visit() resets the output text, do NOT rely on # NodeVisitor.generic_visit to handle any nodes (as it calls back in to # the subclass visit() method, which resets self._source to an empty list) def visit(self, node): """Outputs a source code string that, if converted back to an ast (using ast.parse) will generate an AST equivalent to *node*""" self._source = [] self.traverse(node) return "".join(self._source) def _write_docstring_and_traverse_body(self, node): docstring = self.get_raw_docstring(node) if docstring: self._write_docstring(docstring) self.traverse(node.body[1:]) else: self.traverse(node.body) def visit_Module(self, node): # Python 3.8 introduced types self._type_ignores = { ignore.lineno: f"ignore{ignore.tag}" for ignore in getattr(node, "type_ignores", ()) } self._write_docstring_and_traverse_body(node) self._type_ignores.clear() def visit_FunctionType(self, node): with self.delimit("(", ")"): self.interleave(lambda: self.write(", "), self.traverse, node.argtypes) self.write(" -> ") self.traverse(node.returns) def visit_Expr(self, node): self.fill() self.set_precedence(_Precedence.YIELD, node.value) self.traverse(node.value) def visit_NamedExpr(self, node): with self.require_parens(_Precedence.NAMED_EXPR, node): self.set_precedence(_Precedence.ATOM, node.target, node.value) self.traverse(node.target) self.write(" := ") self.traverse(node.value) def visit_Import(self, node): self.fill("import ") self.interleave(lambda: self.write(", "), self.traverse, node.names) def visit_ImportFrom(self, node): self.fill("from ") self.write("." 
* (node.level or 0)) if node.module: self.write(node.module) self.write(" import ") self.interleave(lambda: self.write(", "), self.traverse, node.names) def visit_Assign(self, node): self.fill() for target in node.targets: self.set_precedence(_Precedence.TUPLE, target) self.traverse(target) self.write(" = ") self.traverse(node.value) type_comment = self.get_type_comment(node) if type_comment: self.write(type_comment) def visit_AugAssign(self, node): self.fill() self.traverse(node.target) self.write(" " + self.binop[node.op.__class__.__name__] + "= ") self.traverse(node.value) def visit_AnnAssign(self, node): self.fill() with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)): self.traverse(node.target) self.write(": ") self.traverse(node.annotation) if node.value: self.write(" = ") self.traverse(node.value) def visit_Return(self, node): self.fill("return") if node.value: self.write(" ") self.traverse(node.value) def visit_Pass(self, node): self.fill("pass") def visit_Break(self, node): self.fill("break") def visit_Continue(self, node): self.fill("continue") def visit_Delete(self, node): self.fill("del ") self.interleave(lambda: self.write(", "), self.traverse, node.targets) def visit_Assert(self, node): self.fill("assert ") self.traverse(node.test) if node.msg: self.write(", ") self.traverse(node.msg) def visit_Global(self, node): self.fill("global ") self.interleave(lambda: self.write(", "), self.write, node.names) def visit_Nonlocal(self, node): self.fill("nonlocal ") self.interleave(lambda: self.write(", "), self.write, node.names) def visit_Await(self, node): with self.require_parens(_Precedence.AWAIT, node): self.write("await") if node.value: self.write(" ") self.set_precedence(_Precedence.ATOM, node.value) self.traverse(node.value) def visit_Yield(self, node): with self.require_parens(_Precedence.YIELD, node): self.write("yield") if node.value: self.write(" ") self.set_precedence(_Precedence.ATOM, node.value) self.traverse(node.value) def visit_YieldFrom(self, node): with self.require_parens(_Precedence.YIELD, node): self.write("yield from ") if not node.value: raise ValueError("Node can't be used without a value attribute.") self.set_precedence(_Precedence.ATOM, node.value) self.traverse(node.value) def visit_Raise(self, node): self.fill("raise") if not node.exc: if node.cause: raise ValueError("Node can't use cause without an exception.") return self.write(" ") self.traverse(node.exc) if node.cause: self.write(" from ") self.traverse(node.cause) def do_visit_try(self, node): self.fill("try") with self.block(): self.traverse(node.body) for ex in node.handlers: self.traverse(ex) if node.orelse: self.fill("else") with self.block(): self.traverse(node.orelse) if node.finalbody: self.fill("finally") with self.block(): self.traverse(node.finalbody) def visit_Try(self, node): prev_in_try_star = self._in_try_star try: self._in_try_star = False self.do_visit_try(node) finally: self._in_try_star = prev_in_try_star def visit_TryStar(self, node): prev_in_try_star = self._in_try_star try: self._in_try_star = True self.do_visit_try(node) finally: self._in_try_star = prev_in_try_star def visit_ExceptHandler(self, node): self.fill("except*" if self._in_try_star else "except") if node.type: self.write(" ") self.traverse(node.type) if node.name: self.write(" as ") self.write(node.name) with self.block(): self.traverse(node.body) def visit_ClassDef(self, node): self.maybe_newline() for deco in node.decorator_list: self.fill("@") self.traverse(deco) self.fill("class " + 
node.name) if hasattr(node, "type_params"): self._type_params_helper(node.type_params) with self.delimit_if("(", ")", condition=node.bases or node.keywords): comma = False for e in node.bases: if comma: self.write(", ") else: comma = True self.traverse(e) for e in node.keywords: if comma: self.write(", ") else: comma = True self.traverse(e) with self.block(): self._write_docstring_and_traverse_body(node) def visit_FunctionDef(self, node): self._function_helper(node, "def") def visit_AsyncFunctionDef(self, node): self._function_helper(node, "async def") def _function_helper(self, node, fill_suffix): self.maybe_newline() for deco in node.decorator_list: self.fill("@") self.traverse(deco) def_str = fill_suffix + " " + node.name self.fill(def_str) if hasattr(node, "type_params"): self._type_params_helper(node.type_params) with self.delimit("(", ")"): self.traverse(node.args) if node.returns: self.write(" -> ") self.traverse(node.returns) with self.block(extra=self.get_type_comment(node)): self._write_docstring_and_traverse_body(node) def _type_params_helper(self, type_params): if type_params is not None and len(type_params) > 0: with self.delimit("[", "]"): self.interleave(lambda: self.write(", "), self.traverse, type_params) def visit_TypeVar(self, node): self.write(node.name) if node.bound: self.write(": ") self.traverse(node.bound) # Python 3.13 introduced default_value if getattr(node, "default_value", False): self.write(" = ") self.traverse(node.default_value) def visit_TypeVarTuple(self, node): self.write("*" + node.name) # Python 3.13 introduced default_value if getattr(node, "default_value", False): self.write(" = ") self.traverse(node.default_value) def visit_ParamSpec(self, node): self.write("**" + node.name) # Python 3.13 introduced default_value if getattr(node, "default_value", False): self.write(" = ") self.traverse(node.default_value) def visit_TypeAlias(self, node): self.fill("type ") self.traverse(node.name) self._type_params_helper(node.type_params) self.write(" = ") self.traverse(node.value) def visit_For(self, node): self._for_helper("for ", node) def visit_AsyncFor(self, node): self._for_helper("async for ", node) def _for_helper(self, fill, node): self.fill(fill) self.set_precedence(_Precedence.TUPLE, node.target) self.traverse(node.target) self.write(" in ") self.traverse(node.iter) with self.block(extra=self.get_type_comment(node)): self.traverse(node.body) if node.orelse: self.fill("else") with self.block(): self.traverse(node.orelse) def visit_If(self, node): self.fill("if ") self.traverse(node.test) with self.block(): self.traverse(node.body) # collapse nested ifs into equivalent elifs. 
while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If): node = node.orelse[0] self.fill("elif ") self.traverse(node.test) with self.block(): self.traverse(node.body) # final else if node.orelse: self.fill("else") with self.block(): self.traverse(node.orelse) def visit_While(self, node): self.fill("while ") self.traverse(node.test) with self.block(): self.traverse(node.body) if node.orelse: self.fill("else") with self.block(): self.traverse(node.orelse) def visit_With(self, node): self.fill("with ") self.interleave(lambda: self.write(", "), self.traverse, node.items) with self.block(extra=self.get_type_comment(node)): self.traverse(node.body) def visit_AsyncWith(self, node): self.fill("async with ") self.interleave(lambda: self.write(", "), self.traverse, node.items) with self.block(extra=self.get_type_comment(node)): self.traverse(node.body) def _str_literal_helper( self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False ): """Helper for writing string literals, minimizing escapes. Returns the tuple (string literal to write, possible quote types). """ def escape_char(c): # \n and \t are non-printable, but we only escape them if # escape_special_whitespace is True if not escape_special_whitespace and c in "\n\t": return c # Always escape backslashes and other non-printable characters if c == "\\" or not c.isprintable(): return c.encode("unicode_escape").decode("ascii") return c escaped_string = "".join(map(escape_char, string)) possible_quotes = quote_types if "\n" in escaped_string: possible_quotes = [q for q in possible_quotes if q in _MULTI_QUOTES] possible_quotes = [q for q in possible_quotes if q not in escaped_string] if not possible_quotes: # If there aren't any possible_quotes, fallback to using repr # on the original string. Try to use a quote from quote_types, # e.g., so that we use triple quotes for docstrings. string = repr(string) quote = next((q for q in quote_types if string[0] in q), string[0]) return string[1:-1], [quote] if escaped_string: # Sort so that we prefer '''"''' over """\"""" possible_quotes.sort(key=lambda q: q[0] == escaped_string[-1]) # If we're using triple quotes and we'd need to escape a final # quote, escape it if possible_quotes[0][0] == escaped_string[-1]: assert len(possible_quotes[0]) == 3 escaped_string = escaped_string[:-1] + "\\" + escaped_string[-1] return escaped_string, possible_quotes def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES): """Write string literal value with a best effort attempt to avoid backslashes.""" string, quote_types = self._str_literal_helper(string, quote_types=quote_types) quote_type = quote_types[0] self.write(f"{quote_type}{string}{quote_type}") # Python < 3.8. Num, Str, Bytes, NameConstant, Ellipsis replaced with Constant # https://github.com/python/cpython/commit/3f22811fef73aec848d961593d95fa877f77ecbf if sys.version_info < (3, 8): def visit_Num(self, node): repr_n = repr(node.n) self.write(repr_n.replace("inf", _INFSTR)) def visit_Str(self, node): self._write_constant(node.s) def visit_Bytes(self, node): self.write(repr(node.s)) def visit_NameConstant(self, node): self.write(repr(node.value)) def visit_Ellipsis(self, node): self.write("...") def visit_JoinedStr(self, node): self.write("f") # Python 3.12 added support for backslashes inside format parts. # We need to keep adding backslashes for python < 3.11 compat. 
if self._avoid_backslashes: with self.buffered() as buffer: self._write_fstring_inner(node) return self._write_str_avoiding_backslashes("".join(buffer)) fstring_parts = [] for value in node.values: with self.buffered() as buffer: self._write_fstring_inner(value) fstring_parts.append(("".join(buffer), _is_str_literal(value))) new_fstring_parts = [] quote_types = list(_ALL_QUOTES) fallback_to_repr = False for value, is_constant in fstring_parts: # Python 3.12 allows `f'{''}'`. # But we unparse to `f'{""}'` for < 3.12 compat. if True: value, new_quote_types = self._str_literal_helper( value, quote_types=quote_types, escape_special_whitespace=is_constant ) if set(new_quote_types).isdisjoint(quote_types): fallback_to_repr = True break quote_types = new_quote_types elif "\n" in value: quote_types = [q for q in quote_types if q in _MULTI_QUOTES] assert quote_types new_fstring_parts.append(value) if fallback_to_repr: # If we weren't able to find a quote type that works for all parts # of the JoinedStr, fallback to using repr and triple single quotes. quote_types = ["'''"] new_fstring_parts.clear() for value, is_constant in fstring_parts: # Python 3.12 allows `f'{''}'`. # We need to unparse to `f'{""}'` for < 3.12 compat. if True: value = repr('"' + value) # force repr to use single quotes expected_prefix = "'\"" assert value.startswith(expected_prefix), repr(value) value = value[len(expected_prefix) : -1] new_fstring_parts.append(value) value = "".join(new_fstring_parts) quote_type = quote_types[0] self.write(f"{quote_type}{value}{quote_type}") def _write_fstring_inner(self, node, is_format_spec=False): if isinstance(node, JoinedStr): # for both the f-string itself, and format_spec for value in node.values: self._write_fstring_inner(value, is_format_spec=is_format_spec) elif isinstance(node, FormattedValue): self.visit_FormattedValue(node) else: # str literal maybe_string = _get_str_literal_value(node) if maybe_string is None: raise ValueError(f"Unexpected node inside JoinedStr, {node!r}") value = maybe_string.replace("{", "{{").replace("}", "}}") if is_format_spec: value = value.replace("\\", "\\\\") value = value.replace("'", "\\'") value = value.replace('"', '\\"') value = value.replace("\n", "\\n") self.write(value) def visit_FormattedValue(self, node): def unparse_inner(inner): # Python <= 3.11 does not support backslashes inside format parts unparser = type(self)(_avoid_backslashes=True) unparser.set_precedence(_Precedence.TEST.next(), inner) return unparser.visit(inner) with self.delimit("{", "}"): expr = unparse_inner(node.value) # Python <= 3.11 does not support backslash in formats part if "\\" in expr: raise ValueError( "Unable to avoid backslash in f-string expression part (python 3.11)" ) if expr.startswith("{"): # Separate pair of opening brackets as "{ {" self.write(" ") self.write(expr) if node.conversion != -1: self.write(f"!{chr(node.conversion)}") if node.format_spec: self.write(":") self._write_fstring_inner(node.format_spec, is_format_spec=True) def visit_Name(self, node): self.write(node.id) def _write_docstring(self, node): self.fill() # Don't emit `u""` because it's not avail in python AST <= 3.7 # Ubuntu 18's Python 3.6 doesn't have "kind" if not self._py_ver_consistent and getattr(node, "kind", None) == "u": self.write("u") # Python 3.8 replaced Str with Constant value = _get_str_literal_value(node) if value is None: raise ValueError(f"Node {node!r} is not a string literal.") self._write_str_avoiding_backslashes(value, quote_types=_MULTI_QUOTES) def _write_constant(self, 
value): if isinstance(value, (float, complex)): # Substitute overflowing decimal literal for AST infinities, # and inf - inf for NaNs. self.write( repr(value).replace("inf", _INFSTR).replace("nan", f"({_INFSTR}-{_INFSTR})") ) # Python <= 3.11 does not support backslashes inside format parts elif self._avoid_backslashes and isinstance(value, str): self._write_str_avoiding_backslashes(value) else: self.write(repr(value)) def visit_Constant(self, node): value = node.value if isinstance(value, tuple): with self.delimit("(", ")"): self.items_view(self._write_constant, value) elif value is ...: self.write("...") else: # Don't emit `u""` because it's not avail in python AST <= 3.7 # Ubuntu 18's Python 3.6 doesn't have "kind" if not self._py_ver_consistent and getattr(node, "kind", None) == "u": self.write("u") self._write_constant(node.value) def visit_List(self, node): with self.delimit("[", "]"): self.interleave(lambda: self.write(", "), self.traverse, node.elts) def visit_ListComp(self, node): with self.delimit("[", "]"): self.traverse(node.elt) for gen in node.generators: self.traverse(gen) def visit_GeneratorExp(self, node): with self.delimit("(", ")"): self.traverse(node.elt) for gen in node.generators: self.traverse(gen) def visit_SetComp(self, node): with self.delimit("{", "}"): self.traverse(node.elt) for gen in node.generators: self.traverse(gen) def visit_DictComp(self, node): with self.delimit("{", "}"): self.traverse(node.key) self.write(": ") self.traverse(node.value) for gen in node.generators: self.traverse(gen) def visit_comprehension(self, node): if node.is_async: self.write(" async for ") else: self.write(" for ") self.set_precedence(_Precedence.TUPLE, node.target) self.traverse(node.target) self.write(" in ") self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs) self.traverse(node.iter) for if_clause in node.ifs: self.write(" if ") self.traverse(if_clause) def visit_IfExp(self, node): with self.require_parens(_Precedence.TEST, node): self.set_precedence(_Precedence.TEST.next(), node.body, node.test) self.traverse(node.body) self.write(" if ") self.traverse(node.test) self.write(" else ") self.set_precedence(_Precedence.TEST, node.orelse) self.traverse(node.orelse) def visit_Set(self, node): if node.elts: with self.delimit("{", "}"): self.interleave(lambda: self.write(", "), self.traverse, node.elts) else: # `{}` would be interpreted as a dictionary literal, and # `set` might be shadowed. 
Thus: self.write("{*()}") def visit_Dict(self, node): def write_key_value_pair(k, v): self.traverse(k) self.write(": ") self.traverse(v) def write_item(item): k, v = item if k is None: # for dictionary unpacking operator in dicts {**{'y': 2}} # see PEP 448 for details self.write("**") self.set_precedence(_Precedence.EXPR, v) self.traverse(v) else: write_key_value_pair(k, v) with self.delimit("{", "}"): self.interleave(lambda: self.write(", "), write_item, zip(node.keys, node.values)) def visit_Tuple(self, node): with self.delimit_if( "(", ")", # Don't drop redundant parenthesis to mimic python <= 3.10 self._py_ver_consistent or len(node.elts) == 0 or self.get_precedence(node) > _Precedence.TUPLE, ): self.items_view(self.traverse, node.elts) unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"} unop_precedence = { "not": _Precedence.NOT, "~": _Precedence.FACTOR, "+": _Precedence.FACTOR, "-": _Precedence.FACTOR, } def visit_UnaryOp(self, node): operator = self.unop[node.op.__class__.__name__] operator_precedence = self.unop_precedence[operator] with self.require_parens(operator_precedence, node): self.write(operator) # factor prefixes (+, -, ~) shouldn't be separated # from the value they belong, (e.g: +1 instead of + 1) if operator_precedence is not _Precedence.FACTOR: self.write(" ") self.set_precedence(operator_precedence, node.operand) self.traverse(node.operand) binop = { "Add": "+", "Sub": "-", "Mult": "*", "MatMult": "@", "Div": "/", "Mod": "%", "LShift": "<<", "RShift": ">>", "BitOr": "|", "BitXor": "^", "BitAnd": "&", "FloorDiv": "//", "Pow": "**", } binop_precedence = { "+": _Precedence.ARITH, "-": _Precedence.ARITH, "*": _Precedence.TERM, "@": _Precedence.TERM, "/": _Precedence.TERM, "%": _Precedence.TERM, "<<": _Precedence.SHIFT, ">>": _Precedence.SHIFT, "|": _Precedence.BOR, "^": _Precedence.BXOR, "&": _Precedence.BAND, "//": _Precedence.TERM, "**": _Precedence.POWER, } binop_rassoc = frozenset(("**",)) def visit_BinOp(self, node): operator = self.binop[node.op.__class__.__name__] operator_precedence = self.binop_precedence[operator] with self.require_parens(operator_precedence, node): if operator in self.binop_rassoc: left_precedence = operator_precedence.next() right_precedence = operator_precedence else: left_precedence = operator_precedence right_precedence = operator_precedence.next() self.set_precedence(left_precedence, node.left) self.traverse(node.left) self.write(f" {operator} ") self.set_precedence(right_precedence, node.right) self.traverse(node.right) cmpops = { "Eq": "==", "NotEq": "!=", "Lt": "<", "LtE": "<=", "Gt": ">", "GtE": ">=", "Is": "is", "IsNot": "is not", "In": "in", "NotIn": "not in", } def visit_Compare(self, node): with self.require_parens(_Precedence.CMP, node): self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators) self.traverse(node.left) for o, e in zip(node.ops, node.comparators): self.write(" " + self.cmpops[o.__class__.__name__] + " ") self.traverse(e) boolops = {"And": "and", "Or": "or"} boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR} def visit_BoolOp(self, node): operator = self.boolops[node.op.__class__.__name__] operator_precedence = self.boolop_precedence[operator] def increasing_level_traverse(node): nonlocal operator_precedence operator_precedence = operator_precedence.next() self.set_precedence(operator_precedence, node) self.traverse(node) with self.require_parens(operator_precedence, node): s = f" {operator} " self.interleave(lambda: self.write(s), increasing_level_traverse, node.values) 
def visit_Attribute(self, node: ast.Attribute): self.set_precedence(_Precedence.ATOM, node.value) self.traverse(node.value) # Special case: 3.__abs__() is a syntax error, so if node.value # is an integer literal then we need to either parenthesize # it or add an extra space to get 3 .__abs__(). if _is_int_literal(node.value): self.write(" ") self.write(".") self.write(node.attr) def visit_Call(self, node): self.set_precedence(_Precedence.ATOM, node.func) self.traverse(node.func) with self.delimit("(", ")"): comma = False for e in node.args: if comma: self.write(", ") else: comma = True self.traverse(e) for e in node.keywords: if comma: self.write(", ") else: comma = True self.traverse(e) def visit_Subscript(self, node): def is_non_empty_tuple(slice_value): return isinstance(slice_value, Tuple) and slice_value.elts self.set_precedence(_Precedence.ATOM, node.value) self.traverse(node.value) with self.delimit("[", "]"): # Python >= 3.11 supports `a[42, *b]` (same AST as a[(42, *b)]), # but this is syntax error in 3.10. # So, always emit parenthesis `a[(42, *b)]` if is_non_empty_non_star_tuple(node.slice): self.items_view(self.traverse, node.slice.elts) else: self.traverse(node.slice) def visit_Starred(self, node): self.write("*") self.set_precedence(_Precedence.EXPR, node.value) self.traverse(node.value) # Python 3.9 simplified Subscript(Index(value)) to Subscript(value) # https://github.com/python/cpython/commit/13d52c268699f199a8e917a0f1dc4c51e5346c42 def visit_Index(self, node): if is_non_empty_non_star_tuple(node.value): self.items_view(self.traverse, node.value.elts) else: self.traverse(node.value) def visit_Slice(self, node): if node.lower: self.traverse(node.lower) self.write(":") if node.upper: self.traverse(node.upper) if node.step: self.write(":") self.traverse(node.step) def visit_Match(self, node): self.fill("match ") self.traverse(node.subject) with self.block(): for case in node.cases: self.traverse(case) # Python 3.9 replaced ExtSlice(slices) with Tuple(slices, Load()) # https://github.com/python/cpython/commit/13d52c268699f199a8e917a0f1dc4c51e5346c42 def visit_ExtSlice(self, node): self.interleave(lambda: self.write(", "), self.traverse, node.dims) def visit_arg(self, node): self.write(node.arg) if node.annotation: self.write(": ") self.traverse(node.annotation) def visit_arguments(self, node): first = True # normal arguments # Python 3.8 introduced position-only arguments (PEP 570) all_args = getattr(node, "posonlyargs", []) + node.args defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults for index, elements in enumerate(zip(all_args, defaults), 1): a, d = elements if first: first = False else: self.write(", ") self.traverse(a) if d: self.write("=") self.traverse(d) # Python 3.8 introduced position-only arguments (PEP 570) if index == len(getattr(node, "posonlyargs", ())): self.write(", /") # varargs, or bare '*' if no varargs but keyword-only arguments present if node.vararg or node.kwonlyargs: if first: first = False else: self.write(", ") self.write("*") if node.vararg: self.write(node.vararg.arg) if node.vararg.annotation: self.write(": ") self.traverse(node.vararg.annotation) # keyword-only arguments if node.kwonlyargs: for a, d in zip(node.kwonlyargs, node.kw_defaults): self.write(", ") self.traverse(a) if d: self.write("=") self.traverse(d) # kwargs if node.kwarg: if first: first = False else: self.write(", ") self.write("**" + node.kwarg.arg) if node.kwarg.annotation: self.write(": ") self.traverse(node.kwarg.annotation) def visit_keyword(self, 
node): if node.arg is None: self.write("**") else: self.write(node.arg) self.write("=") self.traverse(node.value) def visit_Lambda(self, node): with self.require_parens(_Precedence.TEST, node): self.write("lambda") with self.buffered() as buffer: self.traverse(node.args) # Don't omit extra space to keep old package hash # (extra space was removed in python 3.11) if buffer or self._py_ver_consistent: self.write(" ", *buffer) self.write(": ") self.set_precedence(_Precedence.TEST, node.body) self.traverse(node.body) def visit_alias(self, node): self.write(node.name) if node.asname: self.write(" as " + node.asname) def visit_withitem(self, node): self.traverse(node.context_expr) if node.optional_vars: self.write(" as ") self.traverse(node.optional_vars) def visit_match_case(self, node): self.fill("case ") self.traverse(node.pattern) if node.guard: self.write(" if ") self.traverse(node.guard) with self.block(): self.traverse(node.body) def visit_MatchValue(self, node): self.traverse(node.value) def visit_MatchSingleton(self, node): self._write_constant(node.value) def visit_MatchSequence(self, node): with self.delimit("[", "]"): self.interleave(lambda: self.write(", "), self.traverse, node.patterns) def visit_MatchStar(self, node): name = node.name if name is None: name = "_" self.write(f"*{name}") def visit_MatchMapping(self, node): def write_key_pattern_pair(pair): k, p = pair self.traverse(k) self.write(": ") self.traverse(p) with self.delimit("{", "}"): keys = node.keys self.interleave( lambda: self.write(", "), write_key_pattern_pair, # (zip strict is >= Python 3.10) zip(keys, node.patterns), ) rest = node.rest if rest is not None: if keys: self.write(", ") self.write(f"**{rest}") def visit_MatchClass(self, node): self.set_precedence(_Precedence.ATOM, node.cls) self.traverse(node.cls) with self.delimit("(", ")"): patterns = node.patterns self.interleave(lambda: self.write(", "), self.traverse, patterns) attrs = node.kwd_attrs if attrs: def write_attr_pattern(pair): attr, pattern = pair self.write(f"{attr}=") self.traverse(pattern) if patterns: self.write(", ") self.interleave( lambda: self.write(", "), write_attr_pattern, # (zip strict is >= Python 3.10) zip(attrs, node.kwd_patterns), ) def visit_MatchAs(self, node): name = node.name pattern = node.pattern if name is None: self.write("_") elif pattern is None: self.write(node.name) else: with self.require_parens(_Precedence.TEST, node): self.set_precedence(_Precedence.BOR, node.pattern) self.traverse(node.pattern) self.write(f" as {node.name}") def visit_MatchOr(self, node): with self.require_parens(_Precedence.BOR, node): self.set_precedence(_Precedence.BOR.next(), *node.patterns) self.interleave(lambda: self.write(" | "), self.traverse, node.patterns) if sys.version_info >= (3, 8): def _is_int_literal(node: ast.AST) -> bool: """Check if a node represents a literal int.""" return isinstance(node, ast.Constant) and isinstance(node.value, int) def _is_str_literal(node: ast.AST) -> bool: """Check if a node represents a literal str.""" return isinstance(node, ast.Constant) and isinstance(node.value, str) def _get_str_literal_value(node: ast.AST) -> Optional[str]: """Get the string value of a literal str node.""" if isinstance(node, ast.Constant) and isinstance(node.value, str): return node.value return None else: def _is_int_literal(node: ast.AST) -> bool: """Check if a node represents a literal int.""" return isinstance(node, ast.Num) and isinstance(node.n, int) def _is_str_literal(node: ast.AST) -> bool: """Check if a node represents a 
literal str.""" return isinstance(node, ast.Str) def _get_str_literal_value(node: ast.AST) -> Optional[str]: """Get the string value of a literal str node.""" return node.s if isinstance(node, ast.Str) else None
Unparser
python
dask__distributed
distributed/deploy/tests/test_spec_cluster.py
{ "start": 12400, "end": 17011 }
class ____(Worker, ProcessInterface): def __init__(self, *args, n=1, name=None, nthreads=None, **kwargs): self.workers = [ Worker( *args, name=str(name) + "-" + str(i), nthreads=nthreads // n, **kwargs ) for i in range(n) ] self._startup_lock = asyncio.Lock() @property def status(self): return self.workers[0].status @status.setter def status(self, value): raise NotImplementedError() def __str__(self): return "<MultiWorker n=%d>" % len(self.workers) __repr__ = __str__ async def start_unsafe(self): await asyncio.gather(*self.workers) async def close(self): await asyncio.gather(*(w.close() for w in self.workers)) @gen_test() async def test_MultiWorker(): async with SpecCluster( scheduler=scheduler, worker={ "cls": MultiWorker, "options": {"n": 2, "nthreads": 4, "memory_limit": "4 GB"}, "group": ["-0", "-1"], }, asynchronous=True, ) as cluster: s = cluster.scheduler async with Client(cluster, asynchronous=True) as client: cluster.scale(2) await cluster assert len(cluster.worker_spec) == 2 await client.wait_for_workers(4) while len(cluster.scheduler_info["workers"]) < 4: await asyncio.sleep(0.01) while "workers=4" not in repr(cluster): await asyncio.sleep(0.1) workers_line = re.search("(Workers.+)", cluster._repr_html_()).group(1) assert re.match("Workers.*4", workers_line) cluster.scale(1) await cluster assert len(s.workers) == 2 cluster.scale(memory="6GB") await cluster assert len(cluster.worker_spec) == 2 assert len(s.workers) == 4 assert cluster.plan == {ws.name for ws in s.workers.values()} cluster.scale(cores=10) await cluster assert len(cluster.workers) == 3 adapt = cluster.adapt(minimum=0, maximum=4) for _ in range(adapt.wait_count): # relax down to 0 workers await adapt.adapt() await cluster assert not s.workers future = client.submit(lambda x: x + 1, 10) await future assert len(cluster.workers) == 1 @gen_cluster(client=True, nthreads=[]) async def test_run_spec(c, s): workers = await run_spec(worker_spec, s.address) await c.wait_for_workers(len(worker_spec)) await asyncio.gather(*(w.close() for w in workers.values())) assert not s.workers await asyncio.gather(*(w.finished() for w in workers.values())) @gen_test() async def test_run_spec_cluster_worker_names(): worker = {"cls": Worker, "options": {"nthreads": 1}} class MyCluster(SpecCluster): def _new_worker_name(self, worker_number): return f"prefix-{self.name}-{worker_number}-suffix" async with SpecCluster( asynchronous=True, scheduler=scheduler, worker=worker ) as cluster: cluster.scale(2) await cluster worker_names = [0, 1] assert list(cluster.worker_spec) == worker_names assert sorted(list(cluster.workers)) == worker_names async with MyCluster( asynchronous=True, scheduler=scheduler, worker=worker, name="test-name" ) as cluster: worker_names = ["prefix-test-name-0-suffix", "prefix-test-name-1-suffix"] cluster.scale(2) await cluster assert list(cluster.worker_spec) == worker_names assert sorted(list(cluster.workers)) == worker_names @gen_test() async def test_bad_close(): with warnings.catch_warnings(record=True) as record: cluster = SpecCluster( workers=worker_spec, scheduler=scheduler, asynchronous=True ) await cluster.close() assert not record @gen_test() async def test_shutdown_scheduler_disabled(): async with SpecCluster( workers=worker_spec, scheduler=scheduler, asynchronous=True, shutdown_scheduler=False, ) as cluster: s = cluster.scheduler assert isinstance(s, Scheduler) assert s.status == Status.running @gen_test() async def test_shutdown_scheduler(): async with SpecCluster( workers=worker_spec, scheduler=scheduler, 
asynchronous=True ) as cluster: s = cluster.scheduler assert isinstance(s, Scheduler) assert s.status == Status.closed
MultiWorker
python
gevent__gevent
src/greentest/3.14/test_httpservers.py
{ "start": 46327, "end": 46946 }
class ____(SimpleHTTPRequestHandler): def __init__(self, directory=None): request = mock.Mock() request.makefile.return_value = BytesIO() super().__init__(request, None, None, directory=directory) self.get_called = False self.protocol_version = "HTTP/1.1" def do_GET(self): self.get_called = True self.send_response(HTTPStatus.OK) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(b'<html><body>Data</body></html>\r\n') def log_message(self, format, *args): pass
SocketlessRequestHandler
python
pypa__warehouse
tests/unit/helpdesk/test_services.py
{ "start": 6850, "end": 7621 }
class ____: """Common tests for the service interface.""" def test_verify_service_class(self, service_class): assert verifyClass(IAdminNotificationService, service_class) def test_create_service(self, service_class): context = None request = pretend.stub( http=pretend.stub(), log=pretend.stub( debug=pretend.call_recorder(lambda msg: None), ), registry=pretend.stub( settings={ "helpdesk.notification_service_url": "https://webhook.example/1234", } ), ) service = service_class.create_service(context, request) assert isinstance(service, service_class)
TestAdminNotificationService
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1138871, "end": 1140334 }
class ____(sgqlc.types.Type, Node, RepositoryNode): """A category for discussions in a repository.""" __schema__ = github_schema __field_names__ = ("created_at", "description", "emoji", "emoji_html", "is_answerable", "name", "slug", "updated_at") created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" description = sgqlc.types.Field(String, graphql_name="description") """A description of this category.""" emoji = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="emoji") """An emoji representing this category.""" emoji_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="emojiHTML") """This category's emoji rendered as HTML.""" is_answerable = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isAnswerable") """Whether or not discussions in this category support choosing an answer with the markDiscussionCommentAsAnswer mutation. """ name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") """The name of this category.""" slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug") """The slug of this category.""" updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt") """Identifies the date and time when the object was last updated."""
DiscussionCategory
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/kubernetes_engine.py
{ "start": 6789, "end": 12058 }
class ____(GKEOperatorMixin, GoogleCloudBaseOperator): """ Deletes the cluster, including the Kubernetes endpoint and all worker nodes. To delete a certain cluster, you must specify the ``project_id``, the ``cluster_name`` of the cluster, the ``location`` that the cluster is in, and the ``task_id``. **Operator Creation**: :: operator = GKEClusterDeleteOperator( task_id='cluster_delete', project_id='my-project', location='cluster-location' cluster_name='cluster-name') .. seealso:: For more detail about deleting clusters have a look at the reference: https://google-cloud-python.readthedocs.io/en/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.delete_cluster .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:GKEDeleteClusterOperator` :param location: The name of the Google Kubernetes Engine zone or region in which the cluster resides, e.g. 'us-central1-a' :param cluster_name: The name of the Google Kubernetes Engine cluster. :param use_internal_ip: Use the internal IP address as the endpoint. :param use_dns_endpoint: Use the DNS address as the endpoint. :param project_id: The Google Developers Console project id :param gcp_conn_id: The Google cloud connection id to use. This allows for users to specify a service account. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param api_version: The api version to use :param deferrable: Run operator in the deferrable mode. :param poll_interval: Interval size which defines how often operation status is checked. 
""" template_fields: Sequence[str] = tuple( {"api_version", "deferrable", "poll_interval"} | set(GKEOperatorMixin.template_fields) ) def __init__( self, location: str, use_internal_ip: bool = False, use_dns_endpoint: bool = False, project_id: str = PROVIDE_PROJECT_ID, gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, cluster_name: str | None = None, api_version: str = "v2", deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), poll_interval: int = 10, *args, **kwargs, ) -> None: super().__init__(*args, **kwargs) self.location = location self.cluster_name = cluster_name self.use_internal_ip = use_internal_ip self.use_dns_endpoint = use_dns_endpoint self.project_id = project_id self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain self.api_version = api_version self.deferrable = deferrable self.poll_interval = poll_interval self._check_input() def _check_input(self) -> None: if not all([self.project_id, self.cluster_name, self.location]): self.log.error("One of (project_id, cluster_name, location) is missing or incorrect") raise AirflowException("Operator has incorrect or missing input.") def execute(self, context: Context) -> str | None: wait_to_complete = not self.deferrable operation = self.cluster_hook.delete_cluster( name=self.cluster_name, project_id=self.project_id, wait_to_complete=wait_to_complete, ) if self.deferrable and operation is not None: self.defer( trigger=GKEOperationTrigger( operation_name=operation.name, project_id=self.project_id, location=self.location, gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, poll_interval=self.poll_interval, ), method_name="execute_complete", ) return operation.self_link if operation is not None else None def execute_complete(self, context: Context, event: dict) -> str: """Execute after trigger job is done.""" status = event["status"] message = event["message"] if status in ("failed", "error"): self.log.exception("Trigger ended with one of the failed statuses.") raise AirflowException(message) self.log.info(message) operation = self.cluster_hook.get_operation( operation_name=event["operation_name"], ) return operation.self_link
GKEDeleteClusterOperator
python
spyder-ide__spyder
spyder/plugins/remoteclient/api/__init__.py
{ "start": 511, "end": 583 }
class ____: ManageConnections = "manage connections"
RemoteClientActions
python
encode__django-rest-framework
rest_framework/mixins.py
{ "start": 1712, "end": 2613 }
class ____: """ Update a model instance. """ def update(self, request, *args, **kwargs): partial = kwargs.pop('partial', False) instance = self.get_object() serializer = self.get_serializer(instance, data=request.data, partial=partial) serializer.is_valid(raise_exception=True) self.perform_update(serializer) if getattr(instance, '_prefetched_objects_cache', None): # If 'prefetch_related' has been applied to a queryset, we need to # forcibly invalidate the prefetch cache on the instance. instance._prefetched_objects_cache = {} return Response(serializer.data) def perform_update(self, serializer): serializer.save() def partial_update(self, request, *args, **kwargs): kwargs['partial'] = True return self.update(request, *args, **kwargs)
UpdateModelMixin
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_automation_rules.py
{ "start": 13303, "end": 20927 }
class ____: @pytest.fixture(autouse=True) def setup_method(self): self.project = get(Project) self.rule_5 = self._add_rule("Five") self.rule_4 = self._add_rule("Four") self.rule_3 = self._add_rule("Three") self.rule_2 = self._add_rule("Two") self.rule_1 = self._add_rule("One") self.rule_0 = self._add_rule("Zero") self._refresh_rules() assert self.project.automation_rules.count() == 6 def _add_rule(self, description): rule = RegexAutomationRule.objects.create( project=self.project, description=description, match_arg=".*", version_type=BRANCH, action=VersionAutomationRule.ACTIVATE_VERSION_ACTION, ) return rule def test_move_rule_one_step(self): self.rule_0.move(1) new_order = [ self.rule_1, self.rule_0, self.rule_2, self.rule_3, self.rule_4, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_move_rule_positive_steps(self): self.rule_1.move(1) self.rule_1.move(2) new_order = [ self.rule_0, self.rule_2, self.rule_3, self.rule_4, self.rule_1, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_move_rule_positive_steps_overflow(self): self.rule_2.move(3) self.rule_2.move(2) new_order = [ self.rule_0, self.rule_2, self.rule_1, self.rule_3, self.rule_4, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_move_rules_positive_steps(self): self.rule_2.move(2) self.rule_0.refresh_from_db() self.rule_0.move(7) self.rule_4.refresh_from_db() self.rule_4.move(4) self.rule_1.refresh_from_db() self.rule_1.move(1) new_order = [ self.rule_4, self.rule_1, self.rule_0, self.rule_3, self.rule_2, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_move_rule_one_negative_step(self): self.rule_3.move(-1) new_order = [ self.rule_0, self.rule_1, self.rule_3, self.rule_2, self.rule_4, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_move_rule_negative_steps(self): self.rule_4.move(-1) self.rule_4.move(-2) new_order = [ self.rule_0, self.rule_4, self.rule_1, self.rule_2, self.rule_3, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_move_rule_negative_steps_overflow(self): self.rule_2.move(-3) self.rule_2.move(-2) new_order = [ self.rule_0, self.rule_1, self.rule_3, self.rule_2, self.rule_4, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_move_rules_negative_steps(self): self.rule_2.move(-2) self.rule_5.refresh_from_db() self.rule_5.move(-7) self.rule_3.refresh_from_db() self.rule_3.move(-2) self.rule_1.refresh_from_db() self.rule_1.move(-1) new_order = [ self.rule_2, self.rule_3, self.rule_1, self.rule_0, self.rule_5, self.rule_4, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_move_rules_up_and_down(self): self.rule_2.move(2) self.rule_5.refresh_from_db() self.rule_5.move(-3) self.rule_3.refresh_from_db() self.rule_3.move(4) 
self.rule_1.refresh_from_db() self.rule_1.move(-1) new_order = [ self.rule_0, self.rule_1, self.rule_3, self.rule_5, self.rule_4, self.rule_2, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_delete_fist_rule(self): self.rule_0.delete() assert self.project.automation_rules.all().count() == 5 new_order = [ self.rule_1, self.rule_2, self.rule_3, self.rule_4, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_delete_last_rule(self): self.rule_5.delete() assert self.project.automation_rules.all().count() == 5 new_order = [ self.rule_0, self.rule_1, self.rule_2, self.rule_3, self.rule_4, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def test_delete_some_rule(self): self.rule_2.delete() assert self.project.automation_rules.all().count() == 5 new_order = [ self.rule_0, self.rule_1, self.rule_3, self.rule_4, self.rule_5, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority def _refresh_rules(self): rules = [ self.rule_0, self.rule_1, self.rule_2, self.rule_3, self.rule_4, self.rule_5, ] for rule in rules: if rule.pk: rule.refresh_from_db() def test_delete_some_rules(self): self.rule_2.delete() self._refresh_rules() self.rule_0.delete() self._refresh_rules() self.rule_5.delete() self._refresh_rules() assert self.project.automation_rules.all().count() == 3 new_order = [ self.rule_1, self.rule_3, self.rule_4, ] for priority, rule in enumerate(self.project.automation_rules.all()): assert rule == new_order[priority] assert rule.priority == priority
TestAutomationRuleMove
python
pandas-dev__pandas
asv_bench/benchmarks/series_methods.py
{ "start": 9694, "end": 10725 }
class ____: param_names = ["num_to_replace"] params = [100, 1000] def setup(self, num_to_replace): N = 1_000_000 self.arr = np.random.randn(N) self.arr1 = self.arr.copy() np.random.shuffle(self.arr1) self.ser = Series(self.arr) self.to_replace_list = np.random.choice(self.arr, num_to_replace) self.values_list = np.random.choice(self.arr1, num_to_replace) self.replace_dict = dict( zip(self.to_replace_list, self.values_list, strict=True) ) def time_replace_dict(self, num_to_replace): self.ser.replace(self.replace_dict) def peakmem_replace_dict(self, num_to_replace): self.ser.replace(self.replace_dict) def time_replace_list(self, num_to_replace): self.ser.replace(self.to_replace_list, self.values_list) def peakmem_replace_list(self, num_to_replace): self.ser.replace(self.to_replace_list, self.values_list) from .pandas_vb_common import setup # noqa: F401 isort:skip
Replace
python
Pylons__pyramid
tests/test_testing.py
{ "start": 18699, "end": 19463 }
class ____(unittest.TestCase): def setUp(self): from pyramid.testing import skip_on self.os_name = skip_on.os_name skip_on.os_name = 'wrong' def tearDown(self): from pyramid.testing import skip_on skip_on.os_name = self.os_name def _callFUT(self, *platforms): from pyramid.testing import skip_on return skip_on(*platforms) def test_wrong_platform(self): def foo(): # pragma: no cover return True decorated = self._callFUT('wrong')(foo) self.assertEqual(decorated(), None) def test_ok_platform(self): def foo(): return True decorated = self._callFUT('ok')(foo) self.assertEqual(decorated(), True)
Test_skip_on
python
pytorch__pytorch
test/distributed/test_device_mesh.py
{ "start": 2601, "end": 4981 }
class ____(DTensorTestBase): @property def world_size(self): return 4 @skip_if_lt_x_gpu(4) def test_manual_set_device(self): mesh_tensor = torch.arange(4).reshape(2, 2) self.assertTrue(not is_initialized()) # Set the device on each process before DeviceMesh constructor, # and device to be different than the default world rank torch.accelerator.set_device_index((self.rank + 2) % self.world_size) _set_env_var(world_size=self.world_size, rank=self.rank) DeviceMesh(self.device_type, mesh_tensor) self.assertTrue(is_initialized()) # check that the device is set to the correct device # and respect the previous set_device calls self.assertEqual( torch.accelerator.current_device_idx(), (self.rank + 2) % self.world_size ) self.destroy_pg() @skip_if_lt_x_gpu(4) def test_auto_set_device_from_local_rank(self): mesh_tensor = torch.arange(4).reshape(2, 2) self.assertTrue(not is_initialized()) # set the local rank to be different than the default world rank, # DeviceMesh should respect LOCAL_RANK env var if it's set local_rank = (self.rank + 1) % self.world_size _set_env_var( world_size=self.world_size, rank=self.rank, local_rank=local_rank, ) DeviceMesh(self.device_type, mesh_tensor) self.assertTrue(is_initialized()) # check that the device is set to the correct device # and respect the LOCAL_RANK env var self.assertEqual(torch.accelerator.current_device_idx(), local_rank) self.destroy_pg() @skip_if_lt_x_gpu(4) def test_auto_set_device_from_heuristic(self): mesh_tensor = torch.arange(4).reshape(2, 2) self.assertTrue(not is_initialized()) _set_env_var( world_size=self.world_size, rank=self.rank, ) with self.assertWarnsRegex( UserWarning, "It seems like you did not set/select the default device" ): DeviceMesh(self.device_type, mesh_tensor) self.assertTrue(is_initialized()) # check that the device is set to the correct device self.assertEqual(torch.accelerator.current_device_idx(), self.rank) self.destroy_pg()
DeviceMeshSetDeviceTest
python
zostera__django-bootstrap4
example/app/forms.py
{ "start": 3450, "end": 3731 }
class ____(forms.Form): title = forms.CharField() pub_date = forms.DateField() def clean(self): cleaned_data = super().clean() raise forms.ValidationError("This error was added to show the non field errors styling.") return cleaned_data
ArticleForm
python
sqlalchemy__sqlalchemy
test/orm/test_session_state_change.py
{ "start": 296, "end": 12331 }
class ____(fixtures.TestBase): def test_single_change(self): """test single method that declares and invokes a state change""" _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def move_to_b(self): self._state = StateTestChange.b m = Machine() eq_(m._state, _NO_CHANGE) m.move_to_b() eq_(m._state, StateTestChange.b) def test_single_incorrect_change(self): """test single method that declares a state change but changes to the wrong state.""" _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def move_to_b(self): self._state = StateTestChange.c m = Machine() eq_(m._state, _NO_CHANGE) with expect_raises_message( sa_exc.IllegalStateChangeError, r"Method 'move_to_b\(\)' " r"caused an unexpected state change to <StateTestChange.c: 3>", ): m.move_to_b() def test_single_failed_to_change(self): """test single method that declares a state change but didn't do the change.""" _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def move_to_b(self): pass m = Machine() eq_(m._state, _NO_CHANGE) with expect_raises_message( sa_exc.IllegalStateChangeError, r"Method 'move_to_b\(\)' failed to change state " "to <StateTestChange.b: 2> as " "expected", ): m.move_to_b() def test_change_from_sub_method_with_declaration(self): """test successful state change by one method calling another that does the change. """ _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def _inner_move_to_b(self): self._state = StateTestChange.b @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def move_to_b(self): with self._expect_state(StateTestChange.b): self._inner_move_to_b() m = Machine() eq_(m._state, _NO_CHANGE) m.move_to_b() eq_(m._state, StateTestChange.b) def test_method_and_sub_method_no_change(self): """test methods that declare the state should not change""" _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a,), _NO_CHANGE ) def _inner_do_nothing(self): pass @state_changes._StateChange.declare_states( (StateTestChange.a,), _NO_CHANGE ) def do_nothing(self): self._inner_do_nothing() m = Machine() eq_(m._state, _NO_CHANGE) m._state = StateTestChange.a m.do_nothing() eq_(m._state, StateTestChange.a) def test_method_w_no_change_illegal_inner_change(self): _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.c ) def _inner_move_to_c(self): self._state = StateTestChange.c @state_changes._StateChange.declare_states( (StateTestChange.a,), _NO_CHANGE ) def do_nothing(self): self._inner_move_to_c() m = Machine() eq_(m._state, _NO_CHANGE) m._state = StateTestChange.a with expect_raises_message( sa_exc.IllegalStateChangeError, r"Method '_inner_move_to_c\(\)' can't be called here; " r"method 'do_nothing\(\)' is already in progress and this " r"would cause an unexpected state 
change to " "<StateTestChange.c: 3>", ): m.do_nothing() eq_(m._state, StateTestChange.a) def test_change_from_method_sub_w_no_change(self): """test methods that declare the state should not change""" _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a,), _NO_CHANGE ) def _inner_do_nothing(self): pass @state_changes._StateChange.declare_states( (StateTestChange.a,), StateTestChange.b ) def move_to_b(self): self._inner_do_nothing() self._state = StateTestChange.b m = Machine() eq_(m._state, _NO_CHANGE) m._state = StateTestChange.a m.move_to_b() eq_(m._state, StateTestChange.b) def test_invalid_change_from_declared_sub_method_with_declaration(self): """A method uses _expect_state() to call a sub-method, which must declare that state as its destination if no exceptions are raised. """ _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): # method declares StateTestChange.c so can't be called under # expect_state(StateTestChange.b) @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.c ) def _inner_move_to_c(self): self._state = StateTestChange.c @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def move_to_b(self): with self._expect_state(StateTestChange.b): self._inner_move_to_c() m = Machine() eq_(m._state, _NO_CHANGE) with expect_raises_message( sa_exc.IllegalStateChangeError, r"Cant run operation '_inner_move_to_c\(\)' here; will move " r"to state <StateTestChange.c: 3> where we are " "expecting <StateTestChange.b: 2>", ): m.move_to_b() def test_invalid_change_from_invalid_sub_method_with_declaration(self): """A method uses _expect_state() to call a sub-method, which must declare that state as its destination if no exceptions are raised. Test an error is raised if the sub-method doesn't change to the correct state. 
""" _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): # method declares StateTestChange.b, but is doing the wrong # change, so should fail under expect_state(StateTestChange.b) @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def _inner_move_to_c(self): self._state = StateTestChange.c @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def move_to_b(self): with self._expect_state(StateTestChange.b): self._inner_move_to_c() m = Machine() eq_(m._state, _NO_CHANGE) with expect_raises_message( sa_exc.IllegalStateChangeError, r"While method 'move_to_b\(\)' was running, method " r"'_inner_move_to_c\(\)' caused an unexpected state change " "to <StateTestChange.c: 3>", ): m.move_to_b() def test_invalid_prereq_state(self): _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def move_to_b(self): self._state = StateTestChange.b @state_changes._StateChange.declare_states( (StateTestChange.c,), "d" ) def move_to_d(self): self._state = "d" m = Machine() eq_(m._state, _NO_CHANGE) m.move_to_b() eq_(m._state, StateTestChange.b) with expect_raises_message( sa_exc.IllegalStateChangeError, r"Can't run operation 'move_to_d\(\)' when " "Session is in state <StateTestChange.b: 2>", ): m.move_to_d() def test_declare_only(self): _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( state_changes._StateChangeStates.ANY, StateTestChange.b ) def _inner_move_to_b(self): self._state = StateTestChange.b def move_to_b(self): with self._expect_state(StateTestChange.b): self._move_to_b() m = Machine() eq_(m._state, _NO_CHANGE) with expect_raises_message( AssertionError, "Unexpected call to _expect_state outside of " "state-changing method", ): m.move_to_b() def test_sibling_calls_maintain_correct_state(self): _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( state_changes._StateChangeStates.ANY, StateTestChange.c ) def move_to_c(self): self._state = StateTestChange.c @state_changes._StateChange.declare_states( state_changes._StateChangeStates.ANY, _NO_CHANGE ) def do_nothing(self): pass m = Machine() m.do_nothing() eq_(m._state, _NO_CHANGE) m.move_to_c() eq_(m._state, StateTestChange.c) def test_change_from_sub_method_requires_declaration(self): """A method can't call another state-changing method without using _expect_state() to allow the state change to occur. """ _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE class Machine(state_changes._StateChange): @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def _inner_move_to_b(self): self._state = StateTestChange.b @state_changes._StateChange.declare_states( (StateTestChange.a, _NO_CHANGE), StateTestChange.b ) def move_to_b(self): self._inner_move_to_b() m = Machine() with expect_raises_message( sa_exc.IllegalStateChangeError, r"Method '_inner_move_to_b\(\)' can't be called here; " r"method 'move_to_b\(\)' is already in progress and this would " r"cause an unexpected state change to <StateTestChange.b: 2>", ): m.move_to_b()
StateMachineTest
python
facebook__pyre-check
tools/upgrade/filesystem.py
{ "start": 8301, "end": 9427 }
class ____: def list( self, root: str, patterns: List[str], exclude: Optional[List[str]] = None ) -> List[str]: """ Return the list of files that match any of the patterns within root. If exclude is provided, files that match an exclude pattern are omitted. Note: The `find` command does not understand globs properly. e.g. 'a/*.py' will match 'a/b/c.py' For this reason, avoid calling this method with glob patterns. """ command = ["find", "."] command += self._match_any(patterns) if exclude: command += ["-and", "!"] command += self._match_any(exclude) return ( subprocess.run(command, stdout=subprocess.PIPE, cwd=root) .stdout.decode("utf-8") .split() ) def _match_any(self, patterns: List[str]) -> List[str]: expression = [] for pattern in patterns: if expression: expression.append("-or") expression.extend(["-path", f"./{pattern}"]) return ["(", *expression, ")"]
Filesystem
python
pandas-dev__pandas
pandas/tests/indexes/datetimes/methods/test_to_period.py
{ "start": 407, "end": 7754 }
class ____: def test_dti_to_period(self): dti = date_range(start="1/1/2005", end="12/1/2005", freq="ME") pi1 = dti.to_period() pi2 = dti.to_period(freq="D") pi3 = dti.to_period(freq="3D") assert pi1[0] == Period("Jan 2005", freq="M") assert pi2[0] == Period("1/31/2005", freq="D") assert pi3[0] == Period("1/31/2005", freq="3D") assert pi1[-1] == Period("Nov 2005", freq="M") assert pi2[-1] == Period("11/30/2005", freq="D") assert pi3[-1] == Period("11/30/2005", freq="3D") tm.assert_index_equal(pi1, period_range("1/1/2005", "11/1/2005", freq="M")) tm.assert_index_equal( pi2, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("D") ) tm.assert_index_equal( pi3, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("3D") ) @pytest.mark.parametrize("month", MONTHS) def test_to_period_quarterly(self, month): # make sure we can make the round trip freq = f"Q-{month}" rng = period_range("1989Q3", "1991Q3", freq=freq) stamps = rng.to_timestamp() result = stamps.to_period(freq) tm.assert_index_equal(rng, result) @pytest.mark.parametrize("off", ["BQE", "QS", "BQS"]) def test_to_period_quarterlyish(self, off): rng = date_range("01-Jan-2012", periods=8, freq=off) prng = rng.to_period() assert prng.freq == "QE-DEC" @pytest.mark.parametrize("off", ["BYE", "YS", "BYS"]) def test_to_period_annualish(self, off): rng = date_range("01-Jan-2012", periods=8, freq=off) prng = rng.to_period() assert prng.freq == "YE-DEC" def test_to_period_monthish(self): offsets = ["MS", "BME"] for off in offsets: rng = date_range("01-Jan-2012", periods=8, freq=off) prng = rng.to_period() assert prng.freqstr == "M" rng = date_range("01-Jan-2012", periods=8, freq="ME") prng = rng.to_period() assert prng.freqstr == "M" with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG): date_range("01-Jan-2012", periods=8, freq="EOM") @pytest.mark.parametrize( "freq_offset, freq_period", [ ("2ME", "2M"), (MonthEnd(2), MonthEnd(2)), ], ) def test_dti_to_period_2monthish(self, freq_offset, freq_period): dti = date_range("2020-01-01", periods=3, freq=freq_offset) pi = dti.to_period() tm.assert_index_equal(pi, period_range("2020-01", "2020-05", freq=freq_period)) @pytest.mark.parametrize( "freq", ["2ME", "1me", "2QE", "2QE-SEP", "1YE", "ye", "2YE-MAR"] ) def test_to_period_frequency_M_Q_Y_raises(self, freq): msg = f"Invalid frequency: {freq}" rng = date_range("01-Jan-2012", periods=8, freq="ME") with pytest.raises(ValueError, match=msg): rng.to_period(freq) def test_to_period_infer(self): # https://github.com/pandas-dev/pandas/issues/33358 rng = date_range( start="2019-12-22 06:40:00+00:00", end="2019-12-22 08:45:00+00:00", freq="5min", ) with tm.assert_produces_warning(UserWarning, match="drop timezone info"): pi1 = rng.to_period("5min") with tm.assert_produces_warning(UserWarning, match="drop timezone info"): pi2 = rng.to_period() tm.assert_index_equal(pi1, pi2) @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") def test_period_dt64_round_trip(self): dti = date_range("1/1/2000", "1/7/2002", freq="B", unit="ns") pi = dti.to_period() tm.assert_index_equal(pi.to_timestamp(), dti) dti = date_range("1/1/2000", "1/7/2002", freq="B", unit="ns") pi = dti.to_period(freq="h") tm.assert_index_equal(pi.to_timestamp(), dti) def test_to_period_millisecond(self): index = DatetimeIndex( [ Timestamp("2007-01-01 10:11:12.123456Z"), Timestamp("2007-01-01 10:11:13.789123Z"), ] ) with tm.assert_produces_warning(UserWarning, match="drop timezone info"): period = index.to_period(freq="ms") assert 2 == len(period) assert period[0]
== Period("2007-01-01 10:11:12.123Z", "ms") assert period[1] == Period("2007-01-01 10:11:13.789Z", "ms") def test_to_period_microsecond(self): index = DatetimeIndex( [ Timestamp("2007-01-01 10:11:12.123456Z"), Timestamp("2007-01-01 10:11:13.789123Z"), ] ) with tm.assert_produces_warning(UserWarning, match="drop timezone info"): period = index.to_period(freq="us") assert 2 == len(period) assert period[0] == Period("2007-01-01 10:11:12.123456Z", "us") assert period[1] == Period("2007-01-01 10:11:13.789123Z", "us") @pytest.mark.parametrize( "tz", [ "US/Eastern", timezone.utc, tzlocal(), "dateutil/US/Eastern", dateutil.tz.tzutc(), ], ) def test_to_period_tz(self, tz): ts = date_range("1/1/2000", "2/1/2000", tz=tz) with tm.assert_produces_warning(UserWarning, match="drop timezone info"): result = ts.to_period()[0] expected = ts[0].to_period(ts.freq) assert result == expected expected = date_range("1/1/2000", "2/1/2000").to_period() with tm.assert_produces_warning(UserWarning, match="drop timezone info"): result = ts.to_period(ts.freq) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("tz", ["Etc/GMT-1", "Etc/GMT+1"]) def test_to_period_tz_utc_offset_consistency(self, tz): # GH#22905 ts = date_range("1/1/2000", "2/1/2000", tz="Etc/GMT-1") with tm.assert_produces_warning(UserWarning, match="drop timezone info"): result = ts.to_period()[0] expected = ts[0].to_period(ts.freq) assert result == expected def test_to_period_nofreq(self): idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"]) msg = "You must pass a freq argument as current index has none." with pytest.raises(ValueError, match=msg): idx.to_period() idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="infer") assert idx.freqstr == "D" expected = PeriodIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="D") tm.assert_index_equal(idx.to_period(), expected) # GH#7606 idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"]) assert idx.freqstr is None tm.assert_index_equal(idx.to_period(), expected) @pytest.mark.parametrize("freq", ["2BME", "SME-15", "2BMS"]) def test_to_period_offsets_not_supported(self, freq): # GH#56243 msg = "|".join( [ f"Invalid frequency: {freq}", f"{freq} is not supported as period frequency", ] ) ts = date_range("1/1/2012", periods=4, freq=freq) with pytest.raises(ValueError, match=msg): ts.to_period()
TestToPeriod
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/plus/deploy/configure/utils.py
{ "start": 6582, "end": 10054 }
class ____(NamedTuple): name: str match: Callable[[str], bool] fragment: Path secrets_hints: list[str] def _matches_ecr(url: str) -> bool: """Check if URL is an AWS ECR registry. ECR URLs follow the format: <account-id>.dkr.ecr.<region>.amazonaws.com """ parsed = urlparse(url if "://" in url else f"https://{url}") hostname = parsed.hostname or "" # Check that hostname ends with .amazonaws.com and contains .ecr. in the subdomain structure return hostname.endswith(".amazonaws.com") and ".ecr." in hostname def _matches_dockerhub(url: str) -> bool: """Check if URL is DockerHub.""" parsed = urlparse(url if "://" in url else f"https://{url}") hostname = parsed.hostname or "" return hostname == "docker.io" or hostname.endswith(".docker.io") def _matches_ghcr(url: str) -> bool: """Check if URL is GitHub Container Registry.""" parsed = urlparse(url if "://" in url else f"https://{url}") hostname = parsed.hostname or "" return hostname == "ghcr.io" or hostname.endswith(".ghcr.io") def _matches_azure(url: str) -> bool: """Check if URL is Azure Container Registry. Azure URLs follow the format: <name>.azurecr.io """ parsed = urlparse(url if "://" in url else f"https://{url}") hostname = parsed.hostname or "" return hostname == "azurecr.io" or hostname.endswith(".azurecr.io") def _matches_gcr(url: str) -> bool: """Check if URL is Google Container Registry.""" parsed = urlparse(url if "://" in url else f"https://{url}") hostname = parsed.hostname or "" return hostname == "gcr.io" or hostname.endswith(".gcr.io") REGISTRY_INFOS = [ ContainerRegistryInfo( name="ECR", match=_matches_ecr, fragment=TEMPLATES_DIR / "registry_fragments" / "ecr-login-fragment.yaml", secrets_hints=[ 'gh secret set AWS_ACCESS_KEY_ID --body "(your AWS access key ID)"', 'gh secret set AWS_SECRET_ACCESS_KEY --body "(your AWS secret access key)"', 'gh secret set AWS_REGION --body "(your AWS region)"', ], ), ContainerRegistryInfo( name="DockerHub", match=_matches_dockerhub, fragment=TEMPLATES_DIR / "registry_fragments" / "dockerhub-login-fragment.yaml", secrets_hints=[ 'gh secret set DOCKERHUB_USERNAME --body "(your DockerHub username)"', 'gh secret set DOCKERHUB_TOKEN --body "(your DockerHub token)"', ], ), ContainerRegistryInfo( name="GitHub Container Registry", match=_matches_ghcr, fragment=TEMPLATES_DIR / "registry_fragments" / "github-container-registry-login-fragment.yaml", secrets_hints=[], ), ContainerRegistryInfo( name="Azure Container Registry", match=_matches_azure, fragment=TEMPLATES_DIR / "registry_fragments" / "azure-container-registry-login-fragment.yaml", secrets_hints=[ 'gh secret set AZURE_CLIENT_ID --body "(your Azure client ID)"', 'gh secret set AZURE_CLIENT_SECRET --body "(your Azure client secret)"', ], ), ContainerRegistryInfo( name="Google Container Registry", match=_matches_gcr, fragment=TEMPLATES_DIR / "registry_fragments" / "gcr-login-fragment.yaml", secrets_hints=[ 'gh secret set GCR_JSON_KEY --body "(your GCR JSON key)"', ], ), ]
ContainerRegistryInfo
python
Pylons__pyramid
src/pyramid/interfaces.py
{ "start": 28417, "end": 29894 }
class ____(Interface): """ WSGI application which routes requests to 'view' code based on a view registry. """ registry = Attribute( """Component architecture registry local to this application.""" ) def request_context(environ): """ Create a new request context from a WSGI environ. The request context is used to push/pop the threadlocals required when processing the request. It also contains an initialized :class:`pyramid.interfaces.IRequest` instance using the registered :class:`pyramid.interfaces.IRequestFactory`. The context may be used as a context manager to control the threadlocal lifecycle: .. code-block:: python with router.request_context(environ) as request: ... Alternatively, the context may be used without the ``with`` statement by manually invoking its ``begin()`` and ``end()`` methods. .. code-block:: python ctx = router.request_context(environ) request = ctx.begin() try: ... finally: ctx.end() """ def invoke_request(request): """ Invoke the :app:`Pyramid` request pipeline. See :ref:`router_chapter` for information on the request pipeline. The output should be a :class:`pyramid.interfaces.IResponse` object or a raised exception. """
IRouter
python
pypa__setuptools
setuptools/_vendor/typeguard/_utils.py
{ "start": 5163, "end": 5270 }
class ____: __slots__ = () def __repr__(self) -> str: return "<unset>" unset = Unset()
Unset
python
django__django
tests/db_functions/text/test_lower.py
{ "start": 194, "end": 1459 }
class ____(TestCase): def test_basic(self): Author.objects.create(name="John Smith", alias="smithj") Author.objects.create(name="Rhonda") authors = Author.objects.annotate(lower_name=Lower("name")) self.assertQuerySetEqual( authors.order_by("name"), ["john smith", "rhonda"], lambda a: a.lower_name ) Author.objects.update(name=Lower("name")) self.assertQuerySetEqual( authors.order_by("name"), [ ("john smith", "john smith"), ("rhonda", "rhonda"), ], lambda a: (a.lower_name, a.name), ) def test_num_args(self): with self.assertRaisesMessage( TypeError, "'Lower' takes exactly 1 argument (2 given)" ): Author.objects.update(name=Lower("name", "name")) def test_transform(self): with register_lookup(CharField, Lower): Author.objects.create(name="John Smith", alias="smithj") Author.objects.create(name="Rhonda") authors = Author.objects.filter(name__lower__exact="john smith") self.assertQuerySetEqual( authors.order_by("name"), ["John Smith"], lambda a: a.name )
LowerTests
python
pdm-project__pdm
src/pdm/models/caches.py
{ "start": 2860, "end": 6168 }
class ____: """Caches hashes of PyPI artifacts so we do not need to re-download them. Hashes are only cached when the URL appears to contain a hash in it (and the cache key includes the hash value returned from the server). This ought to avoid issues where the location on the server changes. """ FAVORITE_HASH = "sha256" STRONG_HASHES = ("sha256", "sha384", "sha512") def __init__(self, directory: Path | str) -> None: self.directory = Path(directory) def _read_from_link(self, link: Link, session: Client) -> Iterable[bytes]: if link.is_file: with open(link.file_path, "rb") as f: yield from f else: import httpx with session.stream("GET", link.normalized) as resp: try: resp.raise_for_status() except httpx.HTTPStatusError as e: raise PdmException(f"Failed to read from {link.redacted}: {e}") from e yield from resp.iter_bytes(chunk_size=8192) def _get_file_hash(self, link: Link, session: Client) -> str: h = hashlib.new(self.FAVORITE_HASH) logger.debug("Downloading link %s for calculating hash", link.redacted) for chunk in self._read_from_link(link, session): h.update(chunk) return ":".join([h.name, h.hexdigest()]) def _should_cache(self, link: Link) -> bool: # For now, we only disable caching for local files. # We may add more when we know better about it. return not link.is_file def get_hash(self, link: Link, session: Client) -> str: # If there is no link hash (i.e., md5, sha256, etc.), we don't want # to store it. hash_value = self.get(link.url_without_fragment) if not hash_value: if link.hashes and link.hashes.keys() & self.STRONG_HASHES: logger.debug("Using hash in link for %s", link.redacted) hash_name = next(k for k in self.STRONG_HASHES if k in link.hashes) hash_value = f"{hash_name}:{link.hashes[hash_name]}" elif link.hash and link.hash_name in self.STRONG_HASHES: logger.debug("Using hash in link for %s", link.redacted) hash_value = f"{link.hash_name}:{link.hash}" else: hash_value = self._get_file_hash(link, session) if self._should_cache(link): self.set(link.url_without_fragment, hash_value) return hash_value def _get_path_for_key(self, key: str) -> Path: hashed = hashlib.sha224(key.encode("utf-8")).hexdigest() parts = (hashed[:2], hashed[2:4], hashed[4:6], hashed[6:8], hashed[8:]) return self.directory.joinpath(*parts) def get(self, url: str) -> str | None: path = self._get_path_for_key(url) with contextlib.suppress(OSError, UnicodeError): return path.read_text("utf-8").strip() return None def set(self, url: str, hash: str) -> None: path = self._get_path_for_key(url) with contextlib.suppress(OSError, UnicodeError): path.parent.mkdir(parents=True, exist_ok=True) with atomic_open_for_write(path, encoding="utf-8") as fp: fp.write(hash)
HashCache
python
pytorch__pytorch
test/distributed/checkpoint/test_dtensor_checkpoint.py
{ "start": 585, "end": 2138 }
class ____(torch.nn.Module): def __init__( self, sdt: DTensor, rdt: DTensor, submesh_sdt: DTensor, submesh_rdt: DTensor, extra_state: int = 1, extra_state_tensor: torch.Tensor = torch.zeros(1), ) -> None: super().__init__() self.sdt = torch.nn.Parameter(sdt) self.rdt = torch.nn.Parameter(rdt) self.submesh_sdt = torch.nn.Parameter(submesh_sdt) self.submesh_rdt = torch.nn.Parameter(submesh_rdt) self._extra_state = extra_state self._extra_state_tensor = extra_state_tensor @property def extra_state(self) -> int: return self._extra_state @extra_state.setter def extra_state(self, new_extra_state: int) -> None: self._extra_state = new_extra_state @property def extra_state_tensor(self) -> torch.Tensor: return self._extra_state_tensor @extra_state_tensor.setter def extra_state_tensor(self, new_extra_state_tensor: torch.Tensor) -> None: self._extra_state_tensor = new_extra_state_tensor def get_extra_state(self) -> dict[str, Union[int, torch._tensor.Tensor]]: return { "extra_state": self._extra_state, "extra_state_tensor": self._extra_state_tensor, } def set_extra_state( self, state: dict[str, Union[int, torch._tensor.Tensor]] ) -> None: self._extra_state = state["extra_state"] # pyre-ignore[8] self._extra_state_tensor = state["extra_state_tensor"] # pyre-ignore[8]
MyTestModule
python
spyder-ide__spyder
spyder/utils/stylesheet.py
{ "start": 2225, "end": 3756 }
class ____: """Base class for Spyder stylesheets.""" SET_STYLESHEET_AT_INIT = True """ Decide if the stylesheet must be set when the class is initialized. Notes ----- There are some stylesheets for which this is not possible (e.g. the ones that need to access our fonts). """ def __init__(self): self._stylesheet = qstylizer.style.StyleSheet() if self.SET_STYLESHEET_AT_INIT: self.set_stylesheet() def get_stylesheet(self): return self._stylesheet def to_string(self): if self._stylesheet.toString() == "": self.set_stylesheet() return self._stylesheet.toString() def get_copy(self): """ Return a copy of the stylesheet. This allows it to be modified for specific widgets. """ if self._stylesheet.toString() == "": self.set_stylesheet() return copy.deepcopy(self) def set_stylesheet(self): raise NotImplementedError( "Subclasses need to implement this method to set the _stylesheet " "attribute as a Qstylizer StyleSheet object." ) def __str__(self): """ Get a string representation of the stylesheet object this class holds. """ return self.to_string() # ============================================================================= # ---- Application stylesheet # =============================================================================
SpyderStyleSheet
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 106018, "end": 107888 }
class ____(GeneratedAirbyteSource): class Disabled: @public def __init__( self, ): self.deletion_mode = "ignore" class Enabled: @public def __init__(self, column: str): self.deletion_mode = "deleted_field" self.column = check.str_param(column, "column") class Collection: @public def __init__( self, page_size: int, deletions: Union["FaunaSource.Disabled", "FaunaSource.Enabled"] ): self.page_size = check.int_param(page_size, "page_size") self.deletions = check.inst_param( deletions, "deletions", (FaunaSource.Disabled, FaunaSource.Enabled) ) @public def __init__( self, name: str, domain: str, port: int, scheme: str, secret: str, collection: "FaunaSource.Collection", ): """Airbyte Source for Fauna. Documentation can be found at https://github.com/fauna/airbyte/blob/source-fauna/docs/integrations/sources/fauna.md Args: name (str): The name of the source. domain (str): Domain of Fauna to query. Defaults to db.fauna.com. See the docs. port (int): Endpoint port. scheme (str): URL scheme. secret (str): Fauna secret, used when authenticating with the database. collection (FaunaSource.Collection): Settings for the Fauna Collection. """ self.domain = check.str_param(domain, "domain") self.port = check.int_param(port, "port") self.scheme = check.str_param(scheme, "scheme") self.secret = check.str_param(secret, "secret") self.collection = check.inst_param(collection, "collection", FaunaSource.Collection) super().__init__("Fauna", name)
FaunaSource
python
readthedocs__readthedocs.org
readthedocs/profiles/views.py
{ "start": 6666, "end": 7091 }
class ____(TokenMixin, CreateView): """Simple view to generate a Token object for the logged in User.""" http_method_names = ["post"] def post(self, request, *args, **kwargs): _, created = Token.objects.get_or_create(user=self.request.user) if created: messages.info(request, "API Token created successfully") return HttpResponseRedirect(self.get_success_url())
TokenCreateView
python
numba__numba
numba/core/untyped_passes.py
{ "start": 22829, "end": 29853 }
class ____(FunctionPass): """ This pass spots a `literal_unroll([<constant values>])` and rewrites it as a `literal_unroll(tuple(<constant values>))`. """ _name = "transform_literal_unroll_const_list_to_tuple" _accepted_types = (types.BaseTuple, types.LiteralList) def __init__(self): FunctionPass.__init__(self) def run_pass(self, state): mutated = False func_ir = state.func_ir for label, blk in func_ir.blocks.items(): calls = [_ for _ in blk.find_exprs('call')] for call in calls: glbl = guard(get_definition, func_ir, call.func) if glbl and isinstance(glbl, (ir.Global, ir.FreeVar)): # find a literal_unroll if glbl.value is literal_unroll: if len(call.args) > 1: msg = "literal_unroll takes one argument, found %s" raise errors.UnsupportedError(msg % len(call.args), call.loc) # get the arg, make sure its a build_list unroll_var = call.args[0] to_unroll = guard(get_definition, func_ir, unroll_var) if (isinstance(to_unroll, ir.Expr) and to_unroll.op == "build_list"): # make sure they are all const items in the list for i, item in enumerate(to_unroll.items): val = guard(get_definition, func_ir, item) if not val: msg = ("multiple definitions for variable " "%s, cannot resolve constant") raise errors.UnsupportedError(msg % item, to_unroll.loc) if not isinstance(val, ir.Const): msg = ("Found non-constant value at " "position %s in a list argument to " "literal_unroll" % i) raise errors.UnsupportedError(msg, to_unroll.loc) # The above appears ok, now swap the build_list for # a built tuple. # find the assignment for the unroll target to_unroll_lhs = guard(get_definition, func_ir, unroll_var, lhs_only=True) if to_unroll_lhs is None: msg = ("multiple definitions for variable " "%s, cannot resolve constant") raise errors.UnsupportedError(msg % unroll_var, to_unroll.loc) # scan all blocks looking for the LHS for b in func_ir.blocks.values(): asgn = b.find_variable_assignment( to_unroll_lhs.name) if asgn is not None: break else: msg = ("Cannot find assignment for known " "variable %s") % to_unroll_lhs.name raise errors.CompilerError(msg, to_unroll.loc) # Create a tuple with the list items as contents tup = ir.Expr.build_tuple(to_unroll.items, to_unroll.loc) # swap the list for the tuple asgn.value = tup mutated = True elif (isinstance(to_unroll, ir.Expr) and to_unroll.op == "build_tuple"): # this is fine, do nothing pass elif (isinstance(to_unroll, (ir.Global, ir.FreeVar)) and isinstance(to_unroll.value, tuple)): # this is fine, do nothing pass elif isinstance(to_unroll, ir.Arg): # this is only fine if the arg is a tuple ty = state.typemap[to_unroll.name] if not isinstance(ty, self._accepted_types): msg = ("Invalid use of literal_unroll with a " "function argument, only tuples are " "supported as function arguments, found " "%s") % ty raise errors.UnsupportedError(msg, to_unroll.loc) else: extra = None if isinstance(to_unroll, ir.Expr): # probably a slice if to_unroll.op == "getitem": ty = state.typemap[to_unroll.value.name] # check if this is a tuple slice if not isinstance(ty, self._accepted_types): extra = "operation %s" % to_unroll.op loc = to_unroll.loc elif isinstance(to_unroll, ir.Arg): extra = "non-const argument %s" % to_unroll.name loc = to_unroll.loc else: if to_unroll is None: extra = ('multiple definitions of ' 'variable "%s".' % unroll_var.name) loc = unroll_var.loc else: loc = to_unroll.loc extra = "unknown problem" if extra: msg = ("Invalid use of literal_unroll, " "argument should be a tuple or a list " "of constant values. 
Failure reason: " "found %s" % extra) raise errors.UnsupportedError(msg, loc) return mutated @register_pass(mutates_CFG=True, analysis_only=False)
TransformLiteralUnrollConstListToTuple
python
django__django
django/db/models/sql/compiler.py
{ "start": 86147, "end": 94621 }
class ____(SQLCompiler): returning_fields = None returning_params = () def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return "", () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, "resolve_expression"): val = val.resolve_expression( self.query, allow_joins=False, for_save=True ) if val.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, val) ) if val.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query " "(%s=%r)." % (field.name, val) ) if isinstance(val, ColPairs): raise FieldError( "Composite primary keys expressions are not allowed " "in this query (%s=F('pk'))." % field.name ) elif hasattr(val, "prepare_database_save"): if field.remote_field: val = val.prepare_database_save(field) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, "get_placeholder"): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = "%s" name = field.column if hasattr(val, "as_sql"): sql, params = self.compile(val) values.append("%s = %s" % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append("%s = %s" % (qn(name), placeholder)) update_params.append(val) else: values.append("%s = NULL" % qn(name)) table = self.query.base_table result = [ "UPDATE %s SET" % qn(table), ", ".join(values), ] try: where, params = self.compile(self.query.where) except FullResultSet: params = [] else: result.append("WHERE %s" % where) if self.returning_fields: # Skip empty r_sql to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. r_sql, self.returning_params = self.connection.ops.returning_columns( self.returning_fields ) if r_sql: result.append(r_sql) params.extend(self.returning_params) return " ".join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ row_count = super().execute_sql(result_type) is_empty = row_count is None row_count = row_count or 0 for query in self.query.get_related_updates(): # If the result_type is NO_RESULTS then the aux_row_count is None. aux_row_count = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_row_count: # Returns the row count for any related updates as the number # of rows updated. row_count = aux_row_count is_empty = False return row_count def execute_returning_sql(self, returning_fields): """ Execute the specified update and return rows of the returned columns associated with the specified returning_field if the backend supports it. """ if self.query.get_related_updates(): raise NotImplementedError( "Update returning is not implemented for queries with related updates." 
) if ( not returning_fields or not self.connection.features.can_return_rows_from_update ): row_count = self.execute_sql(ROW_COUNT) return [()] * row_count self.returning_fields = returning_fields with self.connection.cursor() as cursor: sql, params = self.as_sql() cursor.execute(sql, params) rows = self.connection.ops.fetch_returned_rows( cursor, self.returning_params ) opts = self.query.get_meta() cols = [field.get_col(opts.db_table) for field in self.returning_fields] converters = self.get_converters(cols) if converters: rows = self.apply_converters(rows, converters) return list(rows) def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(force=True) query.extra = {} query.select = [] meta = query.get_meta() fields = [meta.pk.name] related_ids_index = [] for related in self.query.related_updates: if all( path.join_field.primary_key for path in meta.get_path_to_parent(related) ): # If a primary key chain exists to the targeted related update, # then the meta.pk value can be used for it. related_ids_index.append((related, 0)) else: # This branch will only be reached when updating a field of an # ancestor that is not part of the primary key chain of a MTI # tree. related_ids_index.append((related, len(fields))) fields.append(related._meta.pk.name) query.add_fields(fields) super().pre_sql_setup() is_composite_pk = meta.is_composite_pk must_pre_select = ( count > 1 and not self.connection.features.update_can_self_select ) # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.clear_where() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] related_ids = collections.defaultdict(list) for rows in query.get_compiler(self.using).execute_sql(MULTI): pks = [row if is_composite_pk else row[0] for row in rows] idents.extend(pks) for parent, index in related_ids_index: related_ids[parent].extend(r[index] for r in rows) self.query.add_filter("pk__in", idents) self.query.related_ids = related_ids else: # The fast path. Filters and updates in one query. self.query.add_filter("pk__in", query) self.query.reset_refcounts(refcounts_before)
SQLUpdateCompiler
python
apache__airflow
providers/fab/src/airflow/providers/fab/www/session.py
{ "start": 1438, "end": 1904 }
class ____: """Exempt certain blueprints/paths from autogenerated sessions.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.serializer = _LazySafeSerializer() def save_session(self, *args, **kwargs): """Prevent creating session from REST API and health requests.""" if request.path == "/health": return None return super().save_session(*args, **kwargs)
SessionExemptMixin
python
ansible__ansible
lib/ansible/module_utils/facts/system/distribution.py
{ "start": 23197, "end": 33010 }
class ____(object): """ This subclass of Facts fills the distribution, distribution_version and distribution_release variables To do so it checks the existence and content of typical files in /etc containing distribution information This is unit tested. Please extend the tests to cover all distributions if you have them available. """ # keep keys in sync with Conditionals page of docs OS_FAMILY_MAP = {'RedHat': ['RedHat', 'RHEL', 'Fedora', 'CentOS', 'Scientific', 'SLC', 'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS', 'OEL', 'Amazon', 'Amzn', 'Virtuozzo', 'XenServer', 'Alibaba', 'EulerOS', 'openEuler', 'AlmaLinux', 'Rocky', 'TencentOS', 'EuroLinux', 'Kylin Linux Advanced Server', 'MIRACLE'], 'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon', 'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux', 'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin', 'OSMC', 'Linux Mint Debian Edition', 'Univention Corporate Server'], 'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed', 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite', 'SL-Micro', 'openSUSE MicroOS'], 'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'], 'Mandrake': ['Mandrake', 'Mandriva'], 'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'], 'Slackware': ['Slackware'], 'Altlinux': ['Altlinux'], 'SMGL': ['SMGL'], 'Gentoo': ['Gentoo', 'Funtoo'], 'Alpine': ['Alpine'], 'AIX': ['AIX'], 'HP-UX': ['HPUX'], 'Darwin': ['MacOSX'], 'FreeBSD': ['FreeBSD', 'TrueOS'], 'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix'], 'DragonFly': ['DragonflyBSD', 'DragonFlyBSD', 'Gentoo/DragonflyBSD', 'Gentoo/DragonFlyBSD'], 'NetBSD': ['NetBSD'], } OS_FAMILY = {} for family, names in OS_FAMILY_MAP.items(): for name in names: OS_FAMILY[name] = family def __init__(self, module): self.module = module def get_distribution_facts(self): distribution_facts = {} # The platform module provides information about the running # system/distribution. 
Use this as a baseline and fix buggy systems # afterwards system = platform.system() distribution_facts['distribution'] = system distribution_facts['distribution_release'] = platform.release() distribution_facts['distribution_version'] = platform.version() systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD') if system in systems_implemented: cleanedname = system.replace('-', '') distfunc = getattr(self, 'get_distribution_' + cleanedname) dist_func_facts = distfunc() distribution_facts.update(dist_func_facts) elif system == 'Linux': distribution_files = DistributionFiles(module=self.module) # linux_distribution_facts = LinuxDistribution(module).get_distribution_facts() dist_file_facts = distribution_files.process_dist_files() distribution_facts.update(dist_file_facts) distro = distribution_facts['distribution'] # look for an os family alias for the 'distribution', if there isn't one, use 'distribution' distribution_facts['os_family'] = self.OS_FAMILY.get(distro, None) or distro return distribution_facts def get_distribution_AIX(self): aix_facts = {} rc, out, err = self.module.run_command("/usr/bin/oslevel") data = out.split('.') aix_facts['distribution_major_version'] = data[0] if len(data) > 1: aix_facts['distribution_version'] = '%s.%s' % (data[0], data[1]) aix_facts['distribution_release'] = data[1] else: aix_facts['distribution_version'] = data[0] return aix_facts def get_distribution_HPUX(self): hpux_facts = {} rc, out, err = self.module.run_command(r"/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True) data = re.search(r'HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out) if data: hpux_facts['distribution_version'] = data.groups()[0] hpux_facts['distribution_release'] = data.groups()[1] return hpux_facts def get_distribution_Darwin(self): darwin_facts = {} darwin_facts['distribution'] = 'MacOSX' rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion") data = out.split()[-1] if data: darwin_facts['distribution_major_version'] = data.split('.')[0] darwin_facts['distribution_version'] = data return darwin_facts def get_distribution_FreeBSD(self): freebsd_facts = {} freebsd_facts['distribution_release'] = platform.release() data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT|RC|PRERELEASE).*', freebsd_facts['distribution_release']) if 'trueos' in platform.version(): freebsd_facts['distribution'] = 'TrueOS' if data: freebsd_facts['distribution_major_version'] = data.group(1) freebsd_facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2)) return freebsd_facts def get_distribution_OpenBSD(self): openbsd_facts = {} openbsd_facts['distribution_version'] = platform.release() rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version") match = re.match(r'OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out) if match: openbsd_facts['distribution_release'] = match.groups()[0] else: openbsd_facts['distribution_release'] = 'release' return openbsd_facts def get_distribution_DragonFly(self): dragonfly_facts = { 'distribution_release': platform.release() } rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version") match = re.search(r'v(\d+)\.(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', out) if match: dragonfly_facts['distribution_major_version'] = match.group(1) dragonfly_facts['distribution_version'] = '%s.%s.%s' % match.groups()[:3] return dragonfly_facts def get_distribution_NetBSD(self): netbsd_facts = {} platform_release = platform.release() 
netbsd_facts['distribution_release'] = platform_release rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version") match = re.match(r'NetBSD\s(\d+)\.(\d+)\s\((GENERIC)\).*', out) if match: netbsd_facts['distribution_major_version'] = match.group(1) netbsd_facts['distribution_version'] = '%s.%s' % match.groups()[:2] else: netbsd_facts['distribution_major_version'] = platform_release.split('.')[0] netbsd_facts['distribution_version'] = platform_release return netbsd_facts def get_distribution_SMGL(self): smgl_facts = {} smgl_facts['distribution'] = 'Source Mage GNU/Linux' return smgl_facts def get_distribution_SunOS(self): sunos_facts = {} data = get_file_content('/etc/release').splitlines()[0] if 'Solaris' in data: # for solaris 10 uname_r will contain 5.10, for solaris 11 it will have 5.11 uname_r = get_uname(self.module, flags=['-r']) ora_prefix = '' if 'Oracle Solaris' in data: data = data.replace('Oracle ', '') ora_prefix = 'Oracle ' sunos_facts['distribution'] = data.split()[0] sunos_facts['distribution_version'] = data.split()[1] sunos_facts['distribution_release'] = ora_prefix + data sunos_facts['distribution_major_version'] = uname_r.split('.')[1].rstrip() return sunos_facts uname_v = get_uname(self.module, flags=['-v']) distribution_version = None if 'SmartOS' in data: sunos_facts['distribution'] = 'SmartOS' if _file_exists('/etc/product'): product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l]) if 'Image' in product_data: distribution_version = product_data.get('Image').split()[-1] elif 'OpenIndiana' in data: sunos_facts['distribution'] = 'OpenIndiana' elif 'OmniOS' in data: sunos_facts['distribution'] = 'OmniOS' distribution_version = data.split()[-1] elif uname_v is not None and 'NexentaOS_' in uname_v: sunos_facts['distribution'] = 'Nexenta' distribution_version = data.split()[-1].lstrip('v') if sunos_facts.get('distribution', '') in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'): sunos_facts['distribution_release'] = data.strip() if distribution_version is not None: sunos_facts['distribution_version'] = distribution_version elif uname_v is not None: sunos_facts['distribution_version'] = uname_v.splitlines()[0].strip() return sunos_facts return sunos_facts
Distribution
python
walkccc__LeetCode
solutions/3117. Minimum Sum of Values by Dividing Array/3117.py
{ "start": 0, "end": 867 }
class ____: def minimumValueSum(self, nums: list[int], andValues: list[int]) -> int: n = len(nums) m = len(andValues) @functools.lru_cache(None) def dp(i: int, j: int, mask: int) -> int: """ Returns the minimum value sum of nums[i..n) and andValues[j..m), where `mask` is the running value of the current subarray. """ if i == n and j == m: return 0 if i == n or j == m: return math.inf mask &= nums[i] if mask < andValues[j]: return math.inf if mask == andValues[j]: # 1. Keep going. # 2. End the subarray here and pick nums[i], then fresh start. return min(dp(i + 1, j, mask), nums[i] + dp(i + 1, j + 1, -1)) return dp(i + 1, j, mask) # Keep going. ans = dp(0, 0, -1) return ans if ans < math.inf else -1
Solution
python
tensorflow__tensorflow
tensorflow/python/distribute/experimental/mirrored_strategy_test.py
{ "start": 1779, "end": 19762 }
class ____(test_util.DTensorBaseTest): def setUp(self): super().setUp() global_ids = test_util.create_device_ids_array((2,)) local_ids = np.ravel(global_ids).tolist() mesh_dict = { device: layout.Mesh(['batch'], global_ids, local_ids, test_util.create_device_list((2,), device)) for device in ['TPU', 'GPU', 'CPU'] } self.mesh = self.configTestMesh(mesh_dict) @parameterized.named_parameters([ ('py_floats', lambda: [1.0, 2.0], True), ('np_floats', lambda: np.array([1.0, 2.0]), True), ('tf_const', lambda: constant_op.constant([1.0, 2.0]), True), ('py_floats_callable', lambda: [1.0, 2.0], False), ('np_floats_callable', lambda: np.array([1.0, 2.0]), False), ('tf_const_callable', lambda: constant_op.constant([1.0, 2.0]), False), ]) def test_variable_creation(self, init_value, convert_callable): if convert_callable: init_value = init_value() strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) with strategy.scope(): v = variables.Variable(init_value) self.assertIsInstance(v, d_variable.DVariable) self.assertIsNotNone(v.layout) self.assertEqual(v.layout, layout.Layout.replicated(self.mesh, rank=1)) def test_variable_creation_with_dtype(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) with strategy.scope(): v = variables.Variable( 0, dtype='int64', aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA) self.assertIsInstance(v, d_variable.DVariable) self.assertEqual(v.dtype, dtypes.int64) def test_mesh(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) self.assertEqual(strategy.mesh, self.mesh) def test_strategy_extension(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) self.assertIsInstance(strategy.extended, distribute_lib.StrategyExtendedV2) def test_num_replica_in_sync(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) self.assertEqual(strategy.num_replicas_in_sync, 2) def test_worker_devices(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) worker_devices = strategy.extended.worker_devices self.assertLen(worker_devices, 2) self.assertEqual(worker_devices, tuple(self.mesh.local_devices())) def test_parameter_devices(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) parameter_devices = strategy.extended.parameter_devices self.assertLen(parameter_devices, 2) self.assertEqual(parameter_devices, tuple(self.mesh.local_devices())) def test_variable_created_in_scope(self): strategy1 = mirrored_strategy.MirroredStrategy(mesh=self.mesh) with strategy1.scope(): v1 = variables.Variable(constant_op.constant([1.0, 2.0])) v2 = variables.Variable(constant_op.constant([1.0, 2.0])) strategy2 = mirrored_strategy.MirroredStrategy(mesh=self.mesh) with strategy2.scope(): v3 = variables.Variable(constant_op.constant([1.0, 2.0])) self.assertTrue(strategy1.extended.variable_created_in_scope(v1)) self.assertFalse(strategy1.extended.variable_created_in_scope(v2)) self.assertFalse(strategy1.extended.variable_created_in_scope(v3)) self.assertTrue(strategy2.extended.variable_created_in_scope(v3)) def test_colocate_vars_with(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) with strategy.scope(): v1 = variables.Variable(constant_op.constant([1.0, 2.0])) with strategy.extended.colocate_vars_with(v1): v2 = variables.Variable(constant_op.constant([2.0, 3.0])) # We assert the layout for the variable, and make sure they are same. 
self.assertEqual(v1.layout, v2.layout) def test_in_multi_worker_mode(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) self.assertFalse(strategy.extended._in_multi_worker_mode()) def test_run_with_tensor_inputs(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) tensor_input = constant_op.constant(3.0) @def_function.function def replica_fn(inputs): return inputs * 2.0 with self.assertRaisesRegex( ValueError, 'Unsupported input types for MirroredStrategy.'): strategy.run(replica_fn, args=(tensor_input,)) def test_run_with_graph_tensor_inputs(self): # Note that this is potentially a sharp edge for the user, since the eager # test case was raising an error, but the graph context will run, by treat # the inputs as a global inputs. # TODO(scottzhu): Mitigate this eager/graph behavior difference in future. strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) @def_function.function def replica_fn(inputs): return inputs * 2.0 @def_function.function def run_fn(): tensor_input = constant_op.constant(3.0) return strategy.run(replica_fn, args=(tensor_input,)) with strategy.scope(): result = run_fn() self.assertEqual(result, constant_op.constant(6.0)) def test_run_with_unsupported_input_types(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) random_inputs = [123, '456'] @def_function.function def replica_fn(inputs): return inputs * 2.0 with self.assertRaisesRegex( ValueError, 'Unsupported input types for MirroredStrategy.'): strategy.run(replica_fn, args=(random_inputs,)) def test_run_with_distribute_value_input(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) def value_fn(value_context): return value_context.replica_id_in_sync_group distributed_values = ( strategy.experimental_distribute_values_from_function( value_fn)) @def_function.function def replica_fn(inputs): return inputs * 2 result = strategy.run(replica_fn, args=(distributed_values,)) self.assertIsInstance(result, dtensor_util.DTensorDistributedValue) self.assertLen(result.values, 2) # Note that the scalar value from # experimental_distribute_values_from_function will be up rank to 1D since # batched shared dtensor need at least be 1D. So the result from the # strategy.run is [0], instead of just 0. 
self.assertAllClose(result.values[0], constant_op.constant([0])) self.assertAllClose(result.values[1], constant_op.constant([2])) def test_run_without_input(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) @def_function.function def replica_fn(): return constant_op.constant([1.0]) result = strategy.run(replica_fn) self.assertIsInstance(result, dtensor_util.DTensorDistributedValue) self.assertLen(result.values, 2) self.assertAllClose(result.values[0], constant_op.constant([1.0])) self.assertAllClose(result.values[1], constant_op.constant([1.0])) def test_nested_structure_output(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) array_value = np.array([3., 2., 1.]) def value_fn(ctx): value = array_value[ctx.replica_id_in_sync_group] return {'a': value, 'b': constant_op.constant([value + 1.0, value + 2.0])} distributed_values = ( strategy.experimental_distribute_values_from_function( value_fn)) @def_function.function def replica_fn(inputs): result = {} for key in inputs: result[key] = inputs[key] * 2.0 return result result = strategy.run(replica_fn, args=(distributed_values,)) self.assertLen(result.keys(), 2) self.assertIsInstance(result['a'], dtensor_util.DTensorDistributedValue) self.assertAllClose(result['a'].values[0], constant_op.constant([6.0])) self.assertAllClose(result['a'].values[1], constant_op.constant([4.0])) self.assertIsInstance(result['b'], dtensor_util.DTensorDistributedValue) self.assertAllClose(result['b'].values[0], constant_op.constant([8.0, 10.0])) self.assertAllClose(result['b'].values[1], constant_op.constant([6.0, 8.0])) def test_inputs_with_dtensor_distribute_values(self): @def_function.function def replica_fn_1(inputs): return inputs * 2.0 @def_function.function def replica_fn_2(inputs): return inputs + 1.0 strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) tensor_input = constant_op.constant(3.0) d_tensor_input = strategy.experimental_distribute_values_from_function( lambda _: tensor_input) result_1 = strategy.run(replica_fn_1, args=(d_tensor_input,)) self.assertIsInstance(result_1, dtensor_util.DTensorDistributedValue) self.assertLen(result_1.values, 2) self.assertAllClose(result_1.values[0], constant_op.constant([6.0])) self.assertAllClose(result_1.values[1], constant_op.constant([6.0])) result_2 = strategy.run(replica_fn_2, args=(result_1,)) self.assertIsInstance(result_2, dtensor_util.DTensorDistributedValue) self.assertLen(result_2.values, 2) self.assertAllClose(result_2.values[0], constant_op.constant([7.0])) self.assertAllClose(result_2.values[1], constant_op.constant([7.0])) def test_run_with_nullary_ops(self): @def_function.function def replica_fn(): return constant_op.constant([3.0]) strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) result = strategy.run(replica_fn) self.assertIsInstance(result, dtensor_util.DTensorDistributedValue) self.assertAllClose(result.values[0], constant_op.constant([3.0])) self.assertAllClose(result.values[1], constant_op.constant([3.0])) def test_get_replica_context(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) tensor_input = constant_op.constant(3) d_tensor_input = strategy.experimental_distribute_values_from_function( lambda _: tensor_input) @def_function.function def replica_fn(inputs): replica_context = distribute_lib.get_replica_context() self.assertIsInstance(replica_context, dtensor_util.DTensorReplicaContext) return inputs * replica_context.num_replicas_in_sync # Default replica context 
self.assertIsNotNone(distribute_lib.get_replica_context()) with strategy.scope(): self.assertIsNone(distribute_lib.get_replica_context()) result = strategy.run(replica_fn, args=(d_tensor_input,)) self.assertLen(result.values, 2) self.assertAllClose(result.values[0], constant_op.constant([6])) self.assertAllClose(result.values[1], constant_op.constant([6])) def test_gather_non_dtensor_value(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) tensor_input = constant_op.constant(3.0) result = strategy.gather(tensor_input, axis=0) self.assertAllClose(result, tensor_input) def test_gather_dtensor_value(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) def value_fn(value_context): start = value_context.replica_id_in_sync_group return array_ops.reshape(math_ops.range(start=start, limit=start + 6), shape=(1, 2, 3)) distribute_result = strategy.experimental_distribute_values_from_function( value_fn) result = strategy.gather(distribute_result, axis=0) self.assertEqual(result.shape, [2, 2, 3]) self.assertAllClose(result, [[[0, 1, 2], [3, 4, 5]], [[1, 2, 3], [4, 5, 6]]]) result = strategy.gather(distribute_result, axis=1) self.assertEqual(result.shape, [1, 4, 3]) self.assertAllClose(result, [[[0, 1, 2], [3, 4, 5], [1, 2, 3], [4, 5, 6]]]) result = strategy.gather(distribute_result, axis=2) self.assertEqual(result.shape, [1, 2, 6]) self.assertAllClose(result, [[[0, 1, 2, 1, 2, 3], [3, 4, 5, 4, 5, 6]]]) def test_reduce_mean_non_dtensor_value(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) tensor_input = constant_op.constant([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]) with self.assertRaisesRegex( ValueError, 'Unsupported input types for MirroredStrategy.'): strategy.reduce(reduce_util.ReduceOp.MEAN, tensor_input, axis=0) def test_reduce_sum_non_dtensor_value(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) tensor_input = constant_op.constant([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]) with self.assertRaisesRegex( ValueError, 'Unsupported input types for MirroredStrategy.'): strategy.reduce(reduce_util.ReduceOp.SUM, tensor_input, axis=0) def test_reduce_mean_distribute_value(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) @def_function.function def value_fn(value_context): i = value_context.replica_id_in_sync_group n = value_context.num_replicas_in_sync return constant_op.constant([[0.0, 1.0], [2.0, 3.0]]) + i * n * 2.0 distribute_value = strategy.experimental_distribute_values_from_function( value_fn) # replica 1 has [[0.0, 1.0],[2.0, 3.0]] and replica 2 has # [[4.0, 5.0],[6.0, 7.0]] result = strategy.reduce( reduce_util.ReduceOp.MEAN, distribute_value, axis=None) self.assertAllClose(result, constant_op.constant([[2.0, 3.0], [4.0, 5.0]])) result = strategy.reduce( reduce_util.ReduceOp.MEAN, distribute_value, axis=0) self.assertAllClose(result, constant_op.constant([3.0, 4.0])) result = strategy.reduce( reduce_util.ReduceOp.MEAN, distribute_value, axis=1) self.assertAllClose(result, constant_op.constant([2.5, 4.5])) def test_reduce_sum_distribute_value(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) @def_function.function def value_fn(value_context): i = value_context.replica_id_in_sync_group n = value_context.num_replicas_in_sync return constant_op.constant([[0.0, 1.0], [2.0, 3.0]]) + i * n * 2.0 distribute_value = strategy.experimental_distribute_values_from_function( value_fn) # replica 1 has [[0.0, 1.0],[2.0, 3.0]] and replica 2 has # [[4.0, 5.0],[6.0, 7.0]] result = strategy.reduce( 
reduce_util.ReduceOp.SUM, distribute_value, axis=None) self.assertAllClose(result, constant_op.constant([[4.0, 6.0], [8.0, 10.0]])) result = strategy.reduce( reduce_util.ReduceOp.SUM, distribute_value, axis=0) self.assertAllClose(result, constant_op.constant([12.0, 16.0])) result = strategy.reduce( reduce_util.ReduceOp.SUM, distribute_value, axis=1) self.assertAllClose(result, constant_op.constant([10.0, 18.0])) def test_reduce_mean_mirrored_value(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) with strategy.scope(): v = variables.Variable(constant_op.constant([[1.0, 2.0], [3.0, 4.0]])) self.assertIsInstance(v, d_variable.DVariable) result = strategy.reduce(reduce_util.ReduceOp.MEAN, v, axis=None) self.assertAllClose(result, constant_op.constant([[1.0, 2.0], [3.0, 4.0]])) result = strategy.reduce(reduce_util.ReduceOp.MEAN, v, axis=0) self.assertAllClose(result, constant_op.constant([2.0, 3.0])) result = strategy.reduce(reduce_util.ReduceOp.MEAN, v, axis=1) self.assertAllClose(result, constant_op.constant([1.5, 3.5])) def test_reduce_sum_mirrored_value(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) with strategy.scope(): v = variables.Variable(constant_op.constant([[1.0, 2.0], [3.0, 4.0]])) self.assertIsInstance(v, d_variable.DVariable) result = strategy.reduce(reduce_util.ReduceOp.SUM, v, axis=None) self.assertAllClose(result, constant_op.constant([[1.0, 2.0], [3.0, 4.0]])) result = strategy.reduce(reduce_util.ReduceOp.SUM, v, axis=0) self.assertAllClose(result, constant_op.constant([4.0, 6.0])) result = strategy.reduce(reduce_util.ReduceOp.SUM, v, axis=1) self.assertAllClose(result, constant_op.constant([3.0, 7.0])) def test_reduce_value_device(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) tensor_input = constant_op.constant([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]) result = strategy.reduce(reduce_util.ReduceOp.MEAN, tensor_input, axis=None) self.assertIn('CPU:0', result.device) def test_experimental_local_results(self): @def_function.function def replica_fn(): return constant_op.constant([3.0]) strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) result = strategy.run(replica_fn) local_result = strategy.experimental_local_results(result) self.assertIsInstance(local_result, tuple) self.assertLen(local_result, 2) self.assertEqual(local_result[0], constant_op.constant([3.0])) self.assertEqual(local_result[1], constant_op.constant([3.0])) def test_experimental_local_results_with_inputs(self): strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh) array_value = np.array([3., 2.]) def value_fn(ctx): value = array_value[ctx.replica_id_in_sync_group] return {'a': value, 'b': constant_op.constant([value + 1.0, value + 2.0])} distributed_values = ( strategy.experimental_distribute_values_from_function( value_fn)) @def_function.function def replica_fn(inputs): result = {} for key in inputs: result[key] = inputs[key] * 2.0 return result result = strategy.run(replica_fn, args=(distributed_values,)) local_result = strategy.experimental_local_results(result) self.assertIsInstance(local_result, tuple) self.assertLen(local_result, 2) self.assertDictEqual(local_result[0], {'a': constant_op.constant([6.0]), 'b': constant_op.constant([8.0, 10.0])}) self.assertDictEqual(local_result[1], {'a': constant_op.constant([4.0]), 'b': constant_op.constant([6.0, 8.0])})
StrategyBaseTest
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mysql/asyncmy.py
{ "start": 5099, "end": 6627 }
class ____(MySQLDialect_pymysql):
    driver = "asyncmy"
    supports_statement_cache = True

    supports_server_side_cursors = True
    _sscursor = AsyncAdapt_asyncmy_ss_cursor

    is_async = True
    has_terminate = True

    @classmethod
    def import_dbapi(cls) -> DBAPIModule:
        return AsyncAdapt_asyncmy_dbapi(__import__("asyncmy"))

    def do_terminate(self, dbapi_connection: DBAPIConnection) -> None:
        dbapi_connection.terminate()

    def create_connect_args(self, url: URL) -> ConnectArgsType:  # type: ignore[override]  # noqa: E501
        return super().create_connect_args(
            url, _translate_args=dict(username="user", database="db")
        )

    def is_disconnect(
        self,
        e: DBAPIModule.Error,
        connection: Optional[Union[PoolProxiedConnection, DBAPIConnection]],
        cursor: Optional[DBAPICursor],
    ) -> bool:
        if super().is_disconnect(e, connection, cursor):
            return True
        else:
            str_e = str(e).lower()
            return (
                "not connected" in str_e or "network operation failed" in str_e
            )

    def _found_rows_client_flag(self) -> int:
        from asyncmy.constants import CLIENT  # type: ignore

        return CLIENT.FOUND_ROWS  # type: ignore[no-any-return]

    def get_driver_connection(
        self, connection: DBAPIConnection
    ) -> AsyncIODBAPIConnection:
        return connection._connection  # type: ignore[no-any-return]


dialect = MySQLDialect_asyncmy
MySQLDialect_asyncmy
python
streamlit__streamlit
lib/tests/streamlit/elements/media_test.py
{ "start": 1254, "end": 9192 }
class ____(DeltaGeneratorTestCase): @parameterized.expand( [ ("foo.wav", "audio/wav", MockMediaKind.AUDIO, False), (Path("foo.wav"), "audio/wav", MockMediaKind.AUDIO, False), ("path/to/foo.wav", "audio/wav", MockMediaKind.AUDIO, False), (Path("path/to/foo.wav"), "audio/wav", MockMediaKind.AUDIO, False), (b"fake_audio_data", "audio/wav", MockMediaKind.AUDIO, False), ("https://foo.com/foo.wav", "audio/wav", MockMediaKind.AUDIO, True), ("foo.mp4", "video/mp4", MockMediaKind.VIDEO, False), (Path("foo.mp4"), "video/mp4", MockMediaKind.VIDEO, False), ("path/to/foo.mp4", "video/mp4", MockMediaKind.VIDEO, False), (Path("path/to/foo.mp4"), "video/mp4", MockMediaKind.VIDEO, False), (b"fake_video_data", "video/mp4", MockMediaKind.VIDEO, False), ("https://foo.com/foo.mp4", "video/mp4", MockMediaKind.VIDEO, True), ] ) def test_add_bytes_and_filenames_to_mediafilemanager( self, media_data: MediaData, mimetype: str, media_kind: MockMediaKind, is_url: bool, ): """st.audio + st.video should register bytes and filenames with the MediaFileManager. URL-based media does not go through the MediaFileManager. """ with ( mock.patch( "streamlit.runtime.media_file_manager.MediaFileManager.add" ) as mock_mfm_add, mock.patch("streamlit.runtime.caching.save_media_data"), ): mock_mfm_add.return_value = "https://mockoutputurl.com" if media_kind is MockMediaKind.AUDIO: st.audio(media_data, mimetype) element = self.get_delta_from_queue().new_element element_url = element.audio.url else: st.video(media_data, mimetype) element = self.get_delta_from_queue().new_element element_url = element.video.url if is_url: # URLs should be returned as-is, and should not result in a call to # MediaFileManager.add assert media_data == element_url mock_mfm_add.assert_not_called() else: # Other strings, Path objects, and audio/video data, should be passed to # MediaFileManager.add expected_media_data = ( str(media_data) if isinstance(media_data, Path) else media_data ) mock_mfm_add.assert_called_once_with( expected_media_data, mimetype, str(make_delta_path(RootContainer.MAIN, (), 0)), ) assert element_url == "https://mockoutputurl.com" def test_audio_width_config_default(self): """Test that default width is 'stretch' for audio.""" with ( mock.patch( "streamlit.runtime.media_file_manager.MediaFileManager.add" ) as mock_mfm_add, mock.patch("streamlit.runtime.caching.save_media_data"), ): mock_mfm_add.return_value = "https://mockoutputurl.com" st.audio("foo.wav", "audio/wav") c = self.get_delta_from_queue().new_element.audio assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert c.width_config.use_stretch def test_video_width_config_default(self): """Test that default width is 'stretch' for video.""" with ( mock.patch( "streamlit.runtime.media_file_manager.MediaFileManager.add" ) as mock_mfm_add, mock.patch("streamlit.runtime.caching.save_media_data"), ): mock_mfm_add.return_value = "https://mockoutputurl.com" st.video("foo.mp4", "video/mp4") c = self.get_delta_from_queue().new_element.video assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert c.width_config.use_stretch def test_audio_width_config_pixel(self): """Test that pixel width works properly for audio.""" with ( mock.patch( "streamlit.runtime.media_file_manager.MediaFileManager.add" ) as mock_mfm_add, mock.patch("streamlit.runtime.caching.save_media_data"), ): mock_mfm_add.return_value = "https://mockoutputurl.com" st.audio("foo.wav", "audio/wav", width=200) c = 
self.get_delta_from_queue().new_element.audio assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.PIXEL_WIDTH.value ) assert c.width_config.pixel_width == 200 def test_video_width_config_pixel(self): """Test that pixel width works properly for video.""" with ( mock.patch( "streamlit.runtime.media_file_manager.MediaFileManager.add" ) as mock_mfm_add, mock.patch("streamlit.runtime.caching.save_media_data"), ): mock_mfm_add.return_value = "https://mockoutputurl.com" st.video("foo.mp4", "video/mp4", width=200) c = self.get_delta_from_queue().new_element.video assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.PIXEL_WIDTH.value ) assert c.width_config.pixel_width == 200 def test_audio_width_config_stretch(self): """Test that 'stretch' width works properly for audio.""" with ( mock.patch( "streamlit.runtime.media_file_manager.MediaFileManager.add" ) as mock_mfm_add, mock.patch("streamlit.runtime.caching.save_media_data"), ): mock_mfm_add.return_value = "https://mockoutputurl.com" st.audio("foo.wav", "audio/wav", width="stretch") c = self.get_delta_from_queue().new_element.audio assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert c.width_config.use_stretch def test_video_width_config_stretch(self): """Test that 'stretch' width works properly for video.""" with ( mock.patch( "streamlit.runtime.media_file_manager.MediaFileManager.add" ) as mock_mfm_add, mock.patch("streamlit.runtime.caching.save_media_data"), ): mock_mfm_add.return_value = "https://mockoutputurl.com" st.video("foo.mp4", "video/mp4", width="stretch") c = self.get_delta_from_queue().new_element.video assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert c.width_config.use_stretch @parameterized.expand( [ ("invalid",), (-100,), (0,), (100.5,), (None,), ] ) def test_audio_invalid_width(self, width): """Test that invalid width values raise exceptions for audio.""" with pytest.raises(StreamlitInvalidWidthError): st.audio("foo.wav", "audio/wav", width=width) @parameterized.expand( [ ("invalid",), (-100,), (0,), (100.5,), (None,), ] ) def test_video_invalid_width(self, width): """Test that invalid width values raise exceptions for video.""" with pytest.raises(StreamlitInvalidWidthError): st.video("foo.mp4", "video/mp4", width=width)
MediaTest
python
huggingface__transformers
src/transformers/models/diffllama/modeling_diffllama.py
{ "start": 28936, "end": 32087 }
class ____(DiffLlamaPreTrainedModel):
    def __init__(self, config: DiffLlamaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [DiffLlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = DiffLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = DiffLlamaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs()
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
DiffLlamaModel
python
openai__gym
gym/wrappers/transform_observation.py
{ "start": 92, "end": 1672 }
class ____(gym.ObservationWrapper):
    """Transform the observation via an arbitrary function :attr:`f`.

    The function :attr:`f` should be defined on the observation space of the base
    environment, ``env``, and should, ideally, return values in the same space.

    If the transformation you wish to apply to observations returns values in a
    *different* space, you should subclass :class:`ObservationWrapper`, implement
    the transformation, and set the new observation space accordingly. If you were
    to use this wrapper instead, the observation space would be set incorrectly.

    Example:
        >>> import gym
        >>> import numpy as np
        >>> env = gym.make('CartPole-v1')
        >>> env = TransformObservation(env, lambda obs: obs + 0.1*np.random.randn(*obs.shape))
        >>> env.reset()
        array([-0.08319338, 0.04635121, -0.07394746, 0.20877492])
    """

    def __init__(self, env: gym.Env, f: Callable[[Any], Any]):
        """Initialize the :class:`TransformObservation` wrapper with an environment and a transform function :param:`f`.

        Args:
            env: The environment to apply the wrapper
            f: A function that transforms the observation
        """
        super().__init__(env)
        assert callable(f)
        self.f = f

    def observation(self, observation):
        """Transforms the observations with callable :attr:`f`.

        Args:
            observation: The observation to transform

        Returns:
            The transformed observation
        """
        return self.f(observation)
TransformObservation
python
numpy__numpy
benchmarks/benchmarks/bench_itemselection.py
{ "start": 487, "end": 1201 }
class ____(Benchmark):
    params = [
        [True, False],
        TYPES1 + ["O", "i,O"]]
    param_names = ["values_is_scalar", "dtype"]

    def setup(self, values_is_scalar, dtype):
        if values_is_scalar:
            self.vals = np.array(1., dtype=dtype)
        else:
            self.vals = np.ones(1000, dtype=dtype)

        self.arr = np.ones(1000, dtype=dtype)

        self.dense_mask = np.ones(1000, dtype="bool")
        self.sparse_mask = np.zeros(1000, dtype="bool")

    def time_dense(self, values_is_scalar, dtype):
        np.putmask(self.arr, self.dense_mask, self.vals)

    def time_sparse(self, values_is_scalar, dtype):
        np.putmask(self.arr, self.sparse_mask, self.vals)
PutMask
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 12088, "end": 15540 }
class ____(NonStrictDataModel):
    """
    :param uri: Data URI
    :type uri: str
    :param content_type: Content type (e.g. 'image/jpeg', 'image/png')
    :type content_type: str
    :param width: Width in pixels
    :type width: int
    :param height: Height in pixels
    :type height: int
    :param timestamp: Timestamp in the source data (for video content. for images,
        this value should be 0)
    :type timestamp: int
    """

    _schema = {
        "properties": {
            "content_type": {
                "description": "Content type (e.g. 'image/jpeg', 'image/png')",
                "type": "string",
            },
            "height": {"description": "Height in pixels", "type": "integer"},
            "timestamp": {
                "default": 0,
                "description": "Timestamp in the source data (for video content. for images, this value should be 0)",
                "type": "integer",
            },
            "uri": {"description": "Data URI", "type": "string"},
            "width": {"description": "Width in pixels", "type": "integer"},
        },
        "required": ["uri"],
        "type": "object",
    }

    def __init__(
        self, uri, content_type=None, width=None, height=None, timestamp=0, **kwargs
    ):
        super(Preview, self).__init__(**kwargs)
        self.uri = uri
        self.content_type = content_type
        self.width = width
        self.height = height
        self.timestamp = timestamp

    @schema_property("uri")
    def uri(self):
        return self._property_uri

    @uri.setter
    def uri(self, value):
        if value is None:
            self._property_uri = None
            return
        self.assert_isinstance(value, "uri", six.string_types)
        self._property_uri = value

    @schema_property("content_type")
    def content_type(self):
        return self._property_content_type

    @content_type.setter
    def content_type(self, value):
        if value is None:
            self._property_content_type = None
            return
        self.assert_isinstance(value, "content_type", six.string_types)
        self._property_content_type = value

    @schema_property("width")
    def width(self):
        return self._property_width

    @width.setter
    def width(self, value):
        if value is None:
            self._property_width = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "width", six.integer_types)
        self._property_width = value

    @schema_property("height")
    def height(self):
        return self._property_height

    @height.setter
    def height(self, value):
        if value is None:
            self._property_height = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "height", six.integer_types)
        self._property_height = value

    @schema_property("timestamp")
    def timestamp(self):
        return self._property_timestamp

    @timestamp.setter
    def timestamp(self, value):
        if value is None:
            self._property_timestamp = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "timestamp", six.integer_types)
        self._property_timestamp = value
Preview
python
astropy__astropy
astropy/coordinates/builtin_frames/gcrs.py
{ "start": 4314, "end": 5289 }
class ____(BaseRADecFrame):
    """
    A coordinate frame defined in a similar manner as GCRS, but precessed to a
    requested (mean) equinox. Note that this does *not* end up the same as
    regular GCRS even for J2000 equinox, because the GCRS orientation is fixed
    to that of ICRS, which is not quite the same as the dynamical J2000
    orientation.

    The frame attributes are listed under **Other Parameters**
    """

    equinox = TimeAttribute(default=EQUINOX_J2000, doc="The equinox time")
    obstime = TimeAttribute(
        default=DEFAULT_OBSTIME, doc="The reference time (e.g., time of observation)"
    )
    obsgeoloc = CartesianRepresentationAttribute(
        default=[0, 0, 0],
        unit=u.m,
        doc="The observer location relative to Earth center",
    )
    obsgeovel = CartesianRepresentationAttribute(
        default=[0, 0, 0],
        unit=u.m / u.s,
        doc="The observer velocity relative to Earth center",
    )
PrecessedGeocentric
python
matplotlib__matplotlib
lib/mpl_toolkits/axisartist/axis_artist.py
{ "start": 3261, "end": 3725 }
class ____:
    def get_ref_artist(self):
        """
        Return the underlying artist that actually defines some properties
        (e.g., color) of this artist.
        """
        raise RuntimeError("get_ref_artist must overridden")

    def get_attribute_from_ref_artist(self, attr_name):
        getter = methodcaller("get_" + attr_name)
        prop = getter(super())
        return getter(self.get_ref_artist()) if prop == "auto" else prop
AttributeCopier
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol53.py
{ "start": 1666, "end": 2402 }
class ____(Proto_CoGeneric):
    # This should generate a reportIncompatibleMethodOverride error
    # but does not currently.
    def m(self) -> Impl_CoSelf: ...


x01: Proto_CoRecurs = Impl_CoRecurs()
x02: Proto_CoRecurs = Impl_CoSelf()
x03: Proto_CoRecurs = Impl_CoGeneric()
x04: Proto_CoRecurs = Impl_CoOther()

x11: Proto_CoSelf = Impl_CoRecurs()
x12: Proto_CoSelf = Impl_CoSelf()
x13: Proto_CoSelf = Impl_CoGeneric()

# This should generate a reportAsignmentType error.
x14: Proto_CoSelf = Impl_CoOther()

x21: Proto_CoGeneric = Impl_CoRecurs()
x22: Proto_CoGeneric = Impl_CoSelf()
x23: Proto_CoGeneric = Impl_CoGeneric()

# This should generate a reportAsignmentType error.
x24: Proto_CoGeneric = Impl_CoOther()
Impl_CoOtherExplicit3
python
kamyu104__LeetCode-Solutions
Python/maximum-deletions-on-a-string.py
{ "start": 706, "end": 1726 }
class ____(object):
    def deleteString(self, s):
        """
        :type s: str
        :rtype: int
        """
        def getPrefix(pattern, start):
            prefix = [-1]*(len(pattern)-start)
            j = -1
            for i in xrange(1, len(pattern)-start):
                while j != -1 and pattern[start+j+1] != pattern[start+i]:
                    j = prefix[j]
                if pattern[start+j+1] == pattern[start+i]:
                    j += 1
                prefix[i] = j
            return prefix

        if all(x == s[0] for x in s):
            return len(s)
        dp = [1]*len(s)  # dp[i]: max operation count of s[i:]
        for i in reversed(xrange(len(s)-1)):
            prefix = getPrefix(s, i)  # prefix[j]+1: longest prefix suffix length of s[i:j+1]
            for j in xrange(1, len(prefix), 2):
                if 2*(prefix[j]+1) == j+1:
                    dp[i] = max(dp[i], dp[i+(prefix[j]+1)]+1)
        return dp[0]


# Time:  O(n^2)
# Space: O(n)
# dp, rolling hash
Solution2
python
getsentry__sentry-python
tests/test_ai_monitoring.py
{ "start": 6336, "end": 8983 }
class ____:
    def test_no_truncation_needed(self, sample_messages):
        """Test that messages under the limit are not truncated"""
        result, truncation_index = truncate_messages_by_size(
            sample_messages, max_bytes=MAX_GEN_AI_MESSAGE_BYTES
        )

        assert len(result) == len(sample_messages)
        assert result == sample_messages
        assert truncation_index == 0

    def test_truncation_removes_oldest_first(self, large_messages):
        """Test that oldest messages are removed first during truncation"""
        small_limit = 3000
        result, truncation_index = truncate_messages_by_size(
            large_messages, max_bytes=small_limit
        )

        assert len(result) < len(large_messages)
        if result:
            assert result[-1] == large_messages[-1]
        assert truncation_index == len(large_messages) - len(result)

    def test_empty_messages_list(self):
        """Test handling of empty messages list"""
        result, truncation_index = truncate_messages_by_size(
            [], max_bytes=MAX_GEN_AI_MESSAGE_BYTES // 500
        )

        assert result == []
        assert truncation_index == 0

    def test_find_truncation_index(
        self,
    ):
        """Test that the truncation index is found correctly"""
        # when represented in JSON, these are each 7 bytes long
        messages = ["A" * 5, "B" * 5, "C" * 5, "D" * 5, "E" * 5]

        truncation_index = _find_truncation_index(messages, 20)
        assert truncation_index == 3
        assert messages[truncation_index:] == ["D" * 5, "E" * 5]

        messages = ["A" * 5, "B" * 5, "C" * 5, "D" * 5, "E" * 5]
        truncation_index = _find_truncation_index(messages, 40)
        assert truncation_index == 0
        assert messages[truncation_index:] == [
            "A" * 5,
            "B" * 5,
            "C" * 5,
            "D" * 5,
            "E" * 5,
        ]

    def test_progressive_truncation(self, large_messages):
        """Test that truncation works progressively with different limits"""
        limits = [
            MAX_GEN_AI_MESSAGE_BYTES // 5,
            MAX_GEN_AI_MESSAGE_BYTES // 10,
            MAX_GEN_AI_MESSAGE_BYTES // 25,
            MAX_GEN_AI_MESSAGE_BYTES // 100,
            MAX_GEN_AI_MESSAGE_BYTES // 500,
        ]

        prev_count = len(large_messages)
        for limit in limits:
            result = truncate_messages_by_size(large_messages, max_bytes=limit)
            current_count = len(result)
            assert current_count <= prev_count
            assert current_count >= 1
            prev_count = current_count
TestTruncateMessagesBySize
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_set.py
{ "start": 58878, "end": 59099 }
class ____(_TestCopying, __TestCase):
    def setUp(self):
        self.set = set([((1, 2), (3, 4))])
        super().setUp()

#==============================================================================
TestCopyingNested
python
getsentry__sentry
tests/sentry/web/frontend/test_organization_avatar.py
{ "start": 265, "end": 1684 }
class ____(TestCase):
    def test_headers(self) -> None:
        org = self.create_organization()
        photo = File.objects.create(name="test.png", type="avatar.file")
        photo.putfile(BytesIO(b"test"))
        avatar = OrganizationAvatar.objects.create(organization=org, file_id=photo.id)
        url = reverse("sentry-organization-avatar-url", kwargs={"avatar_id": avatar.ident})
        response = self.client.get(url)
        assert response.status_code == 200
        assert response["Cache-Control"] == FOREVER_CACHE
        assert response["Access-Control-Allow-Origin"]
        assert response.get("Vary") is None
        assert response.get("Set-Cookie") is None

    def test_origin_header(self) -> None:
        org = self.create_organization()
        photo = File.objects.create(name="test.png", type="avatar.file")
        photo.putfile(BytesIO(b"test"))
        avatar = OrganizationAvatar.objects.create(organization=org, file_id=photo.id)
        url = reverse("sentry-organization-avatar-url", kwargs={"avatar_id": avatar.ident})
        response = self.client.get(url, HTTP_ORIGIN="http://localhost")
        assert response.status_code == 200
        assert response["Cache-Control"] == FOREVER_CACHE
        assert response["Access-Control-Allow-Origin"] == "http://localhost"
        assert response.get("Vary") is None
        assert response.get("Set-Cookie") is None
OrganizationAvatarTest
python
walkccc__LeetCode
solutions/2678. Number of Senior Citizens/2678.py
{ "start": 0, "end": 131 }
class ____:
  def countSeniors(self, details: list[str]) -> int:
    return sum(int(detail[11:13]) > 60 for detail in details)
Solution
python
django__django
tests/model_forms/tests.py
{ "start": 4594, "end": 28749 }
class ____(TestCase): def test_base_form(self): self.assertEqual(list(BaseCategoryForm.base_fields), ["name", "slug", "url"]) def test_no_model_class(self): class NoModelModelForm(forms.ModelForm): pass with self.assertRaisesMessage( ValueError, "ModelForm has no model class specified." ): NoModelModelForm() def test_empty_fields_to_fields_for_model(self): """ An argument of fields=() to fields_for_model should return an empty dictionary """ field_dict = fields_for_model(Person, fields=()) self.assertEqual(len(field_dict), 0) def test_fields_for_model_form_fields(self): form_declared_fields = CustomWriterForm.declared_fields field_dict = fields_for_model( Writer, fields=["name"], form_declared_fields=form_declared_fields, ) self.assertIs(field_dict["name"], form_declared_fields["name"]) def test_empty_fields_on_modelform(self): """ No fields on a ModelForm should actually result in no fields. """ class EmptyPersonForm(forms.ModelForm): class Meta: model = Person fields = () form = EmptyPersonForm() self.assertEqual(len(form.fields), 0) def test_empty_fields_to_construct_instance(self): """ No fields should be set on a model instance if construct_instance receives fields=(). """ form = modelform_factory(Person, fields="__all__")({"name": "John Doe"}) self.assertTrue(form.is_valid()) instance = construct_instance(form, Person(), fields=()) self.assertEqual(instance.name, "") def test_blank_with_null_foreign_key_field(self): """ #13776 -- ModelForm's with models having a FK set to null=False and required=False should be valid. """ class FormForTestingIsValid(forms.ModelForm): class Meta: model = Student fields = "__all__" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields["character"].required = False char = Character.objects.create( username="user", last_action=datetime.datetime.today() ) data = {"study": "Engineering"} data2 = {"study": "Engineering", "character": char.pk} # form is valid because required=False for field 'character' f1 = FormForTestingIsValid(data) self.assertTrue(f1.is_valid()) f2 = FormForTestingIsValid(data2) self.assertTrue(f2.is_valid()) obj = f2.save() self.assertEqual(obj.character, char) def test_blank_false_with_null_true_foreign_key_field(self): """ A ModelForm with a model having ForeignKey(blank=False, null=True) and the form field set to required=False should allow the field to be unset. 
""" class AwardForm(forms.ModelForm): class Meta: model = Award fields = "__all__" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields["character"].required = False character = Character.objects.create( username="user", last_action=datetime.datetime.today() ) award = Award.objects.create(name="Best sprinter", character=character) data = {"name": "Best tester", "character": ""} # remove character form = AwardForm(data=data, instance=award) self.assertTrue(form.is_valid()) award = form.save() self.assertIsNone(award.character) def test_blank_foreign_key_with_radio(self): class BookForm(forms.ModelForm): class Meta: model = Book fields = ["author"] widgets = {"author": forms.RadioSelect()} writer = Writer.objects.create(name="Joe Doe") form = BookForm() self.assertEqual( list(form.fields["author"].choices), [ ("", "---------"), (writer.pk, "Joe Doe"), ], ) def test_non_blank_foreign_key_with_radio(self): class AwardForm(forms.ModelForm): class Meta: model = Award fields = ["character"] widgets = {"character": forms.RadioSelect()} character = Character.objects.create( username="user", last_action=datetime.datetime.today(), ) form = AwardForm() self.assertEqual( list(form.fields["character"].choices), [(character.pk, "user")], ) def test_save_blank_false_with_required_false(self): """ A ModelForm with a model with a field set to blank=False and the form field set to required=False should allow the field to be unset. """ obj = Writer.objects.create(name="test") form = CustomWriterForm(data={"name": ""}, instance=obj) self.assertTrue(form.is_valid()) obj = form.save() self.assertEqual(obj.name, "") def test_save_blank_null_unique_charfield_saves_null(self): form_class = modelform_factory( model=NullableUniqueCharFieldModel, fields="__all__" ) empty_value = ( "" if connection.features.interprets_empty_strings_as_nulls else None ) data = { "codename": "", "email": "", "slug": "", "url": "", } form = form_class(data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.instance.codename, empty_value) self.assertEqual(form.instance.email, empty_value) self.assertEqual(form.instance.slug, empty_value) self.assertEqual(form.instance.url, empty_value) # Save a second form to verify there isn't a unique constraint # violation. form = form_class(data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.instance.codename, empty_value) self.assertEqual(form.instance.email, empty_value) self.assertEqual(form.instance.slug, empty_value) self.assertEqual(form.instance.url, empty_value) def test_missing_fields_attribute(self): message = ( "Creating a ModelForm without either the 'fields' attribute " "or the 'exclude' attribute is prohibited; form " "MissingFieldsForm needs updating." 
) with self.assertRaisesMessage(ImproperlyConfigured, message): class MissingFieldsForm(forms.ModelForm): class Meta: model = Category def test_extra_fields(self): class ExtraFields(BaseCategoryForm): some_extra_field = forms.BooleanField() self.assertEqual( list(ExtraFields.base_fields), ["name", "slug", "url", "some_extra_field"] ) def test_extra_field_model_form(self): with self.assertRaisesMessage(FieldError, "no-field"): class ExtraPersonForm(forms.ModelForm): """ModelForm with an extra field""" age = forms.IntegerField() class Meta: model = Person fields = ("name", "no-field") def test_extra_declared_field_model_form(self): class ExtraPersonForm(forms.ModelForm): """ModelForm with an extra field""" age = forms.IntegerField() class Meta: model = Person fields = ("name", "age") def test_extra_field_modelform_factory(self): with self.assertRaisesMessage( FieldError, "Unknown field(s) (no-field) specified for Person" ): modelform_factory(Person, fields=["no-field", "name"]) def test_replace_field(self): class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = "__all__" self.assertIsInstance( ReplaceField.base_fields["url"], forms.fields.BooleanField ) def test_replace_field_variant_2(self): # Should have the same result as before, # but 'fields' attribute specified differently class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = ["url"] self.assertIsInstance( ReplaceField.base_fields["url"], forms.fields.BooleanField ) def test_replace_field_variant_3(self): # Should have the same result as before, # but 'fields' attribute specified differently class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = [] # url will still appear, since it is explicit above self.assertIsInstance( ReplaceField.base_fields["url"], forms.fields.BooleanField ) def test_override_field(self): class WriterForm(forms.ModelForm): book = forms.CharField(required=False) class Meta: model = Writer fields = "__all__" wf = WriterForm({"name": "Richard Lockridge"}) self.assertTrue(wf.is_valid()) def test_limit_nonexistent_field(self): expected_msg = "Unknown field(s) (nonexistent) specified for Category" with self.assertRaisesMessage(FieldError, expected_msg): class InvalidCategoryForm(forms.ModelForm): class Meta: model = Category fields = ["nonexistent"] def test_limit_fields_with_string(self): msg = ( "CategoryForm.Meta.fields cannot be a string. Did you mean to type: " "('url',)?" ) with self.assertRaisesMessage(TypeError, msg): class CategoryForm(forms.ModelForm): class Meta: model = Category fields = "url" # note the missing comma def test_exclude_fields(self): class ExcludeFields(forms.ModelForm): class Meta: model = Category exclude = ["url"] self.assertEqual(list(ExcludeFields.base_fields), ["name", "slug"]) def test_exclude_nonexistent_field(self): class ExcludeFields(forms.ModelForm): class Meta: model = Category exclude = ["nonexistent"] self.assertEqual(list(ExcludeFields.base_fields), ["name", "slug", "url"]) def test_exclude_fields_with_string(self): msg = ( "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: " "('url',)?" 
) with self.assertRaisesMessage(TypeError, msg): class CategoryForm(forms.ModelForm): class Meta: model = Category exclude = "url" # note the missing comma def test_exclude_and_validation(self): # This Price instance generated by this form is not valid because the # quantity field is required, but the form is valid because the field # is excluded from the form. This is for backwards compatibility. class PriceFormWithoutQuantity(forms.ModelForm): class Meta: model = Price exclude = ("quantity",) form = PriceFormWithoutQuantity({"price": "6.00"}) self.assertTrue(form.is_valid()) price = form.save(commit=False) msg = "{'quantity': ['This field cannot be null.']}" with self.assertRaisesMessage(ValidationError, msg): price.full_clean() # The form should not validate fields that it doesn't contain even if # they are specified using 'fields', not 'exclude'. class PriceFormWithoutQuantity(forms.ModelForm): class Meta: model = Price fields = ("price",) form = PriceFormWithoutQuantity({"price": "6.00"}) self.assertTrue(form.is_valid()) # The form should still have an instance of a model that is not # complete and not saved into a DB yet. self.assertEqual(form.instance.price, Decimal("6.00")) self.assertIsNone(form.instance.quantity) self.assertIsNone(form.instance.pk) def test_confused_form(self): class ConfusedForm(forms.ModelForm): """Using 'fields' *and* 'exclude'. Not sure why you'd want to do this, but uh, "be liberal in what you accept" and all. """ class Meta: model = Category fields = ["name", "url"] exclude = ["url"] self.assertEqual(list(ConfusedForm.base_fields), ["name"]) def test_mixmodel_form(self): class MixModelForm(BaseCategoryForm): """Don't allow more than one 'model' definition in the inheritance hierarchy. Technically, it would generate a valid form, but the fact that the resulting save method won't deal with multiple objects is likely to trip up people not familiar with the mechanics. """ class Meta: model = Article fields = "__all__" # MixModelForm is now an Article-related thing, because # MixModelForm.Meta overrides BaseCategoryForm.Meta. self.assertEqual( list(MixModelForm.base_fields), [ "headline", "slug", "pub_date", "writer", "article", "categories", "status", ], ) def test_article_form(self): self.assertEqual( list(ArticleForm.base_fields), [ "headline", "slug", "pub_date", "writer", "article", "categories", "status", ], ) def test_bad_form(self): # First class with a Meta class wins... class BadForm(ArticleForm, BaseCategoryForm): pass self.assertEqual( list(BadForm.base_fields), [ "headline", "slug", "pub_date", "writer", "article", "categories", "status", ], ) def test_invalid_meta_model(self): class InvalidModelForm(forms.ModelForm): class Meta: pass # no model # Can't create new form msg = "ModelForm has no model class specified." with self.assertRaisesMessage(ValueError, msg): InvalidModelForm() # Even if you provide a model instance with self.assertRaisesMessage(ValueError, msg): InvalidModelForm(instance=Category) def test_subcategory_form(self): class SubCategoryForm(BaseCategoryForm): """Subclassing without specifying a Meta on the class will use the parent's Meta (or the first parent in the MRO if there are multiple parent classes). 
""" pass self.assertEqual(list(SubCategoryForm.base_fields), ["name", "slug", "url"]) def test_subclassmeta_form(self): class SomeCategoryForm(forms.ModelForm): checkbox = forms.BooleanField() class Meta: model = Category fields = "__all__" class SubclassMeta(SomeCategoryForm): """We can also subclass the Meta inner class to change the fields list. """ class Meta(SomeCategoryForm.Meta): exclude = ["url"] self.assertHTMLEqual( str(SubclassMeta()), '<div><label for="id_name">Name:</label>' '<input type="text" name="name" maxlength="20" required id="id_name">' '</div><div><label for="id_slug">Slug:</label><input type="text" ' 'name="slug" maxlength="20" required id="id_slug"></div><div>' '<label for="id_checkbox">Checkbox:</label>' '<input type="checkbox" name="checkbox" required id="id_checkbox"></div>', ) def test_orderfields_form(self): class OrderFields(forms.ModelForm): class Meta: model = Category fields = ["url", "name"] self.assertEqual(list(OrderFields.base_fields), ["url", "name"]) self.assertHTMLEqual( str(OrderFields()), '<div><label for="id_url">The URL:</label>' '<input type="text" name="url" maxlength="40" required id="id_url">' '</div><div><label for="id_name">Name:</label><input type="text" ' 'name="name" maxlength="20" required id="id_name"></div>', ) def test_orderfields2_form(self): class OrderFields2(forms.ModelForm): class Meta: model = Category fields = ["slug", "url", "name"] exclude = ["url"] self.assertEqual(list(OrderFields2.base_fields), ["slug", "name"]) def test_default_populated_on_optional_field(self): class PubForm(forms.ModelForm): mode = forms.CharField(max_length=255, required=False) class Meta: model = PublicationDefaults fields = ("mode",) # Empty data uses the model field default. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, "di") self.assertEqual(m1._meta.get_field("mode").get_default(), "di") # Blank data doesn't use the model field default. mf2 = PubForm({"mode": ""}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.mode, "") def test_default_not_populated_on_non_empty_value_in_cleaned_data(self): class PubForm(forms.ModelForm): mode = forms.CharField(max_length=255, required=False) mocked_mode = None def clean(self): self.cleaned_data["mode"] = self.mocked_mode return self.cleaned_data class Meta: model = PublicationDefaults fields = ("mode",) pub_form = PubForm({}) pub_form.mocked_mode = "de" pub = pub_form.save(commit=False) self.assertEqual(pub.mode, "de") # Default should be populated on an empty value in cleaned_data. default_mode = "di" for empty_value in pub_form.fields["mode"].empty_values: with self.subTest(empty_value=empty_value): pub_form = PubForm({}) pub_form.mocked_mode = empty_value pub = pub_form.save(commit=False) self.assertEqual(pub.mode, default_mode) def test_default_not_populated_on_optional_checkbox_input(self): class PubForm(forms.ModelForm): class Meta: model = PublicationDefaults fields = ("active",) # Empty data doesn't use the model default because CheckboxInput # doesn't have a value in HTML form submission. 
mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertIs(m1.active, False) self.assertIsInstance(mf1.fields["active"].widget, forms.CheckboxInput) self.assertIs(m1._meta.get_field("active").get_default(), True) def test_default_not_populated_on_checkboxselectmultiple(self): class PubForm(forms.ModelForm): mode = forms.CharField(required=False, widget=forms.CheckboxSelectMultiple) class Meta: model = PublicationDefaults fields = ("mode",) # Empty data doesn't use the model default because an unchecked # CheckboxSelectMultiple doesn't have a value in HTML form submission. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, "") self.assertEqual(m1._meta.get_field("mode").get_default(), "di") def test_default_not_populated_on_selectmultiple(self): class PubForm(forms.ModelForm): mode = forms.CharField(required=False, widget=forms.SelectMultiple) class Meta: model = PublicationDefaults fields = ("mode",) # Empty data doesn't use the model default because an unselected # SelectMultiple doesn't have a value in HTML form submission. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, "") self.assertEqual(m1._meta.get_field("mode").get_default(), "di") def test_prefixed_form_with_default_field(self): class PubForm(forms.ModelForm): prefix = "form-prefix" class Meta: model = PublicationDefaults fields = ("mode",) mode = "de" self.assertNotEqual( mode, PublicationDefaults._meta.get_field("mode").get_default() ) mf1 = PubForm({"form-prefix-mode": mode}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, mode) def test_renderer_kwarg(self): custom = object() self.assertIs(ProductForm(renderer=custom).renderer, custom) def test_default_splitdatetime_field(self): class PubForm(forms.ModelForm): datetime_published = forms.SplitDateTimeField(required=False) class Meta: model = PublicationDefaults fields = ("datetime_published",) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.datetime_published, datetime.datetime(2000, 1, 1)) mf2 = PubForm( {"datetime_published_0": "2010-01-01", "datetime_published_1": "0:00:00"} ) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.datetime_published, datetime.datetime(2010, 1, 1)) def test_default_filefield(self): class PubForm(forms.ModelForm): class Meta: model = PublicationDefaults fields = ("file",) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.file.name, "default.txt") mf2 = PubForm({}, {"file": SimpleUploadedFile("name", b"foo")}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.file.name, "name") def test_default_selectdatewidget(self): class PubForm(forms.ModelForm): date_published = forms.DateField( required=False, widget=forms.SelectDateWidget ) class Meta: model = PublicationDefaults fields = ("date_published",) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.date_published, datetime.date.today()) mf2 = PubForm( { "date_published_year": "2010", "date_published_month": "1", "date_published_day": "1", } ) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.date_published, datetime.date(2010, 1, 1))
ModelFormBaseTest
python
pytorch__pytorch
test/dynamo/test_fx_graph_runnable.py
{ "start": 2564, "end": 3133 }
class ____(torch.nn.Module):
    def __init__(self, input_size=10, hidden_size=20, output_size=5):
        super().__init__()
        self.linear1 = torch.nn.Linear(input_size, hidden_size)
        self.linear2 = torch.nn.Linear(hidden_size, output_size)
        self.relu = torch.nn.ReLU()
        self.dropout = torch.nn.Dropout(0.1)

    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.linear2(x)
        return x


@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Skip in fbcode/sandcastle")
ToyModel
python
viewflow__viewflow
viewflow/jsonstore.py
{ "start": 6763, "end": 6831 }
class ____(JSONFieldMixin, fields.IntegerField):
    pass
IntegerField
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF012.py
{ "start": 2079, "end": 2187 }
class ____(V1BaseModel):
    mutable_default: list[int] = []


from pydantic.v1.generics import GenericModel
I
python
apache__airflow
providers/microsoft/psrp/tests/unit/microsoft/psrp/hooks/test_psrp.py
{ "start": 1360, "end": 3928 }
class ____(MagicMock):
    had_errors = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.state = PSInvocationState.NOT_STARTED

    def poll_invoke(self, timeout=None):
        self.state = PSInvocationState.COMPLETED
        self.output.append("output")

        def informational(message_type, message, **kwargs):
            kwargs.setdefault("command_name", "command")
            return Mock(MESSAGE_TYPE=message_type, message=message, **kwargs)

        self.streams.debug.append(informational(MessageType.DEBUG_RECORD, "debug1"))
        self.streams.debug.append(informational(MessageType.DEBUG_RECORD, "debug2\r\n", command_name=None))
        self.streams.verbose.append(informational(MessageType.VERBOSE_RECORD, "verbose"))
        self.streams.warning.append(informational(MessageType.WARNING_RECORD, "warning"))
        self.streams.information.append(
            Mock(
                MESSAGE_TYPE=MessageType.INFORMATION_RECORD,
                computer="computer",
                user="user",
                message_data="information",
            )
        )
        self.streams.progress.append(
            Mock(MESSAGE_TYPE=MessageType.PROGRESS_RECORD, activity="activity", description="description")
        )

        if self.had_errors:
            self.streams.error.append(
                Mock(
                    MESSAGE_TYPE=MessageType.ERROR_RECORD,
                    command_name="command",
                    message="error",
                    reason="reason",
                    script_stacktrace="\r\n".join(DUMMY_STACKTRACE),
                )
            )

    def begin_invoke(self):
        self.state = PSInvocationState.RUNNING
        self.output = []
        self.streams.debug = []
        self.streams.error = []
        self.streams.information = []
        self.streams.progress = []
        self.streams.verbose = []
        self.streams.warning = []

    def end_invoke(self):
        while self.state == PSInvocationState.RUNNING:
            self.poll_invoke()


def mock_powershell_factory():
    return MagicMock(return_value=MockPowerShell())


@patch(
    f"{PsrpHook.__module__}.{PsrpHook.__name__}.get_connection",
    new=lambda _, conn_id: Connection(
        conn_id=conn_id,
        login="username",
        password="password",
        host="remote_host",
    ),
)
@patch(f"{PsrpHook.__module__}.WSMan")
@patch(f"{PsrpHook.__module__}.PowerShell", new_callable=mock_powershell_factory)
@patch(f"{PsrpHook.__module__}.RunspacePool")
MockPowerShell
python
google__jax
tests/jaxpr_effects_test.py
{ "start": 3690, "end": 5023 }
class ____(jtu.JaxTestCase):

  def test_trivial_jaxpr_has_no_effects(self):
    def f(x):
      return x + 1.
    jaxpr = jax.make_jaxpr(f)(2.)
    self.assertEqual(core.no_effects, jaxpr.effects)

  def test_effectful_primitive_in_jaxpr_creates_effects(self):
    def f(x):
      effect_p.bind(effect=foo_effect)
      return x + 1.
    jaxpr = jax.make_jaxpr(f)(2.)
    self.assertEqual({foo_effect}, jaxpr.jaxpr.eqns[0].effects)
    self.assertEqual({foo_effect}, jaxpr.effects)

  def test_different_effects_in_jaxpr(self):
    def f(x):
      effect_p.bind(effect=foo_effect)
      effect_p.bind(effect=bar_effect)
      return x + 1.
    jaxpr = jax.make_jaxpr(f)(2.)
    self.assertEqual({foo_effect}, jaxpr.jaxpr.eqns[0].effects)
    self.assertEqual({bar_effect}, jaxpr.jaxpr.eqns[1].effects)
    self.assertEqual({foo_effect, bar_effect}, jaxpr.effects)

  def test_jaxpr_typecheck_should_verify_eqn_effects_are_subset(self):
    def f(x):
      effect_p.bind(effect=foo_effect)
      effect_p.bind(effect=bar_effect)
      return x + 1.
    jaxpr = jax.make_jaxpr(f)(2.).jaxpr

    # Edit jaxpr to make its type wrong
    jaxpr = jaxpr.replace(effects={foo_effect})
    with self.assertRaisesRegex(core.JaxprTypeError,
                                'Equation effect not present in jaxpr effects.'):
      core.check_jaxpr(jaxpr)
JaxprEffectsTest
python
h5py__h5py
h5py/h5py_warnings.py
{ "start": 471, "end": 523 }
class ____(H5pyWarning):
    pass
H5pyDeprecationWarning
python
pytorch__pytorch
torch/_guards.py
{ "start": 2269, "end": 4228 }
class ____:
    frame_id: int | None
    # This id is per-frame, and counts how many times we've compiled this
    # frame. This could have been a global id but having this be per-frame
    # gives you a better intuitive sense for how many recompiles have occurred
    # so far.
    frame_compile_id: int | None

    # torch.compiling a compiled autograd graph
    compiled_autograd_id: int | None = None

    # TODO: consider also tracking the recompilation count
    # See Note: Updating CompileId

    def __str__(self) -> str:
        # NOTE: Keep this in sync with both from_string and the tlparse repo
        if self.compiled_autograd_id is not None:
            assert (self.frame_id is None) == (self.frame_compile_id is None)
            frame_str = ""
            if self.frame_id is not None:
                frame_str = f"/{self.frame_id}/{self.frame_compile_id}"

            return f"!{self.compiled_autograd_id}{frame_str}"
        else:
            assert self.frame_id is not None and self.frame_compile_id is not None
            return f"{self.frame_id}/{self.frame_compile_id}"

    @classmethod
    def from_string(cls, compile_id: Optional[str]) -> Optional[CompileId]:
        """
        Factory method that creates a CompileId from its string representation.
        Keep this in sync with the __str__ method.
        """
        if compile_id is None:
            return None
        try:
            for pattern in (COMPILE_ID_PATTERN, CA_COMPILE_ID_PATTERN):
                if match := pattern.match(compile_id):
                    groups = match.groupdict()
                    for k, v in groups.items():
                        if v is not None:
                            groups[k] = int(v)
                    return cls(**groups)  # type: ignore[arg-type]
            else:
                raise ValueError
        except Exception as e:
            raise ValueError(f"Invalid compile_id '{compile_id}'") from e
CompileId
python
ansible__ansible
test/units/cli/test_cli.py
{ "start": 1363, "end": 3880 }
class ____(unittest.TestCase): def setUp(self): self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True) self.mock_isatty = self.tty_patcher.start() def tearDown(self): self.tty_patcher.stop() def test(self): res = cli.CLI.build_vault_ids(['foo@bar']) self.assertEqual(res, ['foo@bar']) def test_no_vault_id_no_auto_prompt(self): # simulate 'ansible-playbook site.yml' with out --ask-vault-pass, should not prompt res = cli.CLI.build_vault_ids([], auto_prompt=False) self.assertEqual(res, []) def test_no_vault_ids_auto_prompt(self): # create_new_password=False # simulate 'ansible-vault edit encrypted.yml' res = cli.CLI.build_vault_ids([], auto_prompt=True) self.assertEqual(res, ['default@prompt_ask_vault_pass']) def test_no_vault_ids_auto_prompt_ask_vault_pass(self): # create_new_password=False # simulate 'ansible-vault edit --ask-vault-pass encrypted.yml' res = cli.CLI.build_vault_ids([], auto_prompt=True, ask_vault_pass=True) self.assertEqual(res, ['default@prompt_ask_vault_pass']) def test_no_vault_id_ask_vault_pass(self): res = cli.CLI.build_vault_ids([], ask_vault_pass=True) self.assertEqual(res, ['default@prompt_ask_vault_pass']) def test_no_vault_ids_password_files(self): res = cli.CLI.build_vault_ids([], vault_password_files=['some-password-file']) self.assertEqual(res, ['default@some-password-file']) def test_everything(self): res = cli.CLI.build_vault_ids(['blip@prompt', 'baz@prompt_ask_vault_pass', 'some-password-file', 'qux@another-password-file'], vault_password_files=['yet-another-password-file', 'one-more-password-file'], ask_vault_pass=True, auto_prompt=False) self.assertEqual(set(res), set(['blip@prompt', 'baz@prompt_ask_vault_pass', 'default@prompt_ask_vault_pass', 'some-password-file', 'qux@another-password-file', 'default@yet-another-password-file', 'default@one-more-password-file'])) @pytest.mark.usefixtures("_zap_vault_secrets_context")
TestCliBuildVaultIds
python
tensorflow__tensorflow
tensorflow/python/ops/parallel_for/control_flow_ops_test.py
{ "start": 54325, "end": 54726 }
class ____(PForTestCase): def test_optional_from_value(self): def loop_fn(i): o = gen_optional_ops.optional_from_value( [i, i + 1, constant_op.constant(3)] ) gen_optional_ops.optional_none() return gen_optional_ops.optional_get_value( o, [dtypes.int32, dtypes.int32, dtypes.int32], [[], [], []] ) self._test_loop_fn(loop_fn, 2)
OptionalTest
python
apache__airflow
providers/google/tests/unit/google/cloud/triggers/test_gcs.py
{ "start": 6143, "end": 9460 }
class ____: TRIGGER = GCSPrefixBlobTrigger( bucket=TEST_BUCKET, prefix=TEST_PREFIX, poke_interval=TEST_POLLING_INTERVAL, google_cloud_conn_id=TEST_GCP_CONN_ID, hook_params=TEST_HOOK_PARAMS, ) def test_gcs_prefix_blob_trigger_serialization(self): """ Asserts that the GCSPrefixBlobTrigger correctly serializes its arguments and classpath. """ classpath, kwargs = self.TRIGGER.serialize() assert classpath == "airflow.providers.google.cloud.triggers.gcs.GCSPrefixBlobTrigger" assert kwargs == { "bucket": TEST_BUCKET, "prefix": TEST_PREFIX, "poke_interval": TEST_POLLING_INTERVAL, "google_cloud_conn_id": TEST_GCP_CONN_ID, "hook_params": TEST_HOOK_PARAMS, } @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.triggers.gcs.GCSPrefixBlobTrigger._list_blobs_with_prefix") async def test_gcs_prefix_blob_trigger_success(self, mock_list_blobs_with_prefixs): """ Tests that the GCSPrefixBlobTrigger is success case """ mock_list_blobs_with_prefixs.return_value = ["success"] generator = self.TRIGGER.run() actual = await generator.asend(None) assert ( TriggerEvent({"status": "success", "message": "Successfully completed", "matches": ["success"]}) == actual ) @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.triggers.gcs.GCSPrefixBlobTrigger._list_blobs_with_prefix") async def test_gcs_prefix_blob_trigger_exception(self, mock_list_blobs_with_prefixs): """ Tests the GCSPrefixBlobTrigger does fire if there is an exception. """ mock_list_blobs_with_prefixs.side_effect = AsyncMock(side_effect=Exception("Test exception")) task = [i async for i in self.TRIGGER.run()] assert len(task) == 1 assert TriggerEvent({"status": "error", "message": "Test exception"}) in task @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.triggers.gcs.GCSPrefixBlobTrigger._list_blobs_with_prefix") async def test_gcs_prefix_blob_trigger_pending(self, mock_list_blobs_with_prefixs): """ Test that GCSPrefixBlobTrigger is in loop if file isn't found. """ mock_list_blobs_with_prefixs.return_value = [] task = asyncio.create_task(self.TRIGGER.run().__anext__()) await asyncio.sleep(0.5) # TriggerEvent was not returned assert task.done() is False asyncio.get_event_loop().stop() @pytest.mark.asyncio async def test_list_blobs_with_prefix(self): """ Tests to check if a particular object in Google Cloud Storage is found or not """ hook = AsyncMock(GCSAsyncHook) storage = AsyncMock(Storage) hook.get_storage_client.return_value = storage bucket = AsyncMock(Bucket) storage.get_bucket.return_value = bucket bucket.list_blobs.return_value = ["test_string"] res = await self.TRIGGER._list_blobs_with_prefix(hook, TEST_BUCKET, TEST_PREFIX) assert res == ["test_string"] bucket.list_blobs.assert_called_once_with(prefix=TEST_PREFIX)
TestGCSPrefixBlobTrigger
python
pydantic__pydantic
pydantic/networks.py
{ "start": 22439, "end": 22680 }
class ____(AnyUrl): """A type that will accept any ws or wss URL. * TLD not required * Host not required * Max length 2083 """ _constraints = UrlConstraints(max_length=2083, allowed_schemes=['ws', 'wss'])
WebsocketUrl
python
tensorflow__tensorflow
tensorflow/python/autograph/converters/logical_expressions.py
{ "start": 1562, "end": 4381 }
class ____(converter.Base): """Converts logical expressions to corresponding TF calls.""" def _overload_of(self, operator): op_type = type(operator) if op_type in LOGICAL_OPERATORS: return LOGICAL_OPERATORS[op_type] if self.ctx.user.options.uses(converter.Feature.EQUALITY_OPERATORS): if op_type in EQUALITY_OPERATORS: return EQUALITY_OPERATORS[op_type] return None def _as_lambda(self, expr): return templates.replace_as_expression('lambda: expr', expr=expr) def _as_binary_function(self, func_name, arg1, arg2): return templates.replace_as_expression( 'func_name(arg1, arg2)', func_name=parser.parse_expression(func_name), arg1=arg1, arg2=arg2) def _as_binary_operation(self, op, arg1, arg2): template = templates.replace_as_expression( 'arg1 is arg2', # Note: `is` will be replaced with `op` below. arg1=arg1, arg2=arg2) template.ops[0] = op return template def _as_unary_function(self, func_name, arg): return templates.replace_as_expression( 'func_name(arg)', func_name=parser.parse_expression(func_name), arg=arg) def _process_binop(self, op, left, right): overload = self._overload_of(op) if overload is None: return self._as_binary_operation(op, left, right) return self._as_binary_function(overload, left, right) def visit_Compare(self, node): node = self.generic_visit(node) ops_and_comps = list(zip(node.ops, node.comparators)) left = node.left # Repeated comparisons are converted to conjunctions: # a < b < c -> a < b and b < c op_tree = None while ops_and_comps: op, right = ops_and_comps.pop(0) binary_comparison = self._process_binop(op, left, right) if op_tree is not None: op_tree = self._as_binary_function('ag__.and_', self._as_lambda(op_tree), self._as_lambda(binary_comparison)) else: op_tree = binary_comparison left = right assert op_tree is not None return op_tree def visit_UnaryOp(self, node): node = self.generic_visit(node) overload = self._overload_of(node.op) if overload is None: return node return self._as_unary_function(overload, node.operand) def visit_BoolOp(self, node): node = self.generic_visit(node) node_values = node.values right = node.values.pop() while node_values: left = node_values.pop() right = self._as_binary_function( self._overload_of(node.op), self._as_lambda(left), self._as_lambda(right)) return right def transform(node, ctx): transformer = LogicalExpressionTransformer(ctx) return transformer.visit(node)
LogicalExpressionTransformer
python
getsentry__sentry
src/sentry/integrations/discord/message_builder/base/embed/image.py
{ "start": 231, "end": 955 }
class ____: def __init__( self, url: str, proxy_url: str | None = None, height: int | None = None, width: int | None = None, ) -> None: self.url = url self.proxy_url = proxy_url self.height = height self.width = width def build(self) -> DiscordMessageEmbedImageDict: embed_image = DiscordMessageEmbedImageDict(url=self.url) if self.proxy_url is not None: embed_image["proxy_url"] = self.proxy_url if self.height is not None: embed_image["height"] = self.height if self.width is not None: embed_image["width"] = self.width return embed_image
DiscordMessageEmbedImage
python
kamyu104__LeetCode-Solutions
Python/final-array-state-after-k-multiplication-operations-i.py
{ "start": 3979, "end": 4548 }
class ____(object): def getFinalState(self, nums, k, multiplier): """ :type nums: List[int] :type k: int :type multiplier: int :rtype: List[int] """ if multiplier == 1: return nums min_heap = [(x, i) for i, x in enumerate(nums)] heapq.heapify(min_heap) for _ in xrange(k): i = heapq.heappop(min_heap)[1] nums[i] *= multiplier heapq.heappush(min_heap, (nums[i], i)) return nums # Time: O(k * n) # Space: O(1) # simulation
Solution4
python
jazzband__django-pipeline
pipeline/compressors/__init__.py
{ "start": 14306, "end": 15171 }
class ____(CompressorBase): def execute_command(self, command, content): argument_list = [] for flattening_arg in command: if isinstance(flattening_arg, (str,)): argument_list.append(flattening_arg) else: argument_list.extend(flattening_arg) pipe = subprocess.Popen( argument_list, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, ) if content: content = smart_bytes(content) stdout, stderr = pipe.communicate(content) set_std_streams_blocking() if stderr.strip() and pipe.returncode != 0: raise CompressorError(force_str(stderr)) elif self.verbose: print(force_str(stderr)) return force_str(stdout)
SubProcessCompressor
python
getsentry__sentry
src/sentry/lang/native/utils.py
{ "start": 5189, "end": 5925 }
class ____: """ Creates a new exponential backoff. """ def __init__(self, initial, max): """ :param initial: The initial backoff time in seconds. :param max: The maximum backoff time in seconds. """ self.initial = initial self.max = max self._current = 0 def reset(self): """ Resets the backoff time to zero. """ self._current = 0 def sleep_failure(self): """ Sleeps until the next retry attempt and increases the backoff time for the next failure. """ if self._current > 0: time.sleep(self._current) self._current = min(max(self._current * 2, self.initial), self.max)
Backoff
python
allegroai__clearml
clearml/backend_api/services/v2_13/models.py
{ "start": 21098, "end": 22323 }
class ____(Response): """ Response of models.add_or_update_metadata endpoint. :param updated: Number of models updated (0 or 1) :type updated: int """ _service = "models" _action = "add_or_update_metadata" _version = "2.13" _schema = { "definitions": {}, "properties": { "updated": { "description": "Number of models updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], } }, "type": "object", } def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None: super(AddOrUpdateMetadataResponse, self).__init__(**kwargs) self.updated = updated @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value
AddOrUpdateMetadataResponse
python
OmkarPathak__pygorithm
pygorithm/data_structures/linked_list.py
{ "start": 154, "end": 679 }
class ____(object): """ Node class for creating a node for a linked list. Each node has its data and a pointer that points to the next node in the linked list """ def __init__(self, data, next_node=None): """ constructor :param data: :param next_node: """ self.data = data self.next = next_node @staticmethod def get_code(): """ return the code for the current class """ return inspect.getsource(Node)
Node
python
huggingface__transformers
src/transformers/models/sew/modeling_sew.py
{ "start": 7685, "end": 10065 }
class ____(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [SEWGroupNormConvLayer(config, layer_id=0)] + [ SEWNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [SEWLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): if scaling is None: scaling = query.size(-1) ** -0.5 # Take the dot product between "query" and "key" to get the raw attention scores. attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attention_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
SEWFeatureEncoder
python
walkccc__LeetCode
solutions/354. Russian Doll Envelopes/354.py
{ "start": 0, "end": 511 }
class ____: def maxEnvelopes(self, envelopes: list[list[int]]) -> int: envelopes.sort(key=lambda x: (x[0], -x[1])) return self._lengthOfLIS(envelopes) def _lengthOfLIS(self, envelopes: list[list[int]]) -> int: # tails[i] := the minimum tails of all the increasing subsequences having # length i + 1 tails = [] for _, h in envelopes: if not tails or h > tails[-1]: tails.append(h) else: tails[bisect.bisect_left(tails, h)] = h return len(tails)
Solution
python
kamyu104__LeetCode-Solutions
Python/multiply-strings.py
{ "start": 37, "end": 653 }
class ____(object): def multiply(self, num1, num2): """ :type num1: str :type num2: str :rtype: str """ result = [0]*(len(num1)+len(num2)) for i in reversed(xrange(len(num1))): for j in reversed(xrange(len(num2))): result[i+j+1] += int(num1[i])*int(num2[j]) result[i+j] += result[i+j+1]//10 result[i+j+1] %= 10 for i in xrange(len(result)): if result[i]: break return "".join(map(lambda x: str(x), result[i:])) # Time: O(m * n) # Space: O(m + n)
Solution
python
cython__cython
Cython/Shadow.py
{ "start": 19209, "end": 19821 }
class ____: def __init__(self): import threading self._l = threading.Lock() def acquire(self): return self._l.acquire() def release(self): return self._l.release() def locked(self): return self._l.locked() def can_check_locked(self): """Check if locked() is available. Always True in pure Python mode.""" return True def __enter__(self): return self._l.__enter__() def __exit__(self, exc_type, exc_value, traceback): return self._l.__exit__(exc_type, exc_value, traceback) pythread_type_lock = pymutex
pymutex
python
crytic__slither
slither/detectors/functions/out_of_order_retryable.py
{ "start": 313, "end": 5424 }
class ____(AbstractDetector): ARGUMENT = "out-of-order-retryable" HELP = "Out-of-order retryable transactions" IMPACT = DetectorClassification.MEDIUM CONFIDENCE = DetectorClassification.MEDIUM WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#out-of-order-retryable-transactions" WIKI_TITLE = "Out-of-order retryable transactions" WIKI_DESCRIPTION = "Out-of-order retryable transactions" # region wiki_exploit_scenario WIKI_EXPLOIT_SCENARIO = """ ```solidity contract L1 { function doStuffOnL2() external { // Retryable A IInbox(inbox).createRetryableTicket({ to: l2contract, l2CallValue: 0, maxSubmissionCost: maxSubmissionCost, excessFeeRefundAddress: msg.sender, callValueRefundAddress: msg.sender, gasLimit: gasLimit, maxFeePerGas: maxFeePerGas, data: abi.encodeCall(l2contract.claim_rewards, ()) }); // Retryable B IInbox(inbox).createRetryableTicket({ to: l2contract, l2CallValue: 0, maxSubmissionCost: maxSubmissionCost, excessFeeRefundAddress: msg.sender, callValueRefundAddress: msg.sender, gasLimit: gas, maxFeePerGas: maxFeePerGas, data: abi.encodeCall(l2contract.unstake, ()) }); } } contract L2 { function claim_rewards() public { // rewards is computed based on balance and staking period uint unclaimed_rewards = _compute_and_update_rewards(); token.safeTransfer(msg.sender, unclaimed_rewards); } // Call claim_rewards before unstaking, otherwise you lose your rewards function unstake() public { _free_rewards(); // clean up rewards related variables balance = balance[msg.sender]; balance[msg.sender] = 0; staked_token.safeTransfer(msg.sender, balance); } } ``` Bob calls `doStuffOnL2` but the first retryable ticket calling `claim_rewards` fails. The second retryable ticket calling `unstake` is executed successfully. As a result, Bob loses his rewards.""" # endregion wiki_exploit_scenario WIKI_RECOMMENDATION = "Do not rely on the order or successful execution of retryable tickets." 
key = "OUTOFORDERRETRYABLE" # pylint: disable=too-many-branches def _detect_multiple_tickets( self, function: FunctionContract, node: Node, visited: List[Node] ) -> None: if node in visited: return visited = visited + [node] fathers_context = [] for father in node.fathers: if self.key in father.context: fathers_context += father.context[self.key] # Exclude path that dont bring further information if node in self.visited_all_paths: if all(f_c in self.visited_all_paths[node] for f_c in fathers_context): return else: self.visited_all_paths[node] = [] self.visited_all_paths[node] = self.visited_all_paths[node] + fathers_context if self.key not in node.context: node.context[self.key] = fathers_context # include ops from internal function calls internal_ops = [] for ir in node.internal_calls: if isinstance(ir.function, Function): internal_ops += ir.function.all_slithir_operations() # analyze node for retryable tickets for ir in node.irs + internal_ops: if ( isinstance(ir, HighLevelCall) and isinstance(ir.function, Function) and ir.function.name in [ "createRetryableTicket", "outboundTransferCustomRefund", "unsafeCreateRetryableTicket", ] ): node.context[self.key].append(node) if len(node.context[self.key]) > 1: self.results.append(node.context[self.key]) return for son in node.sons: self._detect_multiple_tickets(function, son, visited) def _detect(self) -> List[Output]: results = [] # pylint: disable=attribute-defined-outside-init self.results = [] self.visited_all_paths = {} for contract in self.compilation_unit.contracts: for function in contract.functions: if ( function.is_implemented and function.contract_declarer == contract and function.entry_point ): function.entry_point.context[self.key] = [] self._detect_multiple_tickets(function, function.entry_point, []) for multiple_tickets in self.results: info = ["Multiple retryable tickets created in the same function:\n"] for x in multiple_tickets: info += ["\t -", x, "\n"] json = self.generate_result(info) results.append(json) return results
OutOfOrderRetryable
python
pytorch__pytorch
torch/testing/_internal/opinfo/utils.py
{ "start": 1195, "end": 8780 }
class ____(_dispatch_dtypes): # Class to tag the dynamically generated types. pass def get_supported_dtypes(op, sample_inputs_fn, device_type): # Returns the supported dtypes for the given operator and device_type pair. assert device_type in ["cpu", "cuda"] if not TEST_CUDA and device_type == "cuda": warnings.warn( "WARNING: CUDA is not available, empty_dtypes dispatch will be returned!", stacklevel=2, ) return _dynamic_dispatch_dtypes(()) supported_dtypes = set() for dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half): try: samples = sample_inputs_fn(op, device_type, dtype, False) except RuntimeError: # If `sample_inputs_fn` doesn't support sampling for a given # `dtype`, we assume that the `dtype` is not supported. # We raise a warning, so that user knows that this was the case # and can investigate if there was an issue with the `sample_inputs_fn`. warnings.warn( f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}", stacklevel=2, ) continue # We assume the dtype is supported # only if all samples pass for the given dtype. supported = True for sample in samples: try: op(sample.input, *sample.args, **sample.kwargs) except RuntimeError: # dtype is not supported supported = False break if supported: supported_dtypes.add(dtype) return _dynamic_dispatch_dtypes(supported_dtypes) def dtypes_dispatch_hint(dtypes): # Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH and EXTENSIBLE_DTYPE_DISPATCH) # and its string representation for the passed `dtypes`. return_type = collections.namedtuple("return_type", "dispatch_fn dispatch_fn_str") # CUDA is not available, dtypes will be empty. if len(dtypes) == 0: return return_type((), "()") set_dtypes = set(dtypes) for dispatch in COMPLETE_DTYPES_DISPATCH: # Short circuit if we get an exact match. if set(dispatch()) == set_dtypes: return return_type(dispatch, dispatch.__name__ + "()") chosen_dispatch = None chosen_dispatch_score = 0.0 for dispatch in EXTENSIBLE_DTYPE_DISPATCH: dispatch_dtypes = set(dispatch()) if not dispatch_dtypes.issubset(set_dtypes): continue score = len(dispatch_dtypes) if score > chosen_dispatch_score: chosen_dispatch_score = score chosen_dispatch = dispatch # If user passed dtypes which are lower than the lowest # dispatch type available (not likely but possible in code path). if chosen_dispatch is None: return return_type((), str(dtypes)) return return_type( partial(dispatch, *tuple(set(dtypes) - set(dispatch()))), dispatch.__name__ + str(tuple(set(dtypes) - set(dispatch()))), ) def is_dynamic_dtype_set(op): # Detect if the OpInfo entry acquired dtypes dynamically # using `get_supported_dtypes`. return op.dynamic_dtypes def str_format_dynamic_dtype(op): fmt_str = f""" OpInfo({op.name}, dtypes={dtypes_dispatch_hint(op.dtypes).dispatch_fn_str}, dtypesIfCUDA={dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str}, ) """ return fmt_str def np_unary_ufunc_integer_promotion_wrapper(fn): # Wrapper that passes PyTorch's default scalar # type as an argument to the wrapped NumPy # unary ufunc when given an integer input. # This mimics PyTorch's integer->floating point # type promotion. # # This is necessary when NumPy promotes # integer types to double, since PyTorch promotes # integer types to the default scalar type. 
# Helper to determine if promotion is needed def is_integral(dtype): return dtype in [ np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64, ] @wraps(fn) def wrapped_fn(x): # As the default dtype can change, acquire it when function is called. # NOTE: Promotion in PyTorch is from integer types to the default dtype np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] if is_integral(x.dtype): return fn(x.astype(np_dtype)) return fn(x) return wrapped_fn def reference_reduction_numpy(f, supports_keepdims=True): """Wraps a NumPy reduction operator. The wrapper function will forward dim, keepdim, mask, and identity kwargs to the wrapped function as the NumPy equivalent axis, keepdims, where, and initiak kwargs, respectively. Args: f: NumPy reduction operator to wrap supports_keepdims (bool, optional): Whether the NumPy operator accepts keepdims parameter. If it does not, the wrapper will manually unsqueeze the reduced dimensions if it was called with keepdim=True. Defaults to True. Returns: Wrapped function """ @wraps(f) def wrapper(x: npt.NDArray, *args, **kwargs): # Copy keys into a set keys = set(kwargs.keys()) dim = kwargs.pop("dim", None) keepdim = kwargs.pop("keepdim", False) if "dim" in keys: dim = tuple(dim) if isinstance(dim, Sequence) else dim # NumPy reductions don't accept dim=0 for scalar inputs # so we convert it to None if and only if dim is equivalent if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}: kwargs["axis"] = None else: kwargs["axis"] = dim if "keepdim" in keys and supports_keepdims: kwargs["keepdims"] = keepdim if "mask" in keys: mask = kwargs.pop("mask") if mask is not None: assert mask.layout == torch.strided kwargs["where"] = mask.cpu().numpy() if "identity" in keys: identity = kwargs.pop("identity") if identity is not None: if identity.dtype is torch.bfloat16: identity = identity.cpu().to(torch.float32) else: identity = identity.cpu() kwargs["initial"] = identity.numpy() result = f(x, *args, **kwargs) # Unsqueeze reduced dimensions if NumPy does not support keepdims if keepdim and not supports_keepdims and x.ndim > 0: dim = list(range(x.ndim)) if dim is None else dim result = np.expand_dims(result, dim) return result return wrapper def prod_numpy(a, *args, **kwargs): """ The function will call np.prod with type as np.int64 if the input type is int or uint64 if is uint. This is necessary because windows np.prod uses by default int32 while on linux it uses int64. This is for fixing integer overflow https://github.com/pytorch/pytorch/issues/77320 Returns: np.prod of input """ if "dtype" not in kwargs: if np.issubdtype(a.dtype, np.signedinteger): a = a.astype(np.int64) elif np.issubdtype(a.dtype, np.unsignedinteger): a = a.astype(np.uint64) fn = reference_reduction_numpy(np.prod) return fn(a, *args, **kwargs)
_dynamic_dispatch_dtypes
python
numba__numba
numba/core/typing/builtins.py
{ "start": 4046, "end": 4360 }
class ____(AbstractTemplate): """ Given a heterogeneous pair, return the first element. """ key = "pair_first" def generic(self, args, kws): assert not kws [pair] = args if isinstance(pair, types.Pair): return signature(pair.first_type, pair) @infer
PairFirst
python
MorvanZhou__Reinforcement-learning-with-tensorflow
experiments/Solve_BipedalWalker/DDPG.py
{ "start": 7579, "end": 10478 }
class ____(object): """ This SumTree code is modified version and the original code is from: https://github.com/jaara/AI-blog/blob/master/SumTree.py Story the data with it priority in tree and data frameworks. """ data_pointer = 0 def __init__(self, capacity): self.capacity = capacity # for all priority values self.tree = np.zeros(2 * capacity - 1)+1e-5 # [--------------Parent nodes-------------][-------leaves to recode priority-------] # size: capacity - 1 size: capacity self.data = np.zeros(capacity, dtype=object) # for all transitions # [--------------data frame-------------] # size: capacity def add_new_priority(self, p, data): leaf_idx = self.data_pointer + self.capacity - 1 self.data[self.data_pointer] = data # update data_frame self.update(leaf_idx, p) # update tree_frame self.data_pointer += 1 if self.data_pointer >= self.capacity: # replace when exceed the capacity self.data_pointer = 0 def update(self, tree_idx, p): change = p - self.tree[tree_idx] self.tree[tree_idx] = p self._propagate_change(tree_idx, change) def _propagate_change(self, tree_idx, change): """change the sum of priority value in all parent nodes""" parent_idx = (tree_idx - 1) // 2 self.tree[parent_idx] += change if parent_idx != 0: self._propagate_change(parent_idx, change) def get_leaf(self, lower_bound): leaf_idx = self._retrieve(lower_bound) # search the max leaf priority based on the lower_bound data_idx = leaf_idx - self.capacity + 1 return [leaf_idx, self.tree[leaf_idx], self.data[data_idx]] def _retrieve(self, lower_bound, parent_idx=0): """ Tree structure and array storage: Tree index: 0 -> storing priority sum / \ 1 2 / \ / \ 3 4 5 6 -> storing priority for transitions Array type for storing: [0,1,2,3,4,5,6] """ left_child_idx = 2 * parent_idx + 1 right_child_idx = left_child_idx + 1 if left_child_idx >= len(self.tree): # end search when no more child return parent_idx if self.tree[left_child_idx] == self.tree[right_child_idx]: return self._retrieve(lower_bound, np.random.choice([left_child_idx, right_child_idx])) if lower_bound <= self.tree[left_child_idx]: # downward search, always search for a higher priority node return self._retrieve(lower_bound, left_child_idx) else: return self._retrieve(lower_bound - self.tree[left_child_idx], right_child_idx) @property def root_priority(self): return self.tree[0] # the root
SumTree
python
walkccc__LeetCode
solutions/377. Combination Sum IV/377.py
{ "start": 0, "end": 343 }
class ____: def combinationSum4(self, nums: list[int], target: int) -> int: dp = [1] + [-1] * target def dfs(target: int) -> int: if target < 0: return 0 if dp[target] != -1: return dp[target] dp[target] = sum(dfs(target - num) for num in nums) return dp[target] return dfs(target)
Solution
python
pytest-dev__pytest
testing/test_doctest.py
{ "start": 28112, "end": 35139 }
class ____: @pytest.mark.parametrize("config_mode", ["ini", "comment"]) def test_allow_unicode(self, pytester, config_mode): """Test that doctests which output unicode work in all python versions tested by pytest when the ALLOW_UNICODE option is used (either in the configuration file or by an inline comment). """ if config_mode == "ini": pytester.makeini( """ [pytest] doctest_optionflags = ALLOW_UNICODE """ ) comment = "" else: comment = "#doctest: +ALLOW_UNICODE" pytester.maketxtfile( test_doc=f""" >>> b'12'.decode('ascii') {comment} '12' """ ) pytester.makepyfile( foo=f""" def foo(): ''' >>> b'12'.decode('ascii') {comment} '12' ''' """ ) reprec = pytester.inline_run("--doctest-modules") reprec.assertoutcome(passed=2) @pytest.mark.parametrize("config_mode", ["ini", "comment"]) def test_allow_bytes(self, pytester, config_mode): """Test that doctests which output bytes work in all python versions tested by pytest when the ALLOW_BYTES option is used (either in the configuration file or by an inline comment)(#1287). """ if config_mode == "ini": pytester.makeini( """ [pytest] doctest_optionflags = ALLOW_BYTES """ ) comment = "" else: comment = "#doctest: +ALLOW_BYTES" pytester.maketxtfile( test_doc=f""" >>> b'foo' {comment} 'foo' """ ) pytester.makepyfile( foo=f""" def foo(): ''' >>> b'foo' {comment} 'foo' ''' """ ) reprec = pytester.inline_run("--doctest-modules") reprec.assertoutcome(passed=2) def test_unicode_string(self, pytester: Pytester): """Test that doctests which output unicode fail in Python 2 when the ALLOW_UNICODE option is not used. The same test should pass in Python 3. """ pytester.maketxtfile( test_doc=""" >>> b'12'.decode('ascii') '12' """ ) reprec = pytester.inline_run() reprec.assertoutcome(passed=1) def test_bytes_literal(self, pytester: Pytester): """Test that doctests which output bytes fail in Python 3 when the ALLOW_BYTES option is not used. (#1287). """ pytester.maketxtfile( test_doc=""" >>> b'foo' 'foo' """ ) reprec = pytester.inline_run() reprec.assertoutcome(failed=1) def test_number_re(self) -> None: _number_re = _get_checker()._number_re # type: ignore for s in [ "1.", "+1.", "-1.", ".1", "+.1", "-.1", "0.1", "+0.1", "-0.1", "1e5", "+1e5", "1e+5", "+1e+5", "1e-5", "+1e-5", "-1e-5", "1.2e3", "-1.2e-3", ]: print(s) m = _number_re.match(s) assert m is not None assert float(m.group()) == pytest.approx(float(s)) for s in ["1", "abc"]: print(s) assert _number_re.match(s) is None @pytest.mark.parametrize("config_mode", ["ini", "comment"]) def test_number_precision(self, pytester, config_mode): """Test the NUMBER option.""" if config_mode == "ini": pytester.makeini( """ [pytest] doctest_optionflags = NUMBER """ ) comment = "" else: comment = "#doctest: +NUMBER" pytester.maketxtfile( test_doc=f""" Scalars: >>> import math >>> math.pi {comment} 3.141592653589793 >>> math.pi {comment} 3.1416 >>> math.pi {comment} 3.14 >>> -math.pi {comment} -3.14 >>> math.pi {comment} 3. >>> 3. {comment} 3.0 >>> 3. {comment} 3. >>> 3. {comment} 3.01 >>> 3. {comment} 2.99 >>> .299 {comment} .3 >>> .301 {comment} .3 >>> 951. {comment} 1e3 >>> 1049. {comment} 1e3 >>> -1049. {comment} -1e3 >>> 1e3 {comment} 1e3 >>> 1e3 {comment} 1000. 
Lists: >>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment} [3.14, 0.1, 13., 7, 8.22e5, 6.0e-3] >>> [[0.333, 0.667], [0.999, 1.333]] {comment} [[0.33, 0.667], [0.999, 1.333]] >>> [[[0.101]]] {comment} [[[0.1]]] Doesn't barf on non-numbers: >>> 'abc' {comment} 'abc' >>> None {comment} """ ) reprec = pytester.inline_run() reprec.assertoutcome(passed=1) @pytest.mark.parametrize( "expression,output", [ # ints shouldn't match floats: ("3.0", "3"), ("3e0", "3"), ("1e3", "1000"), ("3", "3.0"), # Rounding: ("3.1", "3.0"), ("3.1", "3.2"), ("3.1", "4.0"), ("8.22e5", "810000.0"), # Only the actual output is rounded up, not the expected output: ("3.0", "2.98"), ("1e3", "999"), # The current implementation doesn't understand that numbers inside # strings shouldn't be treated as numbers: pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail), ], ) def test_number_non_matches(self, pytester, expression, output): pytester.maketxtfile( test_doc=f""" >>> {expression} #doctest: +NUMBER {output} """ ) reprec = pytester.inline_run() reprec.assertoutcome(passed=0, failed=1) def test_number_and_allow_unicode(self, pytester: Pytester): pytester.maketxtfile( test_doc=""" >>> from collections import namedtuple >>> T = namedtuple('T', 'a b c') >>> T(a=0.2330000001, b=u'str', c=b'bytes') # doctest: +ALLOW_UNICODE, +ALLOW_BYTES, +NUMBER T(a=0.233, b=u'str', c='bytes') """ ) reprec = pytester.inline_run() reprec.assertoutcome(passed=1)
TestLiterals
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 311044, "end": 311502 }
class ____(sgqlc.types.Input): """Ways in which team connections can be ordered.""" __schema__ = github_schema __field_names__ = ("field", "direction") field = sgqlc.types.Field(sgqlc.types.non_null(TeamOrderField), graphql_name="field") """The field in which to order nodes by.""" direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction") """The direction in which to order nodes."""
TeamOrder
python
apache__airflow
providers/snowflake/src/airflow/providers/snowflake/operators/snowpark.py
{ "start": 1170, "end": 5815 }
class ____(PythonOperator): """ Executes a Python function with Snowpark Python code. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:SnowparkOperator` :param snowflake_conn_id: Reference to :ref:`Snowflake connection id<howto/connection:snowflake>` :param python_callable: A reference to an object that is callable :param op_args: a list of positional arguments that will get unpacked when calling your callable :param op_kwargs: a dictionary of keyword arguments that will get unpacked in your function :param templates_dict: a dictionary where the values are templates that will get templated by the Airflow engine sometime between ``__init__`` and ``execute`` takes place and are made available in your callable's context after the template has been applied. (templated) :param templates_exts: a list of file extensions to resolve while processing templated fields, for examples ``['.sql', '.hql']`` :param show_return_value_in_logs: a bool value whether to show return_value logs. Defaults to True, which allows return value log output. It can be set to False to prevent log output of return value when you return huge data such as transmission a large amount of XCom to TaskAPI. :param warehouse: name of warehouse (will overwrite any warehouse defined in the connection's extra JSON) :param database: name of database (will overwrite database defined in connection) :param schema: name of schema (will overwrite schema defined in connection) :param role: name of role (will overwrite any role defined in connection's extra JSON) :param authenticator: authenticator for Snowflake. 'snowflake' (default) to use the internal Snowflake authenticator 'externalbrowser' to authenticate using your web browser and Okta, ADFS or any other SAML 2.0-compliant identify provider (IdP) that has been defined for your account 'https://<your_okta_account_name>.okta.com' to authenticate through native Okta. 
:param session_parameters: You can set session-level parameters at the time you connect to Snowflake """ def __init__( self, *, snowflake_conn_id: str = "snowflake_default", python_callable: Callable, op_args: Collection[Any] | None = None, op_kwargs: Mapping[str, Any] | None = None, templates_dict: dict[str, Any] | None = None, templates_exts: Sequence[str] | None = None, show_return_value_in_logs: bool = True, warehouse: str | None = None, database: str | None = None, schema: str | None = None, role: str | None = None, authenticator: str | None = None, session_parameters: dict | None = None, **kwargs, ): super().__init__( python_callable=python_callable, op_args=op_args, op_kwargs=op_kwargs, templates_dict=templates_dict, templates_exts=templates_exts, show_return_value_in_logs=show_return_value_in_logs, **kwargs, ) self.snowflake_conn_id = snowflake_conn_id self.warehouse = warehouse self.database = database self.schema = schema self.role = role self.authenticator = authenticator self.session_parameters = session_parameters def execute_callable(self): hook = SnowflakeHook( snowflake_conn_id=self.snowflake_conn_id, warehouse=self.warehouse, database=self.database, role=self.role, schema=self.schema, authenticator=self.authenticator, session_parameters=self.session_parameters, ) session = hook.get_snowpark_session() context = get_current_context() session.update_query_tag( { "dag_id": context["dag_run"].dag_id, "dag_run_id": context["dag_run"].run_id, "task_id": context["task_instance"].task_id, "operator": self.__class__.__name__, } ) try: # inject session object if the function has "session" keyword as an argument self.op_kwargs = inject_session_into_op_kwargs( self.python_callable, dict(self.op_kwargs), session ) return super().execute_callable() finally: session.close()
SnowparkOperator
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP037_0.py
{ "start": 1064, "end": 1764 }
class ____(TypedDict): E: TypedDict("E") x: Annotated[()] x: DefaultNamedArg(name="name", quox="str") x: DefaultNamedArg(name="name") x: NamedTuple("X", [("foo",), ("bar",)]) x: NamedTuple("X", ["foo", "bar"]) x: NamedTuple() x: Literal["foo", "bar"] x = cast(x, "str") def foo(x, *args, **kwargs): ... def foo(*, inplace): ... x: Annotated[1:2] = ... x = TypeVar("x", "str", "int") x = cast("str", x) X = List["MyClass"] # Handle end of line comment in string annotation # See https://github.com/astral-sh/ruff/issues/15816 def f() -> "Literal[0]#": return 0 def g(x: "Literal['abc']#") -> None: return def f() -> """Literal[0] # """: return 0
D
python
pytorch__pytorch
torch/_export/serde/schema.py
{ "start": 10423, "end": 10543 }
class ____: min_val: Annotated[Optional[int], 10] max_val: Annotated[Optional[int], 20] @dataclass
RangeConstraint
python
pytorch__pytorch
test/inductor/test_ordered_set.py
{ "start": 54214, "end": 54448 }
class ____(TestCopying, TestCase): def setUp(self): super().setUp() self.OrderedSet = OrderedSet(["zero", 0, None]) # ------------------------------------------------------------------------------
TestCopyingTriple
python
huggingface__transformers
src/transformers/models/got_ocr2/modular_got_ocr2.py
{ "start": 9395, "end": 9456 }
class ____(SamVisionAttention): pass
GotOcr2VisionAttention
python
sympy__sympy
sympy/physics/biomechanics/activation.py
{ "start": 14269, "end": 25522 }
class ____(ActivationBase): r"""First-order activation dynamics based on De Groote et al., 2016 [1]_. Explanation =========== Gives the first-order activation dynamics equation for the rate of change of activation with respect to time as a function of excitation and activation. The function is defined by the equation: .. math:: \frac{da}{dt} = \left(\frac{\frac{1}{2} + a0}{\tau_a \left(\frac{1}{2} + \frac{3a}{2}\right)} + \frac{\left(\frac{1}{2} + \frac{3a}{2}\right) \left(\frac{1}{2} - a0\right)}{\tau_d}\right) \left(e - a\right) where .. math:: a0 = \frac{\tanh{\left(b \left(e - a\right) \right)}}{2} with constant values of :math:`tau_a = 0.015`, :math:`tau_d = 0.060`, and :math:`b = 10`. References ========== .. [1] De Groote, F., Kinney, A. L., Rao, A. V., & Fregly, B. J., Evaluation of direct collocation optimal control problem formulations for solving the muscle redundancy problem, Annals of biomedical engineering, 44(10), (2016) pp. 2922-2936 """ def __init__(self, name, activation_time_constant=None, deactivation_time_constant=None, smoothing_rate=None, ): """Initializer for ``FirstOrderActivationDeGroote2016``. Parameters ========== activation time constant : Symbol | Number | None The value of the activation time constant governing the delay between excitation and activation when excitation exceeds activation. deactivation time constant : Symbol | Number | None The value of the deactivation time constant governing the delay between excitation and activation when activation exceeds excitation. smoothing_rate : Symbol | Number | None The slope of the hyperbolic tangent function used to smooth between the switching of the equations where excitation exceed activation and where activation exceeds excitation. The recommended value to use is ``10``, but values between ``0.1`` and ``100`` can be used. """ super().__init__(name) # Symbols self.activation_time_constant = activation_time_constant self.deactivation_time_constant = deactivation_time_constant self.smoothing_rate = smoothing_rate @classmethod def with_defaults(cls, name): r"""Alternate constructor that will use the published constants. Explanation =========== Returns an instance of ``FirstOrderActivationDeGroote2016`` using the three constant values specified in the original publication. These have the values: :math:`tau_a = 0.015` :math:`tau_d = 0.060` :math:`b = 10` """ tau_a = Float('0.015') tau_d = Float('0.060') b = Float('10.0') return cls(name, tau_a, tau_d, b) @property def activation_time_constant(self): """Delay constant for activation. Explanation =========== The alias ```tau_a`` can also be used to access the same attribute. """ return self._tau_a @activation_time_constant.setter def activation_time_constant(self, tau_a): if hasattr(self, '_tau_a'): msg = ( f'Can\'t set attribute `activation_time_constant` to ' f'{repr(tau_a)} as it is immutable and already has value ' f'{self._tau_a}.' ) raise AttributeError(msg) self._tau_a = Symbol(f'tau_a_{self.name}') if tau_a is None else tau_a @property def tau_a(self): """Delay constant for activation. Explanation =========== The alias ``activation_time_constant`` can also be used to access the same attribute. """ return self._tau_a @property def deactivation_time_constant(self): """Delay constant for deactivation. Explanation =========== The alias ``tau_d`` can also be used to access the same attribute. 
""" return self._tau_d @deactivation_time_constant.setter def deactivation_time_constant(self, tau_d): if hasattr(self, '_tau_d'): msg = ( f'Can\'t set attribute `deactivation_time_constant` to ' f'{repr(tau_d)} as it is immutable and already has value ' f'{self._tau_d}.' ) raise AttributeError(msg) self._tau_d = Symbol(f'tau_d_{self.name}') if tau_d is None else tau_d @property def tau_d(self): """Delay constant for deactivation. Explanation =========== The alias ``deactivation_time_constant`` can also be used to access the same attribute. """ return self._tau_d @property def smoothing_rate(self): """Smoothing constant for the hyperbolic tangent term. Explanation =========== The alias ``b`` can also be used to access the same attribute. """ return self._b @smoothing_rate.setter def smoothing_rate(self, b): if hasattr(self, '_b'): msg = ( f'Can\'t set attribute `smoothing_rate` to {b!r} as it is ' f'immutable and already has value {self._b!r}.' ) raise AttributeError(msg) self._b = Symbol(f'b_{self.name}') if b is None else b @property def b(self): """Smoothing constant for the hyperbolic tangent term. Explanation =========== The alias ``smoothing_rate`` can also be used to access the same attribute. """ return self._b @property def order(self): """Order of the (differential) equation governing activation.""" return 1 @property def state_vars(self): """Ordered column matrix of functions of time that represent the state variables. Explanation =========== The alias ``x`` can also be used to access the same attribute. """ return Matrix([self._a]) @property def x(self): """Ordered column matrix of functions of time that represent the state variables. Explanation =========== The alias ``state_vars`` can also be used to access the same attribute. """ return Matrix([self._a]) @property def input_vars(self): """Ordered column matrix of functions of time that represent the input variables. Explanation =========== The alias ``r`` can also be used to access the same attribute. """ return Matrix([self._e]) @property def r(self): """Ordered column matrix of functions of time that represent the input variables. Explanation =========== The alias ``input_vars`` can also be used to access the same attribute. """ return Matrix([self._e]) @property def constants(self): """Ordered column matrix of non-time varying symbols present in ``M`` and ``F``. Only symbolic constants are returned. If a numeric type (e.g. ``Float``) has been used instead of ``Symbol`` for a constant then that attribute will not be included in the matrix returned by this property. This is because the primary use of this property attribute is to provide an ordered sequence of the still-free symbols that require numeric values during code generation. Explanation =========== The alias ``p`` can also be used to access the same attribute. """ constants = [self._tau_a, self._tau_d, self._b] symbolic_constants = [c for c in constants if not c.is_number] return Matrix(symbolic_constants) if symbolic_constants else zeros(0, 1) @property def p(self): """Ordered column matrix of non-time varying symbols present in ``M`` and ``F``. Explanation =========== Only symbolic constants are returned. If a numeric type (e.g. ``Float``) has been used instead of ``Symbol`` for a constant then that attribute will not be included in the matrix returned by this property. This is because the primary use of this property attribute is to provide an ordered sequence of the still-free symbols that require numeric values during code generation. 
The alias ``constants`` can also be used to access the same attribute. """ constants = [self._tau_a, self._tau_d, self._b] symbolic_constants = [c for c in constants if not c.is_number] return Matrix(symbolic_constants) if symbolic_constants else zeros(0, 1) @property def M(self): """Ordered square matrix of coefficients on the LHS of ``M x' = F``. Explanation =========== The square matrix that forms part of the LHS of the linear system of ordinary differential equations governing the activation dynamics: ``M(x, r, t, p) x' = F(x, r, t, p)``. """ return Matrix([Integer(1)]) @property def F(self): """Ordered column matrix of equations on the RHS of ``M x' = F``. Explanation =========== The column matrix that forms the RHS of the linear system of ordinary differential equations governing the activation dynamics: ``M(x, r, t, p) x' = F(x, r, t, p)``. """ return Matrix([self._da_eqn]) def rhs(self): """Ordered column matrix of equations for the solution of ``M x' = F``. Explanation =========== The solution to the linear system of ordinary differential equations governing the activation dynamics: ``M(x, r, t, p) x' = F(x, r, t, p)``. """ return Matrix([self._da_eqn]) @cached_property def _da_eqn(self): HALF = Rational(1, 2) a0 = HALF * tanh(self._b * (self._e - self._a)) a1 = (HALF + Rational(3, 2) * self._a) a2 = (HALF + a0) / (self._tau_a * a1) a3 = a1 * (HALF - a0) / self._tau_d activation_dynamics_equation = (a2 + a3) * (self._e - self._a) return activation_dynamics_equation def __eq__(self, other): """Equality check for ``FirstOrderActivationDeGroote2016``.""" if type(self) != type(other): return False self_attrs = (self.name, self.tau_a, self.tau_d, self.b) other_attrs = (other.name, other.tau_a, other.tau_d, other.b) if self_attrs == other_attrs: return True return False def __repr__(self): """Representation of ``FirstOrderActivationDeGroote2016``.""" return ( f'{self.__class__.__name__}({self.name!r}, ' f'activation_time_constant={self.tau_a!r}, ' f'deactivation_time_constant={self.tau_d!r}, ' f'smoothing_rate={self.b!r})' )
FirstOrderActivationDeGroote2016
python
oauthlib__oauthlib
oauthlib/oauth2/rfc8628/endpoints/pre_configured.py
{ "start": 221, "end": 1403 }
class ____(DeviceAuthorizationEndpoint): """An all-in-one endpoint featuring Authorization code grant and Bearer tokens.""" def __init__( self, request_validator: RequestValidator, verification_uri: str, interval: int = 5, verification_uri_complete: Optional[str] = None, # noqa: FA100 user_code_generator: Callable[[None], str] = None, **kwargs, ): """Construct a new web application server. :param request_validator: An implementation of oauthlib.oauth2.rfc8626.RequestValidator. :param interval: How long the device needs to wait before polling the server :param verification_uri: the verification_uri to be send back. :param user_code_generator: a callable that allows the user code to be configured. """ DeviceAuthorizationEndpoint.__init__( self, request_validator, interval=interval, verification_uri=verification_uri, user_code_generator=user_code_generator, verification_uri_complete=verification_uri_complete, )
DeviceApplicationServer
python
walkccc__LeetCode
solutions/3001. Minimum Moves to Capture The Queen/3001.py
{ "start": 0, "end": 913 }
class ____: def minMovesToCaptureTheQueen( self, a: int, b: int, c: int, d: int, e: int, f: int, ) -> int: # The rook is in the same row as the queen. if a == e: # The bishop blocks the rook or not. return 2 if c == a and (b < d < f or b > d > f) else 1 # The rook is in the same column as the queen. if b == f: # The bishop blocks the rook or not. return 2 if d == f and (a < c < e or a > c > e) else 1 # The bishop is in the same up-diagonal as the queen. if c + d == e + f: # The rook blocks the bishop or not. return 2 if a + b == c + d and (c < a < e or c > a > e) else 1 # The bishop is in the same down-diagonal as the queen. if c - d == e - f: # The rook blocks the bishop or not. return 2 if a - b == c - d and (c < a < e or c > a > e) else 1 # The rook can always capture the queen in two moves. return 2
Solution
python
apache__airflow
providers/google/tests/unit/google/common/hooks/test_base_google.py
{ "start": 2653, "end": 3723 }
class ____: def test_do_nothing_on_non_error(self): result = _retryable_test_with_temporary_quota_retry(lambda: 42) assert result == 42 def test_retry_on_exception(self): message = "POST https://translation.googleapis.com/language/translate/v2: User Rate Limit Exceeded" errors = [mock.MagicMock(details=mock.PropertyMock(return_value="userRateLimitExceeded"))] custom_fn = NoForbiddenAfterCount(count=5, message=message, errors=errors) _retryable_test_with_temporary_quota_retry(custom_fn) assert custom_fn.counter == 5 def test_raise_exception_on_non_quota_exception(self): message = "POST https://translation.googleapis.com/language/translate/v2: Daily Limit Exceeded" errors = [mock.MagicMock(details=mock.PropertyMock(return_value="dailyLimitExceeded"))] with pytest.raises(Forbidden, match="Daily Limit Exceeded"): _retryable_test_with_temporary_quota_retry( NoForbiddenAfterCount(5, message=message, errors=errors) )
TestQuotaRetry
python
sqlalchemy__sqlalchemy
test/ext/asyncio/test_engine.py
{ "start": 38153, "end": 51434 }
class ____(EngineFixture): __backend__ = True __requires__ = ("server_side_cursors", "async_dialect") @async_test async def test_no_ss_cursor_w_execute(self, async_engine): users = self.tables.users async with async_engine.connect() as conn: conn = await conn.execution_options(stream_results=True) with expect_raises_message( async_exc.AsyncMethodRequired, r"Can't use the AsyncConnection.execute\(\) method with a " r"server-side cursor. Use the AsyncConnection.stream\(\) " r"method for an async streaming result set.", ): await conn.execute(select(users)) @async_test async def test_no_ss_cursor_w_exec_driver_sql(self, async_engine): async with async_engine.connect() as conn: conn = await conn.execution_options(stream_results=True) with expect_raises_message( async_exc.AsyncMethodRequired, r"Can't use the AsyncConnection.exec_driver_sql\(\) " r"method with a " r"server-side cursor. Use the AsyncConnection.stream\(\) " r"method for an async streaming result set.", ): await conn.exec_driver_sql("SELECT * FROM users") @async_test async def test_stream_ctxmanager(self, async_engine): async with async_engine.connect() as conn: conn = await conn.execution_options(stream_results=True) async with conn.stream(select(self.tables.users)) as result: assert not result._real_result._soft_closed assert not result.closed with expect_raises_message(Exception, "hi"): i = 0 async for row in result: if i > 2: raise Exception("hi") i += 1 assert result._real_result._soft_closed assert result.closed @async_test async def test_stream_scalars_ctxmanager(self, async_engine): async with async_engine.connect() as conn: conn = await conn.execution_options(stream_results=True) async with conn.stream_scalars( select(self.tables.users) ) as result: assert not result._real_result._soft_closed assert not result.closed with expect_raises_message(Exception, "hi"): i = 0 async for scalar in result: if i > 2: raise Exception("hi") i += 1 assert result._real_result._soft_closed assert result.closed @testing.combinations( (None,), ("scalars",), ("mappings",), argnames="filter_" ) @async_test async def test_all(self, async_engine, filter_): users = self.tables.users async with async_engine.connect() as conn: result = await conn.stream(select(users)) if filter_ == "mappings": result = result.mappings() elif filter_ == "scalars": result = result.scalars(1) all_ = await result.all() if filter_ == "mappings": eq_( all_, [ {"user_id": i, "user_name": "name%d" % i} for i in range(1, 20) ], ) elif filter_ == "scalars": eq_( all_, ["name%d" % i for i in range(1, 20)], ) else: eq_(all_, [(i, "name%d" % i) for i in range(1, 20)]) @testing.combinations( (None,), ("scalars",), ("stream_scalars",), ("mappings",), argnames="filter_", ) @async_test async def test_aiter(self, async_engine, filter_): users = self.tables.users async with async_engine.connect() as conn: if filter_ == "stream_scalars": result = await conn.stream_scalars(select(users.c.user_name)) else: result = await conn.stream(select(users)) if filter_ == "mappings": result = result.mappings() elif filter_ == "scalars": result = result.scalars(1) rows = [] async for row in result: rows.append(row) if filter_ == "mappings": eq_( rows, [ {"user_id": i, "user_name": "name%d" % i} for i in range(1, 20) ], ) elif filter_ in ("scalars", "stream_scalars"): eq_( rows, ["name%d" % i for i in range(1, 20)], ) else: eq_(rows, [(i, "name%d" % i) for i in range(1, 20)]) @testing.combinations((None,), ("mappings",), argnames="filter_") @async_test async def test_keys(self, async_engine, filter_): 
users = self.tables.users async with async_engine.connect() as conn: result = await conn.stream(select(users)) if filter_ == "mappings": result = result.mappings() eq_(result.keys(), ["user_id", "user_name"]) await result.close() @async_test async def test_unique_all(self, async_engine): users = self.tables.users async with async_engine.connect() as conn: result = await conn.stream( union_all(select(users), select(users)).order_by( users.c.user_id ) ) all_ = await result.unique().all() eq_(all_, [(i, "name%d" % i) for i in range(1, 20)]) @async_test async def test_columns_all(self, async_engine): users = self.tables.users async with async_engine.connect() as conn: result = await conn.stream(select(users)) all_ = await result.columns(1).all() eq_(all_, [("name%d" % i,) for i in range(1, 20)]) @testing.combinations( (None,), ("scalars",), ("mappings",), argnames="filter_" ) @testing.combinations(None, 2, 5, 10, argnames="yield_per") @testing.combinations("method", "opt", argnames="yield_per_type") @async_test async def test_partitions( self, async_engine, filter_, yield_per, yield_per_type ): users = self.tables.users async with async_engine.connect() as conn: stmt = select(users) if yield_per and yield_per_type == "opt": stmt = stmt.execution_options(yield_per=yield_per) result = await conn.stream(stmt) if filter_ == "mappings": result = result.mappings() elif filter_ == "scalars": result = result.scalars(1) if yield_per and yield_per_type == "method": result = result.yield_per(yield_per) check_result = [] # stream() sets stream_results unconditionally assert isinstance( result._real_result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy, ) if yield_per: partition_size = yield_per eq_(result._real_result.cursor_strategy._bufsize, yield_per) async for partition in result.partitions(): check_result.append(partition) else: eq_(result._real_result.cursor_strategy._bufsize, 5) partition_size = 5 async for partition in result.partitions(partition_size): check_result.append(partition) ranges = [ (i, min(20, i + partition_size)) for i in range(1, 21, partition_size) ] if filter_ == "mappings": eq_( check_result, [ [ {"user_id": i, "user_name": "name%d" % i} for i in range(a, b) ] for (a, b) in ranges ], ) elif filter_ == "scalars": eq_( check_result, [["name%d" % i for i in range(a, b)] for (a, b) in ranges], ) else: eq_( check_result, [ [(i, "name%d" % i) for i in range(a, b)] for (a, b) in ranges ], ) @testing.combinations( (None,), ("scalars",), ("mappings",), argnames="filter_" ) @async_test async def test_one_success(self, async_engine, filter_): users = self.tables.users async with async_engine.connect() as conn: result = await conn.stream( select(users).limit(1).order_by(users.c.user_name) ) if filter_ == "mappings": result = result.mappings() elif filter_ == "scalars": result = result.scalars() u1 = await result.one() if filter_ == "mappings": eq_(u1, {"user_id": 1, "user_name": "name%d" % 1}) elif filter_ == "scalars": eq_(u1, 1) else: eq_(u1, (1, "name%d" % 1)) @async_test async def test_one_no_result(self, async_engine): users = self.tables.users async with async_engine.connect() as conn: result = await conn.stream( select(users).where(users.c.user_name == "nonexistent") ) with expect_raises_message( exc.NoResultFound, "No row was found when one was required" ): await result.one() @async_test async def test_one_multi_result(self, async_engine): users = self.tables.users async with async_engine.connect() as conn: result = await conn.stream( 
select(users).where(users.c.user_name.in_(["name3", "name5"])) ) with expect_raises_message( exc.MultipleResultsFound, "Multiple rows were found when exactly one was required", ): await result.one() @testing.combinations(("scalars",), ("stream_scalars",), argnames="case") @async_test async def test_scalars(self, async_engine, case): users = self.tables.users stmt = select(users).order_by(users.c.user_id) async with async_engine.connect() as conn: if case == "scalars": result = (await conn.scalars(stmt)).all() elif case == "stream_scalars": result = await (await conn.stream_scalars(stmt)).all() eq_(result, list(range(1, 20))) @async_test @testing.combinations(("stream",), ("stream_scalars",), argnames="case") async def test_stream_fetch_many_not_complete(self, async_engine, case): users = self.tables.users big_query = select(users).join(users.alias("other"), true()) async with async_engine.connect() as conn: if case == "stream": result = await conn.stream(big_query) elif case == "stream_scalars": result = await conn.stream_scalars(big_query) f1 = await result.fetchmany(5) f2 = await result.fetchmany(10) f3 = await result.fetchmany(7) eq_(len(f1) + len(f2) + len(f3), 22) res = await result.fetchall() eq_(len(res), 19 * 19 - 22) @async_test @testing.combinations(("stream",), ("execute",), argnames="case") async def test_cursor_close(self, async_engine, case): users = self.tables.users async with async_engine.connect() as conn: if case == "stream": result = await conn.stream(select(users)) cursor = result._real_result.cursor elif case == "execute": result = await conn.execute(select(users)) cursor = result.cursor await conn.run_sync(lambda _: cursor.close()) @async_test @testing.variation("case", ["scalar_one", "scalar_one_or_none", "scalar"]) async def test_stream_scalar(self, async_engine, case: testing.Variation): users = self.tables.users async with async_engine.connect() as conn: result = await conn.stream( select(users).limit(1).order_by(users.c.user_name) ) if case.scalar_one: u1 = await result.scalar_one() elif case.scalar_one_or_none: u1 = await result.scalar_one_or_none() elif case.scalar: u1 = await result.scalar() else: case.fail() eq_(u1, 1)
AsyncResultTest
python
mlflow__mlflow
examples/pydanticai/tracing.py
{ "start": 1100, "end": 2551 }
class ____(BaseModel): support_advice: str = Field(description="Advice returned to the customer") block_card: bool = Field(description="Whether to block their card or not") risk: int = Field(description="Risk level of query", ge=0, le=10) support_agent = Agent( "openai:gpt-4o", deps_type=SupportDependencies, output_type=SupportOutput, system_prompt=( "You are a support agent in our bank, give the " "customer support and judge the risk level of their query. " "Reply using the customer's name." ), instrument=True, ) @support_agent.system_prompt async def add_customer_name(ctx: RunContext[SupportDependencies]) -> str: customer_name = await ctx.deps.db.customer_name(id=ctx.deps.customer_id) return f"The customer's name is {customer_name!r}" @support_agent.tool async def customer_balance(ctx: RunContext[SupportDependencies], include_pending: bool) -> str: """Returns the customer's current account balance.""" balance = await ctx.deps.db.customer_balance( id=ctx.deps.customer_id, include_pending=include_pending, ) return f"${balance:.2f}" if __name__ == "__main__": deps = SupportDependencies(customer_id=123, db=DatabaseConn()) result = support_agent.run_sync("What is my balance?", deps=deps) print(result.output) result = support_agent.run_sync("I just lost my card!", deps=deps) print(result.output)
SupportOutput
python
PrefectHQ__prefect
src/prefect/server/schemas/filters.py
{ "start": 31275, "end": 32218 }
class ____(PrefectOperatorFilterBaseModel):
    """Filter by `TaskRun.state_type` and `TaskRun.state_name`."""

    type: Optional[TaskRunFilterStateType] = Field(
        default=None, description="Filter criteria for `TaskRun.state_type`"
    )
    name: Optional[TaskRunFilterStateName] = Field(
        default=None, description="Filter criteria for `TaskRun.state_name`"
    )

    def _get_filter_list(
        self, db: "PrefectDBInterface"
    ) -> Iterable[sa.ColumnExpressionArgument[bool]]:
        filters: list[sa.ColumnExpressionArgument[bool]] = []
        if self.type is not None:
            filter = self.type.as_sql_filter()
            if isinstance(filter, sa.BinaryExpression):
                filters.append(filter)
        if self.name is not None:
            filter = self.name.as_sql_filter()
            if isinstance(filter, sa.BinaryExpression):
                filters.append(filter)
        return filters
TaskRunFilterState
python
pydantic__pydantic
pydantic/networks.py
{ "start": 4147, "end": 11644 }
class ____: _constraints: ClassVar[UrlConstraints] = UrlConstraints() _url: _CoreUrl def __init__(self, url: str | _CoreUrl | _BaseUrl) -> None: self._url = _build_type_adapter(self.__class__).validate_python(url)._url @property def scheme(self) -> str: """The scheme part of the URL. e.g. `https` in `https://user:pass@host:port/path?query#fragment` """ return self._url.scheme @property def username(self) -> str | None: """The username part of the URL, or `None`. e.g. `user` in `https://user:pass@host:port/path?query#fragment` """ return self._url.username @property def password(self) -> str | None: """The password part of the URL, or `None`. e.g. `pass` in `https://user:pass@host:port/path?query#fragment` """ return self._url.password @property def host(self) -> str | None: """The host part of the URL, or `None`. If the URL must be punycode encoded, this is the encoded host, e.g if the input URL is `https://£££.com`, `host` will be `xn--9aaa.com` """ return self._url.host def unicode_host(self) -> str | None: """The host part of the URL as a unicode string, or `None`. e.g. `host` in `https://user:pass@host:port/path?query#fragment` If the URL must be punycode encoded, this is the decoded host, e.g if the input URL is `https://£££.com`, `unicode_host()` will be `£££.com` """ return self._url.unicode_host() @property def port(self) -> int | None: """The port part of the URL, or `None`. e.g. `port` in `https://user:pass@host:port/path?query#fragment` """ return self._url.port @property def path(self) -> str | None: """The path part of the URL, or `None`. e.g. `/path` in `https://user:pass@host:port/path?query#fragment` """ return self._url.path @property def query(self) -> str | None: """The query part of the URL, or `None`. e.g. `query` in `https://user:pass@host:port/path?query#fragment` """ return self._url.query def query_params(self) -> list[tuple[str, str]]: """The query part of the URL as a list of key-value pairs. e.g. `[('foo', 'bar')]` in `https://user:pass@host:port/path?foo=bar#fragment` """ return self._url.query_params() @property def fragment(self) -> str | None: """The fragment part of the URL, or `None`. e.g. `fragment` in `https://user:pass@host:port/path?query#fragment` """ return self._url.fragment def unicode_string(self) -> str: """The URL as a unicode string, unlike `__str__()` this will not punycode encode the host. If the URL must be punycode encoded, this is the decoded string, e.g if the input URL is `https://£££.com`, `unicode_string()` will be `https://£££.com` """ return self._url.unicode_string() def encoded_string(self) -> str: """The URL's encoded string representation via __str__(). This returns the punycode-encoded host version of the URL as a string. 
""" return str(self) def __str__(self) -> str: """The URL as a string, this will punycode encode the host if required.""" return str(self._url) def __repr__(self) -> str: return f'{self.__class__.__name__}({str(self._url)!r})' def __deepcopy__(self, memo: dict) -> Self: return self.__class__(self._url) def __eq__(self, other: Any) -> bool: return self.__class__ is other.__class__ and self._url == other._url def __lt__(self, other: Any) -> bool: return self.__class__ is other.__class__ and self._url < other._url def __gt__(self, other: Any) -> bool: return self.__class__ is other.__class__ and self._url > other._url def __le__(self, other: Any) -> bool: return self.__class__ is other.__class__ and self._url <= other._url def __ge__(self, other: Any) -> bool: return self.__class__ is other.__class__ and self._url >= other._url def __hash__(self) -> int: return hash(self._url) def __len__(self) -> int: return len(str(self._url)) @classmethod def build( cls, *, scheme: str, username: str | None = None, password: str | None = None, host: str, port: int | None = None, path: str | None = None, query: str | None = None, fragment: str | None = None, ) -> Self: """Build a new `Url` instance from its component parts. Args: scheme: The scheme part of the URL. username: The username part of the URL, or omit for no username. password: The password part of the URL, or omit for no password. host: The host part of the URL. port: The port part of the URL, or omit for no port. path: The path part of the URL, or omit for no path. query: The query part of the URL, or omit for no query. fragment: The fragment part of the URL, or omit for no fragment. Returns: An instance of URL """ return cls( _CoreUrl.build( scheme=scheme, username=username, password=password, host=host, port=port, path=path, query=query, fragment=fragment, ) ) @classmethod def serialize_url(cls, url: Any, info: core_schema.SerializationInfo) -> str | Self: if not isinstance(url, cls): raise PydanticSerializationUnexpectedValue( f"Expected `{cls}` but got `{type(url)}` with value `'{url}'` - serialized value may not be as expected." ) if info.mode == 'json': return str(url) return url @classmethod def __get_pydantic_core_schema__( cls, source: type[_BaseUrl], handler: GetCoreSchemaHandler ) -> core_schema.CoreSchema: def wrap_val(v, h): if isinstance(v, source): return v if isinstance(v, _BaseUrl): v = str(v) core_url = h(v) instance = source.__new__(source) instance._url = core_url return instance return core_schema.no_info_wrap_validator_function( wrap_val, schema=core_schema.url_schema(**cls._constraints.defined_constraints), serialization=core_schema.plain_serializer_function_ser_schema( cls.serialize_url, info_arg=True, when_used='always' ), ) @classmethod def __get_pydantic_json_schema__( cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler ) -> JsonSchemaValue: # we use the url schema for json schema generation, but we might have to extract it from # the function-wrap schema we use as a tool for validation on initialization inner_schema = core_schema['schema'] if core_schema['type'] == 'function-wrap' else core_schema return handler(inner_schema) __pydantic_serializer__ = SchemaSerializer(core_schema.any_schema(serialization=core_schema.to_string_ser_schema()))
_BaseUrl
python
coleifer__peewee
tests/shortcuts.py
{ "start": 27742, "end": 29929 }
class ____(BaseTestCase): def setUp(self): super(TestThreadSafeDatabaseMetadata, self).setUp() ts_database.create_tables([TSReg]) def test_threadsafe_database_metadata(self): self.assertTrue(isinstance(TSReg._meta, ThreadSafeDatabaseMetadata)) self.assertEqual(TSReg._meta.database, ts_database) t1 = TSReg.create(key='k1') t1_db = TSReg.get(TSReg.key == 'k1') self.assertEqual(t1.id, t1_db.id) def test_swap_database(self): d1 = get_in_memory_db() d2 = get_in_memory_db() class M(TSBase): pass def swap_db(): self.assertEqual(M._meta.database, ts_database) d1.bind([M]) self.assertEqual(M._meta.database, d1) with d2.bind_ctx([M]): self.assertEqual(M._meta.database, d2) self.assertEqual(M._meta.database, d1) self.assertEqual(M._meta.database, ts_database) # From a separate thread, swap the database and verify it works # correctly. t = threading.Thread(target=swap_db) t.start() ; t.join() # In the main thread the original database has not been altered. self.assertEqual(M._meta.database, ts_database) def test_preserve_original_db(self): outputs = [] d1 = get_in_memory_db() d2 = get_in_memory_db() class M(TSBase): class Meta: database = d1 def swap_db(): self.assertTrue(M._meta.database is d1) with d2.bind_ctx([M]): self.assertTrue(M._meta.database is d2) self.assertTrue(M._meta.database is d1) d2.bind([M]) # Now bind to d2 and leave it bound. self.assertTrue(M._meta.database is d2) # From a separate thread, swap the database and verify it works # correctly. threads = [threading.Thread(target=swap_db) for _ in range(20)] for t in threads: t.start() for t in threads: t.join() # In the main thread the original database has not been altered. self.assertTrue(M._meta.database is d1)
TestThreadSafeDatabaseMetadata