Dataset columns and value statistics:

  column       type            values / length
  ----------   -------------   ------------------------
  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 to 201 characters
  class_span   dict            {"start", "end"} offsets
  source       stringlengths   21 to 2.38M characters
  target       stringlengths   1 to 96 characters
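Each record below follows this schema. As a minimal sketch of how such a dump might be consumed, assuming the rows are published as a Hugging Face dataset (the identifier "org/masked-class-names" and the "train" split name are placeholders, not real names), one could restore each masked class name like this:

# Minimal sketch, assuming the rows below are available as a Hugging Face
# dataset; "org/masked-class-names" and the split name are placeholders.
from datasets import load_dataset

ds = load_dataset("org/masked-class-names", split="train")

for row in ds.select(range(3)):
    # "source" holds the class body with its name masked as "____";
    # "target" is the masked name and "class_span" the start/end offsets
    # of the class definition inside the file at "path".
    restored = row["source"].replace("____", row["target"], 1)
    print(row["repo"], row["path"], row["target"], len(restored))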
language: python
repo: encode__django-rest-framework
path: tests/test_model_serializer.py
class_span: { "start": 16056, "end": 18270 }
source:
class ____(TestCase): def test_hstore_field(self): class HStoreFieldModel(models.Model): hstore_field = postgres_fields.HStoreField() class TestSerializer(serializers.ModelSerializer): class Meta: model = HStoreFieldModel fields = ['hstore_field'] expected = dedent(""" TestSerializer(): hstore_field = HStoreField() """) self.assertEqual(repr(TestSerializer()), expected) def test_array_field(self): class ArrayFieldModel(models.Model): array_field = postgres_fields.ArrayField(base_field=models.CharField()) array_field_with_blank = postgres_fields.ArrayField(blank=True, base_field=models.CharField()) class TestSerializer(serializers.ModelSerializer): class Meta: model = ArrayFieldModel fields = ['array_field', 'array_field_with_blank'] expected = dedent(""" TestSerializer(): array_field = ListField(allow_empty=False, child=CharField(label='Array field')) array_field_with_blank = ListField(child=CharField(label='Array field with blank'), required=False) """) self.assertEqual(repr(TestSerializer()), expected) @pytest.mark.skipif(hasattr(models, 'JSONField'), reason='has models.JSONField') def test_json_field(self): class JSONFieldModel(models.Model): json_field = postgres_fields.JSONField() json_field_with_encoder = postgres_fields.JSONField(encoder=DjangoJSONEncoder) class TestSerializer(serializers.ModelSerializer): class Meta: model = JSONFieldModel fields = ['json_field', 'json_field_with_encoder'] expected = dedent(""" TestSerializer(): json_field = JSONField(encoder=None, style={'base_template': 'textarea.html'}) json_field_with_encoder = JSONField(encoder=<class 'django.core.serializers.json.DjangoJSONEncoder'>, style={'base_template': 'textarea.html'}) """) self.assertEqual(repr(TestSerializer()), expected)
target: TestPosgresFieldsMapping
language: python
repo: spack__spack
path: lib/spack/spack/vendor/jinja2/visitor.py
class_span: { "start": 343, "end": 1766 }
source:
class ____: """Walks the abstract syntax tree and call visitor functions for every node found. The visitor functions may return values which will be forwarded by the `visit` method. Per default the visitor functions for the nodes are ``'visit_'`` + class name of the node. So a `TryFinally` node visit function would be `visit_TryFinally`. This behavior can be changed by overriding the `get_visitor` function. If no visitor function exists for a node (return value `None`) the `generic_visit` visitor is used instead. """ def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]": """Return the visitor function for this node or `None` if no visitor exists for this node. In that case the generic visit function is used instead. """ return getattr(self, f"visit_{type(node).__name__}", None) # type: ignore def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any: """Visit a node.""" f = self.get_visitor(node) if f is not None: return f(node, *args, **kwargs) return self.generic_visit(node, *args, **kwargs) def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any: """Called if no explicit visitor function exists for a node.""" for node in node.iter_child_nodes(): self.visit(node, *args, **kwargs)
target: NodeVisitor
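The docstring above describes the `visit_<ClassName>` dispatch convention of this visitor. A short illustrative sketch of that dispatch, using the upstream jinja2 package rather than spack's vendored copy (the NameCounter class is invented for illustration, not part of the dataset):

# Illustrative only: count Name nodes in a Jinja2 template AST using the
# visitor dispatch described above (upstream jinja2, not the vendored copy).
from jinja2 import Environment
from jinja2.visitor import NodeVisitor

class NameCounter(NodeVisitor):
    def __init__(self) -> None:
        self.count = 0

    def visit_Name(self, node) -> None:
        # visit() dispatches here for every nodes.Name via get_visitor()
        self.count += 1
        self.generic_visit(node)

tree = Environment().parse("{{ user.name }} has {{ items | length }} items")
counter = NameCounter()
counter.visit(tree)
print(counter.count)  # 2: the 'user' and 'items' variable references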
language: python
repo: numpy__numpy
path: numpy/_core/tests/test_numerictypes.py
class_span: { "start": 7831, "end": 8023 }
source:
class ____(ReadValuesPlain): """Check the creation of heterogeneous arrays (plain, single row)""" _descr = Pdescr multiple_rows = 0 _buffer = PbufferT[0]
target: TestReadValuesPlainSingle
language: python
repo: pytorch__pytorch
path: torch/cuda/__init__.py
class_span: { "start": 57216, "end": 57430 }
source:
class ____(_CudaLegacyStorage): @classproperty def dtype(self): _warn_typed_storage_removal() return self._dtype @classproperty def _dtype(self): return torch.int8
target: CharStorage
language: python
repo: instagram__MonkeyType
path: monkeytype/stubs.py
class_span: { "start": 2560, "end": 9005 }
source:
class ____(DefaultDict[Any, Any]): """A mapping of module name to the set of names to be imported.""" def __init__(self) -> None: super().__init__(set) def merge(self, other: "ImportMap") -> None: for module, names in other.items(): self[module].update(names) def _get_import_for_qualname(qualname: str) -> str: # Nested classes are annotated using the path from the root class # (e.g. Parent.Child, where Child is defined inside Parent) return qualname.split(".")[0] def get_imports_for_annotation(anno: Any) -> ImportMap: """Return the imports (module, name) needed for the type in the annotation""" imports = ImportMap() if ( anno is inspect.Parameter.empty or anno is inspect.Signature.empty or not ( isinstance(anno, type) or is_any(anno) or is_union(anno) or is_generic(anno) ) or anno.__module__ == "builtins" ): return imports if is_any(anno): imports["typing"].add("Any") elif _is_optional(anno): imports["typing"].add("Optional") elem_type = _get_optional_elem(anno) elem_imports = get_imports_for_annotation(elem_type) imports.merge(elem_imports) elif is_generic(anno): if is_union(anno): imports["typing"].add("Union") else: imports[anno.__module__].add( _get_import_for_qualname(qualname_of_generic(anno)) ) elem_types = getattr(anno, "__args__", None) or [] for et in elem_types: elem_imports = get_imports_for_annotation(et) imports.merge(elem_imports) else: name = _get_import_for_qualname(anno.__qualname__) imports[anno.__module__].add(name) return imports def get_imports_for_signature(sig: inspect.Signature) -> ImportMap: """Return the imports (module, name) needed for all types in annotations""" imports = ImportMap() for param in sig.parameters.values(): param_imports = get_imports_for_annotation(param.annotation) if not _is_optional(param.annotation) and param.default is None: imports["typing"].add("Optional") imports.merge(param_imports) return_imports = get_imports_for_annotation(sig.return_annotation) imports.merge(return_imports) return imports def update_signature_args( sig: inspect.Signature, arg_types: Dict[str, type], has_self: bool, existing_annotation_strategy: ExistingAnnotationStrategy = ExistingAnnotationStrategy.REPLICATE, ) -> inspect.Signature: """Update argument annotations with the supplied types""" params = [] for arg_idx, name in enumerate(sig.parameters): param = sig.parameters[name] typ = arg_types.get(name) typ = inspect.Parameter.empty if typ is None else typ is_self = has_self and arg_idx == 0 annotated = param.annotation is not inspect.Parameter.empty if ( annotated and existing_annotation_strategy == ExistingAnnotationStrategy.OMIT ): # generate no annotation for already-annotated args when generating # a stub to apply, avoiding the possibility of "incompatible # annotation" errors param = param.replace(annotation=inspect.Parameter.empty) # Don't touch existing annotations unless asked to ignore them if not is_self and ( (existing_annotation_strategy == ExistingAnnotationStrategy.IGNORE) or not annotated ): param = param.replace(annotation=typ) params.append(param) return sig.replace(parameters=params) def update_signature_return( sig: inspect.Signature, return_type: Optional[type] = None, yield_type: Optional[type] = None, existing_annotation_strategy: ExistingAnnotationStrategy = ExistingAnnotationStrategy.REPLICATE, ) -> inspect.Signature: """Update return annotation with the supplied types""" anno = sig.return_annotation if anno is not inspect.Signature.empty: # If generating a stub to apply and there's already a return type # annotation, generate a stub with no 
return type annotation, to avoid # the possibility of "incompatible annotation" errors. if existing_annotation_strategy == ExistingAnnotationStrategy.OMIT: return sig.replace(return_annotation=inspect.Signature.empty) # Don't change pre-existing annotations unless asked to if existing_annotation_strategy == ExistingAnnotationStrategy.REPLICATE: return sig # NB: We cannot distinguish between functions that explicitly only # return None and those that do so implicitly. In the case of generator # functions both are typed as Iterator[<yield_type>] if (yield_type is not None) and ( (return_type is None) or (return_type == NoneType) ): anno = make_iterator(yield_type) elif (yield_type is not None) and (return_type is not None): anno = make_generator(yield_type, NoneType, return_type) elif return_type is not None: anno = return_type return sig.replace(return_annotation=anno) def shrink_traced_types( traces: Iterable[CallTrace], max_typed_dict_size: int, ) -> Tuple[Dict[str, type], Optional[type], Optional[type]]: """Merges the traced types and returns the minimally equivalent types""" arg_types: DefaultDict[str, Set[type]] = collections.defaultdict(set) return_types: Set[type] = set() yield_types: Set[type] = set() for t in traces: for arg, typ in t.arg_types.items(): arg_types[arg].add(typ) if t.return_type is not None: return_types.add(t.return_type) if t.yield_type is not None: yield_types.add(t.yield_type) shrunken_arg_types = { name: shrink_types(ts, max_typed_dict_size) for name, ts in arg_types.items() } return_type = ( shrink_types(return_types, max_typed_dict_size) if return_types else None ) yield_type = shrink_types(yield_types, max_typed_dict_size) if yield_types else None return (shrunken_arg_types, return_type, yield_type) def get_typed_dict_class_name(parameter_name: str) -> str: """Return the name for a TypedDict class generated for parameter `parameter_name`.""" return f"{pascal_case(parameter_name)}TypedDict__RENAME_ME__"
target: ImportMap
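Since the ImportMap class shown above is just a DefaultDict mapping module names to sets of imported names, with a union-style merge, a short usage sketch (assuming the monkeytype package is installed so the class can be imported from monkeytype.stubs) looks like this:

# Usage sketch for the ImportMap shown above: module name -> set of imported
# names, merged by set union. Assumes the monkeytype package is installed.
from monkeytype.stubs import ImportMap

first = ImportMap()
first["typing"].add("Optional")

second = ImportMap()
second["typing"].add("Union")
second["collections"].add("OrderedDict")

first.merge(second)
assert first["typing"] == {"Optional", "Union"}
assert first["collections"] == {"OrderedDict"}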
language: python
repo: plotly__plotly.py
path: plotly/graph_objs/histogram2d/colorbar/_tickformatstop.py
class_span: { "start": 233, "end": 8534 }
source:
class ____(_BaseTraceHierarchyType): _parent_path_str = "histogram2d.colorbar" _path_str = "histogram2d.colorbar.tickformatstop" _valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"} @property def dtickrange(self): """ range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" The 'dtickrange' property is an info array that may be specified as: * a list or tuple of 2 elements where: (0) The 'dtickrange[0]' property accepts values of any type (1) The 'dtickrange[1]' property accepts values of any type Returns ------- list """ return self["dtickrange"] @dtickrange.setter def dtickrange(self, val): self["dtickrange"] = val @property def enabled(self): """ Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. The 'enabled' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["enabled"] @enabled.setter def enabled(self, val): self["enabled"] = val @property def name(self): """ When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. The 'name' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["name"] @name.setter def name(self, val): self["name"] = val @property def templateitemname(self): """ Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. The 'templateitemname' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["templateitemname"] @templateitemname.setter def templateitemname(self, val): self["templateitemname"] = val @property def value(self): """ string - dtickformat for described zoom level, the same as "tickformat" The 'value' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["value"] @value.setter def value(self, val): self["value"] = val @property def _prop_descriptions(self): return """\ dtickrange range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" enabled Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. name When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. 
templateitemname Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. value string - dtickformat for described zoom level, the same as "tickformat" """ def __init__( self, arg=None, dtickrange=None, enabled=None, name=None, templateitemname=None, value=None, **kwargs, ): """ Construct a new Tickformatstop object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.histogram2d.co lorbar.Tickformatstop` dtickrange range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" enabled Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. name When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. templateitemname Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. value string - dtickformat for described zoom level, the same as "tickformat" Returns ------- Tickformatstop """ super().__init__("tickformatstops") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.histogram2d.colorbar.Tickformatstop constructor must be a dict or an instance of :class:`plotly.graph_objs.histogram2d.colorbar.Tickformatstop`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("dtickrange", arg, dtickrange) self._set_property("enabled", arg, enabled) self._set_property("name", arg, name) self._set_property("templateitemname", arg, templateitemname) self._set_property("value", arg, value) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
target: Tickformatstop
language: python
repo: scrapy__scrapy
path: tests/CrawlerRunner/change_reactor.py
class_span: { "start": 116, "end": 754 }
source:
class ____(Spider): name = "no_request" custom_settings = { "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", } async def start(self): return yield configure_logging({"LOG_FORMAT": "%(levelname)s: %(message)s", "LOG_LEVEL": "DEBUG"}) from scrapy.utils.reactor import install_reactor # noqa: E402 install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor") runner = CrawlerRunner() d = runner.crawl(NoRequestsSpider) from twisted.internet import reactor # noqa: E402,TID253 d.addBoth(callback=lambda _: reactor.stop()) reactor.run()
target: NoRequestsSpider
language: python
repo: pytorch__pytorch
path: torch/distributed/optim/zero_redundancy_optimizer.py
class_span: { "start": 5965, "end": 6706 }
source:
class ____(enum.IntEnum): r""" Define possible statuses that :class:`ZeroRedundancyOptimizer` can be in when overlapping with :class:`DistributedDataParallel`. Attributes: ``UNINITIALIZED``: The ZeRO instance is effectively uninitialized and is waiting for DDP to finalize its bucketing. ``DDP_HAS_REBUILT_BUCKETS``: DDP has rebuilt its buckets, meaning that its bucketing is finalized. The ZeRO instance can now collect the necessary information about the DDP bucketing. ``INITIALIZED``: The ZeRO instance is fully initialized and can now optimize parameters. """ UNINITIALIZED = 0 DDP_HAS_REBUILT_BUCKETS = 1 INITIALIZED = 2
target: _OverlapStatus
language: python
repo: pytest-dev__pytest-cov
path: src/pytest_cov/engine.py
class_span: { "start": 1345, "end": 9621 }
source:
class ____: """Base class for different plugin implementations.""" def __init__(self, options: argparse.Namespace, config: Union[None, object], nodeid: Union[None, str]): """Get some common config used by multiple derived classes.""" self.cov_source = options.cov_source self.cov_report = options.cov_report self.cov_config = options.cov_config self.cov_append = options.cov_append self.cov_branch = options.cov_branch self.cov_precision = options.cov_precision self.config = config self.nodeid = nodeid self.cov = None self.combining_cov = None self.data_file = None self.node_descs = set() self.failed_workers = [] self.topdir = os.fspath(Path.cwd()) self.is_collocated = None self.started = False @contextlib.contextmanager def ensure_topdir(self): original_cwd = Path.cwd() os.chdir(self.topdir) yield os.chdir(original_cwd) @_ensure_topdir def pause(self): self.started = False self.cov.stop() @_ensure_topdir def resume(self): self.cov.start() self.started = True def start(self): self.started = True def finish(self): self.started = False @staticmethod def get_node_desc(platform, version_info): """Return a description of this node.""" return 'platform {}, python {}'.format(platform, '{}.{}.{}-{}-{}'.format(*version_info[:5])) @staticmethod def get_width(): # taken from https://github.com/pytest-dev/pytest/blob/33c7b05a/src/_pytest/_io/terminalwriter.py#L26 width, _ = shutil.get_terminal_size(fallback=(80, 24)) # The Windows get_terminal_size may be bogus, let's sanify a bit. if width < 40: width = 80 return width def sep(self, stream, s, txt): if hasattr(stream, 'sep'): stream.sep(s, txt) else: fullwidth = self.get_width() # taken from https://github.com/pytest-dev/pytest/blob/33c7b05a/src/_pytest/_io/terminalwriter.py#L126 # The goal is to have the line be as long as possible # under the condition that len(line) <= fullwidth. if sys.platform == 'win32': # If we print in the last column on windows we are on a # new line but there is no way to verify/neutralize this # (we may not know the exact line width). # So let's be defensive to avoid empty lines in the output. fullwidth -= 1 N = max((fullwidth - len(txt) - 2) // (2 * len(s)), 1) fill = s * N line = f'{fill} {txt} {fill}' # In some situations there is room for an extra sepchar at the right, # in particular if we consider that with a sepchar like "_ " the # trailing space is not important at the end of the line. if len(line) + len(s.rstrip()) <= fullwidth: line += s.rstrip() # (end of terminalwriter borrowed code) line += '\n\n' stream.write(line) @_ensure_topdir def summary(self, stream): """Produce coverage reports.""" total = None if not self.cov_report: with _backup(self.cov, 'config'): return self.cov.report(show_missing=True, ignore_errors=True, file=_NullFile) # Output coverage section header. if len(self.node_descs) == 1: self.sep(stream, '_', f'coverage: {"".join(self.node_descs)}') else: self.sep(stream, '_', 'coverage') for node_desc in sorted(self.node_descs): self.sep(stream, ' ', f'{node_desc}') # Report on any failed workers. if self.failed_workers: self.sep(stream, '_', 'coverage: failed workers') stream.write('The following workers failed to return coverage data, ensure that pytest-cov is installed on these workers.\n') for node in self.failed_workers: stream.write(f'{node.gateway.id}\n') # Produce terminal report if wanted. 
if any(x in self.cov_report for x in ['term', 'term-missing']): options = { 'show_missing': ('term-missing' in self.cov_report) or None, 'ignore_errors': True, 'file': stream, 'precision': self.cov_precision, } skip_covered = isinstance(self.cov_report, dict) and 'skip-covered' in self.cov_report.values() options.update({'skip_covered': skip_covered or None}) with _backup(self.cov, 'config'): total = self.cov.report(**options) # Produce annotated source code report if wanted. if 'annotate' in self.cov_report: annotate_dir = self.cov_report['annotate'] with _backup(self.cov, 'config'): self.cov.annotate(ignore_errors=True, directory=annotate_dir) # We need to call Coverage.report here, just to get the total # Coverage.annotate don't return any total and we need it for --cov-fail-under. with _backup(self.cov, 'config'): total = self.cov.report(ignore_errors=True, file=_NullFile) if annotate_dir: stream.write(f'Coverage annotated source written to dir {annotate_dir}\n') else: stream.write('Coverage annotated source written next to source\n') # Produce html report if wanted. if 'html' in self.cov_report: output = self.cov_report['html'] with _backup(self.cov, 'config'): total = self.cov.html_report(ignore_errors=True, directory=output) stream.write(f'Coverage HTML written to dir {self.cov.config.html_dir if output is None else output}\n') # Produce xml report if wanted. if 'xml' in self.cov_report: output = self.cov_report['xml'] with _backup(self.cov, 'config'): total = self.cov.xml_report(ignore_errors=True, outfile=output) stream.write(f'Coverage XML written to file {self.cov.config.xml_output if output is None else output}\n') # Produce json report if wanted if 'json' in self.cov_report: output = self.cov_report['json'] with _backup(self.cov, 'config'): total = self.cov.json_report(ignore_errors=True, outfile=output) stream.write('Coverage JSON written to file %s\n' % (self.cov.config.json_output if output is None else output)) # Produce Markdown report if wanted. if 'markdown' in self.cov_report: output = self.cov_report['markdown'] with _backup(self.cov, 'config'): with Path(output).open('w') as output_file: total = self.cov.report(ignore_errors=True, file=output_file, output_format='markdown') stream.write(f'Coverage Markdown information written to file {output}\n') # Produce Markdown report if wanted, appending to output file if 'markdown-append' in self.cov_report: output = self.cov_report['markdown-append'] with _backup(self.cov, 'config'): with Path(output).open('a') as output_file: total = self.cov.report(ignore_errors=True, file=output_file, output_format='markdown') stream.write(f'Coverage Markdown information appended to file {output}\n') # Produce lcov report if wanted. if 'lcov' in self.cov_report: output = self.cov_report['lcov'] with _backup(self.cov, 'config'): self.cov.lcov_report(ignore_errors=True, outfile=output) # We need to call Coverage.report here, just to get the total # Coverage.lcov_report doesn't return any total and we need it for --cov-fail-under. total = self.cov.report(ignore_errors=True, file=_NullFile) stream.write(f'Coverage LCOV written to file {self.cov.config.lcov_output if output is None else output}\n') return total
target: CovController
language: python
repo: PrefectHQ__prefect
path: src/integrations/prefect-snowflake/prefect_snowflake/experimental/workers/spcs.py
class_span: { "start": 16412, "end": 16501 }
source:
class ____(BaseWorkerResult): """Result returned by the SPCSWorker."""
target: SPCSWorkerResult
language: python
repo: pandas-dev__pandas
path: pandas/core/arrays/categorical.py
class_span: { "start": 94468, "end": 102940 }
source:
class ____(PandasDelegate, PandasObject, NoNewAttributesMixin): """ Accessor object for categorical properties of the Series values. Parameters ---------- data : Series or CategoricalIndex The object to which the categorical accessor is attached. See Also -------- Series.dt : Accessor object for datetimelike properties of the Series values. Series.sparse : Accessor for sparse matrix data types. Examples -------- >>> s = pd.Series(list("abbccc")).astype("category") >>> s 0 a 1 b 2 b 3 c 4 c 5 c dtype: category Categories (3, str): ['a', 'b', 'c'] >>> s.cat.categories Index(['a', 'b', 'c'], dtype='str') >>> s.cat.rename_categories(list("cba")) 0 c 1 b 2 b 3 a 4 a 5 a dtype: category Categories (3, str): ['c', 'b', 'a'] >>> s.cat.reorder_categories(list("cba")) 0 a 1 b 2 b 3 c 4 c 5 c dtype: category Categories (3, str): ['c', 'b', 'a'] >>> s.cat.add_categories(["d", "e"]) 0 a 1 b 2 b 3 c 4 c 5 c dtype: category Categories (5, str): ['a', 'b', 'c', 'd', 'e'] >>> s.cat.remove_categories(["a", "c"]) 0 NaN 1 b 2 b 3 NaN 4 NaN 5 NaN dtype: category Categories (1, str): ['b'] >>> s1 = s.cat.add_categories(["d", "e"]) >>> s1.cat.remove_unused_categories() 0 a 1 b 2 b 3 c 4 c 5 c dtype: category Categories (3, str): ['a', 'b', 'c'] >>> s.cat.set_categories(list("abcde")) 0 a 1 b 2 b 3 c 4 c 5 c dtype: category Categories (5, str): ['a', 'b', 'c', 'd', 'e'] >>> s.cat.as_ordered() 0 a 1 b 2 b 3 c 4 c 5 c dtype: category Categories (3, str): ['a' < 'b' < 'c'] >>> s.cat.as_unordered() 0 a 1 b 2 b 3 c 4 c 5 c dtype: category Categories (3, str): ['a', 'b', 'c'] """ def __init__(self, data) -> None: self._validate(data) self._parent = data.values self._index = data.index self._name = data.name self._freeze() @staticmethod def _validate(data) -> None: if not isinstance(data.dtype, CategoricalDtype): raise AttributeError("Can only use .cat accessor with a 'category' dtype") def _delegate_property_get(self, name: str): return getattr(self._parent, name) def _delegate_property_set(self, name: str, new_values) -> None: setattr(self._parent, name, new_values) @property def codes(self) -> Series: """ Return Series of codes as well as the index. See Also -------- Series.cat.categories : Return the categories of this categorical. Series.cat.as_ordered : Set the Categorical to be ordered. Series.cat.as_unordered : Set the Categorical to be unordered. Examples -------- >>> raw_cate = pd.Categorical(["a", "b", None, "a"], categories=["a", "b"]) >>> ser = pd.Series(raw_cate) >>> ser.cat.codes 0 0 1 1 2 -1 3 0 dtype: int8 """ from pandas import Series return Series(self._parent.codes, index=self._index) def _delegate_method(self, name: str, *args, **kwargs): from pandas import Series method = getattr(self._parent, name) res = method(*args, **kwargs) if res is not None: return Series(res, index=self._index, name=self._name) # utility routines def _get_codes_for_values( values: Index | Series | ExtensionArray | np.ndarray, categories: Index, ) -> np.ndarray: """ utility routine to turn values into codes given the specified categories If `values` is known to be a Categorical, use recode_for_categories instead. 
""" codes = categories.get_indexer_for(values) wrong = (codes == -1) & ~isna(values) if wrong.any(): warnings.warn( "Constructing a Categorical with a dtype and values containing " "non-null entries not in that dtype's categories is deprecated " "and will raise in a future version.", Pandas4Warning, stacklevel=find_stack_level(), ) return coerce_indexer_dtype(codes, categories) def recode_for_categories( codes: np.ndarray, old_categories, new_categories, *, copy: bool = True, warn: bool = False, ) -> np.ndarray: """ Convert a set of codes for to a new set of categories Parameters ---------- codes : np.ndarray old_categories, new_categories : Index copy: bool, default True Whether to copy if the codes are unchanged. warn : bool, default False Whether to warn on silent-NA mapping. Returns ------- new_codes : np.ndarray[np.int64] Examples -------- >>> old_cat = pd.Index(["b", "a", "c"]) >>> new_cat = pd.Index(["a", "b"]) >>> codes = np.array([0, 1, 1, 2]) >>> recode_for_categories(codes, old_cat, new_cat, copy=True) array([ 1, 0, 0, -1], dtype=int8) """ if len(old_categories) == 0: # All null anyway, so just retain the nulls if copy: return codes.copy() return codes elif new_categories.equals(old_categories): # Same categories, so no need to actually recode if copy: return codes.copy() return codes codes_in_old_cats = new_categories.get_indexer_for(old_categories) if warn: wrong = codes_in_old_cats == -1 if wrong.any(): warnings.warn( "Constructing a Categorical with a dtype and values containing " "non-null entries not in that dtype's categories is deprecated " "and will raise in a future version.", Pandas4Warning, stacklevel=find_stack_level(), ) indexer = coerce_indexer_dtype(codes_in_old_cats, new_categories) new_codes = take_nd(indexer, codes, fill_value=-1) return new_codes def factorize_from_iterable(values) -> tuple[np.ndarray, Index]: """ Factorize an input `values` into `categories` and `codes`. Preserves categorical dtype in `categories`. Parameters ---------- values : list-like Returns ------- codes : ndarray categories : Index If `values` has a categorical dtype, then `categories` is a CategoricalIndex keeping the categories and order of `values`. """ from pandas import CategoricalIndex if not is_list_like(values): raise TypeError("Input must be list-like") categories: Index vdtype = getattr(values, "dtype", None) if isinstance(vdtype, CategoricalDtype): values = extract_array(values) # The Categorical we want to build has the same categories # as values but its codes are by def [0, ..., len(n_categories) - 1] cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype) cat = Categorical.from_codes(cat_codes, dtype=values.dtype, validate=False) categories = CategoricalIndex(cat) codes = values.codes else: # The value of ordered is irrelevant since we don't use cat as such, # but only the resulting categories, the order of which is independent # from ordered. Set ordered to False as default. See GH #15457 cat = Categorical(values, ordered=False) categories = cat.categories codes = cat.codes return codes, categories def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]: """ A higher-level wrapper over `factorize_from_iterable`. Parameters ---------- iterables : list-like of list-likes Returns ------- codes : list of ndarrays categories : list of Indexes Notes ----- See `factorize_from_iterable` for more info. """ if len(iterables) == 0: # For consistency, it should return two empty lists. 
return [], [] codes, categories = zip( *(factorize_from_iterable(it) for it in iterables), strict=True, ) return list(codes), list(categories)
target: CategoricalAccessor
language: python
repo: tensorflow__tensorflow
path: tensorflow/python/distribute/mirrored_strategy_test.py
class_span: { "start": 49285, "end": 50206 }
source:
class ____(object): def __init__(self, two_variables=False): self.variables = [] self.variables.append(variable_v1.VariableV1(1.25, name="dummy_var1")) if two_variables: self.variables.append(variable_v1.VariableV1(2.0, name="dummy_var2")) def __call__(self, factor=2): x = factor * self.variables[0] if len(self.variables) > 1: x += self.variables[1] return x @combinations.generate( combinations.combine( distribution=[ combinations.NamedDistribution( "Mirrored", # pylint: disable=g-long-lambda lambda: mirrored_strategy.MirroredStrategy( devices=mirrored_strategy.all_local_devices(), cross_device_ops=cross_device_ops_lib.ReductionToOneDevice( ), ), required_gpus=1) ], mode=["graph"]))
target: MockModel
language: python
repo: google__pytype
path: pytype/pytd/parse/node_test.py
class_span: { "start": 771, "end": 879 }
source:
class ____(Node): """Inner node 'Y', with two children. See testVisitor[...]() below.""" c: Any d: Any
target: Y
language: python
repo: celery__celery
path: celery/exceptions.py
class_span: { "start": 7825, "end": 7908 }
source:
class ____(DeprecationWarning): """Warning of deprecation."""
target: CDeprecationWarning
language: python
repo: scipy__scipy
path: benchmarks/benchmarks/go_benchmark_functions/go_funcs_T.py
class_span: { "start": 8286, "end": 9839 }
source:
class ____(Benchmark): r""" Trigonometric 1 objective function. This class defines the Trigonometric 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Trigonometric01}}(x) = \sum_{i=1}^{n} \left [n - \sum_{j=1}^{n} \cos(x_j) + i \left(1 - cos(x_i) - sin(x_i) \right ) \right]^2 Here, :math:`n` represents the number of dimensions and :math:`x_i \in [0, \pi]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, ..., n` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: equaiton uncertain here. Is it just supposed to be the cos term in the inner sum, or the whole of the second line in Jamil #153. """ change_dimensionality = True def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([0.0] * self.N, [pi] * self.N)) self.global_optimum = [[0.0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 i = atleast_2d(arange(1.0, self.N + 1)).T inner = cos(x) + i * (1 - cos(x) - sin(x)) return sum((self.N - sum(inner, axis=1)) ** 2)
target: Trigonometric01
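The global optimum claimed in the docstring above (f(x) = 0 at x_i = 0) can be checked with a standalone numpy transcription of the quoted formula; the helper below is a re-derivation for illustration only, not the Benchmark subclass from the dataset row:

# Standalone numpy check of the Trigonometric 1 formula quoted above.
import numpy as np

def trigonometric01(x):
    x = np.asarray(x, dtype=float)
    n = x.size
    i = np.arange(1, n + 1)
    # f(x) = sum_i [ n - sum_j cos(x_j) + i * (1 - cos(x_i) - sin(x_i)) ]^2
    terms = n - np.cos(x).sum() + i * (1 - np.cos(x) - np.sin(x))
    return np.sum(terms ** 2)

print(trigonometric01(np.zeros(5)))  # 0.0 at the stated global optimum x_i = 0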
language: python
repo: Textualize__textual
path: src/textual/drivers/headless_driver.py
class_span: { "start": 149, "end": 1888 }
source:
class ____(Driver): """A do-nothing driver for testing.""" @property def is_headless(self) -> bool: """Is the driver running in 'headless' mode?""" return True def _get_terminal_size(self) -> tuple[int, int]: if self._size is not None: return self._size width: int | None = 80 height: int | None = 25 import shutil try: width, height = shutil.get_terminal_size() except (AttributeError, ValueError, OSError): try: width, height = shutil.get_terminal_size() except (AttributeError, ValueError, OSError): pass width = width or 80 height = height or 25 return width, height def write(self, data: str) -> None: """Write data to the output device. Args: data: Raw data. """ # Nothing to write as this is a headless driver. def start_application_mode(self) -> None: """Start application mode.""" loop = asyncio.get_running_loop() def send_size_event() -> None: """Send first resize event.""" terminal_size = self._get_terminal_size() width, height = terminal_size textual_size = Size(width, height) event = events.Resize(textual_size, textual_size) asyncio.run_coroutine_threadsafe( self._app._post_message(event), loop=loop, ) send_size_event() def disable_input(self) -> None: """Disable further input.""" def stop_application_mode(self) -> None: """Stop application mode, restore state.""" # Nothing to do
target: HeadlessDriver
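In practice a headless driver like the one above is what Textual's test harness relies on; a minimal sketch, assuming the current App.run_test() helper (treat the exact API details as an assumption):

# Minimal sketch, assuming Textual's App.run_test() helper, which runs the
# app headless (no terminal I/O); API details here are an assumption.
import asyncio

from textual.app import App, ComposeResult
from textual.widgets import Static

class HelloApp(App):
    def compose(self) -> ComposeResult:
        yield Static("hello")

async def main() -> None:
    async with HelloApp().run_test() as pilot:
        await pilot.pause()  # let the app process pending events

asyncio.run(main())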
language: python
repo: astropy__astropy
path: astropy/table/column.py
class_span: { "start": 50645, "end": 53895 }
source:
class ____(ColumnInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. In this case it just adds the ``mask_val`` attribute. """ # Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows # about. This allows customization of the way that MaskedColumn objects # get written to file depending on format. The default is to use whatever # the writer would normally do, which in the case of FITS or ECSV is to use # a NULL value within the data itself. If serialize_method is 'data_mask' # then the mask is explicitly written out as a separate column if there # are any masked values. See also code below. attr_names = ColumnInfo.attr_names | {"serialize_method"} # When `serialize_method` is 'data_mask', and data and mask are being written # as separate columns, use column names <name> and <name>.mask (instead # of default encoding as <name>.data and <name>.mask). _represent_as_dict_primary_data = "data" mask_val = np.ma.masked def __init__(self, bound=False): super().__init__(bound) # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. if bound: # Specify how to serialize this object depending on context. self.serialize_method = { "fits": "null_value", "ecsv": "null_value", "hdf5": "data_mask", "parquet": "data_mask", None: "null_value", } def _represent_as_dict(self): out = super()._represent_as_dict() # If we are a structured masked column, then our parent class, # ColumnInfo, will already have set up a dict with masked parts, # which will be serialized later, so no further work needed here. if self._parent.dtype.names is not None: return out col = self._parent # If the serialize method for this context (e.g. 'fits' or 'ecsv') is # 'data_mask', that means to serialize using an explicit mask column. method = self.serialize_method[self._serialize_context] if method == "data_mask": # Note: a driver here is a performance issue in #8443 where repr() of a # np.ma.MaskedArray value is up to 10 times slower than repr of a normal array # value. So regardless of whether there are masked elements it is useful to # explicitly define this as a serialized column and use col.data.data (ndarray) # instead of letting it fall through to the "standard" serialization machinery. out["data"] = col.data.data if np.any(col.mask): # Only if there are actually masked elements do we add the ``mask`` column out["mask"] = col.mask elif method == "null_value": pass else: raise ValueError( 'serialize method must be either "data_mask" or "null_value"' ) return out
target: MaskedColumnInfo
language: python
repo: django__django
path: tests/prefetch_related/models.py
class_span: { "start": 3088, "end": 3498 }
source:
class ____(models.Model): name = models.CharField(max_length=50) qualifications = models.ManyToManyField(Qualification) objects = TeacherManager() objects_custom = TeacherQuerySet.as_manager() class Meta: ordering = ["id"] def __str__(self): return "%s (%s)" % ( self.name, ", ".join(q.name for q in self.qualifications.all()), )
target: Teacher
language: python
repo: django__django
path: django/db/migrations/state.py
class_span: { "start": 3335, "end": 24921 }
source:
class ____: """ Represent the entire project's overall state. This is the item that is passed around - do it here rather than at the app level so that cross-app FKs/etc. resolve properly. """ def __init__(self, models=None, real_apps=None): self.models = models or {} # Apps to include from main registry, usually unmigrated ones if real_apps is None: real_apps = set() else: assert isinstance(real_apps, set) self.real_apps = real_apps self.is_delayed = False # {remote_model_key: {model_key: {field_name: field}}} self._relations = None @property def relations(self): if self._relations is None: self.resolve_fields_and_relations() return self._relations def add_model(self, model_state): model_key = model_state.app_label, model_state.name_lower self.models[model_key] = model_state if self._relations is not None: self.resolve_model_relations(model_key) if "apps" in self.__dict__: # hasattr would cache the property self.reload_model(*model_key) def remove_model(self, app_label, model_name): model_key = app_label, model_name del self.models[model_key] if self._relations is not None: self._relations.pop(model_key, None) # Call list() since _relations can change size during iteration. for related_model_key, model_relations in list(self._relations.items()): model_relations.pop(model_key, None) if not model_relations: del self._relations[related_model_key] if "apps" in self.__dict__: # hasattr would cache the property self.apps.unregister_model(*model_key) # Need to do this explicitly since unregister_model() doesn't clear # the cache automatically (#24513) self.apps.clear_cache() def rename_model(self, app_label, old_name, new_name): # Add a new model. old_name_lower = old_name.lower() new_name_lower = new_name.lower() renamed_model = self.models[app_label, old_name_lower].clone() renamed_model.name = new_name self.models[app_label, new_name_lower] = renamed_model # Repoint all fields pointing to the old model to the new one. old_model_tuple = (app_label, old_name_lower) new_remote_model = f"{app_label}.{new_name}" to_reload = set() for model_state, name, field, reference in get_references( self, old_model_tuple ): changed_field = None if reference.to: changed_field = field.clone() changed_field.remote_field.model = new_remote_model if reference.through: if changed_field is None: changed_field = field.clone() changed_field.remote_field.through = new_remote_model if changed_field: model_state.fields[name] = changed_field to_reload.add((model_state.app_label, model_state.name_lower)) if self._relations is not None: old_name_key = app_label, old_name_lower new_name_key = app_label, new_name_lower if old_name_key in self._relations: self._relations[new_name_key] = self._relations.pop(old_name_key) for model_relations in self._relations.values(): if old_name_key in model_relations: model_relations[new_name_key] = model_relations.pop(old_name_key) # Reload models related to old model before removing the old model. self.reload_models(to_reload, delay=True) # Remove the old model. 
self.remove_model(app_label, old_name_lower) self.reload_model(app_label, new_name_lower, delay=True) def alter_model_options(self, app_label, model_name, options, option_keys=None): model_state = self.models[app_label, model_name] model_state.options = {**model_state.options, **options} if option_keys: for key in option_keys: if key not in options: model_state.options.pop(key, False) self.reload_model(app_label, model_name, delay=True) def remove_model_options(self, app_label, model_name, option_name, value_to_remove): model_state = self.models[app_label, model_name] if objs := model_state.options.get(option_name): model_state.options[option_name] = [ obj for obj in objs if tuple(obj) != tuple(value_to_remove) ] self.reload_model(app_label, model_name, delay=True) def alter_model_managers(self, app_label, model_name, managers): model_state = self.models[app_label, model_name] model_state.managers = list(managers) self.reload_model(app_label, model_name, delay=True) def _append_option(self, app_label, model_name, option_name, obj): model_state = self.models[app_label, model_name] model_state.options[option_name] = [*model_state.options[option_name], obj] self.reload_model(app_label, model_name, delay=True) def _remove_option(self, app_label, model_name, option_name, obj_name): model_state = self.models[app_label, model_name] objs = model_state.options[option_name] model_state.options[option_name] = [obj for obj in objs if obj.name != obj_name] self.reload_model(app_label, model_name, delay=True) def _alter_option(self, app_label, model_name, option_name, obj_name, alt_obj): model_state = self.models[app_label, model_name] objs = model_state.options[option_name] model_state.options[option_name] = [ obj if obj.name != obj_name else alt_obj for obj in objs ] self.reload_model(app_label, model_name, delay=True) def add_index(self, app_label, model_name, index): self._append_option(app_label, model_name, "indexes", index) def remove_index(self, app_label, model_name, index_name): self._remove_option(app_label, model_name, "indexes", index_name) def rename_index(self, app_label, model_name, old_index_name, new_index_name): model_state = self.models[app_label, model_name] objs = model_state.options["indexes"] new_indexes = [] for obj in objs: if obj.name == old_index_name: obj = obj.clone() obj.name = new_index_name new_indexes.append(obj) model_state.options["indexes"] = new_indexes self.reload_model(app_label, model_name, delay=True) def add_constraint(self, app_label, model_name, constraint): self._append_option(app_label, model_name, "constraints", constraint) def remove_constraint(self, app_label, model_name, constraint_name): self._remove_option(app_label, model_name, "constraints", constraint_name) def alter_constraint(self, app_label, model_name, constraint_name, constraint): self._alter_option( app_label, model_name, "constraints", constraint_name, constraint ) def add_field(self, app_label, model_name, name, field, preserve_default): # If preserve default is off, don't use the default for future state. if not preserve_default: field = field.clone() field.default = NOT_PROVIDED else: field = field model_key = app_label, model_name self.models[model_key].fields[name] = field if self._relations is not None: self.resolve_model_field_relations(model_key, name, field) # Delay rendering of relationships if it's not a relational field. 
delay = not field.is_relation self.reload_model(*model_key, delay=delay) def remove_field(self, app_label, model_name, name): model_key = app_label, model_name model_state = self.models[model_key] old_field = model_state.fields.pop(name) if self._relations is not None: self.resolve_model_field_relations(model_key, name, old_field) # Delay rendering of relationships if it's not a relational field. delay = not old_field.is_relation self.reload_model(*model_key, delay=delay) def alter_field(self, app_label, model_name, name, field, preserve_default): if not preserve_default: field = field.clone() field.default = NOT_PROVIDED else: field = field model_key = app_label, model_name fields = self.models[model_key].fields if self._relations is not None: old_field = fields.pop(name) if old_field.is_relation: self.resolve_model_field_relations(model_key, name, old_field) fields[name] = field if field.is_relation: self.resolve_model_field_relations(model_key, name, field) else: fields[name] = field # TODO: investigate if old relational fields must be reloaded or if # it's sufficient if the new field is (#27737). # Delay rendering of relationships if it's not a relational field and # not referenced by a foreign key. delay = not field.is_relation and not field_is_referenced( self, model_key, (name, field) ) self.reload_model(*model_key, delay=delay) def rename_field(self, app_label, model_name, old_name, new_name): model_key = app_label, model_name model_state = self.models[model_key] # Rename the field. fields = model_state.fields try: found = fields.pop(old_name) except KeyError: raise FieldDoesNotExist( f"{app_label}.{model_name} has no field named '{old_name}'" ) fields[new_name] = found for field in fields.values(): # Fix from_fields to refer to the new field. from_fields = getattr(field, "from_fields", None) if from_fields: field.from_fields = tuple( [ new_name if from_field_name == old_name else from_field_name for from_field_name in from_fields ] ) # Fix field names (e.g. for CompositePrimaryKey) to refer to the # new field. if field_names := getattr(field, "field_names", None): if old_name in field_names: field.field_names = tuple( [ new_name if field_name == old_name else field_name for field_name in field.field_names ] ) # Fix index/unique_together to refer to the new field. options = model_state.options for option in ("index_together", "unique_together"): if option in options: options[option] = [ [new_name if n == old_name else n for n in together] for together in options[option] ] # Fix to_fields to refer to the new field. 
delay = True references = get_references(self, model_key, (old_name, found)) for *_, field, reference in references: delay = False if reference.to: remote_field, to_fields = reference.to if getattr(remote_field, "field_name", None) == old_name: remote_field.field_name = new_name if to_fields: field.to_fields = tuple( [ new_name if to_field_name == old_name else to_field_name for to_field_name in to_fields ] ) if self._relations is not None: old_name_lower = old_name.lower() new_name_lower = new_name.lower() for to_model in self._relations.values(): if old_name_lower in to_model[model_key]: field = to_model[model_key].pop(old_name_lower) field.name = new_name_lower to_model[model_key][new_name_lower] = field self.reload_model(*model_key, delay=delay) def _find_reload_model(self, app_label, model_name, delay=False): if delay: self.is_delayed = True related_models = set() try: old_model = self.apps.get_model(app_label, model_name) except LookupError: pass else: # Get all relations to and from the old model before reloading, # as _meta.apps may change if delay: related_models = get_related_models_tuples(old_model) else: related_models = get_related_models_recursive(old_model) # Get all outgoing references from the model to be rendered model_state = self.models[(app_label, model_name)] # Directly related models are the models pointed to by ForeignKeys, # OneToOneFields, and ManyToManyFields. direct_related_models = set() for field in model_state.fields.values(): if field.is_relation: if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT: continue rel_app_label, rel_model_name = _get_app_label_and_model_name( field.related_model, app_label ) direct_related_models.add((rel_app_label, rel_model_name.lower())) # For all direct related models recursively get all related models. related_models.update(direct_related_models) for rel_app_label, rel_model_name in direct_related_models: try: rel_model = self.apps.get_model(rel_app_label, rel_model_name) except LookupError: pass else: if delay: related_models.update(get_related_models_tuples(rel_model)) else: related_models.update(get_related_models_recursive(rel_model)) # Include the model itself related_models.add((app_label, model_name)) return related_models def reload_model(self, app_label, model_name, delay=False): if "apps" in self.__dict__: # hasattr would cache the property related_models = self._find_reload_model(app_label, model_name, delay) self._reload(related_models) def reload_models(self, models, delay=True): if "apps" in self.__dict__: # hasattr would cache the property related_models = set() for app_label, model_name in models: related_models.update( self._find_reload_model(app_label, model_name, delay) ) self._reload(related_models) def _reload(self, related_models): # Unregister all related models with self.apps.bulk_update(): for rel_app_label, rel_model_name in related_models: self.apps.unregister_model(rel_app_label, rel_model_name) states_to_be_rendered = [] # Gather all models states of those models that will be rerendered. # This includes: # 1. All related models of unmigrated apps for model_state in self.apps.real_models: if (model_state.app_label, model_state.name_lower) in related_models: states_to_be_rendered.append(model_state) # 2. 
All related models of migrated apps for rel_app_label, rel_model_name in related_models: try: model_state = self.models[rel_app_label, rel_model_name] except KeyError: pass else: states_to_be_rendered.append(model_state) # Render all models self.apps.render_multiple(states_to_be_rendered) def update_model_field_relation( self, model, model_key, field_name, field, concretes, ): remote_model_key = resolve_relation(model, *model_key) if remote_model_key[0] not in self.real_apps and remote_model_key in concretes: remote_model_key = concretes[remote_model_key] relations_to_remote_model = self._relations[remote_model_key] if field_name in self.models[model_key].fields: # The assert holds because it's a new relation, or an altered # relation, in which case references have been removed by # alter_field(). assert field_name not in relations_to_remote_model[model_key] relations_to_remote_model[model_key][field_name] = field else: del relations_to_remote_model[model_key][field_name] if not relations_to_remote_model[model_key]: del relations_to_remote_model[model_key] def resolve_model_field_relations( self, model_key, field_name, field, concretes=None, ): remote_field = field.remote_field if not remote_field: return if concretes is None: concretes, _ = self._get_concrete_models_mapping_and_proxy_models() self.update_model_field_relation( remote_field.model, model_key, field_name, field, concretes, ) through = getattr(remote_field, "through", None) if not through: return self.update_model_field_relation( through, model_key, field_name, field, concretes ) def resolve_model_relations(self, model_key, concretes=None): if concretes is None: concretes, _ = self._get_concrete_models_mapping_and_proxy_models() model_state = self.models[model_key] for field_name, field in model_state.fields.items(): self.resolve_model_field_relations(model_key, field_name, field, concretes) def resolve_fields_and_relations(self): # Resolve fields. for model_state in self.models.values(): for field_name, field in model_state.fields.items(): field.name = field_name # Resolve relations. # {remote_model_key: {model_key: {field_name: field}}} self._relations = defaultdict(partial(defaultdict, dict)) concretes, proxies = self._get_concrete_models_mapping_and_proxy_models() for model_key in concretes: self.resolve_model_relations(model_key, concretes) for model_key in proxies: self._relations[model_key] = self._relations[concretes[model_key]] def get_concrete_model_key(self, model): ( concrete_models_mapping, _, ) = self._get_concrete_models_mapping_and_proxy_models() model_key = make_model_tuple(model) return concrete_models_mapping[model_key] def _get_concrete_models_mapping_and_proxy_models(self): concrete_models_mapping = {} proxy_models = {} # Split models to proxy and concrete models. for model_key, model_state in self.models.items(): if model_state.options.get("proxy"): proxy_models[model_key] = model_state # Find a concrete model for the proxy. concrete_models_mapping[model_key] = ( self._find_concrete_model_from_proxy( proxy_models, model_state, ) ) else: concrete_models_mapping[model_key] = model_key return concrete_models_mapping, proxy_models def _find_concrete_model_from_proxy(self, proxy_models, model_state): for base in model_state.bases: if not (isinstance(base, str) or issubclass(base, models.Model)): continue base_key = make_model_tuple(base) base_state = proxy_models.get(base_key) if not base_state: # Concrete model found, stop looking at bases. 
return base_key return self._find_concrete_model_from_proxy(proxy_models, base_state) def clone(self): """Return an exact copy of this ProjectState.""" new_state = ProjectState( models={k: v.clone() for k, v in self.models.items()}, real_apps=self.real_apps, ) if "apps" in self.__dict__: new_state.apps = self.apps.clone() new_state.is_delayed = self.is_delayed return new_state def clear_delayed_apps_cache(self): if self.is_delayed and "apps" in self.__dict__: del self.__dict__["apps"] @cached_property def apps(self): return StateApps(self.real_apps, self.models) @classmethod def from_apps(cls, apps): """Take an Apps and return a ProjectState matching it.""" app_models = {} for model in apps.get_models(include_swapped=True): model_state = ModelState.from_model(model) app_models[(model_state.app_label, model_state.name_lower)] = model_state return cls(app_models) def __eq__(self, other): return self.models == other.models and self.real_apps == other.real_apps
target: ProjectState
language: python
repo: ray-project__ray
path: python/ray/serve/schema.py
class_span: { "start": 25219, "end": 27900 }
source:
class ____(BaseModel): """Options to start the HTTP Proxy with. NOTE: This config allows extra parameters to make it forward-compatible (ie older versions of Serve are able to accept configs from a newer versions, simply ignoring new parameters). """ host: str = Field( default="0.0.0.0", description=( "Host for HTTP servers to listen on. Defaults to " '"0.0.0.0", which exposes Serve publicly. Cannot be updated once ' "Serve has started running. Serve must be shut down and restarted " "with the new host instead." ), ) port: int = Field( default=8000, description=( "Port for HTTP server. Defaults to 8000. Cannot be updated once " "Serve has started running. Serve must be shut down and restarted " "with the new port instead." ), ) root_path: str = Field( default="", description=( 'Root path to mount the serve application (for example, "/serve"). All ' 'deployment routes will be prefixed with this path. Defaults to "".' ), ) request_timeout_s: float = Field( default=None, description="The timeout for HTTP requests. Defaults to no timeout.", ) keep_alive_timeout_s: int = Field( default=DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S, description="The HTTP proxy will keep idle connections alive for this duration " "before closing them when no requests are ongoing. Defaults to " f"{DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S} seconds.", ) ssl_keyfile: Optional[str] = Field( default=None, description="Path to the SSL key file for HTTPS. If provided with ssl_certfile, " "the HTTP server will use HTTPS. Cannot be updated once Serve has started.", ) ssl_certfile: Optional[str] = Field( default=None, description="Path to the SSL certificate file for HTTPS. If provided with " "ssl_keyfile, the HTTP server will use HTTPS. Cannot be updated once Serve " "has started.", ) ssl_keyfile_password: Optional[str] = Field( default=None, description="Password for the SSL key file, if encrypted.", ) ssl_ca_certs: Optional[str] = Field( default=None, description="Path to the CA certificate file for verifying client certificates.", ) @validator("ssl_certfile") def validate_ssl_certfile(cls, v, values): ssl_keyfile = values.get("ssl_keyfile") validate_ssl_config(v, ssl_keyfile) return v @PublicAPI(stability="stable")
HTTPOptionsSchema
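A hedged construction sketch for the schema above; it assumes Ray Serve is installed and that the class is importable from ray.serve.schema, following this record's path field.

from ray.serve.schema import HTTPOptionsSchema

opts = HTTPOptionsSchema(host="127.0.0.1", port=8000, keep_alive_timeout_s=5)
print(opts.host, opts.port, opts.root_path)   # fields left unset keep the defaults declared above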
python
conda__conda
conda/models/version.py
{ "start": 1250, "end": 17320 }
class ____(metaclass=SingleStrArgCachingType): """Implement an order relation between version strings. Version strings can contain the usual alphanumeric characters (A-Za-z0-9), separated into components by dots and underscores. Empty segments (i.e. two consecutive dots, a leading/trailing underscore) are not permitted. An optional epoch number - an integer followed by '!' - can proceed the actual version string (this is useful to indicate a change in the versioning scheme itself). Version comparison is case-insensitive. Conda supports six types of version strings: * Release versions contain only integers, e.g. '1.0', '2.3.5'. * Pre-release versions use additional letters such as 'a' or 'rc', for example '1.0a1', '1.2.beta3', '2.3.5rc3'. * Development versions are indicated by the string 'dev', for example '1.0dev42', '2.3.5.dev12'. * Post-release versions are indicated by the string 'post', for example '1.0post1', '2.3.5.post2'. * Tagged versions have a suffix that specifies a particular property of interest, e.g. '1.1.parallel'. Tags can be added to any of the preceding four types. As far as sorting is concerned, tags are treated like strings in pre-release versions. * An optional local version string separated by '+' can be appended to the main (upstream) version string. It is only considered in comparisons when the main versions are equal, but otherwise handled in exactly the same manner. To obtain a predictable version ordering, it is crucial to keep the version number scheme of a given package consistent over time. Specifically, * version strings should always have the same number of components (except for an optional tag suffix or local version string), * letters/strings indicating non-release versions should always occur at the same position. Before comparison, version strings are parsed as follows: * They are first split into epoch, version number, and local version number at '!' and '+' respectively. If there is no '!', the epoch is set to 0. If there is no '+', the local version is empty. * The version part is then split into components at '.' and '_'. * Each component is split again into runs of numerals and non-numerals * Subcomponents containing only numerals are converted to integers. * Strings are converted to lower case, with special treatment for 'dev' and 'post'. * When a component starts with a letter, the fillvalue 0 is inserted to keep numbers and strings in phase, resulting in '1.1.a1' == 1.1.0a1'. * The same is repeated for the local version part. Examples: 1.2g.beta15.rc => [[0], [1], [2, 'g'], [0, 'beta', 15], [0, 'rc']] 1!2.15.1_ALPHA => [[1], [2], [15], [1, '_alpha']] The resulting lists are compared lexicographically, where the following rules are applied to each pair of corresponding subcomponents: * integers are compared numerically * strings are compared lexicographically, case-insensitive * strings are smaller than integers, except * 'dev' versions are smaller than all corresponding versions of other types * 'post' versions are greater than all corresponding versions of other types * if a subcomponent has no correspondent, the missing correspondent is treated as integer 0 to ensure '1.1' == '1.1.0'. 
The resulting order is: 0.4 < 0.4.0 < 0.4.1.rc == 0.4.1.RC # case-insensitive comparison < 0.4.1 < 0.5a1 < 0.5b3 < 0.5C1 # case-insensitive comparison < 0.5 < 0.9.6 < 0.960923 < 1.0 < 1.1dev1 # special case 'dev' < 1.1_ # appended underscore is special case for openssl-like versions < 1.1a1 < 1.1.0dev1 # special case 'dev' == 1.1.dev1 # 0 is inserted before string < 1.1.a1 < 1.1.0rc1 < 1.1.0 == 1.1 < 1.1.0post1 # special case 'post' == 1.1.post1 # 0 is inserted before string < 1.1post1 # special case 'post' < 1996.07.12 < 1!0.4.1 # epoch increased < 1!3.1.1.6 < 2!0.4.1 # epoch increased again Some packages (most notably openssl) have incompatible version conventions. In particular, openssl interprets letters as version counters rather than pre-release identifiers. For openssl, the relation 1.0.1 < 1.0.1a => False # should be true for openssl holds, whereas conda packages use the opposite ordering. You can work-around this problem by appending an underscore to plain version numbers: 1.0.1_ < 1.0.1a => True # ensure correct ordering for openssl """ _cache_ = {} def __init__(self, vstr: str): # version comparison is case-insensitive version = vstr.strip().rstrip().lower() # basic validity checks if version == "": raise InvalidVersionSpec(vstr, "empty version string") invalid = not version_check_re.match(version) if invalid and "-" in version and "_" not in version: # Allow for dashes as long as there are no underscores # as well, by converting the former to the latter. version = version.replace("-", "_") invalid = not version_check_re.match(version) if invalid: raise InvalidVersionSpec(vstr, "invalid character(s)") # when fillvalue == 0 => 1.1 == 1.1.0 # when fillvalue == -1 => 1.1 < 1.1.0 self.norm_version = version self.fillvalue = 0 # find epoch version = version.split("!") if len(version) == 1: # epoch not given => set it to '0' epoch = ["0"] elif len(version) == 2: # epoch given, must be an integer if not version[0].isdigit(): raise InvalidVersionSpec(vstr, "epoch must be an integer") epoch = [version[0]] else: raise InvalidVersionSpec(vstr, "duplicated epoch separator '!'") # find local version string version = version[-1].split("+") if len(version) == 1: # no local version self.local = [] # Case 2: We have a local version component in version[1] elif len(version) == 2: # local version given self.local = version[1].replace("_", ".").split(".") else: raise InvalidVersionSpec(vstr, "duplicated local version separator '+'") # Error Case: Version is empty because the version string started with +. # e.g. "+", "1.2", "+a", "+1". # This is an error because specifying only a local version is invalid. # version[0] is empty because vstr.split("+") returns something like ['', '1.2'] if version[0] == "": raise InvalidVersionSpec( vstr, "Missing version before local version separator '+'" ) if version[0][-1] == "_": # If the last character of version is "-" or "_", don't split that out # individually. 
Implements the instructions for openssl-like versions # > You can work-around this problem by appending a dash to plain version numbers split_version = version[0][:-1].replace("_", ".").split(".") split_version[-1] += "_" else: split_version = version[0].replace("_", ".").split(".") self.version = epoch + split_version # split components into runs of numerals and non-numerals, # convert numerals to int, handle special strings for v in (self.version, self.local): for k in range(len(v)): c = version_split_re.findall(v[k]) if not c: raise InvalidVersionSpec(vstr, "empty version component") for j in range(len(c)): if c[j].isdigit(): c[j] = int(c[j]) elif c[j] == "post": # ensure number < 'post' == infinity c[j] = float("inf") elif c[j] == "dev": # ensure '*' < 'DEV' < '_' < 'a' < number # by upper-casing (all other strings are lower case) c[j] = "DEV" if v[k][0].isdigit(): v[k] = c else: # components shall start with a number to keep numbers and # strings in phase => prepend fillvalue v[k] = [self.fillvalue] + c def __str__(self) -> str: return self.norm_version def __repr__(self) -> str: return f'{self.__class__.__name__}("{self}")' def _eq(self, t1: list[str], t2: list[str]) -> bool: for v1, v2 in zip_longest(t1, t2, fillvalue=[]): for c1, c2 in zip_longest(v1, v2, fillvalue=self.fillvalue): if c1 != c2: return False return True def __eq__(self, other: object) -> bool: if not isinstance(other, VersionOrder): return False return self._eq(self.version, other.version) and self._eq( self.local, other.local ) def startswith(self, other: object) -> bool: if not isinstance(other, VersionOrder): return False # Tests if the version lists match up to the last element in "other". if other.local: if not self._eq(self.version, other.version): return False t1 = self.local t2 = other.local else: t1 = self.version t2 = other.version nt = len(t2) - 1 if not self._eq(t1[:nt], t2[:nt]): return False v1 = [] if len(t1) <= nt else t1[nt] v2 = t2[nt] nt = len(v2) - 1 if not self._eq([v1[:nt]], [v2[:nt]]): return False c1 = self.fillvalue if len(v1) <= nt else v1[nt] c2 = v2[nt] if isinstance(c2, str): return isinstance(c1, str) and c1.startswith(c2) return c1 == c2 def __ne__(self, other: object) -> bool: return not (self == other) def __lt__(self, other: object) -> bool: if not isinstance(other, VersionOrder): return False for t1, t2 in zip([self.version, self.local], [other.version, other.local]): for v1, v2 in zip_longest(t1, t2, fillvalue=[]): for c1, c2 in zip_longest(v1, v2, fillvalue=self.fillvalue): if c1 == c2: continue elif isinstance(c1, str): if not isinstance(c2, str): # str < int return True elif isinstance(c2, str): # not (int < str) return False # c1 and c2 have the same type return c1 < c2 # self == other return False def __gt__(self, other: object) -> bool: return other < self def __le__(self, other: object) -> bool: return not (other < self) def __ge__(self, other: object) -> bool: return not (self < other) # each token slurps up leading whitespace, which we strip out. 
VSPEC_TOKENS = ( r"\s*\^[^$]*[$]|" # regexes r"\s*[()|,]|" # parentheses, logical and, logical or r"[^()|,]+" ) # everything else def treeify(spec_str): """ Examples: >>> treeify("1.2.3") '1.2.3' >>> treeify("1.2.3,>4.5.6") (',', '1.2.3', '>4.5.6') >>> treeify("1.2.3,4.5.6|<=7.8.9") ('|', (',', '1.2.3', '4.5.6'), '<=7.8.9') >>> treeify("(1.2.3|4.5.6),<=7.8.9") (',', ('|', '1.2.3', '4.5.6'), '<=7.8.9') >>> treeify("((1.5|((1.6|1.7), 1.8), 1.9 |2.0))|2.1") ('|', '1.5', (',', ('|', '1.6', '1.7'), '1.8', '1.9'), '2.0', '2.1') >>> treeify("1.5|(1.6|1.7),1.8,1.9|2.0|2.1") ('|', '1.5', (',', ('|', '1.6', '1.7'), '1.8', '1.9'), '2.0', '2.1') """ # Converts a VersionSpec expression string into a tuple-based # expression tree. if not isinstance(spec_str, str): raise TypeError("`spec_str` must be a string.") tokens = re.findall(VSPEC_TOKENS, f"({spec_str})") output = [] stack = [] def apply_ops(cstop): # cstop: operators with lower precedence while stack and stack[-1] not in cstop: if len(output) < 2: raise InvalidVersionSpec(spec_str, "cannot join single expression") c = stack.pop() r = output.pop() # Fuse expressions with the same operator; e.g., # ('|', ('|', a, b), ('|', c, d))becomes # ('|', a, b, c d) # We're playing a bit of a trick here. Instead of checking # if the left or right entries are tuples, we're counting # on the fact that if we _do_ see a string instead, its # first character cannot possibly be equal to the operator. r = r[1:] if r[0] == c else (r,) left = output.pop() left = left[1:] if left[0] == c else (left,) output.append((c,) + left + r) for item in tokens: item = item.strip() if item == "|": apply_ops("(") stack.append("|") elif item == ",": apply_ops("|(") stack.append(",") elif item == "(": stack.append("(") elif item == ")": apply_ops("(") if not stack or stack[-1] != "(": raise InvalidVersionSpec(spec_str, "expression must start with '('") stack.pop() else: output.append(item) if stack: raise InvalidVersionSpec( spec_str, f"unable to convert to expression tree: {stack}" ) if not output: raise InvalidVersionSpec(spec_str, "unable to determine version from spec") return output[0] def untreeify(spec, _inand=False, depth=0): """ Examples: >>> untreeify('1.2.3') '1.2.3' >>> untreeify((',', '1.2.3', '>4.5.6')) '1.2.3,>4.5.6' >>> untreeify(('|', (',', '1.2.3', '4.5.6'), '<=7.8.9')) '(1.2.3,4.5.6)|<=7.8.9' >>> untreeify((',', ('|', '1.2.3', '4.5.6'), '<=7.8.9')) '(1.2.3|4.5.6),<=7.8.9' >>> untreeify(('|', '1.5', (',', ('|', '1.6', '1.7'), '1.8', '1.9'), '2.0', '2.1')) '1.5|((1.6|1.7),1.8,1.9)|2.0|2.1' """ if isinstance(spec, tuple): if spec[0] == "|": res = "|".join(map(lambda x: untreeify(x, depth=depth + 1), spec[1:])) if _inand or depth > 0: res = f"({res})" else: res = ",".join( map(lambda x: untreeify(x, _inand=True, depth=depth + 1), spec[1:]) ) if depth > 0: res = f"({res})" return res return spec def compatible_release_operator(x, y): return op.__ge__(x, y) and x.startswith( VersionOrder(".".join(str(y).split(".")[:-1])) ) # This RE matches the operators '==', '!=', '<=', '>=', '<', '>' # followed by a version string. It rejects expressions like # '<= 1.2' (space after operator), '<>1.2' (unknown operator), # and '<=!1.2' (nonsensical operator). 
version_relation_re = re.compile(r"^(=|==|!=|<=|>=|<|>|~=)(?![=<>!~])(\S+)$") regex_split_re = re.compile(r".*[()|,^$]") OPERATOR_MAP = { "==": op.__eq__, "!=": op.__ne__, "<=": op.__le__, ">=": op.__ge__, "<": op.__lt__, ">": op.__gt__, "=": lambda x, y: x.startswith(y), "!=startswith": lambda x, y: not x.startswith(y), "~=": compatible_release_operator, } OPERATOR_START = frozenset(("=", "<", ">", "!", "~"))
VersionOrder
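The ordering rules spelled out in the docstring above can be checked directly; a small sketch, assuming conda is installed (conda.models.version follows this record's path field):

from conda.models.version import VersionOrder

assert VersionOrder("1.1") == VersionOrder("1.1.0")          # missing trailing components compare as 0
assert VersionOrder("1.1dev1") < VersionOrder("1.1a1")       # 'dev' sorts below every other suffix
assert VersionOrder("1.1.0post1") > VersionOrder("1.1.0")    # 'post' sorts above everything else
assert VersionOrder("1.0.1_") < VersionOrder("1.0.1a")       # trailing underscore: openssl-style workaround
assert VersionOrder("1!0.4.1") > VersionOrder("1996.07.12")  # a higher epoch wins regardless of the rest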
python
sympy__sympy
sympy/physics/biomechanics/tests/test_musculotendon.py
{ "start": 21367, "end": 32906 }
class ____: @staticmethod def test_class(): assert issubclass(MusculotendonDeGroote2016, ForceActuator) assert issubclass(MusculotendonDeGroote2016, _NamedMixin) assert MusculotendonDeGroote2016.__name__ == 'MusculotendonDeGroote2016' @staticmethod def test_instance(): origin = Point('pO') insertion = Point('pI') insertion.set_pos(origin, dynamicsymbols('q')*ReferenceFrame('N').x) pathway = LinearPathway(origin, insertion) activation = FirstOrderActivationDeGroote2016('name') l_T_slack = Symbol('l_T_slack') F_M_max = Symbol('F_M_max') l_M_opt = Symbol('l_M_opt') v_M_max = Symbol('v_M_max') alpha_opt = Symbol('alpha_opt') beta = Symbol('beta') instance = MusculotendonDeGroote2016( 'name', pathway, activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=l_T_slack, peak_isometric_force=F_M_max, optimal_fiber_length=l_M_opt, maximal_fiber_velocity=v_M_max, optimal_pennation_angle=alpha_opt, fiber_damping_coefficient=beta, ) assert isinstance(instance, MusculotendonDeGroote2016) @pytest.fixture(autouse=True) def _musculotendon_fixture(self): self.name = 'name' self.N = ReferenceFrame('N') self.q = dynamicsymbols('q') self.origin = Point('pO') self.insertion = Point('pI') self.insertion.set_pos(self.origin, self.q*self.N.x) self.pathway = LinearPathway(self.origin, self.insertion) self.activation = FirstOrderActivationDeGroote2016(self.name) self.l_T_slack = Symbol('l_T_slack') self.F_M_max = Symbol('F_M_max') self.l_M_opt = Symbol('l_M_opt') self.v_M_max = Symbol('v_M_max') self.alpha_opt = Symbol('alpha_opt') self.beta = Symbol('beta') def test_with_defaults(self): origin = Point('pO') insertion = Point('pI') insertion.set_pos(origin, dynamicsymbols('q')*ReferenceFrame('N').x) pathway = LinearPathway(origin, insertion) activation = FirstOrderActivationDeGroote2016('name') l_T_slack = Symbol('l_T_slack') F_M_max = Symbol('F_M_max') l_M_opt = Symbol('l_M_opt') v_M_max = Float('10.0') alpha_opt = Float('0.0') beta = Float('0.1') instance = MusculotendonDeGroote2016.with_defaults( 'name', pathway, activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=l_T_slack, peak_isometric_force=F_M_max, optimal_fiber_length=l_M_opt, ) assert instance.tendon_slack_length == l_T_slack assert instance.peak_isometric_force == F_M_max assert instance.optimal_fiber_length == l_M_opt assert instance.maximal_fiber_velocity == v_M_max assert instance.optimal_pennation_angle == alpha_opt assert instance.fiber_damping_coefficient == beta @pytest.mark.parametrize( 'l_T_slack, expected', [ (None, Symbol('l_T_slack_name')), (Symbol('l_T_slack'), Symbol('l_T_slack')), (Rational(1, 2), Rational(1, 2)), (Float('0.5'), Float('0.5')), ], ) def test_tendon_slack_length(self, l_T_slack, expected): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=l_T_slack, peak_isometric_force=self.F_M_max, optimal_fiber_length=self.l_M_opt, maximal_fiber_velocity=self.v_M_max, optimal_pennation_angle=self.alpha_opt, fiber_damping_coefficient=self.beta, ) assert instance.l_T_slack == expected assert instance.tendon_slack_length == expected @pytest.mark.parametrize( 'F_M_max, expected', [ (None, Symbol('F_M_max_name')), (Symbol('F_M_max'), Symbol('F_M_max')), (Integer(1000), Integer(1000)), (Float('1000.0'), Float('1000.0')), ], ) def test_peak_isometric_force(self, F_M_max, expected): instance = MusculotendonDeGroote2016( self.name, self.pathway, 
self.activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=self.l_T_slack, peak_isometric_force=F_M_max, optimal_fiber_length=self.l_M_opt, maximal_fiber_velocity=self.v_M_max, optimal_pennation_angle=self.alpha_opt, fiber_damping_coefficient=self.beta, ) assert instance.F_M_max == expected assert instance.peak_isometric_force == expected @pytest.mark.parametrize( 'l_M_opt, expected', [ (None, Symbol('l_M_opt_name')), (Symbol('l_M_opt'), Symbol('l_M_opt')), (Rational(1, 2), Rational(1, 2)), (Float('0.5'), Float('0.5')), ], ) def test_optimal_fiber_length(self, l_M_opt, expected): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=self.l_T_slack, peak_isometric_force=self.F_M_max, optimal_fiber_length=l_M_opt, maximal_fiber_velocity=self.v_M_max, optimal_pennation_angle=self.alpha_opt, fiber_damping_coefficient=self.beta, ) assert instance.l_M_opt == expected assert instance.optimal_fiber_length == expected @pytest.mark.parametrize( 'v_M_max, expected', [ (None, Symbol('v_M_max_name')), (Symbol('v_M_max'), Symbol('v_M_max')), (Integer(10), Integer(10)), (Float('10.0'), Float('10.0')), ], ) def test_maximal_fiber_velocity(self, v_M_max, expected): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=self.l_T_slack, peak_isometric_force=self.F_M_max, optimal_fiber_length=self.l_M_opt, maximal_fiber_velocity=v_M_max, optimal_pennation_angle=self.alpha_opt, fiber_damping_coefficient=self.beta, ) assert instance.v_M_max == expected assert instance.maximal_fiber_velocity == expected @pytest.mark.parametrize( 'alpha_opt, expected', [ (None, Symbol('alpha_opt_name')), (Symbol('alpha_opt'), Symbol('alpha_opt')), (Integer(0), Integer(0)), (Float('0.1'), Float('0.1')), ], ) def test_optimal_pennation_angle(self, alpha_opt, expected): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=self.l_T_slack, peak_isometric_force=self.F_M_max, optimal_fiber_length=self.l_M_opt, maximal_fiber_velocity=self.v_M_max, optimal_pennation_angle=alpha_opt, fiber_damping_coefficient=self.beta, ) assert instance.alpha_opt == expected assert instance.optimal_pennation_angle == expected @pytest.mark.parametrize( 'beta, expected', [ (None, Symbol('beta_name')), (Symbol('beta'), Symbol('beta')), (Integer(0), Integer(0)), (Rational(1, 10), Rational(1, 10)), (Float('0.1'), Float('0.1')), ], ) def test_fiber_damping_coefficient(self, beta, expected): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=self.l_T_slack, peak_isometric_force=self.F_M_max, optimal_fiber_length=self.l_M_opt, maximal_fiber_velocity=self.v_M_max, optimal_pennation_angle=self.alpha_opt, fiber_damping_coefficient=beta, ) assert instance.beta == expected assert instance.fiber_damping_coefficient == expected def test_excitation(self): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, ) assert hasattr(instance, 'e') assert hasattr(instance, 'excitation') e_expected = dynamicsymbols('e_name') assert instance.e == e_expected assert instance.excitation == e_expected assert instance.e is instance.excitation def test_excitation_is_immutable(self): instance = 
MusculotendonDeGroote2016( self.name, self.pathway, self.activation, ) with pytest.raises(AttributeError): instance.e = None with pytest.raises(AttributeError): instance.excitation = None def test_activation(self): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, ) assert hasattr(instance, 'a') assert hasattr(instance, 'activation') a_expected = dynamicsymbols('a_name') assert instance.a == a_expected assert instance.activation == a_expected def test_activation_is_immutable(self): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, ) with pytest.raises(AttributeError): instance.a = None with pytest.raises(AttributeError): instance.activation = None def test_repr(self): instance = MusculotendonDeGroote2016( self.name, self.pathway, self.activation, musculotendon_dynamics=MusculotendonFormulation.RIGID_TENDON, tendon_slack_length=self.l_T_slack, peak_isometric_force=self.F_M_max, optimal_fiber_length=self.l_M_opt, maximal_fiber_velocity=self.v_M_max, optimal_pennation_angle=self.alpha_opt, fiber_damping_coefficient=self.beta, ) expected = ( 'MusculotendonDeGroote2016(\'name\', ' 'pathway=LinearPathway(pO, pI), ' 'activation_dynamics=FirstOrderActivationDeGroote2016(\'name\', ' 'activation_time_constant=tau_a_name, ' 'deactivation_time_constant=tau_d_name, ' 'smoothing_rate=b_name), ' 'musculotendon_dynamics=0, ' 'tendon_slack_length=l_T_slack, ' 'peak_isometric_force=F_M_max, ' 'optimal_fiber_length=l_M_opt, ' 'maximal_fiber_velocity=v_M_max, ' 'optimal_pennation_angle=alpha_opt, ' 'fiber_damping_coefficient=beta)' ) assert repr(instance) == expected
TestMusculotendonDeGroote2016
python
networkx__networkx
networkx/generators/tests/test_lattice.py
{ "start": 5325, "end": 5950 }
class ____: """Unit tests for :func:`networkx.generators.lattice.hypercube_graph`""" def test_special_cases(self): for n, H in [ (0, nx.null_graph()), (1, nx.path_graph(2)), (2, nx.cycle_graph(4)), (3, nx.cubical_graph()), ]: G = nx.hypercube_graph(n) assert nx.could_be_isomorphic(G, H) def test_degree_distribution(self): for n in range(1, 10): G = nx.hypercube_graph(n) expected_histogram = [0] * n + [2**n] assert nx.degree_histogram(G) == expected_histogram
TestHypercubeGraph
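A quick sketch of what those tests imply for Q_n: 2**n nodes, each of degree n, hence n * 2**(n-1) edges (assumes networkx is installed):

import networkx as nx

G = nx.hypercube_graph(3)
print(G.number_of_nodes(), G.number_of_edges())  # 8 12
print(nx.degree_histogram(G))                    # [0, 0, 0, 8], matching test_degree_distribution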
python
allegroai__clearml
clearml/backend_api/services/v2_13/models.py
{ "start": 106081, "end": 110344 }
class ____(Response): """ Response of models.publish_many endpoint. :param published: Number of models published :type published: int :param published_tasks: :type published_tasks: Sequence[dict] """ _service = "models" _action = "publish_many" _version = "2.13" _schema = { "definitions": {}, "failures": { "item": { "error": { "description": "Error info", "properties": { "codes": {"item": {"type": "integer"}, "type": "array"}, "data": {"additionalProperties": True, "type": "object"}, "msg": {"type": "string"}, }, "type": "object", }, "id": {"description": "ID of the failed entity", "type": "string"}, "type": "object", }, "type": "array", }, "properties": { "published": { "description": "Number of models published", "type": ["integer", "null"], }, "published_tasks": { "items": { "description": "Result of publishing of the model's associated task (if exists). Returned only if the task was published successfully as part of the model publishing.", "properties": { "data": { "description": "Data returned from the task publishing operation.", "properties": { "committed_versions_results": { "description": "Committed versions results", "items": { "additionalProperties": True, "type": "object", }, "type": "array", }, "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": "object", }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": "integer", }, }, "type": "object", }, "id": {"description": "Task id", "type": "string"}, }, "type": "object", }, "type": ["array", "null"], }, }, } def __init__( self, published: Optional[int] = None, published_tasks: Optional[List[dict]] = None, **kwargs: Any ) -> None: super(PublishManyResponse, self).__init__(**kwargs) self.published = published self.published_tasks = published_tasks @schema_property("published") def published(self) -> Optional[int]: return self._property_published @published.setter def published(self, value: Optional[int]) -> None: if value is None: self._property_published = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "published", six.integer_types) self._property_published = value @schema_property("published_tasks") def published_tasks(self) -> Optional[List[dict]]: return self._property_published_tasks @published_tasks.setter def published_tasks(self, value: Optional[List[dict]]) -> None: if value is None: self._property_published_tasks = None return self.assert_isinstance(value, "published_tasks", (list, tuple)) self.assert_isinstance(value, "published_tasks", (dict,), is_array=True) self._property_published_tasks = value
PublishManyResponse
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/control_flow/cond_v2_test.py
{ "start": 62470, "end": 64061 }
class ____(test.TestCase): def testCase(self): def branch1(x): logging_ops.print_v2("1") return x def branch2(x): return x + 1 with ops.Graph().as_default(): x = array_ops.constant(1) output = cond_v2.indexed_case( array_ops.constant(0), [lambda: branch1(x), lambda: branch2(x)]) cond_op = output.op.inputs[0].op self.assertEqual(cond_op.type, "Case") self.assertEqual(1., self.evaluate(output)) def testStatelessCase(self): def branch1(x): return x + 1 def branch2(x): return x + 2 with ops.Graph().as_default(): x = array_ops.constant(1) output = cond_v2.indexed_case( array_ops.constant(0), [lambda: branch1(x), lambda: branch2(x)]) cond_op = output.op.inputs[0].op self.assertEqual(cond_op.type, "StatelessCase") self.assertEqual(2., self.evaluate(output)) def _cond(pred, true_fn, false_fn, name): if _is_old_cond(): return tf_cond.cond(pred, true_fn, false_fn, name=name) else: return cond_v2.cond_v2(pred, true_fn, false_fn, name=name) def _is_old_cond(): return isinstance(ops.get_default_graph()._get_control_flow_context(), control_flow_ops.CondContext) def _has_node_with_op(run_metadata, op_type): """Whether any node in `run_metadata.partition_graphs` matches `op_type`.""" for graph in run_metadata.partition_graphs: for node in graph.node: if node.op == op_type: return True return False if __name__ == "__main__": ops.enable_eager_execution() test.main()
CaseTest
python
pytorch__pytorch
torch/_export/error.py
{ "start": 24, "end": 1064 }
class ____(Enum): # User providing invalid inputs to either tracer, or other public facing APIs INVALID_INPUT_TYPE = 1 # User returning values from their models that we don't support. INVALID_OUTPUT_TYPE = 2 # Generated IR does not conform to Export IR Specification. VIOLATION_OF_SPEC = 3 # User's code contains types and functionalities we don't support. NOT_SUPPORTED = 4 # User's code didn't provide necessary details for us to successfully trace and export. # For example, we use a lot of decorators and ask users to annotate their model. MISSING_PROPERTY = 5 # User is using an API without proper initialization step. UNINITIALIZED = 6 def internal_assert(pred: bool, assert_msg: str) -> None: """ This is exir's custom assert method. It internally just throws InternalError. Note that the sole purpose is to throw our own error while maintaining similar syntax as python assert. """ if not pred: raise InternalError(assert_msg)
ExportErrorType
python
django__django
tests/test_client_regress/tests.py
{ "start": 54053, "end": 55744 }
class ____(SimpleTestCase): """ HttpRequest.body, HttpRequest.read(), and HttpRequest.read(BUFFER) have proper LimitedStream behavior. Refs #14753, #15785 """ def test_body_from_empty_request(self): """HttpRequest.body on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/body/").content, b"") def test_read_from_empty_request(self): """HttpRequest.read() on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/read_all/").content, b"") def test_read_numbytes_from_empty_request(self): """HttpRequest.read(LARGE_BUFFER) on a test client GET request should return the empty string.""" self.assertEqual(self.client.get("/read_buffer/").content, b"") def test_read_from_nonempty_request(self): """HttpRequest.read() on a test client PUT request with some payload should return that payload.""" payload = b"foobar" self.assertEqual( self.client.put( "/read_all/", data=payload, content_type="text/plain" ).content, payload, ) def test_read_numbytes_from_nonempty_request(self): """HttpRequest.read(LARGE_BUFFER) on a test client PUT request with some payload should return that payload.""" payload = b"foobar" self.assertEqual( self.client.put( "/read_buffer/", data=payload, content_type="text/plain" ).content, payload, ) @override_settings(ROOT_URLCONF="test_client_regress.urls")
ReadLimitedStreamTest
python
huggingface__transformers
src/transformers/models/dinat/modeling_dinat.py
{ "start": 7045, "end": 8598 }
class ____(nn.Module): """ Convolutional Downsampling Layer. Args: dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.dim = dim self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) self.norm = norm_layer(2 * dim) def forward(self, input_feature: torch.Tensor) -> torch.Tensor: input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) input_feature = self.norm(input_feature) return input_feature # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Dinat
DinatDownsampler
python
scipy__scipy
scipy/stats/tests/test_generation/reference_distributions.py
{ "start": 10226, "end": 12343 }
class ____(ReferenceDistribution): """Reference implementation of the SkewNormal distribution. Follow the example here to generate new reference distributions. Use the reference distributions to generate reference values of distributions functions. For now, copy-paste the output into unit tests. Full code to generate reference values does not need to be included as a comment in the test; just refer to the reference distribution used and the settings (e.g. mp.dps=50). """ def __init__(self, *, a): # Overriding __init__ is not necessary, but it allows IDEs to hint at # shape parameters. All parameters are keyword only to avoid the # ambiguity inherent in positional arguments. The infrastructure does # not take care of location and scale; nonetheless, assume standard # location and scale. Typically, there is no need to test the SciPy # distribution infrastructure's treatment of location and scale # separately for a specific distribution. super().__init__(a=a) def _support(self, a): # Override _support if the support of the distribution is a subset of # the real line return -mp.inf, mp.inf def _pdf(self, x, a): # Write PDFs following a scholarly reference as closely as possible. # Trust mpmath for the accuracy, and don't worry about speed. What's # important is the ease of verifying the PDF against the reference. If # the result is inaccurate, it will not match SciPy's output (whether # SciPy is accurate or not). If this occurs, try increasing dps before # implementing a numerically favorable (but presumably more complex) # implementation. return 2 * mp.npdf(x) * mp.ncdf(a * x) # Avoid overriding other methods unless the generic implementation is # believed to be inaccurate (e.g. due to numerical difficulties) or it is # too slow. Why? Less code to write, less code to review, and a guarantee # that there is no *mistake* in the implementation (e.g. wrong formula).
SkewNormal
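The _pdf formula above can be cross-checked against SciPy's own skewnorm; a sketch assuming mpmath and SciPy are installed:

from mpmath import mp
from scipy import stats

mp.dps = 50
a, x = 2, 0.75
reference = 2 * mp.npdf(x) * mp.ncdf(a * x)        # same expression as SkewNormal._pdf above
print(float(reference), stats.skewnorm.pdf(x, a))  # the two values should agree to double precision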
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-vearch/llama_index/vector_stores/vearch/base.py
{ "start": 673, "end": 11451 }
class ____(BasePydanticVectorStore): """ Vearch vector store: embeddings are stored within a Vearch table. when query, the index uses Vearch to query for the top k most similar nodes. Args: chroma_collection (chromadb.api.models.Collection.Collection): ChromaDB collection instance """ flat_metadata: bool = True stores_text: bool = True using_db_name: str using_table_name: str url: str _vearch: vearch_cluster.VearchCluster = PrivateAttr() def __init__( self, path_or_url: Optional[str] = None, table_name: str = _DEFAULT_TABLE_NAME, db_name: str = _DEFAULT_CLUSTER_DB_NAME, **kwargs: Any, ) -> None: """Initialize vearch vector store.""" if path_or_url is None: raise ValueError("Please input url of cluster") if not db_name: db_name = _DEFAULT_CLUSTER_DB_NAME db_name += "_" db_name += str(uuid.uuid4()).split("-")[-1] if not table_name: table_name = _DEFAULT_TABLE_NAME table_name += "_" table_name += str(uuid.uuid4()).split("-")[-1] super().__init__( using_db_name=db_name, using_table_name=table_name, url=path_or_url, ) self._vearch = vearch_cluster.VearchCluster(path_or_url) @classmethod def class_name(cls) -> str: return "VearchVectorStore" @property def client(self) -> Any: """Get client.""" return self._vearch def _get_matadata_field(self, metadatas: Optional[List[dict]] = None) -> None: field_list = [] if metadatas: for key, value in metadatas[0].items(): if isinstance(value, int): field_list.append({"field": key, "type": "int"}) continue if isinstance(value, str): field_list.append({"field": key, "type": "str"}) continue if isinstance(value, float): field_list.append({"field": key, "type": "float"}) continue else: raise ValueError("Please check data type,support int, str, float") self.field_list = field_list def _add_texts( self, ids: Iterable[str], texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, **kwargs: Any, ) -> List[str]: """ Returns: List of ids from adding the texts into the vectorstore. """ if embeddings is None: raise ValueError("embeddings is None") self._get_matadata_field(metadatas) dbs_list = self._vearch.list_dbs() if self.using_db_name not in dbs_list: create_db_code = self._vearch.create_db(self.using_db_name) if not create_db_code: raise ValueError("create db failed!!!") space_list = self._vearch.list_spaces(self.using_db_name) if self.using_table_name not in space_list: create_space_code = self._create_space(len(embeddings[0])) if not create_space_code: raise ValueError("create space failed!!!") docid = [] if embeddings is not None and metadatas is not None: meta_field_list = [i["field"] for i in self.field_list] for text, metadata, embed, id_d in zip(texts, metadatas, embeddings, ids): profiles: typing.Dict[str, Any] = {} profiles["ref_doc_id"] = id_d profiles["text"] = text for f in meta_field_list: profiles[f] = metadata[f] embed_np = np.array(embed) profiles["text_embedding"] = { "feature": (embed_np / np.linalg.norm(embed_np)).tolist() } insert_res = self._vearch.insert_one( self.using_db_name, self.using_table_name, profiles ) if insert_res["status"] == 200: docid.append(insert_res["_id"]) continue else: retry_insert = self._vearch.insert_one( self.using_db_name, self.using_table_name, profiles ) docid.append(retry_insert["_id"]) continue return docid def _create_space( self, dim: int = 1024, ) -> int: """ Create Cluster VectorStore space. Args: dim:dimension of vector. Return: code,0 failed for ,1 for success. 
""" type_dict = {"int": "integer", "str": "string", "float": "float"} space_config = { "name": self.using_table_name, "partition_num": 1, "replica_num": 1, "engine": { "index_size": 1, "retrieval_type": "HNSW", "retrieval_param": { "metric_type": "InnerProduct", "nlinks": -1, "efConstruction": -1, }, }, } tmp_proer = { "ref_doc_id": {"type": "string"}, "text": {"type": "string"}, "text_embedding": { "type": "vector", "index": True, "dimension": dim, "store_type": "MemoryOnly", }, } for item in self.field_list: tmp_proer[item["field"]] = {"type": type_dict[item["type"]]} space_config["properties"] = tmp_proer return self._vearch.create_space(self.using_db_name, space_config) def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: if not self._vearch: raise ValueError("Vearch Engine is not initialized") embeddings = [] metadatas = [] ids = [] texts = [] for node in nodes: embeddings.append(node.get_embedding()) metadatas.append( node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) ) ids.append(node.node_id) texts.append(node.get_content(metadata_mode=MetadataMode.NONE) or "") return self._add_texts( ids=ids, texts=texts, metadatas=metadatas, embeddings=embeddings, ) def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """ Query index for top k most similar nodes. Args: query : vector store query. Returns: VectorStoreQueryResult: Query results. """ meta_filters = {} if query.filters is not None: for filter_ in query.filters.legacy_filters(): meta_filters[filter_.key] = filter_.value if self.flag: meta_field_list = self._vearch.get_space( self.using_db_name, self.using_table_name ) meta_field_list.remove("text_embedding") embed = query.query_embedding if embed is None: raise ValueError("query.query_embedding is None") k = query.similarity_top_k query_data = { "query": { "sum": [ { "field": "text_embedding", "feature": (embed / np.linalg.norm(embed)).tolist(), } ], }, "retrieval_param": {"metric_type": "InnerProduct", "efSearch": 64}, "size": k, "fields": meta_field_list, } query_result = self._vearch.search( self.using_db_name, self.using_table_name, query_data ) res = query_result["hits"]["hits"] nodes = [] similarities = [] ids = [] for item in res: content = "" meta_data = {} node_id = "" score = item["_score"] item = item["_source"] for item_key in item: if item_key == "text": content = item[item_key] continue elif item_key == "_id": node_id = item[item_key] ids.append(node_id) continue meta_data[item_key] = item[item_key] similarities.append(score) try: node = metadata_dict_to_node(meta_data) node.set_content(content) except Exception: metadata, node_info, relationships = legacy_metadata_dict_to_node( meta_data ) node = TextNode( text=content, id_=node_id, metadata=metadata, start_char_idx=node_info.get("start", None), end_char_idx=node_info.get("end", None), relationships=relationships, ) nodes.append(node) return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids) def _delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> None: """ Delete the documents which have the specified ids. Args: ids: The ids of the embedding vectors. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful. False otherwise, None if not implemented. 
""" if ids is None or len(ids) == 0: return for _id in ids: queries = { "query": { "filter": [{"term": {"ref_doc_id": [_id], "operator": "and"}}] }, "size": 10000, } self._vearch.delete_by_query( self, self.using_db_name, self.using_table_name, queries ) def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. Returns: None """ if len(ref_doc_id) == 0: return ids: List[str] = [] ids.append(ref_doc_id) self._delete(ids)
VearchVectorStore
python
kamyu104__LeetCode-Solutions
Python/merge-operations-for-minimum-travel-time.py
{ "start": 81, "end": 1029 }
class ____(object): def minTravelTime(self, l, n, k, position, time): """ :type l: int :type n: int :type k: int :type position: List[int] :type time: List[int] :rtype: int """ prefix = [0]*(n+1) for i in xrange(n): prefix[i+1] = prefix[i]+time[i] dp = collections.defaultdict(lambda: collections.defaultdict(lambda: float("inf"))) dp[0][time[0]] = 0 for cnt in xrange(2, (n-k)+1): new_dp = collections.defaultdict(lambda: collections.defaultdict(lambda: float("inf"))) for i in xrange(cnt-1, (cnt-1)+(k+1)): for j in xrange(cnt-2, i): for t, c in dp[j].iteritems(): new_dp[i][prefix[i+1]-prefix[j+1]] = min(new_dp[i][prefix[i+1]-prefix[j+1]], (position[i]-position[j])*t+c) dp = new_dp return min(dp[n-1].itervalues())
Solution
python
pydantic__pydantic
pydantic/experimental/pipeline.py
{ "start": 2483, "end": 2873 }
class ____: pass # TODO: ultimately, make this public, see https://github.com/pydantic/pydantic/pull/9459#discussion_r1628197626 # Also, make this frozen eventually, but that doesn't work right now because of the generic base # Which attempts to modify __orig_base__ and such. # We could go with a manual freeze, but that seems overkill for now. @dataclass(**_slots_true)
_FieldTypeMarker
python
pytransitions__transitions
transitions/extensions/diagrams.py
{ "start": 1156, "end": 2104 }
class ____(Transition): """Transition used in conjunction with (Nested)Graphs to update graphs whenever a transition is conducted. """ def __init__(self, *args, **kwargs): label = kwargs.pop("label", None) super(TransitionGraphSupport, self).__init__(*args, **kwargs) if label: self.label = label def _change_state(self, event_data): graph = event_data.machine.model_graphs[id(event_data.model)] graph.reset_styling() graph.set_previous_transition(self.source, self.dest) super(TransitionGraphSupport, self)._change_state( event_data ) # pylint: disable=protected-access graph = event_data.machine.model_graphs[ id(event_data.model) ] # graph might have changed during change_event graph.set_node_style(getattr(event_data.model, event_data.machine.model_attribute), "active")
TransitionGraphSupport
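A hedged end-to-end sketch: GraphMachine, defined alongside this class in the diagrams extension, uses it as its transition type, so the extra label key below is picked up by the __init__ above. Assumes transitions plus a working Graphviz backend.

from transitions.extensions import GraphMachine

class Model:
    pass

model = Model()
machine = GraphMachine(
    model=model,
    states=["idle", "running"],
    initial="idle",
    transitions=[{"trigger": "start", "source": "idle", "dest": "running", "label": "go"}],
)
model.start()
model.get_graph().draw("machine.png", prog="dot")  # the just-taken edge is styled via set_previous_transition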
python
vyperlang__vyper
vyper/ast/nodes.py
{ "start": 34400, "end": 34520 }
class ____(Operator): __slots__ = () _description = "bitwise and" _pretty = "&" _op = operator.and_
BitAnd
python
pytorch__pytorch
torch/_dynamo/variables/tensor.py
{ "start": 66002, "end": 66623 }
class ____(TensorVariable): """An unspecialized python variable which prevents access to the underlying raw value. This is needed if item is called on a FakeTensor.""" _nonvar_fields = { "need_unwrap", *TensorVariable._nonvar_fields, } def __init__(self, proxy: torch.fx.Proxy, **kwargs) -> None: need_unwrap = kwargs.pop("need_unwrap", False) super().__init__(proxy, **kwargs) self.need_unwrap = need_unwrap @classmethod def from_tensor_variable(cls, tensor_variable): return FakeItemVariable(**dict(tensor_variable.__dict__))
FakeItemVariable
python
great-expectations__great_expectations
tests/core/test_expectation_suite.py
{ "start": 2924, "end": 7592 }
class ____: """Tests related to ExpectationSuite.__init__()""" @pytest.mark.unit def test_expectation_suite_init_defaults( self, empty_data_context: AbstractDataContext, fake_expectation_suite_name: str, ): suite = ExpectationSuite(name=fake_expectation_suite_name) default_meta = {"great_expectations_version": ge_version} assert suite.name == fake_expectation_suite_name assert suite.expectations == [] assert suite.suite_parameters == {} assert suite.meta == default_meta assert suite.id is None @pytest.mark.unit def test_expectation_suite_init_overrides( self, empty_data_context: AbstractDataContext, fake_expectation_suite_name: str, expect_column_values_to_be_in_set_col_a_with_meta: ExpectationConfiguration, ): class DummyExecutionEngine: pass test_suite_parameters = {"$PARAMETER": "test_suite_parameters"} default_meta = {"great_expectations_version": ge_version} test_meta_base = {"test_key": "test_value"} test_meta = {**default_meta, **test_meta_base} test_id = "test_id" suite = ExpectationSuite( name=fake_expectation_suite_name, expectations=[expect_column_values_to_be_in_set_col_a_with_meta], suite_parameters=test_suite_parameters, meta=test_meta, id=test_id, ) assert suite.name == fake_expectation_suite_name assert suite.expectation_configurations == [ expect_column_values_to_be_in_set_col_a_with_meta ] assert suite.suite_parameters == test_suite_parameters assert suite.meta == test_meta assert suite.id == test_id @pytest.mark.unit def test_expectation_suite_init_overrides_expectations_dict_and_obj( self, empty_data_context: AbstractDataContext, fake_expectation_suite_name: str, expect_column_values_to_be_in_set_col_a_with_meta_dict: dict, expect_column_values_to_be_in_set_col_a_with_meta: ExpectationConfiguration, ): """What does this test and why? The expectations param of ExpectationSuite takes a list of ExpectationConfiguration or dicts and both can be provided at the same time. We need to make sure they both show up as expectation configurations in the instantiated ExpectationSuite. """ # noqa: E501 # FIXME CoP test_expectations_input = [ expect_column_values_to_be_in_set_col_a_with_meta_dict, expect_column_values_to_be_in_set_col_a_with_meta, ] suite = ExpectationSuite( name=fake_expectation_suite_name, expectations=test_expectations_input, # type: ignore[arg-type] # FIXME CoP ) assert suite.name == fake_expectation_suite_name test_expected_expectations = [ ExpectationConfiguration(**expect_column_values_to_be_in_set_col_a_with_meta_dict), expect_column_values_to_be_in_set_col_a_with_meta, ] assert len(suite.expectations) == 2 assert suite.expectation_configurations == test_expected_expectations @pytest.mark.unit def test_bad_expectation_configs_are_skipped( self, bad_expectation_dict: dict, expect_column_values_to_be_in_set_col_a_with_meta_dict: dict, ): suite = ExpectationSuite( name="test_suite", expectations=[ bad_expectation_dict, expect_column_values_to_be_in_set_col_a_with_meta_dict, ], ) assert len(suite.expectations) == 1 assert ( suite.expectations[0].expectation_type == expect_column_values_to_be_in_set_col_a_with_meta_dict["type"] ) @pytest.mark.unit def test_expectation_suite_init_overrides_non_json_serializable_meta( self, fake_expectation_suite_name: str, ): """What does this test and why? meta field overrides need to be json serializable, if not we raise an exception. 
""" class NotSerializable: def __dict__(self): raise NotImplementedError test_meta = {"this_is_not_json_serializable": NotSerializable()} with pytest.raises(InvalidExpectationConfigurationError) as e: ExpectationSuite( name=fake_expectation_suite_name, meta=test_meta, # type: ignore[arg-type] # FIXME CoP ) assert "is of type NotSerializable which cannot be serialized to json" in str(e.value)
TestInit
python
pypa__pip
src/pip/_vendor/rich/errors.py
{ "start": 422, "end": 495 }
class ____(ConsoleError): """Markup was badly formatted."""
MarkupError
python
kamyu104__LeetCode-Solutions
Python/number-of-recent-calls.py
{ "start": 105, "end": 410 }
class ____(object): def __init__(self): self.__q = collections.deque() def ping(self, t): """ :type t: int :rtype: int """ self.__q.append(t) while self.__q[0] < t-3000: self.__q.popleft() return len(self.__q)
RecentCounter
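A usage sketch of the class above (with the blank filled in as RecentCounter, per the target field); each ping keeps only timestamps inside the window [t - 3000, t]:

rc = RecentCounter()
print(rc.ping(1))     # 1
print(rc.ping(100))   # 2
print(rc.ping(3001))  # 3
print(rc.ping(3002))  # 3 -- t=1 falls outside [2, 3002] and is popped from the deque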
python
django__django
django/core/exceptions.py
{ "start": 2571, "end": 6422 }
class ____(Exception): """An error while validating data.""" def __init__(self, message, code=None, params=None): """ The `message` argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. What we define as an "error" can be either a simple string or an instance of ValidationError with its message attribute set, and what we define as list or dictionary can be an actual `list` or `dict` or an instance of ValidationError with its `error_list` or `error_dict` attribute set. """ super().__init__(message, code, params) if isinstance(message, ValidationError): if hasattr(message, "error_dict"): message = message.error_dict elif not hasattr(message, "message"): message = message.error_list else: message, code, params = message.message, message.code, message.params if isinstance(message, dict): self.error_dict = {} for field, messages in message.items(): if not isinstance(messages, ValidationError): messages = ValidationError(messages) self.error_dict[field] = messages.error_list elif isinstance(message, list): self.error_list = [] for message in message: # Normalize plain strings to instances of ValidationError. if not isinstance(message, ValidationError): message = ValidationError(message) if hasattr(message, "error_dict"): self.error_list.extend(sum(message.error_dict.values(), [])) else: self.error_list.extend(message.error_list) else: self.message = message self.code = code self.params = params self.error_list = [self] @property def message_dict(self): # Trigger an AttributeError if this ValidationError # doesn't have an error_dict. getattr(self, "error_dict") return dict(self) @property def messages(self): if hasattr(self, "error_dict"): return sum(dict(self).values(), []) return list(self) def update_error_dict(self, error_dict): if hasattr(self, "error_dict"): for field, error_list in self.error_dict.items(): error_dict.setdefault(field, []).extend(error_list) else: error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list) return error_dict def __iter__(self): if hasattr(self, "error_dict"): for field, errors in self.error_dict.items(): yield field, list(ValidationError(errors)) else: for error in self.error_list: message = error.message if error.params: message %= error.params yield str(message) def __str__(self): if hasattr(self, "error_dict"): return repr(dict(self)) return repr(list(self)) def __repr__(self): return "ValidationError(%s)" % self def __eq__(self, other): if not isinstance(other, ValidationError): return NotImplemented return hash(self) == hash(other) def __hash__(self): if hasattr(self, "message"): return hash( ( self.message, self.code, make_hashable(self.params), ) ) if hasattr(self, "error_dict"): return hash(make_hashable(self.error_dict)) return hash(tuple(sorted(self.error_list, key=operator.attrgetter("message"))))
ValidationError
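A short usage sketch of the class above (django.core.exceptions.ValidationError; assumes Django is installed) showing the dict and single-message forms described in __init__:

from django.core.exceptions import ValidationError

try:
    raise ValidationError({"email": ["Enter a valid address.", "This field is required."]})
except ValidationError as exc:
    print(exc.message_dict)  # {'email': ['Enter a valid address.', 'This field is required.']}
    print(exc.messages)      # flat list of every message

err = ValidationError("Value %(value)s is too small.", code="min_value", params={"value": 3})
print(list(err))             # ['Value 3 is too small.'] -- params are interpolated in __iter__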
python
eventlet__eventlet
eventlet/zipkin/api.py
{ "start": 2174, "end": 3993 }
class ____: END_ANNOTATION = SERVER_SEND def __init__(self, name, trace_id, span_id, parent_id, sampled, endpoint): """ :param name: RPC name (String) :param trace_id: int :param span_id: int :param parent_id: int or None :param sampled: lets the downstream servers know if I should record trace data for the request (bool) :param endpoint: zipkin._thrift.zipkinCore.ttypes.EndPoint """ self.name = name self.trace_id = trace_id self.span_id = span_id self.parent_id = parent_id self.sampled = sampled self.endpoint = endpoint self.annotations = [] self.bannotations = [] self._done = False def add_annotation(self, annotation): if annotation.host is None: annotation.host = self.endpoint if not self._done: self.annotations.append(annotation) if annotation.value == self.END_ANNOTATION: self.flush() def add_binary_annotation(self, bannotation): if bannotation.host is None: bannotation.host = self.endpoint if not self._done: self.bannotations.append(bannotation) def flush(self): span = ZipkinDataBuilder.build_span(name=self.name, trace_id=self.trace_id, span_id=self.span_id, parent_id=self.parent_id, annotations=self.annotations, bannotations=self.bannotations) client.send_to_collector(span) self.annotations = [] self.bannotations = [] self._done = True
TraceData
python
charlax__professional-programming
antipatterns/sqlalchemy-examples/exists.py
{ "start": 302, "end": 904 }
class ____(Base): __tablename__ = "toasters" id = Column(Integer, primary_key=True) name = Column(String) color = Column(String) def toaster_exists_bad(toaster_id): session = Session() return bool(session.query(Toaster).filter_by(id=toaster_id).first()) def toaster_exists_good(toaster_id): session = Session() query = session.query(Toaster).filter_by(id=toaster_id) return session.query(query.exists()).scalar() def main(): Base.metadata.create_all(engine) toaster_exists_bad(1) toaster_exists_good(2) if __name__ == "__main__": main()
Toaster
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/input.py
{ "start": 2158, "end": 12512 }
class ____: """Defines an argument to an op's compute function. Inputs may flow from previous op outputs, or be stubbed using config. They may optionally be typed using the Dagster type system. Args: name (str): Name of the input. dagster_type (Optional[Union[Type, DagsterType]]]): The type of this input. Users should provide the Python type of the objects that they expect to be passed for this input, or a :py:class:`DagsterType` that defines a runtime check that they want to be run on this input. Defaults to :py:class:`Any`. description (Optional[str]): Human-readable description of the input. default_value (Optional[Any]): The default value to use if no input is provided. metadata (Optional[Dict[str, Any]]): A dict of metadata for the input. asset_key (Optional[Union[AssetKey, InputContext -> AssetKey]]): An AssetKey (or function that produces an AssetKey from the InputContext) which should be associated with this InputDefinition. Used for tracking lineage information through Dagster. asset_partitions (Optional[Union[AbstractSet[str], InputContext -> AbstractSet[str]]]): A set of partitions of the given asset_key (or a function that produces this list of partitions from the InputContext) which should be associated with this InputDefinition. input_manager_key (Optional[str]): The resource key for the :py:class:`InputManager` used for loading this input when it is not connected to an upstream output. """ _name: str _type_not_set: bool _dagster_type: DagsterType _description: Optional[str] _default_value: Any _input_manager_key: Optional[str] _raw_metadata: ArbitraryMetadataMapping _metadata: Mapping[str, MetadataValue] _asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]] _asset_partitions_fn: Optional[Callable[["InputContext"], set[str]]] def __init__( self, name: str, dagster_type: object = None, description: Optional[str] = None, default_value: object = NoValueSentinel, metadata: Optional[ArbitraryMetadataMapping] = None, asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]] = None, asset_partitions: Optional[Union[set[str], Callable[["InputContext"], set[str]]]] = None, input_manager_key: Optional[str] = None, # when adding new params, make sure to update combine_with_inferred and with_dagster_type below ): self._name = check_valid_name(name, allow_list=["config"]) self._type_not_set = dagster_type is None self._dagster_type = resolve_dagster_type(dagster_type) self._description = check.opt_str_param(description, "description") self._default_value = _check_default_value(self._name, self._dagster_type, default_value) self._input_manager_key = check.opt_str_param(input_manager_key, "input_manager_key") self._raw_metadata = check.opt_mapping_param(metadata, "metadata", key_type=str) self._metadata = normalize_metadata(self._raw_metadata, allow_invalid=True) if not callable(asset_key): check.opt_inst_param(asset_key, "asset_key", AssetKey) self._asset_key = asset_key if asset_partitions: check.param_invariant( asset_key is not None, "asset_partitions", 'Cannot specify "asset_partitions" argument without also specifying "asset_key"', ) if callable(asset_partitions): self._asset_partitions_fn = asset_partitions elif asset_partitions is not None: _asset_partitions = check.set_param(asset_partitions, "asset_partitions", of_type=str) self._asset_partitions_fn = lambda _: _asset_partitions else: self._asset_partitions_fn = None @property def name(self) -> str: return self._name @property def dagster_type(self) -> DagsterType: return self._dagster_type 
@property def description(self) -> Optional[str]: return self._description @property def has_default_value(self) -> bool: return self._default_value is not NoValueSentinel @property def default_value(self) -> Any: check.invariant(self.has_default_value, "Can only fetch default_value if has_default_value") return self._default_value @property def input_manager_key(self) -> Optional[str]: return self._input_manager_key @property def metadata(self) -> ArbitraryMetadataMapping: return self._raw_metadata @property def is_asset(self) -> bool: return self._asset_key is not None @property def hardcoded_asset_key(self) -> Optional[AssetKey]: if not callable(self._asset_key): return self._asset_key else: return None def get_asset_key(self, context: "InputContext") -> Optional[AssetKey]: """Get the AssetKey associated with this InputDefinition for the given :py:class:`InputContext` (if any). Args: context (InputContext): The InputContext that this InputDefinition is being evaluated in """ if callable(self._asset_key): return self._asset_key(context) else: return self.hardcoded_asset_key def get_asset_partitions(self, context: "InputContext") -> Optional[set[str]]: """Get the set of partitions that this op will read from this InputDefinition for the given :py:class:`InputContext` (if any). Args: context (InputContext): The InputContext that this InputDefinition is being evaluated in """ if self._asset_partitions_fn is None: return None return self._asset_partitions_fn(context) def mapping_to( self, node_name: str, input_name: str, fan_in_index: Optional[int] = None ) -> "InputMapping": """Create an input mapping to an input of a child node. In a GraphDefinition, you can use this helper function to construct an :py:class:`InputMapping` to the input of a child node. Args: node_name (str): The name of the child node to which to map this input. input_name (str): The name of the child node' input to which to map this input. fan_in_index (Optional[int]): The index in to a fanned in input, else None Examples: .. code-block:: python input_mapping = InputDefinition('composite_input', Int).mapping_to( 'child_node', 'int_input' ) """ check.str_param(node_name, "node_name") check.str_param(input_name, "input_name") check.opt_int_param(fan_in_index, "fan_in_index") return InputMapping( graph_input_name=self.name, mapped_node_name=node_name, mapped_node_input_name=input_name, fan_in_index=fan_in_index, graph_input_description=self.description, dagster_type=self.dagster_type, ) @staticmethod def create_from_inferred(inferred: InferredInputProps) -> "InputDefinition": return InputDefinition( name=inferred.name, dagster_type=_checked_inferred_type(inferred), description=inferred.description, default_value=inferred.default_value, ) def combine_with_inferred(self, inferred: InferredInputProps) -> "InputDefinition": """Return a new InputDefinition that merges this ones properties with those inferred from type signature. This can update: dagster_type, description, and default_value if they are not set. 
""" check.invariant( self.name == inferred.name, f"InferredInputProps name {inferred.name} did not align with InputDefinition name" f" {self.name}", ) dagster_type = self._dagster_type if self._type_not_set: dagster_type = _checked_inferred_type(inferred) description = self._description if description is None and inferred.description is not None: description = inferred.description default_value = self._default_value if not self.has_default_value: default_value = inferred.default_value return InputDefinition( name=self.name, dagster_type=dagster_type, description=description, default_value=default_value, metadata=self.metadata, asset_key=self._asset_key, asset_partitions=self._asset_partitions_fn, input_manager_key=self._input_manager_key, ) def with_dagster_type(self, dagster_type: DagsterType) -> "InputDefinition": return InputDefinition( name=self.name, dagster_type=dagster_type, description=self.description, default_value=self.default_value if self.has_default_value else NoValueSentinel, metadata=self.metadata, asset_key=self._asset_key, asset_partitions=self._asset_partitions_fn, input_manager_key=self._input_manager_key, ) def _checked_inferred_type(inferred: InferredInputProps) -> DagsterType: try: if inferred.annotation == inspect.Parameter.empty: resolved_type = resolve_dagster_type(None) elif inferred.annotation is None: # When inferred.annotation is None, it means someone explicitly put "None" as the # annotation, so want to map it to a DagsterType that checks for the None type resolved_type = resolve_dagster_type(type(None)) else: resolved_type = resolve_dagster_type(inferred.annotation) except DagsterError as e: raise DagsterInvalidDefinitionError( f"Problem using type '{inferred.annotation}' from type annotation for argument " f"'{inferred.name}', correct the issue or explicitly set the dagster_type " "via In()." ) from e return resolved_type
InputDefinition
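The dagster record above ends with InputDefinition, whose mapping_to docstring shows how a graph-level input gets wired into a child node. A minimal usage sketch of that call, assuming the legacy InputDefinition name is still importable from the top-level dagster package; the node and input names are hypothetical placeholders:

from dagster import InputDefinition  # assumed legacy top-level export

# Mirrors the mapping_to example in the record's own docstring.
input_mapping = InputDefinition("composite_input", int).mapping_to(
    "child_node",  # hypothetical child node name
    "int_input",   # hypothetical input name on that node
)
print(input_mapping.graph_input_name)  # "composite_input"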
python
allegroai__clearml
clearml/backend_api/services/v2_20/events.py
{ "start": 65282, "end": 66334 }
class ____(Response): """ Response of events.delete_for_task endpoint. :param deleted: Number of deleted events :type deleted: bool """ _service = "events" _action = "delete_for_task" _version = "2.20" _schema = { "definitions": {}, "properties": { "deleted": { "description": "Number of deleted events", "type": ["boolean", "null"], } }, "type": "object", } def __init__(self, deleted: Optional[bool] = None, **kwargs: Any) -> None: super(DeleteForTaskResponse, self).__init__(**kwargs) self.deleted = deleted @schema_property("deleted") def deleted(self) -> Optional[bool]: return self._property_deleted @deleted.setter def deleted(self, value: Optional[bool]) -> None: if value is None: self._property_deleted = None return self.assert_isinstance(value, "deleted", (bool,)) self._property_deleted = value
DeleteForTaskResponse
python
kubernetes-client__python
kubernetes/client/models/v1_csi_driver_list.py
{ "start": 383, "end": 6872 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[V1CSIDriver]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501 """V1CSIDriverList - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): """Gets the api_version of this V1CSIDriverList. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1CSIDriverList. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1CSIDriverList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1CSIDriverList. # noqa: E501 :type: str """ self._api_version = api_version @property def items(self): """Gets the items of this V1CSIDriverList. # noqa: E501 items is the list of CSIDriver # noqa: E501 :return: The items of this V1CSIDriverList. # noqa: E501 :rtype: list[V1CSIDriver] """ return self._items @items.setter def items(self, items): """Sets the items of this V1CSIDriverList. items is the list of CSIDriver # noqa: E501 :param items: The items of this V1CSIDriverList. # noqa: E501 :type: list[V1CSIDriver] """ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501 raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501 self._items = items @property def kind(self): """Gets the kind of this V1CSIDriverList. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1CSIDriverList. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1CSIDriverList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1CSIDriverList. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1CSIDriverList. # noqa: E501 :return: The metadata of this V1CSIDriverList. # noqa: E501 :rtype: V1ListMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1CSIDriverList. :param metadata: The metadata of this V1CSIDriverList. # noqa: E501 :type: V1ListMeta """ self._metadata = metadata def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1CSIDriverList): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1CSIDriverList): return True return self.to_dict() != other.to_dict()
V1CSIDriverList
python
huggingface__transformers
src/transformers/models/ernie/modular_ernie.py
{ "start": 28836, "end": 33390 }
class ____(BertForMultipleChoice): @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, task_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) task_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Task type embedding is a special embedding to represent the characteristic of different tasks, such as word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We assign a `task_type_id` to each task and the `task_type_id` is in the range `[0, config.task_type_vocab_size-1] position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.ernie( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
ErnieForMultipleChoice
python
django__django
tests/null_queries/models.py
{ "start": 412, "end": 610 }
class ____(models.Model): first = models.ForeignKey(OuterA, models.CASCADE) # second would clash with the __second lookup. third = models.ForeignKey(OuterB, models.SET_NULL, null=True)
Inner
python
doocs__leetcode
lcci/16.20.T9/Solution2.py
{ "start": 0, "end": 228 }
class ____: def getValidT9Words(self, num: str, words: List[str]) -> List[str]: trans = str.maketrans(ascii_lowercase, "22233344455566677778889999") return [w for w in words if w.translate(trans) == num]
Solution
python
huggingface__transformers
tests/quantization/autoround/test_auto_round.py
{ "start": 1197, "end": 9320 }
class ____(unittest.TestCase): model_name = "OPEA/Qwen2.5-1.5B-Instruct-int4-sym-inc" input_text = "There is a girl who likes adventure," EXPECTED_OUTPUTS = set() ## Different backends may produce slight variations in output EXPECTED_OUTPUTS.add( "There is a girl who likes adventure, and she has been exploring the world " "for many years. She travels to different countries and cultures, trying new " "things every day. One of her favorite places to visit is a small village in " "the mountains where" ) EXPECTED_OUTPUTS.add( "There is a girl who likes adventure, and she has been exploring the world for many years. She has visited every country in Europe and has even traveled to some of the most remote parts of Africa. She enjoys hiking through the mountains and discovering" ) EXPECTED_OUTPUTS.add( "There is a girl who likes adventure, and she has been exploring the world for many years. She has visited every country in Europe and has even traveled to some of the most remote parts of Africa. She has also climbed mountains and explored caves" ) device_map = torch_device # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ backend_synchronize(torch_device) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, dtype=torch.float16 ) def tearDown(self): gc.collect() backend_empty_cache(torch_device) gc.collect() def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_raise_if_non_quantized(self): model_id = "facebook/opt-125m" quantization_config = AutoRoundConfig(bits=4) with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) def test_quantized_model_bf16(self): """ Simple test that checks if the quantized model is working properly with bf16 """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = AutoRoundConfig(backend="triton") quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, dtype=torch.bfloat16, device_map=self.device_map, quantization_config=quantization_config, ) output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_intel_extension_for_pytorch def test_quantized_model_on_cpu(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt") quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, dtype="auto") output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ ## some backends like marlin/ipex will repack the weight that caused the weight shape changed with tempfile.TemporaryDirectory() as tmpdirname: quantization_config = AutoRoundConfig(backend="triton") quantized_model = 
AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, dtype=torch.float16, quantization_config=quantization_config, ) quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=40, do_sample=False) output_tokens = self.tokenizer.decode(output[0], skip_special_tokens=True) self.assertIn(output_tokens, self.EXPECTED_OUTPUTS) @require_torch_multi_accelerator def test_quantized_model_multi_accelerator(self): """ Simple test that checks if the quantized model is working properly with multiple accelerators """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = AutoRoundConfig(backend="triton") quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map="auto", quantization_config=quantization_config, dtype="auto" ) output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_convert_from_gptq(self): """ Simple test that checks if auto-round work properly with gptq format """ model_name = "ybelkada/opt-125m-gptq-4bit" quantization_config = AutoRoundConfig() model = AutoModelForCausalLM.from_pretrained( model_name, device_map=torch_device, quantization_config=quantization_config, dtype="auto" ) tokenizer = AutoTokenizer.from_pretrained(model_name) text = "There is a girl who likes adventure," inputs = tokenizer(text, return_tensors="pt").to(model.device) tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0]) @require_intel_extension_for_pytorch def test_convert_from_awq_cpu(self): """ Simple test that checks if auto-round work properly with awq format """ model_name = "casperhansen/opt-125m-awq" quantization_config = AutoRoundConfig() model = AutoModelForCausalLM.from_pretrained( model_name, device_map="cpu", quantization_config=quantization_config, dtype="auto" ) tokenizer = AutoTokenizer.from_pretrained(model_name) text = "There is a girl who likes adventure," inputs = tokenizer(text, return_tensors="pt").to(model.device) tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0]) @require_torch_gpu def test_mixed_bits(self): """ Simple test that checks if auto-round work properly with mixed bits """ model_name = "facebook/opt-125m" model = AutoModelForCausalLM.from_pretrained(model_name, dtype="auto") tokenizer = AutoTokenizer.from_pretrained(model_name) layer_config = { "model.decoder.layers.0.self_attn.k_proj": {"bits": 8}, "model.decoder.layers.6.self_attn.out_proj": {"bits": 2, "group_size": 32}, } bits, group_size, sym = 4, 128, True from auto_round import AutoRound autoround = AutoRound(model, tokenizer, bits=bits, group_size=group_size, sym=sym, layer_config=layer_config) with tempfile.TemporaryDirectory() as tmpdirname: autoround.quantize_and_save(output_dir=tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, dtype=torch.float16, device_map=torch_device) text = "There is a girl who likes adventure," inputs = tokenizer(text, return_tensors="pt").to(model.device) tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0])
AutoRoundTest
python
getsentry__sentry
src/sentry/shared_integrations/exceptions/__init__.py
{ "start": 4327, "end": 4378 }
class ____(ApiError): code = 409
ApiConflictError
python
dagster-io__dagster
python_modules/dagster/dagster/_core/storage/db_io_manager.py
{ "start": 3061, "end": 13405 }
class ____(IOManager): def __init__( self, *, type_handlers: Sequence[DbTypeHandler], db_client: DbClient, database: str, schema: Optional[str] = None, io_manager_name: Optional[str] = None, default_load_type: Optional[type] = None, ): self._handlers_by_type: dict[type[Any], DbTypeHandler] = {} self._io_manager_name = io_manager_name or self.__class__.__name__ for type_handler in type_handlers: for handled_type in type_handler.supported_types: check.invariant( handled_type not in self._handlers_by_type, f"{self._io_manager_name} provided with two handlers for the same type. " f"Type: '{handled_type}'. Handler classes: '{type(type_handler)}' and " f"'{type(self._handlers_by_type.get(handled_type))}'.", ) self._handlers_by_type[handled_type] = type_handler self._db_client = db_client self._database = database self._schema = schema if ( default_load_type is None and len(type_handlers) == 1 and len(type_handlers[0].supported_types) == 1 ): self._default_load_type = type_handlers[0].supported_types[0] else: self._default_load_type = default_load_type def handle_output(self, context: OutputContext, obj: object) -> None: # If the output type is set to Nothing, handle_output will not be # called. We still need to raise an error when the return value # is None, but the typing type is not Nothing if obj is None: raise DagsterInvariantViolationError( "Unexpected 'None' output value. If a 'None' value is intentional, set the output" " type to None by adding return type annotation '-> None'.", ) obj_type = type(obj) self._check_supported_type(obj_type) table_slice = self._get_table_slice(context, context) with self._db_client.connect(context, table_slice) as conn: self._db_client.ensure_schema_exists(context, table_slice, conn) self._db_client.delete_table_slice(context, table_slice, conn) handler = self._resolve_handler(obj_type) handler_metadata = handler.handle_output(context, table_slice, obj, conn) context.add_output_metadata( { **(handler_metadata or {}), "Query": self._db_client.get_select_statement(table_slice), } ) # Try to attach relation identifier metadata to the output asset, but # don't fail if it errors because the user has already attached it. 
try: context.add_output_metadata( dict(TableMetadataSet(table_name=self._db_client.get_table_name(table_slice))) ) except DagsterInvalidMetadata: pass def load_input(self, context: InputContext) -> object: obj_type = context.dagster_type.typing_type if obj_type is Any and self._default_load_type is not None: load_type = self._default_load_type else: load_type = obj_type self._check_supported_type(load_type) table_slice = self._get_table_slice(context, cast("OutputContext", context.upstream_output)) with self._db_client.connect(context, table_slice) as conn: return self._resolve_handler(load_type).load_input(context, table_slice, conn) # type: ignore # (pyright bug) def _resolve_handler(self, obj_type: type) -> DbTypeHandler: return next( handler for type_, handler in self._handlers_by_type.items() if issubclass(obj_type, type_) ) def _get_table_slice( self, context: Union[OutputContext, InputContext], output_context: OutputContext ) -> TableSlice: output_context_metadata = output_context.definition_metadata or {} schema: str table: str partition_dimensions: list[TablePartitionDimension] = [] if context.has_asset_key: asset_key_path = context.asset_key.path table = asset_key_path[-1] # schema order of precedence: metadata, I/O manager 'schema' config, key_prefix if output_context_metadata.get("schema"): schema = cast("str", output_context_metadata["schema"]) elif self._schema: schema = self._schema elif len(asset_key_path) > 1: schema = asset_key_path[-2] else: schema = "public" if context.has_asset_partitions: partition_expr = output_context_metadata.get("partition_expr") if partition_expr is None: raise ValueError( f"Asset '{context.asset_key}' has partitions, but no 'partition_expr'" " metadata value, so we don't know what column it's partitioned on. To" " specify a column, set this metadata value. E.g." ' @asset(metadata={"partition_expr": "your_partition_column"}).' ) if isinstance(context.asset_partitions_def, MultiPartitionsDefinition): partition_range = context.asset_partition_key_range for part in context.asset_partitions_def.partitions_defs: start_key_for_partition = cast( "MultiPartitionKey", partition_range.start ).keys_by_dimension[part.name] end_key_for_partition = cast( "MultiPartitionKey", partition_range.end ).keys_by_dimension[part.name] if isinstance(part.partitions_def, TimeWindowPartitionsDefinition): start_time = part.partitions_def.time_window_for_partition_key( start_key_for_partition ).start end_time = part.partitions_def.time_window_for_partition_key( end_key_for_partition ).end partitions = TimeWindow(start_time, end_time) else: partitions = part.partitions_def.get_partition_keys_in_range( PartitionKeyRange( start=start_key_for_partition, end=end_key_for_partition ) ) partition_expr_str = cast("Mapping[str, str]", partition_expr).get( part.name ) if partition_expr is None: raise ValueError( f"Asset '{context.asset_key}' has partition {part.name}, but the" f" 'partition_expr' metadata does not contain a {part.name} entry," " so we don't know what column to filter it on. Specify which" " column of the database contains data for the" f" {part.name} partition." 
) partition_dimensions.append( TablePartitionDimension( partition_expr=cast("str", partition_expr_str), partitions=partitions, ) ) elif isinstance(context.asset_partitions_def, TimeWindowPartitionsDefinition): partition_dimensions.append( TablePartitionDimension( partition_expr=cast("str", partition_expr), partitions=( context.asset_partitions_time_window if context.asset_partition_keys else [] ), ) ) else: partition_dimensions.append( TablePartitionDimension( partition_expr=cast("str", partition_expr), partitions=context.asset_partition_keys, ) ) else: if "table" in output_context_metadata: table = check.str_param(output_context_metadata["table"], "table") else: table = output_context.name if output_context_metadata.get("schema"): schema = cast("str", output_context_metadata["schema"]) elif self._schema: schema = self._schema else: schema = "public" return TableSlice( table=table, schema=schema, database=self._database, partition_dimensions=partition_dimensions, columns=(context.definition_metadata or {}).get("columns"), ) def _check_supported_type(self, obj_type): if not issubclass(obj_type, tuple(self._handlers_by_type.keys())): msg = ( f"{self._io_manager_name} does not have a handler for type '{obj_type}'. Has" " handlers for types" f" '{', '.join([str(handler_type) for handler_type in self._handlers_by_type.keys()])}'." ) if obj_type is Any: type_hints = " or ".join( [str(handler_type) for handler_type in self._handlers_by_type.keys()] ) msg += f" Please add {type_hints} type hints to your assets and ops." else: msg += ( f" Please build the {self._io_manager_name} with an type handler for type" f" '{obj_type}', so the {self._io_manager_name} can correctly handle the" " output." ) raise CheckError(msg)
DbIOManager
python
walkccc__LeetCode
solutions/1392. Longest Happy Prefix/1392.py
{ "start": 0, "end": 577 }
class ____: def longestPrefix(self, s: str) -> str: BASE = 26 HASH = 8_417_508_174_513 n = len(s) maxLength = 0 pow = 1 prefixHash = 0 # the hash of s[0..i] suffixHash = 0 # the hash of s[j..n) def val(c: str) -> int: return ord(c) - ord('a') j = n - 1 for i in range(n - 1): prefixHash = (prefixHash * BASE + val(s[i])) % HASH suffixHash = (val(s[j]) * pow + suffixHash) % HASH pow = pow * BASE % HASH if prefixHash == suffixHash: maxLength = i + 1 j -= 1 return s[:maxLength]
Solution
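The rolling-hash Solution above computes the longest happy prefix; a brute-force cross-check of the same definition (longest proper prefix that is also a suffix), useful for sanity-testing it on small inputs:

def longest_happy_prefix_naive(s: str) -> str:
    # Try proper prefix lengths from longest to shortest and compare directly.
    for length in range(len(s) - 1, 0, -1):
        if s[:length] == s[-length:]:
            return s[:length]
    return ""

assert longest_happy_prefix_naive("level") == "l"
assert longest_happy_prefix_naive("ababab") == "abab"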
python
pandas-dev__pandas
asv_bench/benchmarks/join_merge.py
{ "start": 9785, "end": 10360 }
class ____: params = ["inner", "outer", "left", "right"] param_names = ["how"] def setup(self, how): low, high, n = -1000, 1000, 10**6 self.left = DataFrame( np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG") ) self.left["left"] = self.left.sum(axis=1) self.right = self.left.sample(frac=1).rename({"left": "right"}, axis=1) self.right = self.right.reset_index(drop=True) self.right["right"] *= -1 def time_i8merge(self, how): merge(self.left, self.right, how=how)
I8Merge
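The I8Merge benchmark above times pandas.merge on frames that share integer key columns; a small standalone run of the same call pattern (toy sizes rather than the benchmark's 10**6 rows):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
left = pd.DataFrame(rng.integers(-5, 5, (100, 3)), columns=list("ABC"))
left["left"] = left.sum(axis=1)
right = left.sample(frac=1, random_state=0).rename({"left": "right"}, axis=1)
right["right"] *= -1

# As in the benchmark, merging without `on=` joins on all shared columns (A, B, C).
print(pd.merge(left, right, how="outer").shape)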
python
scipy__scipy
scipy/stats/_distn_infrastructure.py
{ "start": 19471, "end": 21669 }
class ____(rv_frozen): def pdf(self, x): return self.dist.pdf(x, *self.args, **self.kwds) def logpdf(self, x): return self.dist.logpdf(x, *self.args, **self.kwds) def argsreduce(cond, *args): """Clean arguments to: 1. Ensure all arguments are iterable (arrays of dimension at least one 2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is True, in 1D. Return list of processed arguments. Examples -------- >>> import numpy as np >>> from scipy.stats._distn_infrastructure import argsreduce >>> rng = np.random.default_rng() >>> A = rng.random((4, 5)) >>> B = 2 >>> C = rng.random((1, 5)) >>> cond = np.ones(A.shape) >>> [A1, B1, C1] = argsreduce(cond, A, B, C) >>> A1.shape (4, 5) >>> B1.shape (1,) >>> C1.shape (1, 5) >>> cond[2,:] = 0 >>> [A1, B1, C1] = argsreduce(cond, A, B, C) >>> A1.shape (15,) >>> B1.shape (1,) >>> C1.shape (15,) """ # some distributions assume arguments are iterable. newargs = np.atleast_1d(*args) # np.atleast_1d returns an array if only one argument, or a list of arrays # if more than one argument. if not isinstance(newargs, (list | tuple)): newargs = (newargs,) if np.all(cond): # broadcast arrays with cond *newargs, cond = np.broadcast_arrays(*newargs, cond) return [arg.ravel() for arg in newargs] s = cond.shape # np.extract returns flattened arrays, which are not broadcastable together # unless they are either the same size or size == 1. return [(arg if np.size(arg) == 1 else np.extract(cond, np.broadcast_to(arg, s))) for arg in newargs] parse_arg_template = """ def _parse_args(self, %(shape_arg_str)s %(locscale_in)s): return (%(shape_arg_str)s), %(locscale_out)s def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None): return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size) def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'): return (%(shape_arg_str)s), %(locscale_out)s, moments """
rv_continuous_frozen
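rv_continuous_frozen in the scipy record above simply forwards pdf and logpdf to the wrapped distribution with the stored shape/loc/scale arguments; the public face of that pattern is the familiar frozen-distribution API:

from scipy import stats

frozen = stats.norm(loc=2.0, scale=0.5)  # calling a distribution returns a frozen instance
print(frozen.pdf(2.0))     # same as stats.norm.pdf(2.0, loc=2.0, scale=0.5)
print(frozen.logpdf(2.0))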
python
huggingface__transformers
src/transformers/models/big_bird/modeling_big_bird.py
{ "start": 89780, "end": 95732 }
class ____(BigBirdPreTrainedModel): _tied_weights_keys = { "cls.predictions.decoder.bias": "cls.predictions.bias", "cls.predictions.decoder.weight": "bert.embeddings.word_embeddings.weight", } def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MaskedLMOutput, tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForMaskedLM >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base") >>> squad_ds = load_dataset("rajpurkar/squad_v2", split="train") # doctest: +IGNORE_RESULT >>> # select random long article >>> LONG_ARTICLE_TARGET = squad_ds[81514]["context"] >>> # select random sentence >>> LONG_ARTICLE_TARGET[332:398] 'the highest values are very close to the theoretical maximum value' >>> # add mask_token >>> LONG_ARTICLE_TO_MASK = LONG_ARTICLE_TARGET.replace("maximum", "[MASK]") >>> inputs = tokenizer(LONG_ARTICLE_TO_MASK, return_tensors="pt") >>> # long article input >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) 'maximum' ``` ```python >>> labels = tokenizer(LONG_ARTICLE_TARGET, return_tensors="pt")["input_ids"] >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) 1.99 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token if self.config.pad_token_id is None: raise ValueError("The PAD token should be defined for generation") attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @auto_docstring( custom_intro=""" BigBird Model with a `language modeling` head on top for CLM fine-tuning. """ )
BigBirdForMaskedLM
python
econchick__interrogate
tests/functional/sample/full.py
{ "start": 108, "end": 1706 }
class ____: """Foo class""" def __init__(self): """init method of Foo class""" self.foo = None def __str__(self): """a magic method.""" pass def _semiprivate(self): """a semipriate method""" pass def __private(self): """a private method""" pass def method_foo(self): """this method does foo""" pass def get(self): """this method gets something""" pass async def get(self): """this async method gets something""" pass @property def prop(self): """this method has a get property decorator""" pass @prop.setter def prop(self): """this method has a set property decorator""" pass @prop.deleter def prop(self): """this method as a del property decorator""" pass @typing.overload def module_overload(a: None) -> None: """overloaded method""" ... @typing.overload def module_overload(a: int) -> int: """overloaded method""" ... def module_overload(a): """overloaded method implementation""" pass @overload def simple_overload(a: None) -> None: """overloaded method""" ... @overload def simple_overload(a: int) -> int: """overloaded method""" ... def simple_overload(a): """overloaded method implementation""" pass def top_level_func(): """A top level function""" def inner_func(): """A inner function""" pass
Foo
python
pytorch__pytorch
torch/autograd/variable.py
{ "start": 142, "end": 256 }
class ____(type): def __instancecheck__(cls, other): return isinstance(other, torch.Tensor)
VariableMeta
python
scrapy__scrapy
tests/test_pipeline_files.py
{ "start": 11396, "end": 11745 }
class ____: name: str # default fields file_urls: list = dataclasses.field(default_factory=list) files: list = dataclasses.field(default_factory=list) # overridden fields custom_file_urls: list = dataclasses.field(default_factory=list) custom_files: list = dataclasses.field(default_factory=list)
FilesPipelineTestDataClass
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/dataprep.py
{ "start": 1293, "end": 1516 }
class ____(BaseGoogleLink): """Helper class for constructing Dataprep job group link.""" name = "Job group details page" key = "dataprep_job_group_page" format_str = DATAPREP_JOB_GROUP_LINK
DataprepJobGroupLink
python
tensorflow__tensorflow
third_party/xla/xla/python/xla_client.py
{ "start": 8242, "end": 9747 }
class ____: """Python representation of a xla.DotDimensionNumbers protobuf.""" __slots__ = ( 'lhs_contracting_dimensions', 'rhs_contracting_dimensions', 'lhs_batch_dimensions', 'rhs_batch_dimensions', ) def __init__(self): self.lhs_contracting_dimensions = [] self.rhs_contracting_dimensions = [] self.lhs_batch_dimensions = [] self.rhs_batch_dimensions = [] def make_dot_dimension_numbers( dimension_numbers: ( DotDimensionNumbers | tuple[tuple[list[int], list[int]], tuple[list[int], list[int]]] ), ) -> DotDimensionNumbers: """Builds a DotDimensionNumbers object from a specification. Args: dimension_numbers: either a `DotDimensionNumbers` or a nested tuple `((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))` of lists of integers representing the dimensions to treat as contracting dimensions and batch dimensions on each input operand. Returns: A `DotDimensionNumbers` object. """ if isinstance(dimension_numbers, (list, tuple)): (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers dot_dims_proto = DotDimensionNumbers() dot_dims_proto.lhs_contracting_dimensions.extend(lhs_contract) dot_dims_proto.rhs_contracting_dimensions.extend(rhs_contract) dot_dims_proto.lhs_batch_dimensions.extend(lhs_batch) dot_dims_proto.rhs_batch_dimensions.extend(rhs_batch) return dot_dims_proto else: return dimension_numbers
DotDimensionNumbers
python
google__jax
tests/pallas/pallas_test.py
{ "start": 46624, "end": 46711 }
class ____(PallasBaseTest): INTERPRET = True
PallasCallInputOutputAliasingInterpretTest
python
kamyu104__LeetCode-Solutions
Python/maximum-running-time-of-n-computers.py
{ "start": 595, "end": 1126 }
class ____(object): def maxRunTime(self, n, batteries): """ :type n: int :type batteries: List[int] :rtype: int """ def check(n, batteries, x): return sum(min(b, x) for b in batteries) >= n*x left, right = min(batteries), sum(batteries)//n while left <= right: mid = left + (right-left)//2 if not check(n, batteries, mid): right = mid-1 else: left = mid+1 return right
Solution2
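Solution2 above binary-searches the answer, using the check that x minutes is feasible when the capped battery supply covers n*x machine-minutes; the same technique as a compact standalone function with a quick call:

def max_run_time(n: int, batteries: list) -> int:
    def feasible(x: int) -> bool:
        # Each battery contributes at most x minutes to a schedule of length x.
        return sum(min(b, x) for b in batteries) >= n * x

    lo, hi = min(batteries), sum(batteries) // n
    while lo <= hi:
        mid = (lo + hi) // 2
        if feasible(mid):
            lo = mid + 1
        else:
            hi = mid - 1
    return hi

print(max_run_time(2, [3, 3, 3]))  # 4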
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_ismn.py
{ "start": 855, "end": 1842 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.to_be_valid_ismn" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): return column.apply(lambda x: is_valid_ismn(x)) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeValidIsmn
python
mlflow__mlflow
mlflow/genai/judges/tools/constants.py
{ "start": 290, "end": 702 }
class ____: """Registry of judge tool names.""" GET_TRACE_INFO = "get_trace_info" GET_ROOT_SPAN = "get_root_span" GET_SPAN = "get_span" LIST_SPANS = "list_spans" SEARCH_TRACE_REGEX = "search_trace_regex" GET_SPAN_PERFORMANCE_AND_TIMING_REPORT = "get_span_performance_and_timing_report" _GET_TRACES_IN_SESSION = "_get_traces_in_session" _SEARCH_TRACES = "_search_traces"
ToolNames
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-stackoverflow/llama_index/readers/stackoverflow/base.py
{ "start": 2629, "end": 6460 }
class ____(BaseReader): def __init__( self, api_key: str = None, team_name: str = None, cache_dir: str = None ) -> None: self._api_key = api_key or os.environ.get("STACKOVERFLOW_PAT") self._team_name = team_name or os.environ.get("STACKOVERFLOW_TEAM_NAME") self._last_index_time = None # TODO self._cache_dir = cache_dir if self._cache_dir: os.makedirs(self._cache_dir, exist_ok=True) def load_data( self, page: int = 1, doc_type: str = "posts", limit: int = 50 ) -> List[Document]: data = [] has_more = True while has_more: url = self.build_url(page, doc_type) headers = {"X-API-Access-Token": self._api_key} fp = os.path.join(self._cache_dir, f"{doc_type}_{page}.json") response = {} if self._cache_dir and os.path.exists(fp) and os.path.getsize(fp) > 0: try: with open(fp) as f: response = f.read() response = json.loads(response) except Exception as e: logger.error(e) if not response: response = rate_limited_get(url, headers) response.raise_for_status() if self._cache_dir: with open( os.path.join(self._cache_dir, f"{doc_type}_{page}.json"), "w" ) as f: f.write(response.content.decode("utf-8")) logger.info(f"Wrote {fp} to cache") response = response.json() has_more = response["has_more"] items = response["items"] logger.info(f"Fetched {len(items)} {doc_type} from Stack Overflow") for item_dict in items: owner_fields = {} if "owner" in item_dict: owner_fields = { f"owner_{k}": v for k, v in item_dict.pop("owner").items() } if "title" not in item_dict: item_dict["title"] = item_dict["link"] post = StackOverflowPost(**item_dict, **owner_fields) # TODO: filter out old posts # last_modified = datetime.fromtimestamp(post.last_edit_date or post.last_activity_date) # if last_modified < self._last_index_time: # return data post_document = Document( text=post.body_markdown, doc_id=post.post_id, extra_info={ "title": post.title, "author": post.owner_display_name, "timestamp": datetime.fromtimestamp(post.creation_date), "location": post.link, "url": post.link, "author_image_url": post.owner_profile_image, "type": post.post_type, }, ) data.append(post_document) if has_more: page += 1 return data def build_url(self, page: int, doc_type: str) -> str: team_fragment = f"&team={self._team_name}" # not sure if this filter is shared globally, or only to a particular team filter_fragment = "&filter=!nOedRLbqzB" page_fragment = f"&page={page}" return f"https://api.stackoverflowteams.com/2.3/{doc_type}?{team_fragment}{filter_fragment}{page_fragment}" if __name__ == "__main__": reader = StackoverflowReader( os.environ.get("STACKOVERFLOW_PAT"), os.environ.get("STACKOVERFLOW_TEAM_NAME"), cache_dir="./stackoverflow_cache", ) # reader.load_data()
StackoverflowReader
python
numpy__numpy
numpy/distutils/system_info.py
{ "start": 86227, "end": 86428 }
class ____(openblas_info): section = 'openblas' dir_env_var = 'OPENBLAS' _lib_names = ['openblas'] _require_symbols = ['zungqr_'] notfounderror = BlasNotFoundError
openblas_lapack_info
python
huggingface__transformers
src/transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py
{ "start": 28656, "end": 29323 }
class ____(Wav2Vec2ForCTC): def __init__(self, config, target_lang: Optional[str] = None): r""" target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechSatForCTC`] with adapters. Uses 'eng' by default. """ super().__init__(config) def tie_weights(self): raise AttributeError("Not needed for Wav2Vec2Conformer") def freeze_base_model(self): raise AttributeError("Not needed for Wav2Vec2Conformer")
Wav2Vec2ConformerForCTC
python
kamyu104__LeetCode-Solutions
Python/minimum-cost-to-set-cooking-time.py
{ "start": 42, "end": 722 }
class ____(object): def minCostSetTime(self, startAt, moveCost, pushCost, targetSeconds): """ :type startAt: int :type moveCost: int :type pushCost: int :type targetSeconds: int :rtype: int """ def cost(m, s): if not (0 <= m <= 99 and s <= 99): return float("inf") result = 0 curr = startAt for x in map(int, list(str(m*100 + s))): result += (moveCost if x != curr else 0)+pushCost curr = x return result m, s = divmod(targetSeconds, 60) return min(cost(m, s), cost(m-1, s+60))
Solution
python
django__django
tests/sessions_tests/tests.py
{ "start": 36866, "end": 37005 }
class ____(FileSessionTests): def mkdtemp(self): tmp_dir = super().mkdtemp() return Path(tmp_dir)
FileSessionPathLibTests
python
ansible__ansible
lib/ansible/_internal/_ssh/_ssh_agent.py
{ "start": 1726, "end": 2624 }
class ____(enum.IntEnum): # Responses SSH_AGENT_FAILURE = 5 SSH_AGENT_SUCCESS = 6 SSH_AGENT_IDENTITIES_ANSWER = 12 SSH_AGENT_SIGN_RESPONSE = 14 SSH_AGENT_EXTENSION_FAILURE = 28 SSH_AGENT_EXTENSION_RESPONSE = 29 # Constraints SSH_AGENT_CONSTRAIN_LIFETIME = 1 SSH_AGENT_CONSTRAIN_CONFIRM = 2 SSH_AGENT_CONSTRAIN_EXTENSION = 255 # Requests SSH_AGENTC_REQUEST_IDENTITIES = 11 SSH_AGENTC_SIGN_REQUEST = 13 SSH_AGENTC_ADD_IDENTITY = 17 SSH_AGENTC_REMOVE_IDENTITY = 18 SSH_AGENTC_REMOVE_ALL_IDENTITIES = 19 SSH_AGENTC_ADD_SMARTCARD_KEY = 20 SSH_AGENTC_REMOVE_SMARTCARD_KEY = 21 SSH_AGENTC_LOCK = 22 SSH_AGENTC_UNLOCK = 23 SSH_AGENTC_ADD_ID_CONSTRAINED = 25 SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED = 26 SSH_AGENTC_EXTENSION = 27 def to_blob(self) -> bytes: return bytes([self])
ProtocolMsgNumbers
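ProtocolMsgNumbers.to_blob() above yields the one-byte message type; on the wire each ssh-agent message is that payload behind a 4-byte big-endian length prefix, per the OpenSSH agent protocol. A small sketch of framing a request-identities message, reusing the enum value 11 from the record:

import struct

SSH_AGENTC_REQUEST_IDENTITIES = 11  # value taken from the enum above

payload = bytes([SSH_AGENTC_REQUEST_IDENTITIES])
frame = struct.pack(">I", len(payload)) + payload
print(frame.hex())  # 000000010b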
python
optuna__optuna
optuna/pruners/_patient.py
{ "start": 248, "end": 4646 }
class ____(BasePruner): """Pruner which wraps another pruner with tolerance. This pruner monitors intermediate values in a trial and prunes the trial if the improvement in the intermediate values after a patience period is less than a threshold. The pruner handles NaN values in the following manner: 1. If all intermediate values before or during the patient period are NaN, the trial will not be pruned 2. During the pruning calculations, NaN values are ignored. Only valid numeric values are considered. Example: .. testcode:: import numpy as np from sklearn.datasets import load_iris from sklearn.linear_model import SGDClassifier from sklearn.model_selection import train_test_split import optuna X, y = load_iris(return_X_y=True) X_train, X_valid, y_train, y_valid = train_test_split(X, y) classes = np.unique(y) def objective(trial): alpha = trial.suggest_float("alpha", 0.0, 1.0) clf = SGDClassifier(alpha=alpha) n_train_iter = 100 for step in range(n_train_iter): clf.partial_fit(X_train, y_train, classes=classes) intermediate_value = clf.score(X_valid, y_valid) trial.report(intermediate_value, step) if trial.should_prune(): raise optuna.TrialPruned() return clf.score(X_valid, y_valid) study = optuna.create_study( direction="maximize", pruner=optuna.pruners.PatientPruner(optuna.pruners.MedianPruner(), patience=1), ) study.optimize(objective, n_trials=20) Args: wrapped_pruner: Wrapped pruner to perform pruning when :class:`~optuna.pruners.PatientPruner` allows a trial to be pruned. If it is :obj:`None`, this pruner is equivalent to early-stopping taken the intermediate values in the individual trial. patience: Pruning is disabled until the objective doesn't improve for ``patience`` consecutive steps. min_delta: Tolerance value to check whether or not the objective improves. This value should be non-negative. """ def __init__( self, wrapped_pruner: BasePruner | None, patience: int, min_delta: float = 0.0 ) -> None: if patience < 0: raise ValueError(f"patience cannot be negative but got {patience}.") if min_delta < 0: raise ValueError(f"min_delta cannot be negative but got {min_delta}.") self._wrapped_pruner = wrapped_pruner self._patience = patience self._min_delta = min_delta def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool: step = trial.last_step if step is None: return False intermediate_values = trial.intermediate_values steps = np.asarray(list(intermediate_values.keys())) # Do not prune if number of step to determine are insufficient. if steps.size <= self._patience + 1: return False steps.sort() # This is the score patience steps ago steps_before_patience = steps[: -self._patience - 1] scores_before_patience = np.asarray( list(intermediate_values[step] for step in steps_before_patience) ) # And these are the scores after that steps_after_patience = steps[-self._patience - 1 :] scores_after_patience = np.asarray( list(intermediate_values[step] for step in steps_after_patience) ) direction = study.direction if direction == StudyDirection.MINIMIZE: maybe_prune = np.nanmin(scores_before_patience) + self._min_delta < np.nanmin( scores_after_patience ) else: maybe_prune = np.nanmax(scores_before_patience) - self._min_delta > np.nanmax( scores_after_patience ) if maybe_prune: if self._wrapped_pruner is not None: return self._wrapped_pruner.prune(study, trial) else: return True else: return False
PatientPruner
python
getsentry__sentry
src/sentry/api/bases/organization.py
{ "start": 5630, "end": 6016 }
class ____(OrganizationPermission): scope_map = { "GET": ["org:read", "org:write", "org:admin", "org:integrations", "org:ci"], "POST": ["org:read", "org:write", "org:admin", "org:integrations"], "PUT": ["org:read", "org:write", "org:admin", "org:integrations"], "DELETE": ["org:admin", "org:integrations"], }
OrganizationIntegrationsLoosePermission
python
pypa__hatch
tests/cli/status/test_status.py
{ "start": 5517, "end": 8727 }
class ____: def test_no_detection_no_project(self, hatch, config_file, helpers, isolation): config_file.model.mode = "aware" config_file.save() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" Mode is set to `aware` but no project is set, defaulting to the current directory [Project] - <no project detected> [Location] - {isolation} [Config] - {config_file.path} """ ) def test_unknown_project(self, hatch, isolation, config_file, helpers): project = "foo" config_file.model.project = project config_file.model.mode = "aware" config_file.save() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" Unable to locate project {project}, defaulting to the current directory [Project] - <no project detected> [Location] - {isolation} [Config] - {config_file.path} """ ) def test_not_a_project(self, hatch, temp_dir, config_file, helpers): project = "foo" config_file.model.project = project config_file.model.projects = {project: str(temp_dir)} config_file.model.mode = "aware" config_file.save() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" [Project] - {project} (not a project) [Location] - {temp_dir} [Config] - {config_file.path} """ ) @pytest.mark.parametrize("file_name", ["pyproject.toml", "setup.py"]) def test_found_project(self, hatch, temp_dir, config_file, helpers, file_name): project_file = temp_dir / file_name project_file.touch() project = "foo" config_file.model.project = project config_file.model.projects = {project: str(temp_dir)} config_file.model.mode = "aware" config_file.save() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" [Project] - {project} [Location] - {temp_dir} [Config] - {config_file.path} """ ) def test_local_override(self, hatch, temp_dir, config_file, helpers): project_file = temp_dir / "pyproject.toml" project_file.touch() project = "foo" config_file.model.project = project config_file.model.projects = {project: str(temp_dir)} config_file.model.mode = "aware" config_file.save() with temp_chdir() as d: d.joinpath("pyproject.toml").touch() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" [Project] - {d.name} (current directory) [Location] - {d} [Config] - {config_file.path} """ )
TestModeAware
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/with6.py
{ "start": 147, "end": 389 }
class ____(type): def __enter__(cls) -> "ClassA": print("Enter A") return cls def __exit__( cls, exc_typ: type[Exception], exc_val: Exception, exc_tbc: TracebackType ) -> None: print("Exit A")
ClassA
python
pandas-dev__pandas
pandas/tests/frame/methods/test_asfreq.py
{ "start": 319, "end": 10283 }
class ____: def test_asfreq2(self, frame_or_series): ts = frame_or_series( [0.0, 1.0, 2.0], index=DatetimeIndex( [ datetime(2009, 10, 30), datetime(2009, 11, 30), datetime(2009, 12, 31), ], dtype="M8[ns]", freq="BME", ), ) daily_ts = ts.asfreq("B") monthly_ts = daily_ts.asfreq("BME") tm.assert_equal(monthly_ts, ts) daily_ts = ts.asfreq("B", method="pad") monthly_ts = daily_ts.asfreq("BME") tm.assert_equal(monthly_ts, ts) daily_ts = ts.asfreq(offsets.BDay()) monthly_ts = daily_ts.asfreq(offsets.BMonthEnd()) tm.assert_equal(monthly_ts, ts) result = ts[:0].asfreq("ME") assert len(result) == 0 assert result is not ts if frame_or_series is Series: daily_ts = ts.asfreq("D", fill_value=-1) result = daily_ts.value_counts().sort_index() expected = Series( [60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0], name="count" ).sort_index() tm.assert_series_equal(result, expected) def test_asfreq_datetimeindex_empty(self, frame_or_series): # GH#14320 index = DatetimeIndex(["2016-09-29 11:00"]) expected = frame_or_series(index=index, dtype=object).asfreq("h") result = frame_or_series([3], index=index.copy()).asfreq("h") tm.assert_index_equal(expected.index, result.index) @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) def test_tz_aware_asfreq_smoke(self, tz, frame_or_series): dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz) obj = frame_or_series( np.random.default_rng(2).standard_normal(len(dr)), index=dr ) # it works! obj.asfreq("min") def test_asfreq_normalize(self, frame_or_series): rng = date_range("1/1/2000 09:30", periods=20) norm = date_range("1/1/2000", periods=20) vals = np.random.default_rng(2).standard_normal((20, 3)) obj = DataFrame(vals, index=rng) expected = DataFrame(vals, index=norm) if frame_or_series is Series: obj = obj[0] expected = expected[0] result = obj.asfreq("D", normalize=True) tm.assert_equal(result, expected) def test_asfreq_keep_index_name(self, frame_or_series): # GH#9854 index_name = "bar" index = date_range("20130101", periods=20, name=index_name) obj = DataFrame(list(range(20)), columns=["foo"], index=index) obj = tm.get_obj(obj, frame_or_series) assert index_name == obj.index.name assert index_name == obj.asfreq("10D").index.name def test_asfreq_ts(self, frame_or_series): index = period_range(freq="Y", start="1/1/2001", end="12/31/2010") obj = DataFrame( np.random.default_rng(2).standard_normal((len(index), 3)), index=index ) obj = tm.get_obj(obj, frame_or_series) result = obj.asfreq("D", how="end") exp_index = index.asfreq("D", how="end") assert len(result) == len(obj) tm.assert_index_equal(result.index, exp_index) result = obj.asfreq("D", how="start") exp_index = index.asfreq("D", how="start") assert len(result) == len(obj) tm.assert_index_equal(result.index, exp_index) def test_asfreq_resample_set_correct_freq(self, frame_or_series): # GH#5613 # we test if .asfreq() and .resample() set the correct value for .freq dti = to_datetime(["2012-01-01", "2012-01-02", "2012-01-03"]) obj = DataFrame({"col": [1, 2, 3]}, index=dti) obj = tm.get_obj(obj, frame_or_series) # testing the settings before calling .asfreq() and .resample() assert obj.index.freq is None assert obj.index.inferred_freq == "D" # does .asfreq() set .freq correctly? assert obj.asfreq("D").index.freq == "D" # does .resample() set .freq correctly? 
assert obj.resample("D").asfreq().index.freq == "D" def test_asfreq_empty(self, datetime_frame): # test does not blow up on length-0 DataFrame zero_length = datetime_frame.reindex([]) result = zero_length.asfreq("BME") assert result is not zero_length def test_asfreq(self, datetime_frame): offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd()) rule_monthly = datetime_frame.asfreq("BME") tm.assert_frame_equal(offset_monthly, rule_monthly) rule_monthly.asfreq("B", method="pad") # TODO: actually check that this worked. # don't forget! rule_monthly.asfreq("B", method="pad") def test_asfreq_datetimeindex(self): df = DataFrame( {"A": [1, 2, 3]}, index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)], ) df = df.asfreq("B") assert isinstance(df.index, DatetimeIndex) ts = df["A"].asfreq("B") assert isinstance(ts.index, DatetimeIndex) def test_asfreq_fillvalue(self): # test for fill value during upsampling, related to issue 3715 # setup rng = date_range("1/1/2016", periods=10, freq="2s") # Explicit cast to 'float' to avoid implicit cast when setting None ts = Series(np.arange(len(rng)), index=rng, dtype="float") df = DataFrame({"one": ts}) # insert pre-existing missing value df.loc["2016-01-01 00:00:08", "one"] = None actual_df = df.asfreq(freq="1s", fill_value=9.0) expected_df = df.asfreq(freq="1s").fillna(9.0) expected_df.loc["2016-01-01 00:00:08", "one"] = None tm.assert_frame_equal(expected_df, actual_df) expected_series = ts.asfreq(freq="1s").fillna(9.0) actual_series = ts.asfreq(freq="1s", fill_value=9.0) tm.assert_series_equal(expected_series, actual_series) def test_asfreq_with_date_object_index(self, frame_or_series): rng = date_range("1/1/2000", periods=20, unit="ns") ts = frame_or_series(np.random.default_rng(2).standard_normal(20), index=rng) ts2 = ts.copy() ts2.index = [x.date() for x in ts2.index] result = ts2.asfreq("4h", method="ffill") expected = ts.asfreq("4h", method="ffill") tm.assert_equal(result, expected) def test_asfreq_with_unsorted_index(self, frame_or_series): # GH#39805 # Test that rows are not dropped when the datetime index is out of order index = to_datetime(["2021-01-04", "2021-01-02", "2021-01-03", "2021-01-01"]) result = frame_or_series(range(4), index=index) expected = result.reindex(sorted(index)) expected.index = expected.index._with_freq("infer") result = result.asfreq("D") tm.assert_equal(result, expected) def test_asfreq_after_normalize(self, unit): # https://github.com/pandas-dev/pandas/issues/50727 result = DatetimeIndex( date_range("2000", periods=2).as_unit(unit).normalize(), freq="D" ) expected = DatetimeIndex(["2000-01-01", "2000-01-02"], freq="D").as_unit(unit) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "freq, freq_half", [ ("2ME", "ME"), (MonthEnd(2), MonthEnd(1)), ], ) def test_asfreq_2ME(self, freq, freq_half): index = date_range("1/1/2000", periods=6, freq=freq_half) df = DataFrame({"s": Series([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], index=index)}) expected = df.asfreq(freq=freq) index = date_range("1/1/2000", periods=3, freq=freq) result = DataFrame({"s": Series([0.0, 2.0, 4.0], index=index)}) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "freq, freq_depr", [ ("2ME", "2M"), ("2ME", "2m"), ("2QE", "2Q"), ("2QE-SEP", "2Q-SEP"), ("1BQE", "1BQ"), ("2BQE-SEP", "2BQ-SEP"), ("2BQE-SEP", "2bq-sep"), ("1YE", "1y"), ("2YE-MAR", "2Y-MAR"), ], ) def test_asfreq_frequency_M_Q_Y_raises(self, freq, freq_depr): msg = f"Invalid frequency: {freq_depr}" index = date_range("1/1/2000", periods=4, 
freq=f"{freq[1:]}") df = DataFrame({"s": Series([0.0, 1.0, 2.0, 3.0], index=index)}) with pytest.raises(ValueError, match=msg): df.asfreq(freq=freq_depr) @pytest.mark.parametrize( "freq, error_msg", [ ( "2MS", "Invalid frequency: 2MS", ), ( offsets.MonthBegin(), r"\<MonthBegin\> is not supported as period frequency", ), ( offsets.DateOffset(months=2), r"\<DateOffset: months=2\> is not supported as period frequency", ), ], ) def test_asfreq_unsupported_freq(self, freq, error_msg): # https://github.com/pandas-dev/pandas/issues/56718 index = PeriodIndex(["2020-01-01", "2021-01-01"], freq="M") df = DataFrame({"a": Series([0, 1], index=index)}) with pytest.raises(ValueError, match=error_msg): df.asfreq(freq=freq) @pytest.mark.parametrize( "freq, freq_depr", [ ("2YE", "2A"), ("2BYE-MAR", "2BA-MAR"), ], ) def test_asfreq_frequency_A_BA_raises(self, freq, freq_depr): msg = f"Invalid frequency: {freq_depr}" index = date_range("1/1/2000", periods=4, freq=freq) df = DataFrame({"s": Series([0.0, 1.0, 2.0, 3.0], index=index)}) with pytest.raises(ValueError, match=msg): df.asfreq(freq=freq_depr)
TestAsFreq
python
falconry__falcon
falcon/routing/static.py
{ "start": 11502, "end": 12162 }
class ____(StaticRoute):
    """Subclass of StaticRoute with modifications to support ASGI apps."""

    async def __call__(  # type: ignore[override]
        self,
        req: asgi.Request,
        resp: asgi.Response,
        ws: asgi.WebSocket | None = None,
        **kw: Any,
    ) -> None:
        if ws is not None:
            raise falcon.HTTPBadRequest()

        super().__call__(req, resp, **kw)

        if resp.stream is not None:  # None when in an option request
            # NOTE(kgriffs): Fixup resp.stream so that it is non-blocking
            resp.stream = _AsyncFileReader(resp.stream)  # type: ignore[assignment,arg-type]
StaticRouteAsync
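The `_AsyncFileReader` helper referenced above is not shown in this record. A minimal standalone sketch of the same idea, written against the standard library only (the class name and `main` driver are illustrative): delegate each blocking `read()` to a worker thread so the event loop never stalls.

import asyncio
import io


class AsyncFileReaderSketch:
    """Wrap a blocking binary file so reads happen in a worker thread."""

    def __init__(self, file: io.BufferedIOBase) -> None:
        self._file = file

    async def read(self, size: int = -1) -> bytes:
        # asyncio.to_thread keeps the event loop responsive while the
        # blocking file.read() call runs elsewhere.
        return await asyncio.to_thread(self._file.read, size)


async def main() -> None:
    with open(__file__, "rb") as fh:
        reader = AsyncFileReaderSketch(fh)
        first_chunk = await reader.read(64)
        print(len(first_chunk))


asyncio.run(main())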
python
Textualize__textual
docs/examples/styles/width_comparison.py
{ "start": 127, "end": 240 }
class ____(Static):
    def compose(self):
        ruler_text = "····•" * 100
        yield Label(ruler_text)
Ruler
python
falconry__falcon
falcon/media/base.py
{ "start": 6760, "end": 7938 }
class ____(metaclass=abc.ABCMeta):
    """Abstract Base Class for a WebSocket TEXT media handler."""

    def serialize(self, media: object) -> str:
        """Serialize the media object to a Unicode string.

        By default, this method raises an instance of
        :class:`NotImplementedError`. Therefore, it must be overridden if the
        child class wishes to support serialization to TEXT (0x01) message
        payloads.

        Args:
            media (object): A serializable object.

        Returns:
            str: The resulting serialized string from the input object.
        """
        raise NotImplementedError()

    def deserialize(self, payload: str) -> object:
        """Deserialize TEXT payloads from a Unicode string.

        By default, this method raises an instance of
        :class:`NotImplementedError`. Therefore, it must be overridden if the
        child class wishes to support deserialization from TEXT (0x01) message
        payloads.

        Args:
            payload (str): Message payload to deserialize.

        Returns:
            object: A deserialized object.
        """
        raise NotImplementedError()
TextBaseHandlerWS
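A concrete handler overrides both hooks. Here is a minimal JSON-over-TEXT sketch, written without the Falcon base class so it runs standalone; a real handler would subclass the abstract class above instead.

import json


class JSONTextHandlerSketch:
    def serialize(self, media: object) -> str:
        return json.dumps(media)

    def deserialize(self, payload: str) -> object:
        return json.loads(payload)


handler = JSONTextHandlerSketch()
payload = handler.serialize({"event": "ping", "n": 1})
assert handler.deserialize(payload) == {"event": "ping", "n": 1}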
python
django-import-export__django-import-export
tests/core/admin.py
{ "start": 418, "end": 478 }
class ____(ImportMixin, admin.ModelAdmin):
    pass
ChildAdmin
python
ray-project__ray
python/ray/llm/_internal/common/utils/cloud_filesystem/azure_filesystem.py
{ "start": 514, "end": 2858 }
class ____(BaseCloudFileSystem):
    """Azure-specific implementation of cloud filesystem operations.

    **Note**: This implementation currently delegates to PyArrowFileSystem to
    maintain stability. Optimized implementation using azure-storage-blob SDK
    and azcopy will be added in a future PR.
    """

    @staticmethod
    def get_file(
        object_uri: str, decode_as_utf_8: bool = True
    ) -> Optional[Union[str, bytes]]:
        """Download a file from cloud storage into memory.

        Args:
            object_uri: URI of the file (abfss:// or azure://)
            decode_as_utf_8: If True, decode the file as UTF-8

        Returns:
            File contents as string or bytes, or None if file doesn't exist
        """
        return PyArrowFileSystem.get_file(object_uri, decode_as_utf_8)

    @staticmethod
    def list_subfolders(folder_uri: str) -> List[str]:
        """List the immediate subfolders in a cloud directory.

        Args:
            folder_uri: URI of the directory (abfss:// or azure://)

        Returns:
            List of subfolder names (without trailing slashes)
        """
        return PyArrowFileSystem.list_subfolders(folder_uri)

    @staticmethod
    def download_files(
        path: str,
        bucket_uri: str,
        substrings_to_include: Optional[List[str]] = None,
        suffixes_to_exclude: Optional[List[str]] = None,
    ) -> None:
        """Download files from cloud storage to a local directory.

        Args:
            path: Local directory where files will be downloaded
            bucket_uri: URI of cloud directory
            substrings_to_include: Only include files containing these substrings
            suffixes_to_exclude: Exclude certain files from download (e.g .safetensors)
        """
        PyArrowFileSystem.download_files(
            path, bucket_uri, substrings_to_include, suffixes_to_exclude
        )

    @staticmethod
    def upload_files(
        local_path: str,
        bucket_uri: str,
    ) -> None:
        """Upload files to cloud storage.

        Args:
            local_path: The local path of the files to upload.
            bucket_uri: The bucket uri to upload the files to, must start with
                `abfss://` or `azure://`.
        """
        PyArrowFileSystem.upload_files(local_path, bucket_uri)
AzureFileSystem
python
PyCQA__pylint
doc/data/messages/m/match-class-bind-self/good.py
{ "start": 0, "end": 293 }
class ____:
    __match_args__ = ("title", "year")

    def __init__(self, title, year):
        self.title = title
        self.year = year


def func(item: Book):
    match item:
        case Book(title=str() as title):
            ...
        case Book(year=int() as year):
            ...
Book
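Because `__match_args__` maps positional patterns to attributes, the keyword captures used in `func` above can equally be written positionally. A runnable sketch (Python 3.10+; the `describe` helper and the sample book are illustrative):

class Book:
    __match_args__ = ("title", "year")

    def __init__(self, title, year):
        self.title = title
        self.year = year


def describe(item: Book) -> str:
    match item:
        # Positional sub-patterns bind through __match_args__:
        # the first position is `title`, the second is `year`.
        case Book(str() as title, int() as year):
            return f"{title} ({year})"
        case _:
            return "unmatched"


print(describe(Book("Dune", 1965)))  # Dune (1965)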
python
jazzband__django-oauth-toolkit
oauth2_provider/views/oidc.py
{ "start": 8814, "end": 19764 }
class ____(OIDCLogoutOnlyMixin, FormView): template_name = "oauth2_provider/logout_confirm.html" form_class = ConfirmLogoutForm # Only delete tokens for Application whose client type and authorization # grant type are in the respective lists. token_deletion_client_types = [ Application.CLIENT_PUBLIC, Application.CLIENT_CONFIDENTIAL, ] token_deletion_grant_types = [ Application.GRANT_AUTHORIZATION_CODE, Application.GRANT_IMPLICIT, Application.GRANT_PASSWORD, Application.GRANT_CLIENT_CREDENTIALS, Application.GRANT_OPENID_HYBRID, ] def get_initial(self): return { "id_token_hint": self.oidc_data.get("id_token_hint", None), "logout_hint": self.oidc_data.get("logout_hint", None), "client_id": self.oidc_data.get("client_id", None), "post_logout_redirect_uri": self.oidc_data.get("post_logout_redirect_uri", None), "state": self.oidc_data.get("state", None), "ui_locales": self.oidc_data.get("ui_locales", None), } def dispatch(self, request, *args, **kwargs): self.oidc_data = {} return super().dispatch(request, *args, **kwargs) def get(self, request, *args, **kwargs): id_token_hint = request.GET.get("id_token_hint") client_id = request.GET.get("client_id") post_logout_redirect_uri = request.GET.get("post_logout_redirect_uri") state = request.GET.get("state") try: application, token_user = self.validate_logout_request( id_token_hint=id_token_hint, client_id=client_id, post_logout_redirect_uri=post_logout_redirect_uri, ) except OIDCError as error: return self.error_response(error) if not self.must_prompt(token_user): return self.do_logout(application, post_logout_redirect_uri, state, token_user) self.oidc_data = { "id_token_hint": id_token_hint, "client_id": client_id, "post_logout_redirect_uri": post_logout_redirect_uri, "state": state, } form = self.get_form(self.get_form_class()) kwargs["form"] = form if application: kwargs["application"] = application return self.render_to_response(self.get_context_data(**kwargs)) def form_valid(self, form): id_token_hint = form.cleaned_data.get("id_token_hint") client_id = form.cleaned_data.get("client_id") post_logout_redirect_uri = form.cleaned_data.get("post_logout_redirect_uri") state = form.cleaned_data.get("state") try: application, token_user = self.validate_logout_request( id_token_hint=id_token_hint, client_id=client_id, post_logout_redirect_uri=post_logout_redirect_uri, ) if not self.must_prompt(token_user) or form.cleaned_data.get("allow"): return self.do_logout(application, post_logout_redirect_uri, state, token_user) else: raise LogoutDenied() except OIDCError as error: return self.error_response(error) def validate_post_logout_redirect_uri(self, application, post_logout_redirect_uri): """ Validate the OIDC RP-Initiated Logout Request post_logout_redirect_uri parameter """ if not post_logout_redirect_uri: return if not application: raise InvalidOIDCClientError() scheme = urlparse(post_logout_redirect_uri)[0] if not scheme: raise InvalidOIDCRedirectURIError("A Scheme is required for the redirect URI.") if oauth2_settings.OIDC_RP_INITIATED_LOGOUT_STRICT_REDIRECT_URIS and ( scheme == "http" and application.client_type != "confidential" ): raise InvalidOIDCRedirectURIError("http is only allowed with confidential clients.") if scheme not in application.get_allowed_schemes(): raise InvalidOIDCRedirectURIError(f'Redirect to scheme "{scheme}" is not permitted.') if not application.post_logout_redirect_uri_allowed(post_logout_redirect_uri): raise InvalidOIDCRedirectURIError("This client does not have this redirect uri registered.") def 
validate_logout_request_user(self, id_token_hint, client_id): """ Validate the an OIDC RP-Initiated Logout Request user """ if not id_token_hint: return # Only basic validation has been done on the IDToken at this point. id_token, claims = _load_id_token(id_token_hint) if not id_token or not _validate_claims(self.request, claims): raise InvalidIDTokenError() # If both id_token_hint and client_id are given it must be verified that they match. if client_id: if id_token.application.client_id != client_id: raise ClientIdMissmatch() return id_token def get_request_application(self, id_token, client_id): if client_id: return get_application_model().objects.get(client_id=client_id) if id_token: return id_token.application def validate_logout_request(self, id_token_hint, client_id, post_logout_redirect_uri): """ Validate an OIDC RP-Initiated Logout Request. `(application, token_user)` is returned. If it is set, `application` is the Application that is requesting the logout. `token_user` is the id_token user, which will used to revoke the tokens if found. The `id_token_hint` will be validated if given. If both `client_id` and `id_token_hint` are given they will be validated against each other. """ id_token = self.validate_logout_request_user(id_token_hint, client_id) application = self.get_request_application(id_token, client_id) self.validate_post_logout_redirect_uri(application, post_logout_redirect_uri) return application, id_token.user if id_token else None def must_prompt(self, token_user): """ per: https://openid.net/specs/openid-connect-rpinitiated-1_0.html > At the Logout Endpoint, the OP SHOULD ask the End-User whether to log > out of the OP as well. Furthermore, the OP MUST ask the End-User this > question if an id_token_hint was not provided or if the supplied ID > Token does not belong to the current OP session with the RP and/or > currently logged in End-User. """ if not self.request.user.is_authenticated: """ > the OP MUST ask ask the End-User whether to log out of the OP as If the user does not have an active session with the OP, they cannot end their OP session, so there is nothing to prompt for. This occurs in cases where the user has logged out of the OP via another channel such as the OP's own logout page, session timeout or another RP's logout page. """ return False if oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ALWAYS_PROMPT: """ > At the Logout Endpoint, the OP SHOULD ask the End-User whether to > log out of the OP as well The admin has configured the OP to always prompt the userfor logout per the SHOULD recommendation. """ return True if token_user is None: """ > the OP MUST ask ask the End-User whether to log out of the OP as > well if the supplied ID Token does not belong to the current OP > session with the RP. token_user will only be populated if an ID token was found for the RP (Application) that is requesting the logout. If token_user is not then we must prompt the user. """ return True if token_user != self.request.user: """ > the OP MUST ask ask the End-User whether to log out of the OP as > well if the supplied ID Token does not belong to the logged in > End-User. is_authenticated indicates that there is a logged in user and was tested in the first condition. token_user != self.request.user indicates that the token does not belong to the logged in user, Therefore we need to prompt the user. 
""" return True """ We didn't find a reason to prompt the user """ return False def do_logout(self, application=None, post_logout_redirect_uri=None, state=None, token_user=None): user = token_user or self.request.user # Delete Access Tokens if a user was found if oauth2_settings.OIDC_RP_INITIATED_LOGOUT_DELETE_TOKENS and not isinstance(user, AnonymousUser): AccessToken = get_access_token_model() RefreshToken = get_refresh_token_model() access_tokens_to_delete = AccessToken.objects.filter( user=user, application__client_type__in=self.token_deletion_client_types, application__authorization_grant_type__in=self.token_deletion_grant_types, ) # This queryset has to be evaluated eagerly. The queryset would be empty with lazy evaluation # because `access_tokens_to_delete` represents an empty queryset once `refresh_tokens_to_delete` # is evaluated as all AccessTokens have been deleted. refresh_tokens_to_delete = list( RefreshToken.objects.filter(access_token__in=access_tokens_to_delete) ) for token in access_tokens_to_delete: # Delete the token and its corresponding refresh and IDTokens. if token.id_token: token.id_token.revoke() token.revoke() for refresh_token in refresh_tokens_to_delete: refresh_token.revoke() # Logout in Django logout(self.request) # Redirect if post_logout_redirect_uri: if state: return OAuth2ResponseRedirect( add_params_to_uri(post_logout_redirect_uri, [("state", state)]), application.get_allowed_schemes(), ) else: return OAuth2ResponseRedirect(post_logout_redirect_uri, application.get_allowed_schemes()) else: return OAuth2ResponseRedirect( self.request.build_absolute_uri("/"), oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES, ) def error_response(self, error): error_response = {"error": error} return self.render_to_response(error_response, status=error.status_code)
RPInitiatedLogoutView
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dlp.py
{ "start": 20003, "end": 20723 }
class ____:
    @mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
    def test_list_info_types(self, mock_hook):
        mock_hook.return_value.list_info_types.return_value = ListInfoTypesResponse()
        operator = CloudDLPListInfoTypesOperator(task_id="id")
        operator.execute(context=mock.MagicMock())
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        mock_hook.return_value.list_info_types.assert_called_once_with(
            language_code=None,
            results_filter=None,
            retry=DEFAULT,
            timeout=None,
            metadata=(),
        )
TestCloudDLPListInfoTypesOperator
python
pyinstaller__pyinstaller
PyInstaller/building/datastruct.py
{ "start": 7648, "end": 17435 }
class ____(Target, list): """ This class is a way of creating a TOC (Table of Contents) list that describes some or all of the files within a directory. """ def __init__(self, root=None, prefix=None, excludes=None, typecode='DATA'): """ root The root of the tree (on the build system). prefix Optional prefix to the names of the target system. excludes A list of names to exclude. Two forms are allowed: name Files with this basename will be excluded (do not include the path). *.ext Any file with the given extension will be excluded. typecode The typecode to be used for all files found in this tree. See the TOC class for for information about the typcodes. """ Target.__init__(self) list.__init__(self) self.root = root self.prefix = prefix self.excludes = excludes self.typecode = typecode if excludes is None: self.excludes = [] self.__postinit__() _GUTS = ( # input parameters ('root', _check_guts_eq), ('prefix', _check_guts_eq), ('excludes', _check_guts_eq), ('typecode', _check_guts_eq), ('data', None), # tested below # no calculated/analysed values ) def _check_guts(self, data, last_build): if Target._check_guts(self, data, last_build): return True # Walk the collected directories as check if they have been changed - which means files have been added or # removed. There is no need to check for the files, since `Tree` is only about the directory contents (which is # the list of files). stack = [data['root']] while stack: d = stack.pop() if misc.mtime(d) > last_build: logger.info("Building %s because directory %s changed", self.tocbasename, d) return True for nm in os.listdir(d): path = os.path.join(d, nm) if os.path.isdir(path): stack.append(path) self[:] = data['data'] # collected files return False def _save_guts(self): # Use the attribute `data` to save the list self.data = self super()._save_guts() del self.data def assemble(self): logger.info("Building Tree %s", self.tocbasename) stack = [(self.root, self.prefix)] excludes = set() xexcludes = set() for name in self.excludes: if name.startswith('*'): xexcludes.add(name[1:]) else: excludes.add(name) result = [] while stack: dir, prefix = stack.pop() for filename in os.listdir(dir): if filename in excludes: continue ext = os.path.splitext(filename)[1] if ext in xexcludes: continue fullfilename = os.path.join(dir, filename) if prefix: resfilename = os.path.join(prefix, filename) else: resfilename = filename if os.path.isdir(fullfilename): stack.append((fullfilename, resfilename)) else: result.append((resfilename, fullfilename, self.typecode)) self[:] = result def normalize_toc(toc): # Default priority: 0 _TOC_TYPE_PRIORITIES = { # DEPENDENCY entries need to replace original entries, so they need the highest priority. 'DEPENDENCY': 3, # SYMLINK entries have higher priority than other regular entries 'SYMLINK': 2, # BINARY/EXTENSION entries undergo additional processing, so give them precedence over DATA and other entries. 'BINARY': 1, 'EXTENSION': 1, } def _type_case_normalization_fcn(typecode): # Case-normalize all entries except OPTION. return typecode not in { "OPTION", } return _normalize_toc(toc, _TOC_TYPE_PRIORITIES, _type_case_normalization_fcn) def normalize_pyz_toc(toc): # Default priority: 0 _TOC_TYPE_PRIORITIES = { # Ensure that entries with higher optimization level take precedence. 
'PYMODULE-2': 2, 'PYMODULE-1': 1, 'PYMODULE': 0, } return _normalize_toc(toc, _TOC_TYPE_PRIORITIES) def _normalize_toc(toc, toc_type_priorities, type_case_normalization_fcn=lambda typecode: False): options_toc = [] tmp_toc = dict() for dest_name, src_name, typecode in toc: # Exempt OPTION entries from de-duplication processing. Some options might allow being specified multiple times. if typecode == 'OPTION': options_toc.append(((dest_name, src_name, typecode))) continue # Always sanitize the dest_name with `os.path.normpath` to remove any local loops with parent directory path # components. `pathlib` does not seem to offer equivalent functionality. dest_name = os.path.normpath(dest_name) # Normalize the destination name for uniqueness. Use `pathlib.PurePath` to ensure that keys are both # case-normalized (on OSes where applicable) and directory-separator normalized (just in case). if type_case_normalization_fcn(typecode): entry_key = pathlib.PurePath(dest_name) else: entry_key = dest_name existing_entry = tmp_toc.get(entry_key) if existing_entry is None: # Entry does not exist - insert tmp_toc[entry_key] = (dest_name, src_name, typecode) else: # Entry already exists - replace if its typecode has higher priority _, _, existing_typecode = existing_entry if toc_type_priorities.get(typecode, 0) > toc_type_priorities.get(existing_typecode, 0): tmp_toc[entry_key] = (dest_name, src_name, typecode) # Return the items as list. The order matches the original order due to python dict maintaining the insertion order. # The exception are OPTION entries, which are now placed at the beginning of the TOC. return options_toc + list(tmp_toc.values()) def toc_process_symbolic_links(toc): """ Process TOC entries and replace entries whose files are symbolic links with SYMLINK entries (provided original file is also being collected). """ # Dictionary of all destination names, for a fast look-up. all_dest_files = set([dest_name for dest_name, src_name, typecode in toc]) # Process the TOC to create SYMLINK entries new_toc = [] for entry in toc: dest_name, src_name, typecode = entry # Skip entries that are already symbolic links if typecode == 'SYMLINK': new_toc.append(entry) continue # Skip entries without valid source name (e.g., OPTION) if not src_name: new_toc.append(entry) continue # Source path is not a symbolic link (i.e., it is a regular file or directory) if not os.path.islink(src_name): new_toc.append(entry) continue # Try preserving the symbolic link, under strict relative-relationship-preservation check symlink_entry = _try_preserving_symbolic_link(dest_name, src_name, all_dest_files) if symlink_entry: new_toc.append(symlink_entry) else: new_toc.append(entry) return new_toc def _try_preserving_symbolic_link(dest_name, src_name, all_dest_files): seen_src_files = set() # Set initial values for the loop ref_src_file = src_name ref_dest_file = dest_name while True: # Guard against cyclic links... if ref_src_file in seen_src_files: break seen_src_files.add(ref_src_file) # Stop when referenced source file is not a symbolic link anymore. if not os.path.islink(ref_src_file): break # Read the symbolic link's target, but do not fully resolve it using os.path.realpath(), because there might be # other symbolic links involved as well (for example, /lib64 -> /usr/lib64 whereas we are processing # /lib64/liba.so -> /lib64/liba.so.1) symlink_target = os.readlink(ref_src_file) if os.path.isabs(symlink_target): break # We support only relative symbolic links. 
ref_dest_file = os.path.join(os.path.dirname(ref_dest_file), symlink_target) ref_dest_file = os.path.normpath(ref_dest_file) # remove any '..' ref_src_file = os.path.join(os.path.dirname(ref_src_file), symlink_target) ref_src_file = os.path.normpath(ref_src_file) # remove any '..' # Check if referenced destination file is valid (i.e., we are collecting a file under referenced name). if ref_dest_file in all_dest_files: # Sanity check: original source name and current referenced source name must, after complete resolution, # point to the same file. if os.path.realpath(src_name) == os.path.realpath(ref_src_file): # Compute relative link for the destination file (might be modified, if we went over non-collected # intermediate links). rel_link = os.path.relpath(ref_dest_file, os.path.dirname(dest_name)) return dest_name, rel_link, 'SYMLINK' # If referenced destination is not valid, do another iteration in case we are dealing with chained links and we # are not collecting an intermediate link... return None
Tree
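The de-duplication in `_normalize_toc` above boils down to one rule: a later entry replaces an earlier one for the same destination only when its typecode has strictly higher priority. A stripped-down sketch of just that rule (priorities and the sample TOC are illustrative; OPTION handling and case normalization are omitted):

PRIORITIES = {"DEPENDENCY": 3, "SYMLINK": 2, "BINARY": 1, "EXTENSION": 1}


def dedupe_toc(toc):
    result = {}
    for dest_name, src_name, typecode in toc:
        existing = result.get(dest_name)
        if existing is None or (
            PRIORITIES.get(typecode, 0) > PRIORITIES.get(existing[2], 0)
        ):
            result[dest_name] = (dest_name, src_name, typecode)
    return list(result.values())


toc = [
    ("libfoo.so", "/build/libfoo.so", "DATA"),
    ("libfoo.so", "/usr/lib/libfoo.so", "BINARY"),    # replaces the DATA entry
    ("libfoo.so", "/build/other/libfoo.so", "DATA"),  # ignored: not higher priority
]
print(dedupe_toc(toc))  # [('libfoo.so', '/usr/lib/libfoo.so', 'BINARY')]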
python
kubernetes-client__python
kubernetes/client/models/v1_ceph_fs_volume_source.py
{ "start": 383, "end": 8734 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'monitors': 'list[str]', 'path': 'str', 'read_only': 'bool', 'secret_file': 'str', 'secret_ref': 'V1LocalObjectReference', 'user': 'str' } attribute_map = { 'monitors': 'monitors', 'path': 'path', 'read_only': 'readOnly', 'secret_file': 'secretFile', 'secret_ref': 'secretRef', 'user': 'user' } def __init__(self, monitors=None, path=None, read_only=None, secret_file=None, secret_ref=None, user=None, local_vars_configuration=None): # noqa: E501 """V1CephFSVolumeSource - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._monitors = None self._path = None self._read_only = None self._secret_file = None self._secret_ref = None self._user = None self.discriminator = None self.monitors = monitors if path is not None: self.path = path if read_only is not None: self.read_only = read_only if secret_file is not None: self.secret_file = secret_file if secret_ref is not None: self.secret_ref = secret_ref if user is not None: self.user = user @property def monitors(self): """Gets the monitors of this V1CephFSVolumeSource. # noqa: E501 monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501 :return: The monitors of this V1CephFSVolumeSource. # noqa: E501 :rtype: list[str] """ return self._monitors @monitors.setter def monitors(self, monitors): """Sets the monitors of this V1CephFSVolumeSource. monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501 :param monitors: The monitors of this V1CephFSVolumeSource. # noqa: E501 :type: list[str] """ if self.local_vars_configuration.client_side_validation and monitors is None: # noqa: E501 raise ValueError("Invalid value for `monitors`, must not be `None`") # noqa: E501 self._monitors = monitors @property def path(self): """Gets the path of this V1CephFSVolumeSource. # noqa: E501 path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / # noqa: E501 :return: The path of this V1CephFSVolumeSource. # noqa: E501 :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this V1CephFSVolumeSource. path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / # noqa: E501 :param path: The path of this V1CephFSVolumeSource. # noqa: E501 :type: str """ self._path = path @property def read_only(self): """Gets the read_only of this V1CephFSVolumeSource. # noqa: E501 readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501 :return: The read_only of this V1CephFSVolumeSource. # noqa: E501 :rtype: bool """ return self._read_only @read_only.setter def read_only(self, read_only): """Sets the read_only of this V1CephFSVolumeSource. readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501 :param read_only: The read_only of this V1CephFSVolumeSource. # noqa: E501 :type: bool """ self._read_only = read_only @property def secret_file(self): """Gets the secret_file of this V1CephFSVolumeSource. # noqa: E501 secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501 :return: The secret_file of this V1CephFSVolumeSource. # noqa: E501 :rtype: str """ return self._secret_file @secret_file.setter def secret_file(self, secret_file): """Sets the secret_file of this V1CephFSVolumeSource. secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501 :param secret_file: The secret_file of this V1CephFSVolumeSource. # noqa: E501 :type: str """ self._secret_file = secret_file @property def secret_ref(self): """Gets the secret_ref of this V1CephFSVolumeSource. # noqa: E501 :return: The secret_ref of this V1CephFSVolumeSource. # noqa: E501 :rtype: V1LocalObjectReference """ return self._secret_ref @secret_ref.setter def secret_ref(self, secret_ref): """Sets the secret_ref of this V1CephFSVolumeSource. :param secret_ref: The secret_ref of this V1CephFSVolumeSource. # noqa: E501 :type: V1LocalObjectReference """ self._secret_ref = secret_ref @property def user(self): """Gets the user of this V1CephFSVolumeSource. # noqa: E501 user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501 :return: The user of this V1CephFSVolumeSource. # noqa: E501 :rtype: str """ return self._user @user.setter def user(self, user): """Sets the user of this V1CephFSVolumeSource. user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501 :param user: The user of this V1CephFSVolumeSource. # noqa: E501 :type: str """ self._user = user def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1CephFSVolumeSource): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1CephFSVolumeSource): return True return self.to_dict() != other.to_dict()
V1CephFSVolumeSource
python
Pylons__pyramid
tests/test_renderers.py
{ "start": 15950, "end": 16989 }
class ____(unittest.TestCase):
    def setUp(self):
        self.config = cleanUp()

    def tearDown(self):
        cleanUp()

    def _makeOne(self, *arg, **kw):
        from pyramid.renderers import NullRendererHelper

        return NullRendererHelper(*arg, **kw)

    def test_instance_conforms(self):
        from zope.interface.verify import verifyObject
        from pyramid.interfaces import IRendererInfo

        helper = self._makeOne()
        verifyObject(IRendererInfo, helper)

    def test_render_view(self):
        helper = self._makeOne()
        self.assertEqual(helper.render_view(None, True, None, None), True)

    def test_render(self):
        helper = self._makeOne()
        self.assertEqual(helper.render(True, None, None), True)

    def test_render_to_response(self):
        helper = self._makeOne()
        self.assertEqual(helper.render_to_response(True, None, None), True)

    def test_clone(self):
        helper = self._makeOne()
        self.assertTrue(helper.clone() is helper)
TestNullRendererHelper
python
PrefectHQ__prefect
tests/test_task_worker.py
{ "start": 6540, "end": 8841 }
class ____: async def test_task_run_via_task_worker_respects_retry_policy( self, prefect_client, events_pipeline ): count = 0 @task(retries=1, persist_result=True) def task_with_retry(): nonlocal count if count == 0: count += 1 raise ValueError("maybe next time") count += 1 return count task_worker = TaskWorker(task_with_retry) task_run_future = task_with_retry.apply_async() task_run = await prefect_client.read_task_run(task_run_future.task_run_id) await task_worker.execute_task_run(task_run) await events_pipeline.process_events() updated_task_run = await prefect_client.read_task_run( task_run_future.task_run_id ) assert updated_task_run.state.is_completed() assert await updated_task_run.state.result() == 2 assert count == 2 @pytest.mark.parametrize( "should_retry", [lambda task, task_run, state: True, lambda task, task_run, state: False], ids=["will_retry", "wont_retry"], ) async def test_task_run_via_task_worker_respects_retry_condition_fn( self, should_retry, prefect_client, events_pipeline ): count = 0 will_retry = should_retry(None, None, None) expected_count = 2 if will_retry else 1 expected_state = "COMPLETED" if will_retry else "FAILED" @task(retries=1, retry_condition_fn=should_retry) def task_with_retry_condition_fn(): nonlocal count if count == 0: count += 1 raise RuntimeError("doh") count += 1 return count task_worker = TaskWorker(task_with_retry_condition_fn) task_run_future = task_with_retry_condition_fn.apply_async() task_run = await prefect_client.read_task_run(task_run_future.task_run_id) await task_worker.execute_task_run(task_run) await events_pipeline.process_events() updated_task_run = await prefect_client.read_task_run( task_run_future.task_run_id ) assert updated_task_run.state.type == expected_state assert count == expected_count
TestTaskWorkerTaskRunRetries
python
docker__docker-py
tests/unit/errors_test.py
{ "start": 301, "end": 3874 }
class ____(unittest.TestCase): def test_api_error_is_caught_by_dockerexception(self): try: raise APIError("this should be caught by DockerException") except DockerException: pass def test_status_code_200(self): """The status_code property is present with 200 response.""" resp = requests.Response() resp.status_code = 200 err = APIError('', response=resp) assert err.status_code == 200 def test_status_code_400(self): """The status_code property is present with 400 response.""" resp = requests.Response() resp.status_code = 400 err = APIError('', response=resp) assert err.status_code == 400 def test_status_code_500(self): """The status_code property is present with 500 response.""" resp = requests.Response() resp.status_code = 500 err = APIError('', response=resp) assert err.status_code == 500 def test_is_server_error_200(self): """Report not server error on 200 response.""" resp = requests.Response() resp.status_code = 200 err = APIError('', response=resp) assert err.is_server_error() is False def test_is_server_error_300(self): """Report not server error on 300 response.""" resp = requests.Response() resp.status_code = 300 err = APIError('', response=resp) assert err.is_server_error() is False def test_is_server_error_400(self): """Report not server error on 400 response.""" resp = requests.Response() resp.status_code = 400 err = APIError('', response=resp) assert err.is_server_error() is False def test_is_server_error_500(self): """Report server error on 500 response.""" resp = requests.Response() resp.status_code = 500 err = APIError('', response=resp) assert err.is_server_error() is True def test_is_client_error_500(self): """Report not client error on 500 response.""" resp = requests.Response() resp.status_code = 500 err = APIError('', response=resp) assert err.is_client_error() is False def test_is_client_error_400(self): """Report client error on 400 response.""" resp = requests.Response() resp.status_code = 400 err = APIError('', response=resp) assert err.is_client_error() is True def test_is_error_300(self): """Report no error on 300 response.""" resp = requests.Response() resp.status_code = 300 err = APIError('', response=resp) assert err.is_error() is False def test_is_error_400(self): """Report error on 400 response.""" resp = requests.Response() resp.status_code = 400 err = APIError('', response=resp) assert err.is_error() is True def test_is_error_500(self): """Report error on 500 response.""" resp = requests.Response() resp.status_code = 500 err = APIError('', response=resp) assert err.is_error() is True def test_create_error_from_exception(self): resp = requests.Response() resp.status_code = 500 err = APIError('') try: resp.raise_for_status() except requests.exceptions.HTTPError as e: try: create_api_error_from_http_exception(e) except APIError as e: err = e assert err.is_server_error() is True
APIErrorTest
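The behaviour these tests pin down is plain status-code range checking. A self-contained sketch of the same contract (the class name and structure are illustrative, not docker-py's actual implementation, which reads the code from a `requests` response):

class StatusErrorSketch(Exception):
    def __init__(self, status_code: int) -> None:
        super().__init__(f"HTTP {status_code}")
        self.status_code = status_code

    def is_client_error(self) -> bool:
        return 400 <= self.status_code < 500

    def is_server_error(self) -> bool:
        return 500 <= self.status_code < 600

    def is_error(self) -> bool:
        return self.is_client_error() or self.is_server_error()


assert StatusErrorSketch(400).is_client_error()
assert StatusErrorSketch(500).is_server_error()
assert not StatusErrorSketch(300).is_error()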
python
etianen__django-reversion
tests/test_app/tests/test_admin.py
{ "start": 1984, "end": 2322 }
class ____(LoginMixin, AdminMixin, TestBase):
    def testChangelistView(self):
        obj = TestModelParent.objects.create()
        response = self.client.get(resolve_url("admin:test_app_testmodelparent_changelist"))
        self.assertContains(response, resolve_url("admin:test_app_testmodelparent_change", obj.pk))
AdminChangelistView
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/unnecessary_dunder_call.py
{ "start": 2621, "end": 3390 }
class ____:
    def __init__(self, stuff: Any) -> None:
        super().__init__()  # OK
        super().__class__(stuff=(1, 2, 3))  # OK

    def __getattribute__(self, item):
        return object.__getattribute__(self, item)  # OK

    def do_thing(self, item):
        return object.__getattribute__(self, item)  # PLC2801

    def use_descriptor(self, item):
        item.__get__(self, type(self))  # OK
        item.__set__(self, 1)  # OK
        item.__delete__(self)  # OK


blah = lambda: {"a": 1}.__delitem__("a")  # OK

blah = dict[{"a": 1}.__delitem__("a")]  # OK

"abc".__contains__("a")

# https://github.com/astral-sh/ruff/issues/14597
assert "abc".__str__() == "abc"

# https://github.com/astral-sh/ruff/issues/18813
three = 1 if 1 else(3.0).__str__()
Thing
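For reference, the idiomatic spellings PLC2801 points toward for the flagged patterns above: `do_thing` becomes a `getattr()` call, while the explicit dunder call inside `__getattribute__` itself stays (it is the standard recursion-safe form, which is why the fixture marks it OK). A plain-Python sketch, no Ruff required to run:

class ThingFixed:
    def do_thing(self, item):
        # getattr() instead of object.__getattribute__(self, item)
        return getattr(self, item)


# Operators and builtins instead of explicit dunder calls:
assert "a" in "abc"          # rather than "abc".__contains__("a")
assert str("abc") == "abc"   # rather than "abc".__str__()
d = {"a": 1}
del d["a"]                   # rather than d.__delitem__("a")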
python
langchain-ai__langchain
libs/core/langchain_core/prompts/chat.py
{ "start": 22290, "end": 22479 }
class ____(_StringImageMessagePromptTemplate):
    """AI message prompt template. This is a message sent from the AI."""

    _msg_class: type[BaseMessage] = AIMessage
AIMessagePromptTemplate
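A hypothetical usage sketch, assuming the usual `from_template`/`format` constructors exposed by langchain-core message prompt templates (exact import paths and return types can vary across versions, so treat this as illustrative):

from langchain_core.prompts import AIMessagePromptTemplate

prompt = AIMessagePromptTemplate.from_template("Earlier I said: {summary}")
message = prompt.format(summary="the build is green")
print(type(message).__name__, message.content)
# AIMessage Earlier I said: the build is green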
python
pytorch__pytorch
torch/_export/pass_infra/node_metadata.py
{ "start": 176, "end": 771 }
class ____:
    def __init__(self, data: dict[str, Any]) -> None:
        self.data: dict[str, Any] = data.copy()

    def __getitem__(self, key: str) -> NodeMetadataValue:
        return self.data[key]

    def __setitem__(self, key: str, value: NodeMetadataValue) -> NodeMetadataValue:
        if key in PROTECTED_KEYS:
            raise RuntimeError(f"Could not override node key: {key}")
        self.data[key] = value

    def __contains__(self, key: str) -> bool:
        return key in self.data

    def copy(self) -> "NodeMetadata":
        return NodeMetadata(self.data.copy())
NodeMetadata
python
sphinx-doc__sphinx
tests/roots/test-ext-autodoc/target/inherited_annotations.py
{ "start": 234, "end": 310 }
class ____(HasTypeAnnotatedMember):
    a = 1
    """Local"""
NoTypeAnnotation
python
django__django
tests/admin_views/models.py
{ "start": 23020, "end": 23144 }
class ____(models.Model):
    """
    Model whose change_view is disabled in admin
    Refs #20640.
    """
UnchangeableObject
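One standard way to disable the change view for such a model is the `has_change_permission` hook on `ModelAdmin`. This is a hedged sketch of that approach; the actual test app may wire it up differently.

from django.contrib import admin


class UnchangeableObjectAdmin(admin.ModelAdmin):
    def has_change_permission(self, request, obj=None):
        # Returning False removes the change view (and edit links) entirely.
        return False


# admin.site.register(UnchangeableObject, UnchangeableObjectAdmin)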
python
getsentry__sentry
src/sentry/seer/explorer/custom_tool_utils.py
{ "start": 657, "end": 762 }
class ____(BaseModel):
    """Simple integer type."""

    kind: Literal["integer"] = "integer"
IntegerType
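The `Literal` discriminator makes a type like this usable in a Pydantic tagged union. A sketch with a made-up sibling type and container model; only `IntegerType` itself comes from the snippet above.

from typing import Literal, Union

from pydantic import BaseModel, Field


class IntegerType(BaseModel):
    kind: Literal["integer"] = "integer"


class StringType(BaseModel):  # hypothetical sibling, for illustration only
    kind: Literal["string"] = "string"


class ToolParameter(BaseModel):  # hypothetical container, for illustration only
    name: str
    type: Union[IntegerType, StringType] = Field(discriminator="kind")


param = ToolParameter.model_validate({"name": "limit", "type": {"kind": "integer"}})
assert isinstance(param.type, IntegerType)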
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/database.py
{ "start": 41324, "end": 47542 }
class ____(ExampleDatabase): """A wrapper which defers writes on the given database to a background thread. Calls to :meth:`~hypothesis.database.ExampleDatabase.fetch` wait for any enqueued writes to finish before fetching from the database. """ def __init__(self, db: ExampleDatabase) -> None: super().__init__() self._db = db self._queue: Queue[tuple[str, tuple[bytes, ...]]] = Queue() self._thread: Thread | None = None def _ensure_thread(self): if self._thread is None: self._thread = Thread(target=self._worker, daemon=True) self._thread.start() # avoid an unbounded timeout during gc. 0.1 should be plenty for most # use cases. weakref.finalize(self, self._join, 0.1) def __repr__(self) -> str: return f"BackgroundWriteDatabase({self._db!r})" def __eq__(self, other: object) -> bool: return isinstance(other, BackgroundWriteDatabase) and self._db == other._db def _worker(self) -> None: while True: method, args = self._queue.get() getattr(self._db, method)(*args) self._queue.task_done() def _join(self, timeout: float | None = None) -> None: # copy of Queue.join with a timeout. https://bugs.python.org/issue9634 with self._queue.all_tasks_done: while self._queue.unfinished_tasks: self._queue.all_tasks_done.wait(timeout) def fetch(self, key: bytes) -> Iterable[bytes]: self._join() return self._db.fetch(key) def save(self, key: bytes, value: bytes) -> None: self._ensure_thread() self._queue.put(("save", (key, value))) def delete(self, key: bytes, value: bytes) -> None: self._ensure_thread() self._queue.put(("delete", (key, value))) def move(self, src: bytes, dest: bytes, value: bytes) -> None: self._ensure_thread() self._queue.put(("move", (src, dest, value))) def _start_listening(self) -> None: self._db.add_listener(self._broadcast_change) def _stop_listening(self) -> None: self._db.remove_listener(self._broadcast_change) def _pack_uleb128(value: int) -> bytes: """ Serialize an integer into variable-length bytes. For each byte, the first 7 bits represent (part of) the integer, while the last bit indicates whether the integer continues into the next byte. https://en.wikipedia.org/wiki/LEB128 """ parts = bytearray() assert value >= 0 while True: # chop off 7 bits byte = value & ((1 << 7) - 1) value >>= 7 # set the continuation bit if we have more left if value: byte |= 1 << 7 parts.append(byte) if not value: break return bytes(parts) def _unpack_uleb128(buffer: bytes) -> tuple[int, int]: """ Inverts _pack_uleb128, and also returns the index at which at which we stopped reading. """ value = 0 for i, byte in enumerate(buffer): n = byte & ((1 << 7) - 1) value |= n << (i * 7) if not byte >> 7: break return (i + 1, value) def choices_to_bytes(choices: Iterable[ChoiceT], /) -> bytes: """Serialize a list of choices to a bytestring. Inverts choices_from_bytes.""" # We use a custom serialization format for this, which might seem crazy - but our # data is a flat sequence of elements, and standard tools like protobuf or msgpack # don't deal well with e.g. nonstandard bit-pattern-NaNs, or invalid-utf8 unicode. # # We simply encode each element with a metadata byte, if needed a uint16 size, and # then the payload bytes. For booleans, the payload is inlined into the metadata. parts = [] for choice in choices: if isinstance(choice, bool): # `000_0000v` - tag zero, low bit payload. parts.append(b"\1" if choice else b"\0") continue # `tag_ssss [uint16 size?] 
[payload]` if isinstance(choice, float): tag = 1 << 5 choice = struct.pack("!d", choice) elif isinstance(choice, int): tag = 2 << 5 choice = choice.to_bytes(1 + choice.bit_length() // 8, "big", signed=True) elif isinstance(choice, bytes): tag = 3 << 5 else: assert isinstance(choice, str) tag = 4 << 5 choice = choice.encode(errors="surrogatepass") size = len(choice) if size < 0b11111: parts.append((tag | size).to_bytes(1, "big")) else: parts.append((tag | 0b11111).to_bytes(1, "big")) parts.append(_pack_uleb128(size)) parts.append(choice) return b"".join(parts) def _choices_from_bytes(buffer: bytes, /) -> tuple[ChoiceT, ...]: # See above for an explanation of the format. parts: list[ChoiceT] = [] idx = 0 while idx < len(buffer): tag = buffer[idx] >> 5 size = buffer[idx] & 0b11111 idx += 1 if tag == 0: parts.append(bool(size)) continue if size == 0b11111: (offset, size) = _unpack_uleb128(buffer[idx:]) idx += offset chunk = buffer[idx : idx + size] idx += size if tag == 1: assert size == 8, "expected float64" parts.extend(struct.unpack("!d", chunk)) elif tag == 2: parts.append(int.from_bytes(chunk, "big", signed=True)) elif tag == 3: parts.append(chunk) else: assert tag == 4 parts.append(chunk.decode(errors="surrogatepass")) return tuple(parts) def choices_from_bytes(buffer: bytes, /) -> tuple[ChoiceT, ...] | None: """ Deserialize a bytestring to a tuple of choices. Inverts choices_to_bytes. Returns None if the given bytestring is not a valid serialization of choice sequences. """ try: return _choices_from_bytes(buffer) except Exception: # deserialization error, eg because our format changed or someone put junk # data in the db. return None
BackgroundWriteDatabase
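A worked example of the ULEB128 scheme that `_pack_uleb128` implements: each byte carries 7 payload bits, and the high bit signals that another byte follows. This standalone copy of the packing loop reproduces the classic 300 -> 0xAC 0x02 encoding.

def pack_uleb128(value: int) -> bytes:
    out = bytearray()
    while True:
        byte = value & 0x7F        # low 7 bits
        value >>= 7
        if value:
            byte |= 0x80           # continuation bit: more bytes follow
        out.append(byte)
        if not value:
            return bytes(out)


# 300 = 0b10_0101100 -> [0b1_0101100, 0b0_0000010] = b"\xac\x02"
assert pack_uleb128(300) == b"\xac\x02"
assert pack_uleb128(0) == b"\x00"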
python
huggingface__transformers
src/transformers/models/upernet/modeling_upernet.py
{ "start": 2212, "end": 2807 }
class ____(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
UperNetPyramidPoolingBlock
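Shape-wise, one pyramid-pooling branch just pools to a fixed spatial size and projects the channels. A standalone sketch using a plain 1x1 Conv2d in place of `UperNetConvModule` (which additionally applies batch norm and an activation):

import torch
from torch import nn

pool_scale, in_channels, channels = 2, 8, 4
branch = nn.Sequential(
    nn.AdaptiveAvgPool2d(pool_scale),                 # -> (N, 8, 2, 2)
    nn.Conv2d(in_channels, channels, kernel_size=1),  # -> (N, 4, 2, 2)
)

x = torch.randn(1, in_channels, 32, 32)
print(branch(x).shape)  # torch.Size([1, 4, 2, 2])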
python
python-openxml__python-docx
src/docx/text/pagebreak.py
{ "start": 313, "end": 4950 }
class ____(Parented): """A page-break inserted by Word during page-layout for print or display purposes. This usually does not correspond to a "hard" page-break inserted by the document author, rather just that Word ran out of room on one page and needed to start another. The position of these can change depending on the printer and page-size, as well as margins, etc. They also will change in response to edits, but not until Word loads and saves the document. Note these are never inserted by `python-docx` because it has no rendering function. These are generally only useful for text-extraction of existing documents when `python-docx` is being used solely as a document "reader". NOTE: a rendered page-break can occur within a hyperlink; consider a multi-word hyperlink like "excellent Wikipedia article on LLMs" that happens to fall close to the end of the last line on a page such that the page breaks between "Wikipedia" and "article". In such a "page-breaks-in-hyperlink" case, THESE METHODS WILL "MOVE" THE PAGE-BREAK to occur after the hyperlink, such that the entire hyperlink appears in the paragraph returned by `.preceding_paragraph_fragment`. While this places the "tail" text of the hyperlink on the "wrong" page, it avoids having two hyperlinks each with a fragment of the actual text and pointing to the same address. """ def __init__( self, lastRenderedPageBreak: CT_LastRenderedPageBreak, parent: t.ProvidesStoryPart, ): super().__init__(parent) self._element = lastRenderedPageBreak self._lastRenderedPageBreak = lastRenderedPageBreak @property def preceding_paragraph_fragment(self) -> Paragraph | None: """A "loose" paragraph containing the content preceding this page-break. Compare `.following_paragraph_fragment` as these two are intended to be used together. This value is `None` when no content precedes this page-break. This case is common and occurs whenever a page breaks on an even paragraph boundary. Returning `None` for this case avoids "inserting" a non-existent paragraph into the content stream. Note that content can include DrawingML items like images or charts. Note the returned paragraph *is divorced from the document body*. Any changes made to it will not be reflected in the document. It is intended to provide a familiar container (`Paragraph`) to interrogate for the content preceding this page-break in the paragraph in which it occured. Contains the entire hyperlink when this break occurs within a hyperlink. """ if self._lastRenderedPageBreak.precedes_all_content: return None from docx.text.paragraph import Paragraph return Paragraph(self._lastRenderedPageBreak.preceding_fragment_p, self._parent) @property def following_paragraph_fragment(self) -> Paragraph | None: """A "loose" paragraph containing the content following this page-break. HAS POTENTIALLY SURPRISING BEHAVIORS so read carefully to be sure this is what you want. This is primarily targeted toward text-extraction use-cases for which precisely associating text with the page it occurs on is important. Compare `.preceding_paragraph_fragment` as these two are intended to be used together. This value is `None` when no content follows this page-break. This case is unlikely to occur in practice because Word places even-paragraph-boundary page-breaks on the paragraph *following* the page-break. Still, it is possible and must be checked for. Returning `None` for this case avoids "inserting" an extra, non-existent paragraph into the content stream. 
Note that content can include DrawingML items like images or charts, not just text. The returned paragraph *is divorced from the document body*. Any changes made to it will not be reflected in the document. It is intended to provide a container (`Paragraph`) with familiar properties and methods that can be used to characterize the paragraph content following a mid-paragraph page-break. Contains no portion of the hyperlink when this break occurs within a hyperlink. """ if self._lastRenderedPageBreak.follows_all_content: return None from docx.text.paragraph import Paragraph return Paragraph(self._lastRenderedPageBreak.following_fragment_p, self._parent)
RenderedPageBreak
python
pytorch__pytorch
torch/nn/modules/sparse.py
{ "start": 250, "end": 11009 }
class ____(Module): r"""A simple lookup table that stores embeddings of a fixed dictionary and size. This module is often used to store word embeddings and retrieve them using indices. The input to the module is a list of indices, and the output is the corresponding word embeddings. Args: num_embeddings (int): size of the dictionary of embeddings embedding_dim (int): the size of each embedding vector padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated during training, i.e. it remains as a fixed "pad". For a newly constructed Embedding, the embedding vector at :attr:`padding_idx` will default to all zeros, but can be updated to another value to be used as the padding vector. max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` is renormalized to have norm :attr:`max_norm`. norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``False``. sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See Notes for more details regarding sparse gradients. Attributes: weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim) initialized from :math:`\mathcal{N}(0, 1)` Shape: - Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape containing the indices to extract - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}` .. note:: Keep in mind that only a limited number of optimizers support sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`), :class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`) .. note:: When :attr:`max_norm` is not ``None``, :class:`Embedding`'s forward method will modify the :attr:`weight` tensor in-place. Since tensors needed for gradient computations cannot be modified in-place, performing a differentiable operation on ``Embedding.weight`` before calling :class:`Embedding`'s forward method requires cloning ``Embedding.weight`` when :attr:`max_norm` is not ``None``. 
For example:: n, d, m = 3, 5, 7 embedding = nn.Embedding(n, d, max_norm=1.0) W = torch.randn((m, d), requires_grad=True) idx = torch.tensor([1, 2]) a = ( embedding.weight.clone() @ W.t() ) # weight must be cloned for this to be differentiable b = embedding(idx) @ W.t() # modifies weight in-place out = a.unsqueeze(0) + b.unsqueeze(1) loss = out.sigmoid().prod() loss.backward() Examples:: >>> # an Embedding module containing 10 tensors of size 3 >>> embedding = nn.Embedding(10, 3) >>> # a batch of 2 samples of 4 indices each >>> input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]]) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> embedding(input) tensor([[[-0.0251, -1.6902, 0.7172], [-0.6431, 0.0748, 0.6969], [ 1.4970, 1.3448, -0.9685], [-0.3677, -2.7265, -0.1685]], [[ 1.4970, 1.3448, -0.9685], [ 0.4362, -0.4004, 0.9400], [-0.6431, 0.0748, 0.6969], [ 0.9124, -2.3616, 1.1151]]]) >>> # example with padding_idx >>> embedding = nn.Embedding(10, 3, padding_idx=0) >>> input = torch.LongTensor([[0, 2, 0, 5]]) >>> embedding(input) tensor([[[ 0.0000, 0.0000, 0.0000], [ 0.1535, -2.0309, 0.9315], [ 0.0000, 0.0000, 0.0000], [-0.1655, 0.9897, 0.0635]]]) >>> # example of changing `pad` vector >>> padding_idx = 0 >>> embedding = nn.Embedding(3, 3, padding_idx=padding_idx) >>> embedding.weight Parameter containing: tensor([[ 0.0000, 0.0000, 0.0000], [-0.7895, -0.7089, -0.0364], [ 0.6778, 0.5803, 0.2678]], requires_grad=True) >>> with torch.no_grad(): ... embedding.weight[padding_idx] = torch.ones(3) >>> embedding.weight Parameter containing: tensor([[ 1.0000, 1.0000, 1.0000], [-0.7895, -0.7089, -0.0364], [ 0.6778, 0.5803, 0.2678]], requires_grad=True) """ __constants__ = [ "num_embeddings", "embedding_dim", "padding_idx", "max_norm", "norm_type", "scale_grad_by_freq", "sparse", ] num_embeddings: int embedding_dim: int padding_idx: Optional[int] max_norm: Optional[float] norm_type: float scale_grad_by_freq: bool weight: Tensor freeze: bool sparse: bool def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, _freeze: bool = False, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim if padding_idx is not None: if padding_idx > 0: assert padding_idx < self.num_embeddings, ( "Padding_idx must be within num_embeddings" ) elif padding_idx < 0: assert padding_idx >= -self.num_embeddings, ( "Padding_idx must be within num_embeddings" ) padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq if _weight is None: self.weight = Parameter( torch.empty((num_embeddings, embedding_dim), **factory_kwargs), requires_grad=not _freeze, ) self.reset_parameters() else: assert list(_weight.shape) == [ num_embeddings, embedding_dim, ], "Shape of weight does not match num_embeddings and embedding_dim" self.weight = Parameter(_weight, requires_grad=not _freeze) self.sparse = sparse def reset_parameters(self) -> None: init.normal_(self.weight) self._fill_padding_idx_with_zero() def _fill_padding_idx_with_zero(self) -> None: if self.padding_idx is not None: with torch.no_grad(): self.weight[self.padding_idx].fill_(0) def forward(self, input: Tensor) -> Tensor: return F.embedding( input, self.weight, 
self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) def extra_repr(self) -> str: s = "{num_embeddings}, {embedding_dim}" if self.padding_idx is not None: s += ", padding_idx={padding_idx}" if self.max_norm is not None: s += ", max_norm={max_norm}" if self.norm_type != 2: s += ", norm_type={norm_type}" if self.scale_grad_by_freq is not False: s += ", scale_grad_by_freq={scale_grad_by_freq}" if self.sparse is not False: s += ", sparse=True" return s.format(**self.__dict__) @classmethod def from_pretrained( cls, embeddings, freeze=True, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, ): r"""Create Embedding instance from given 2-dimensional FloatTensor. Args: embeddings (Tensor): FloatTensor containing weights for the Embedding. First dimension is being passed to Embedding as ``num_embeddings``, second as ``embedding_dim``. freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process. Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True`` padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated during training, i.e. it remains as a fixed "pad". max_norm (float, optional): See module initialization documentation. norm_type (float, optional): See module initialization documentation. Default ``2``. scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``. sparse (bool, optional): See module initialization documentation. Examples:: >>> # FloatTensor containing pretrained weights >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]]) >>> embedding = nn.Embedding.from_pretrained(weight) >>> # Get embeddings for index 1 >>> input = torch.LongTensor([1]) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> embedding(input) tensor([[ 4.0000, 5.1000, 6.3000]]) """ assert embeddings.dim() == 2, ( "Embeddings parameter is expected to be 2-dimensional" ) rows, cols = embeddings.shape embedding = cls( num_embeddings=rows, embedding_dim=cols, _weight=embeddings, _freeze=freeze, padding_idx=padding_idx, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, sparse=sparse, ) return embedding
Embedding
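A minimal, hedged usage sketch of the embedding module in the record above; it assumes only the standard torch / torch.nn imports and re-uses the padding_idx and from_pretrained behaviour already documented in the class docstring, nothing beyond it.

import torch
import torch.nn as nn

# Lookup with a padding index: rows selected by index 0 come back as the
# all-zero pad vector, as described in the docstring above.
emb = nn.Embedding(num_embeddings=10, embedding_dim=3, padding_idx=0)
idx = torch.tensor([[0, 2, 0, 5]])
out = emb(idx)                      # shape (1, 4, 3)
print(out.shape)

# Wrapping pretrained vectors so they stay fixed during training.
weight = torch.tensor([[1.0, 2.3, 3.0], [4.0, 5.1, 6.3]])
frozen = nn.Embedding.from_pretrained(weight, freeze=True)
print(frozen(torch.tensor([1])))    # tensor([[4.0000, 5.1000, 6.3000]])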
python
gevent__gevent
src/greentest/3.10/test_socket.py
{ "start": 121947, "end": 125368 }
class ____(RecvmsgIntoMixin, RecvmsgGenericTests): # Tests for recvmsg_into() which can use any socket type. def testRecvmsgIntoBadArgs(self): # Check that recvmsg_into() rejects invalid arguments. buf = bytearray(len(MSG)) self.assertRaises(TypeError, self.serv_sock.recvmsg_into) self.assertRaises(TypeError, self.serv_sock.recvmsg_into, len(MSG), 0, 0) self.assertRaises(TypeError, self.serv_sock.recvmsg_into, buf, 0, 0) self.assertRaises(TypeError, self.serv_sock.recvmsg_into, [object()], 0, 0) self.assertRaises(TypeError, self.serv_sock.recvmsg_into, [b"I'm not writable"], 0, 0) self.assertRaises(TypeError, self.serv_sock.recvmsg_into, [buf, object()], 0, 0) self.assertRaises(ValueError, self.serv_sock.recvmsg_into, [buf], -1, 0) self.assertRaises(TypeError, self.serv_sock.recvmsg_into, [buf], object(), 0) self.assertRaises(TypeError, self.serv_sock.recvmsg_into, [buf], 0, object()) nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0) self.assertEqual(nbytes, len(MSG)) self.assertEqual(buf, bytearray(MSG)) self.checkRecvmsgAddress(addr, self.cli_addr) self.assertEqual(ancdata, []) self.checkFlags(flags, eor=True) def _testRecvmsgIntoBadArgs(self): self.sendToServer(MSG) def testRecvmsgIntoGenerator(self): # Receive into buffer obtained from a generator (not a sequence). buf = bytearray(len(MSG)) nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into( (o for o in [buf])) self.assertEqual(nbytes, len(MSG)) self.assertEqual(buf, bytearray(MSG)) self.checkRecvmsgAddress(addr, self.cli_addr) self.assertEqual(ancdata, []) self.checkFlags(flags, eor=True) def _testRecvmsgIntoGenerator(self): self.sendToServer(MSG) def testRecvmsgIntoArray(self): # Receive into an array rather than the usual bytearray. buf = array.array("B", [0] * len(MSG)) nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf]) self.assertEqual(nbytes, len(MSG)) self.assertEqual(buf.tobytes(), MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.assertEqual(ancdata, []) self.checkFlags(flags, eor=True) def _testRecvmsgIntoArray(self): self.sendToServer(MSG) def testRecvmsgIntoScatter(self): # Receive into multiple buffers (scatter write). b1 = bytearray(b"----") b2 = bytearray(b"0123456789") b3 = bytearray(b"--------------") nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into( [b1, memoryview(b2)[2:9], b3]) self.assertEqual(nbytes, len(b"Mary had a little lamb")) self.assertEqual(b1, bytearray(b"Mary")) self.assertEqual(b2, bytearray(b"01 had a 9")) self.assertEqual(b3, bytearray(b"little lamb---")) self.checkRecvmsgAddress(addr, self.cli_addr) self.assertEqual(ancdata, []) self.checkFlags(flags, eor=True) def _testRecvmsgIntoScatter(self): self.sendToServer(b"Mary had a little lamb")
RecvmsgIntoTests
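A small, hedged sketch of the recvmsg_into() scatter-read pattern these tests exercise; the use of socket.socketpair() and the Unix-only availability of recvmsg_into() are assumptions of the sketch, not part of the record.

import socket

# Build a connected pair of stream sockets and send a short message
# (recvmsg_into() is only available on Unix platforms).
parent, child = socket.socketpair()
child.sendall(b"Mary had a little lamb")

b1 = bytearray(4)       # scatter target 1 -> b"Mary"
b2 = bytearray(32)      # scatter target 2 -> remainder of the message
# Return value is (nbytes, ancdata, msg_flags, address); on a stream socket
# nbytes may be less than the full message, so real code would loop.
nbytes, ancdata, flags, addr = parent.recvmsg_into([b1, b2])
print(nbytes, bytes(b1))

parent.close()
child.close()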
python
astropy__astropy
astropy/modeling/functional_models.py
{ "start": 64594, "end": 65863 }
class ____(Fittable2DModel): """ Two dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const1D Notes ----- Model formula: .. math:: f(x, y) = A """ amplitude = Parameter( default=1, description="Value of the constant function", mag=True ) linear = True @staticmethod def evaluate(x, y, amplitude): """Two dimensional Constant model function.""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False, subok=True) return x @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"amplitude": outputs_unit[self.outputs[0]]}
Const2D
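A brief, hedged sketch of evaluating the constant 2-D model from the record above; numpy and the astropy.modeling.functional_models import path (taken from this record's metadata) are assumed.

import numpy as np
from astropy.modeling.functional_models import Const2D

# Evaluate the model on a small coordinate grid; every output element
# equals the amplitude, matching f(x, y) = A from the docstring.
model = Const2D(amplitude=3.5)
x, y = np.meshgrid(np.linspace(-1.0, 1.0, 4), np.linspace(-1.0, 1.0, 4))
z = model(x, y)
print(z.shape, z[0, 0])             # (4, 4) 3.5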