| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
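Each row below pairs a Python `source` snippet, whose class name is masked as `____`, with the `target` name that fills the blank; `class_span` holds start/end character offsets of the class within the file at `path`. As a minimal sketch of how these columns fit together (assuming a row is available as a plain Python dict with exactly these keys; no particular loading library is implied, and the example values are retyped by hand from the `Animal` row further down, with indentation restored):

```python
# Minimal sketch, not tied to any loader: restore the masked class name in one row.
# `row` is assumed to be a plain dict with the columns shown in the header above.

def unmask(row: dict) -> str:
    """Substitute the first ____ placeholder in `source` with `target`."""
    assert row["language"] == "python"
    span = row["class_span"]  # apparently character offsets of the class within the file at `path`
    assert span["start"] < span["end"]
    return row["source"].replace("____", row["target"], 1)


# Hand-typed copy of the PyCQA__pylint "Animal" row shown later in this table
# (indentation restored; the preview strips leading whitespace).
example_row = {
    "language": "python",
    "repo": "PyCQA__pylint",
    "path": "doc/data/messages/n/non-parent-init-called/bad.py",
    "class_span": {"start": 0, "end": 77},
    "source": "class ____:\n    def __init__(self):\n        self.is_multicellular = True\n",
    "target": "Animal",
}

print(unmask(example_row))
# class Animal:
#     def __init__(self):
#         self.is_multicellular = True
```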
python
|
PyCQA__pylint
|
pylint/utils/linterstats.py
|
{
"start": 1677,
"end": 13056
}
|
class ____:
"""Class used to linter stats."""
def __init__(
self,
bad_names: BadNames | None = None,
by_module: dict[str, ModuleStats] | None = None,
by_msg: dict[str, int] | None = None,
code_type_count: CodeTypeCount | None = None,
dependencies: dict[str, set[str]] | None = None,
duplicated_lines: DuplicatedLines | None = None,
node_count: NodeCount | None = None,
undocumented: UndocumentedNodes | None = None,
) -> None:
self.bad_names = bad_names or BadNames(
argument=0,
attr=0,
klass=0,
class_attribute=0,
class_const=0,
const=0,
inlinevar=0,
function=0,
method=0,
module=0,
variable=0,
typevar=0,
paramspec=0,
typevartuple=0,
typealias=0,
)
self.by_module: dict[str, ModuleStats] = by_module or {}
self.by_msg: dict[str, int] = by_msg or {}
self.code_type_count = code_type_count or CodeTypeCount(
code=0, comment=0, docstring=0, empty=0, total=0
)
self.modules_names: set[str] = set()
self.dependencies: dict[str, set[str]] = dependencies or {}
self.duplicated_lines = duplicated_lines or DuplicatedLines(
nb_duplicated_lines=0, percent_duplicated_lines=0.0
)
self.node_count = node_count or NodeCount(
function=0, klass=0, method=0, module=0
)
self.undocumented = undocumented or UndocumentedNodes(
function=0, klass=0, method=0, module=0
)
self.convention = 0
self.error = 0
self.fatal = 0
self.info = 0
self.refactor = 0
self.statement = 0
self.warning = 0
self.skipped = 0
self.global_note = 0
self.nb_duplicated_lines = 0
self.percent_duplicated_lines = 0.0
def __repr__(self) -> str:
return str(self)
def __str__(self) -> str:
return f"""{self.bad_names}
{sorted(self.by_module.items())}
{sorted(self.by_msg.items())}
{self.code_type_count}
{sorted(self.dependencies.items())}
{self.duplicated_lines}
{self.undocumented}
{self.convention}
{self.error}
{self.fatal}
{self.info}
{self.refactor}
{self.statement}
{self.warning}
{self.skipped}
{self.global_note}
{self.nb_duplicated_lines}
{self.percent_duplicated_lines}"""
def init_single_module(self, module_name: str) -> None:
"""Use through PyLinter.set_current_module so PyLinter.current_name is
consistent.
"""
self.by_module[module_name] = ModuleStats(
convention=0, error=0, fatal=0, info=0, refactor=0, statement=0, warning=0
)
def get_bad_names(
self,
node_name: Literal[
"argument",
"attr",
"class",
"class_attribute",
"class_const",
"const",
"inlinevar",
"function",
"method",
"module",
"variable",
"typevar",
"paramspec",
"typevartuple",
"typealias",
],
) -> int:
"""Get a bad names node count."""
if node_name == "class":
return self.bad_names.get("klass", 0)
return self.bad_names.get(node_name, 0)
def increase_bad_name(self, node_name: str, increase: int) -> None:
"""Increase a bad names node count."""
if node_name not in {
"argument",
"attr",
"class",
"class_attribute",
"class_const",
"const",
"inlinevar",
"function",
"method",
"module",
"variable",
"typevar",
"paramspec",
"typevartuple",
"typealias",
}:
raise ValueError("Node type not part of the bad_names stat")
node_name = cast(
Literal[
"argument",
"attr",
"class",
"class_attribute",
"class_const",
"const",
"inlinevar",
"function",
"method",
"module",
"variable",
"typevar",
"paramspec",
"typevartuple",
"typealias",
],
node_name,
)
if node_name == "class":
self.bad_names["klass"] += increase
else:
self.bad_names[node_name] += increase
def reset_bad_names(self) -> None:
"""Resets the bad_names attribute."""
self.bad_names = BadNames(
argument=0,
attr=0,
klass=0,
class_attribute=0,
class_const=0,
const=0,
inlinevar=0,
function=0,
method=0,
module=0,
variable=0,
typevar=0,
paramspec=0,
typevartuple=0,
typealias=0,
)
def get_code_count(
self, type_name: Literal["code", "comment", "docstring", "empty", "total"]
) -> int:
"""Get a code type count."""
return self.code_type_count.get(type_name, 0)
def reset_code_count(self) -> None:
"""Resets the code_type_count attribute."""
self.code_type_count = CodeTypeCount(
code=0, comment=0, docstring=0, empty=0, total=0
)
def reset_duplicated_lines(self) -> None:
"""Resets the duplicated_lines attribute."""
self.duplicated_lines = DuplicatedLines(
nb_duplicated_lines=0, percent_duplicated_lines=0.0
)
def get_node_count(
self, node_name: Literal["function", "class", "method", "module"]
) -> int:
"""Get a node count while handling some extra conditions."""
if node_name == "class":
return self.node_count.get("klass", 0)
return self.node_count.get(node_name, 0)
def reset_node_count(self) -> None:
"""Resets the node count attribute."""
self.node_count = NodeCount(function=0, klass=0, method=0, module=0)
def get_undocumented(
self, node_name: Literal["function", "class", "method", "module"]
) -> float:
"""Get a undocumented node count."""
if node_name == "class":
return self.undocumented["klass"]
return self.undocumented[node_name]
def reset_undocumented(self) -> None:
"""Resets the undocumented attribute."""
self.undocumented = UndocumentedNodes(function=0, klass=0, method=0, module=0)
def get_global_message_count(self, type_name: str) -> int:
"""Get a global message count."""
return getattr(self, type_name, 0)
def get_module_message_count(
self, modname: str, type_name: MessageTypesFullName
) -> int:
"""Get a module message count."""
return self.by_module[modname].get(type_name, 0)
def increase_single_message_count(self, type_name: str, increase: int) -> None:
"""Increase the message type count of an individual message type."""
setattr(self, type_name, getattr(self, type_name) + increase)
def increase_single_module_message_count(
self, modname: str, type_name: MessageTypesFullName, increase: int
) -> None:
"""Increase the message type count of an individual message type of a
module.
"""
self.by_module[modname][type_name] += increase
def reset_message_count(self) -> None:
"""Resets the message type count of the stats object."""
self.convention = 0
self.error = 0
self.fatal = 0
self.info = 0
self.refactor = 0
self.warning = 0
def merge_stats(stats: list[LinterStats]) -> LinterStats:
"""Used to merge multiple stats objects into a new one when pylint is run in
parallel mode.
"""
merged = LinterStats()
for stat in stats:
merged.bad_names["argument"] += stat.bad_names["argument"]
merged.bad_names["attr"] += stat.bad_names["attr"]
merged.bad_names["klass"] += stat.bad_names["klass"]
merged.bad_names["class_attribute"] += stat.bad_names["class_attribute"]
merged.bad_names["class_const"] += stat.bad_names["class_const"]
merged.bad_names["const"] += stat.bad_names["const"]
merged.bad_names["inlinevar"] += stat.bad_names["inlinevar"]
merged.bad_names["function"] += stat.bad_names["function"]
merged.bad_names["method"] += stat.bad_names["method"]
merged.bad_names["module"] += stat.bad_names["module"]
merged.bad_names["variable"] += stat.bad_names["variable"]
merged.bad_names["typevar"] += stat.bad_names["typevar"]
merged.bad_names["paramspec"] += stat.bad_names["paramspec"]
merged.bad_names["typevartuple"] += stat.bad_names["typevartuple"]
merged.bad_names["typealias"] += stat.bad_names["typealias"]
for mod_key, mod_value in stat.by_module.items():
merged.by_module[mod_key] = mod_value
for msg_key, msg_value in stat.by_msg.items():
try:
merged.by_msg[msg_key] += msg_value
except KeyError:
merged.by_msg[msg_key] = msg_value
merged.code_type_count["code"] += stat.code_type_count["code"]
merged.code_type_count["comment"] += stat.code_type_count["comment"]
merged.code_type_count["docstring"] += stat.code_type_count["docstring"]
merged.code_type_count["empty"] += stat.code_type_count["empty"]
merged.code_type_count["total"] += stat.code_type_count["total"]
for dep_key, dep_value in stat.dependencies.items():
try:
merged.dependencies[dep_key].update(dep_value)
except KeyError:
merged.dependencies[dep_key] = dep_value
merged.duplicated_lines["nb_duplicated_lines"] += stat.duplicated_lines[
"nb_duplicated_lines"
]
merged.duplicated_lines["percent_duplicated_lines"] += stat.duplicated_lines[
"percent_duplicated_lines"
]
merged.node_count["function"] += stat.node_count["function"]
merged.node_count["klass"] += stat.node_count["klass"]
merged.node_count["method"] += stat.node_count["method"]
merged.node_count["module"] += stat.node_count["module"]
merged.undocumented["function"] += stat.undocumented["function"]
merged.undocumented["klass"] += stat.undocumented["klass"]
merged.undocumented["method"] += stat.undocumented["method"]
merged.undocumented["module"] += stat.undocumented["module"]
merged.convention += stat.convention
merged.error += stat.error
merged.fatal += stat.fatal
merged.info += stat.info
merged.refactor += stat.refactor
merged.statement += stat.statement
merged.warning += stat.warning
merged.skipped += stat.skipped
merged.global_note += stat.global_note
return merged
|
LinterStats
|
python
|
plotly__plotly.py
|
plotly/graph_objs/heatmap/colorbar/_title.py
|
{
"start": 233,
"end": 3971
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "heatmap.colorbar"
_path_str = "heatmap.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.heatmap.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.heatmap.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` is "v" and
defaults to "right" when `orientation` is "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` is "v" and defaults to "right" when
`orientation` is "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmap.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` is "v" and defaults to "right" when
`orientation` is "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.heatmap.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Title
|
python
|
getsentry__sentry
|
src/sentry/tagstore/types.py
|
{
"start": 3783,
"end": 3919
}
|
class ____(TagKeySerializerResponseOptional):
key: str
name: str
@register(GroupTagKey)
@register(TagKey)
|
TagKeySerializerResponse
|
python
|
apache__airflow
|
airflow-core/tests/unit/utils/test_session.py
|
{
"start": 985,
"end": 2219
}
|
class ____:
def dummy_session(self, session=None):
return session
def test_raised_provide_session(self):
with pytest.raises(ValueError, match="Function .*dummy has no `session` argument"):
@provide_session
def dummy():
pass
def test_provide_session_without_args_and_kwargs(self):
assert self.dummy_session() is None
wrapper = provide_session(self.dummy_session)
assert wrapper() is not None
def test_provide_session_with_args(self):
wrapper = provide_session(self.dummy_session)
session = object()
assert wrapper(session) is session
def test_provide_session_with_kwargs(self):
wrapper = provide_session(self.dummy_session)
session = object()
assert wrapper(session=session) is session
@pytest.mark.asyncio
async def test_async_session(self):
from airflow.settings import AsyncSession
session = AsyncSession()
session.add(Log(event="hihi1234"))
await session.commit()
my_special_log_event = await session.scalar(select(Log).where(Log.event == "hihi1234").limit(1))
assert my_special_log_event.event == "hihi1234"
|
TestSession
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/strategies/_internal/collections.py
|
{
"start": 4118,
"end": 8361
}
|
class ____(SearchStrategy[list[Ex]]):
"""A strategy for lists which takes a strategy for its elements and the
allowed lengths, and generates lists with the correct size and contents."""
_nonempty_filters: tuple[Callable[[Any], Any], ...] = (bool, len, tuple, list)
def __init__(
self,
elements: SearchStrategy[Ex],
min_size: int = 0,
max_size: float | int | None = math.inf,
):
super().__init__()
self.min_size = min_size or 0
self.max_size = max_size if max_size is not None else math.inf
assert 0 <= self.min_size <= self.max_size
self.average_size = min(
max(self.min_size * 2, self.min_size + 5),
0.5 * (self.min_size + self.max_size),
)
self.element_strategy = elements
if min_size > BUFFER_SIZE:
raise InvalidArgument(
f"{self!r} can never generate an example, because min_size is larger "
"than Hypothesis supports. Including it is at best slowing down your "
"tests for no benefit; at worst making them fail (maybe flakily) with "
"a HealthCheck error."
)
def calc_label(self) -> int:
return combine_labels(self.class_label, self.element_strategy.label)
def do_validate(self) -> None:
self.element_strategy.validate()
if self.is_empty:
raise InvalidArgument(
"Cannot create non-empty lists with elements drawn from "
f"strategy {self.element_strategy!r} because it has no values."
)
if self.element_strategy.is_empty and 0 < self.max_size < float("inf"):
raise InvalidArgument(
f"Cannot create a collection of max_size={self.max_size!r}, "
"because no elements can be drawn from the element strategy "
f"{self.element_strategy!r}"
)
def calc_is_empty(self, recur: RecurT) -> bool:
if self.min_size == 0:
return False
return recur(self.element_strategy)
def do_draw(self, data: ConjectureData) -> list[Ex]:
if self.element_strategy.is_empty:
assert self.min_size == 0
return []
elements = cu.many(
data,
min_size=self.min_size,
max_size=self.max_size,
average_size=self.average_size,
)
result = []
while elements.more():
result.append(data.draw(self.element_strategy))
return result
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}({self.element_strategy!r}, "
f"min_size={self.min_size:_}, max_size={self.max_size:_})"
)
def filter(self, condition: Callable[[list[Ex]], Any]) -> SearchStrategy[list[Ex]]:
if condition in self._nonempty_filters or is_identity_function(condition):
assert self.max_size >= 1, "Always-empty is special cased in st.lists()"
if self.min_size >= 1:
return self
new = copy.copy(self)
new.min_size = 1
return new
constraints, pred = get_integer_predicate_bounds(condition)
if constraints.get("len") and (
"min_value" in constraints or "max_value" in constraints
):
new = copy.copy(self)
new.min_size = max(
self.min_size, constraints.get("min_value", self.min_size)
)
new.max_size = min(
self.max_size, constraints.get("max_value", self.max_size)
)
# Unsatisfiable filters are easiest to understand without rewriting.
if new.min_size > new.max_size:
return SearchStrategy.filter(self, condition)
# Recompute average size; this is cheaper than making it into a property.
new.average_size = min(
max(new.min_size * 2, new.min_size + 5),
0.5 * (new.min_size + new.max_size),
)
if pred is None:
return new
return SearchStrategy.filter(new, condition)
return SearchStrategy.filter(self, condition)
|
ListStrategy
|
python
|
sphinx-doc__sphinx
|
sphinx/project.py
|
{
"start": 544,
"end": 4539
}
|
class ____:
"""A project is the source code set of the Sphinx document(s)."""
def __init__(
self, srcdir: str | os.PathLike[str], source_suffix: Iterable[str]
) -> None:
#: Source directory.
self.srcdir = _StrPath(srcdir)
#: source_suffix. Same as :confval:`source_suffix`.
self.source_suffix = tuple(source_suffix)
self._first_source_suffix = next(iter(self.source_suffix), '')
#: The name of documents belonging to this project.
self.docnames: set[str] = set()
# Bijective mapping between docnames and (srcdir relative) paths.
self._path_to_docname: dict[Path, str] = {}
self._docname_to_path: dict[str, Path] = {}
def restore(self, other: Project) -> None:
"""Take over a result of last build."""
self.docnames = other.docnames
self._path_to_docname = other._path_to_docname
self._docname_to_path = other._docname_to_path
def discover(
self, exclude_paths: Iterable[str] = (), include_paths: Iterable[str] = ('**',)
) -> set[str]:
"""Find all document files in the source directory and put them in
:attr:`docnames`.
"""
self.docnames.clear()
self._path_to_docname.clear()
self._docname_to_path.clear()
for filename in get_matching_files(
self.srcdir,
include_paths,
[*exclude_paths, *EXCLUDE_PATHS],
):
if docname := self.path2doc(filename):
if docname in self.docnames:
files = [
str(f.relative_to(self.srcdir))
for f in self.srcdir.glob(f'{docname}.*')
]
logger.warning(
__(
'multiple files found for the document "%s": %s\n'
'Use %r for the build.'
),
docname,
', '.join(files),
self.doc2path(docname, absolute=True),
once=True,
)
elif os.access(self.srcdir / filename, os.R_OK):
self.docnames.add(docname)
path = Path(filename)
self._path_to_docname[path] = docname
self._docname_to_path[docname] = path
else:
logger.warning(
__('Ignored unreadable document %r.'),
filename,
location=docname,
)
return self.docnames
def path2doc(self, filename: str | os.PathLike[str]) -> str | None:
"""Return the docname for the filename if the file is a document.
*filename* should be absolute or relative to the source directory.
"""
path = Path(filename)
try:
return self._path_to_docname[path]
except KeyError:
if path.is_absolute():
with contextlib.suppress(ValueError):
path = path.relative_to(self.srcdir)
for suffix in self.source_suffix:
if path.name.endswith(suffix):
return path_stabilize(path).removesuffix(suffix)
# the file does not have a docname
return None
def doc2path(self, docname: str, absolute: bool) -> _StrPath:
"""Return the filename for the document name.
If *absolute* is True, return as an absolute path.
Else, return as a relative path to the source directory.
"""
try:
filename = self._docname_to_path[docname]
except KeyError:
# Backwards compatibility: the document does not exist
filename = Path(docname + self._first_source_suffix)
if absolute:
return _StrPath(self.srcdir / filename)
return _StrPath(filename)
|
Project
|
python
|
sqlalchemy__sqlalchemy
|
examples/inheritance/concrete.py
|
{
"start": 1858,
"end": 4545
}
|
class ____(Person):
__tablename__ = "manager"
id: Mapped[int] = mapped_column(primary_key=True)
company_id: Mapped[int] = mapped_column(ForeignKey("company.id"))
name: Mapped[str50]
status: Mapped[str50]
manager_name: Mapped[str50]
company: Mapped[Company] = relationship(back_populates="employees")
__mapper_args__ = {"polymorphic_identity": "manager", "concrete": True}
def __repr__(self):
return (
f"Manager {self.name}, status {self.status}, "
f"manager_name {self.manager_name}"
)
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
with Session(engine) as session:
c = Company(
name="company1",
employees=[
Manager(
name="mr krabs",
status="AAB",
manager_name="manager1",
),
Engineer(
name="spongebob",
status="BBA",
engineer_name="engineer1",
primary_language="java",
),
Person(name="joesmith"),
Engineer(
name="patrick",
status="CGG",
engineer_name="engineer2",
primary_language="python",
),
Manager(name="jsmith", status="ABA", manager_name="manager2"),
],
)
session.add(c)
session.commit()
for e in c.employees:
print(e)
spongebob = session.scalars(
select(Person).filter_by(name="spongebob")
).one()
spongebob2 = session.scalars(
select(Engineer).filter_by(name="spongebob")
).one()
assert spongebob is spongebob2
spongebob2.engineer_name = "hes spongebob!"
session.commit()
# query using with_polymorphic.
# when using ConcreteBase, use "*" to use the default selectable
# setting specific entities won't work right now.
eng_manager = with_polymorphic(Person, "*")
print(
session.scalars(
select(eng_manager).filter(
or_(
eng_manager.Engineer.engineer_name == "engineer1",
eng_manager.Manager.manager_name == "manager2",
)
)
).all()
)
# illustrate join from Company.
print(
session.scalars(
select(Company)
.join(Company.employees.of_type(eng_manager))
.filter(
or_(
eng_manager.Engineer.engineer_name == "engineer1",
eng_manager.Manager.manager_name == "manager2",
)
)
).all()
)
session.commit()
|
Manager
|
python
|
PrefectHQ__prefect
|
src/prefect/events/schemas/deployment_triggers.py
|
{
"start": 3098,
"end": 4545
}
|
class ____(BaseDeploymentTrigger, SequenceTrigger):
"""A composite trigger that requires some number of triggers to have fired
within the given time period in a specific order"""
trigger_type: ClassVar[Type[TriggerTypes]] = SequenceTrigger
def deployment_trigger_discriminator(value: Any) -> str:
"""Custom discriminator for deployment triggers that defaults to 'event' if no type is specified."""
if isinstance(value, dict):
# Check for explicit type first
if "type" in value:
return value["type"]
# Infer from posture for backward compatibility
posture = value.get("posture")
if posture == "Metric":
return "metric"
# Check for compound/sequence specific fields
if "triggers" in value and "require" in value:
return "compound"
if "triggers" in value and "require" not in value:
return "sequence"
# Default to event
return "event"
return getattr(value, "type", "event")
# Concrete deployment trigger types
DeploymentTriggerTypes: TypeAlias = Annotated[
Union[
Annotated[DeploymentEventTrigger, Tag("event")],
Annotated[DeploymentMetricTrigger, Tag("metric")],
Annotated[DeploymentCompoundTrigger, Tag("compound")],
Annotated[DeploymentSequenceTrigger, Tag("sequence")],
],
Discriminator(deployment_trigger_discriminator),
]
|
DeploymentSequenceTrigger
|
python
|
scrapy__scrapy
|
tests/test_downloader_handler_twisted_http2.py
|
{
"start": 7252,
"end": 7602
}
|
class ____(TestHttpWithCrawlerBase):
"""HTTP 2.0 test case with MockServer"""
@property
def settings_dict(self) -> dict[str, Any] | None:
return {
"DOWNLOAD_HANDLERS": {
"https": "scrapy.core.downloader.handlers.http2.H2DownloadHandler"
}
}
is_secure = True
|
TestHttp2WithCrawler
|
python
|
ipython__ipython
|
tests/test_interactiveshell.py
|
{
"start": 33466,
"end": 34563
}
|
class ____(unittest.TestCase):
"""Check that SyntaxError raised by an input transformer is handled by run_cell()"""
@staticmethod
def transformer(lines):
for line in lines:
pos = line.find("syntaxerror")
if pos >= 0:
e = SyntaxError('input contains "syntaxerror"')
e.text = line
e.offset = pos + 1
raise e
return lines
def setUp(self):
ip.input_transformers_post.append(self.transformer)
def tearDown(self):
ip.input_transformers_post.remove(self.transformer)
def test_syntaxerror_input_transformer(self):
with tt.AssertPrints("1234"):
ip.run_cell("1234")
with tt.AssertPrints("SyntaxError: invalid syntax"):
ip.run_cell("1 2 3") # plain python syntax error
with tt.AssertPrints('SyntaxError: input contains "syntaxerror"'):
ip.run_cell("2345 # syntaxerror") # input transformer syntax error
with tt.AssertPrints("3456"):
ip.run_cell("3456")
|
TestSyntaxErrorTransformer
|
python
|
conda__conda
|
conda/core/path_actions.py
|
{
"start": 6489,
"end": 7452
}
|
class ____(PrefixPathAction, metaclass=ABCMeta):
# All CreatePathAction subclasses must create a SINGLE new path
# the short/in-prefix version of that path must be returned by execute()
def __init__(
self,
transaction_context,
package_info,
source_prefix,
source_short_path,
target_prefix,
target_short_path,
):
super().__init__(transaction_context, target_prefix, target_short_path)
self.package_info = package_info
self.source_prefix = source_prefix
self.source_short_path = source_short_path
def verify(self):
self._verified = True
def cleanup(self):
# create actions typically won't need cleanup
pass
@property
def source_full_path(self):
prfx, shrt_pth = self.source_prefix, self.source_short_path
return join(prfx, win_path_ok(shrt_pth)) if prfx and shrt_pth else None
|
CreateInPrefixPathAction
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/test_organization_detector_workflow_index.py
|
{
"start": 1918,
"end": 4519
}
|
class ____(OrganizationDetectorWorkflowAPITestCase):
def test_detector_filter(self) -> None:
response = self.get_success_response(
self.organization.slug,
qs_params={"detector_id": self.detector_1.id},
)
assert len(response.data) == 2
assert response.data == [
{
"id": str(self.detector_1_workflow_1.id),
"detectorId": str(self.detector_1.id),
"workflowId": str(self.workflow_1.id),
},
{
"id": str(self.detector_1_workflow_2.id),
"detectorId": str(self.detector_1.id),
"workflowId": str(self.workflow_2.id),
},
]
response = self.get_success_response(
self.organization.slug,
qs_params={"detector_id": self.unconnected_detector.id},
)
assert len(response.data) == 0
def test_workflow_filter(self) -> None:
response = self.get_success_response(
self.organization.slug,
qs_params={"workflow_id": self.workflow_1.id},
)
assert len(response.data) == 2
assert response.data == [
{
"id": str(self.detector_1_workflow_1.id),
"detectorId": str(self.detector_1.id),
"workflowId": str(self.workflow_1.id),
},
{
"id": str(self.detector_2_workflow_1.id),
"detectorId": str(self.detector_2.id),
"workflowId": str(self.workflow_1.id),
},
]
response = self.get_success_response(
self.organization.slug,
qs_params={"workflow_id": self.unconnected_workflow.id},
)
assert len(response.data) == 0
def test_detector_workflow_filter(self) -> None:
response = self.get_success_response(
self.organization.slug,
qs_params={"detector_id": self.detector_1.id, "workflow_id": self.workflow_1.id},
)
assert len(response.data) == 1
assert response.data == [
{
"id": str(self.detector_1_workflow_1.id),
"detectorId": str(self.detector_1.id),
"workflowId": str(self.workflow_1.id),
}
]
response = self.get_success_response(
self.organization.slug,
qs_params={"detector_id": self.detector_2.id, "workflow_id": self.workflow_2.id},
)
assert len(response.data) == 0
@region_silo_test
|
OrganizationDetectorWorkflowIndexGetTest
|
python
|
Textualize__textual
|
src/textual/widgets/_masked_input.py
|
{
"start": 15966,
"end": 26291
}
|
class ____(Input, can_focus=True):
"""A masked text input widget."""
template: Reactive[str] = var("")
"""Input template mask currently in use."""
def __init__(
self,
template: str,
value: str | None = None,
placeholder: str = "",
*,
validators: Validator | Iterable[Validator] | None = None,
validate_on: Iterable[InputValidationOn] | None = None,
valid_empty: bool = False,
select_on_focus: bool = True,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
tooltip: RenderableType | None = None,
compact: bool = False,
) -> None:
"""Initialise the `MaskedInput` widget.
Args:
template: Template string.
value: An optional default value for the input.
placeholder: Optional placeholder text for the input.
validators: An iterable of validators that the MaskedInput value will be checked against.
validate_on: Zero or more of the values "blur", "changed", and "submitted",
which determine when to do input validation. The default is to do
validation for all messages.
valid_empty: Empty values are valid.
name: Optional name for the masked input widget.
id: Optional ID for the widget.
classes: Optional initial classes for the widget.
disabled: Whether the input is disabled or not.
tooltip: Optional tooltip.
compact: Enable compact style (without borders).
"""
self._template: _Template = None
super().__init__(
placeholder=placeholder,
validators=validators,
validate_on=validate_on,
valid_empty=valid_empty,
select_on_focus=select_on_focus,
name=name,
id=id,
classes=classes,
disabled=disabled,
compact=compact,
)
self._template = _Template(self, template)
self.template = template
value, _ = self._template.insert_separators(value or "", 0)
self.value = value
if tooltip is not None:
self.tooltip = tooltip
def validate_value(self, value: str) -> str:
"""Validates value against template."""
if self._template is None:
return value
if not self._template.check(value, True):
raise ValueError("Value does not match template!")
return value[: len(self._template.mask)]
def _watch_template(self, template: str) -> None:
"""Revalidate when template changes."""
self._template = _Template(self, template) if template else None
if self.is_mounted:
self._watch_value(self.value)
def _watch_placeholder(self, placeholder: str) -> None:
"""Update template display mask when placeholder changes."""
if self._template is not None:
self._template.update_mask(placeholder)
self.refresh()
def validate(self, value: str) -> ValidationResult | None:
"""Run all the validators associated with this MaskedInput on the supplied value.
Same as `Input.validate()` but also validates against template which acts as an
additional implicit validator.
Returns:
A ValidationResult indicating whether *all* validators succeeded or not.
That is, if *any* validator fails, the result will be an unsuccessful
validation.
"""
def set_classes() -> None:
"""Set classes for valid flag."""
valid = self._valid
self.set_class(not valid, "-invalid")
self.set_class(valid, "-valid")
result = super().validate(value)
validation_results: list[ValidationResult] = [self._template.validate(value)]
if result is not None:
validation_results.append(result)
combined_result = ValidationResult.merge(validation_results)
self._valid = combined_result.is_valid
set_classes()
return combined_result
def render_line(self, y: int) -> Strip:
if y != 0:
return Strip.blank(self.size.width, self.rich_style)
result = self._value
width = self.content_size.width
# Add the completion with a faded style.
value = self.value
value_length = len(value)
template = self._template
style = self.get_component_rich_style("input--placeholder")
result += Text(
template.mask[value_length:],
style,
end="",
)
for index, (char, char_definition) in enumerate(zip(value, template.template)):
if char == " ":
result.stylize(style, index, index + 1)
if self._cursor_visible and self.has_focus:
if self.cursor_at_end:
result.pad_right(1)
cursor_style = self.get_component_rich_style("input--cursor")
cursor = self.cursor_position
result.stylize(cursor_style, cursor, cursor + 1)
segments = list(result.render(self.app.console))
line_length = Segment.get_line_length(segments)
if line_length < width:
segments = Segment.adjust_line_length(segments, width)
line_length = width
strip = Strip(segments).crop(self.scroll_offset.x, self.scroll_offset.x + width)
return strip.apply_style(self.rich_style)
@property
def _value(self) -> Text:
"""Value rendered as text."""
value = self._template.display(self.value)
return Text(value, no_wrap=True, overflow="ignore", end="")
async def _on_click(self, event: events.Click) -> None:
"""Ensure clicking on value does not leave cursor on a separator."""
await super()._on_click(event)
if self._template.at_separator():
self._template.move_cursor(1)
def insert_text_at_cursor(self, text: str) -> None:
"""Insert new text at the cursor, move the cursor to the end of the new text.
Args:
text: New text to insert.
"""
new_value = self._template.insert_text_at_cursor(text)
if new_value is not None:
self.value, self.cursor_position = new_value
else:
self.restricted()
def clear(self) -> None:
"""Clear the masked input."""
self.value, self.cursor_position = self._template.insert_separators("", 0)
def action_cursor_left(self) -> None:
"""Move the cursor one position to the left; separators are skipped."""
self._template.move_cursor(-1)
def action_cursor_right(self) -> None:
"""Move the cursor one position to the right; separators are skipped."""
self._template.move_cursor(1)
def action_home(self) -> None:
"""Move the cursor to the start of the input."""
self._template.move_cursor(-len(self.template))
def action_cursor_left_word(self) -> None:
"""Move the cursor left next to the previous separator. If no previous
separator is found, moves the cursor to the start of the input."""
if self._template.at_separator(self.cursor_position - 1):
position = self._template.prev_separator_position(self.cursor_position - 1)
else:
position = self._template.prev_separator_position()
if position:
position += 1
self.cursor_position = position or 0
def action_cursor_right_word(self) -> None:
"""Move the cursor right next to the next separator. If no next
separator is found, moves the cursor to the end of the input."""
position = self._template.next_separator_position()
if position is None:
self.cursor_position = len(self._template.mask)
else:
self.cursor_position = position + 1
def action_delete_right(self) -> None:
"""Delete one character at the current cursor position."""
self._template.delete_at_position()
def action_delete_right_word(self) -> None:
"""Delete the current character and all rightward to next separator or
the end of the input."""
position = self._template.next_separator_position()
if position is not None:
position += 1
else:
position = len(self.value)
for index in range(self.cursor_position, position):
self.cursor_position = index
if not self._template.at_separator():
self._template.delete_at_position()
def action_delete_left(self) -> None:
"""Delete one character to the left of the current cursor position."""
if self.cursor_position <= 0:
# Cursor at the start, so nothing to delete
return
self._template.move_cursor(-1)
self._template.delete_at_position()
def action_delete_left_word(self) -> None:
"""Delete leftward of the cursor position to the previous separator or
the start of the input."""
if self.cursor_position <= 0:
return
if self._template.at_separator(self.cursor_position - 1):
position = self._template.prev_separator_position(self.cursor_position - 1)
else:
position = self._template.prev_separator_position()
if position:
position += 1
else:
position = 0
for index in range(position, self.cursor_position):
self.cursor_position = index
if not self._template.at_separator():
self._template.delete_at_position()
self.cursor_position = position
def action_delete_left_all(self) -> None:
"""Delete all characters to the left of the cursor position."""
if self.cursor_position > 0:
cursor_position = self.cursor_position
if cursor_position >= len(self.value):
self.value = ""
else:
self.value = (
self._template.empty_mask[:cursor_position]
+ self.value[cursor_position:]
)
self.cursor_position = 0
|
MaskedInput
|
python
|
realpython__materials
|
django-flashcards-app/source_code_final/cards/views.py
|
{
"start": 494,
"end": 590
}
|
class ____(CardCreateView, UpdateView):
success_url = reverse_lazy("card-list")
|
CardUpdateView
|
python
|
openai__openai-python
|
src/openai/types/chat/chat_completion_message.py
|
{
"start": 991,
"end": 1408
}
|
class ____(BaseModel):
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
format. Note that the model does not always generate valid JSON, and may
hallucinate parameters not defined by your function schema. Validate the
arguments in your code before calling your function.
"""
name: str
"""The name of the function to call."""
|
FunctionCall
|
python
|
coleifer__peewee
|
peewee.py
|
{
"start": 146725,
"end": 147987
}
|
class ____(object):
def __init__(self, db, *args, **kwargs):
self.db = db
self._begin_args = (args, kwargs)
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
a, k = self._begin_args
with _transaction(self.db, *a, **k):
return fn(*args, **kwargs)
return inner
def _begin(self):
args, kwargs = self._begin_args
self.db.begin(*args, **kwargs)
def commit(self, begin=True):
self.db.commit()
if begin:
self._begin()
def rollback(self, begin=True):
self.db.rollback()
if begin:
self._begin()
def __enter__(self):
if self.db.transaction_depth() == 0:
self._begin()
self.db.push_transaction(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
depth = self.db.transaction_depth()
try:
if exc_type and depth == 1:
self.rollback(False)
elif depth == 1:
try:
self.commit(False)
except:
self.rollback(False)
raise
finally:
self.db.pop_transaction()
|
_transaction
|
python
|
scrapy__scrapy
|
scrapy/spidermiddlewares/referer.py
|
{
"start": 4746,
"end": 5450
}
|
class ____(ReferrerPolicy):
"""
https://www.w3.org/TR/referrer-policy/#referrer-policy-same-origin
The "same-origin" policy specifies that a full URL, stripped for use as a referrer,
is sent as referrer information when making same-origin requests from a particular request client.
Cross-origin requests, on the other hand, will contain no referrer information.
A Referer HTTP header will not be sent.
"""
name: str = POLICY_SAME_ORIGIN
def referrer(self, response_url: str, request_url: str) -> str | None:
if self.origin(response_url) == self.origin(request_url):
return self.stripped_referrer(response_url)
return None
|
SameOriginPolicy
|
python
|
walkccc__LeetCode
|
solutions/747. Largest Number At Least Twice of Others/747.py
|
{
"start": 0,
"end": 306
}
|
class ____:
def dominantIndex(self, nums: list[int]) -> int:
mx = 0
secondMax = 0
for i, num in enumerate(nums):
if num > mx:
secondMax = mx
mx = num
ans = i
elif num > secondMax:
secondMax = num
return ans if mx >= 2 * secondMax else -1
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/from_list_test.py
|
{
"start": 9084,
"end": 10251
}
|
class ____(
test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
dataset_range=[100],
repetitions=[1, 2],
seed=[None, 42],
reshuffle_each_iteration=[True, False])))
def test(
self,
dataset_range: int,
repetitions: int,
seed: Optional[int],
reshuffle_each_iteration: bool):
dataset = from_list.from_list(list(range(dataset_range)))
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
if repetitions > 1:
dataset = dataset.repeat(repetitions)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration)
expected = list(range(dataset_range)) * repetitions
dataset_output = self.getDatasetOutput(
dataset, requires_initialization=True)
self.assertCountEqual(dataset_output, expected)
self.assertNotEqual(dataset_output, expected)
self.assertLen(dataset_output, self.evaluate(dataset.cardinality()))
|
FromListGlobalShuffleTest
|
python
|
oauthlib__oauthlib
|
tests/oauth2/rfc6749/endpoints/test_client_authentication.py
|
{
"start": 796,
"end": 6821
}
|
class ____(TestCase):
def inspect_client(self, request, refresh_token=False):
if not request.client or not request.client.client_id:
raise ValueError()
return 'abc'
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.is_pkce_required.return_value = False
self.validator.get_code_challenge.return_value = None
self.validator.get_default_redirect_uri.return_value = 'http://i.b./path'
self.web = WebApplicationServer(self.validator,
token_generator=self.inspect_client)
self.mobile = MobileApplicationServer(self.validator,
token_generator=self.inspect_client)
self.legacy = LegacyApplicationServer(self.validator,
token_generator=self.inspect_client)
self.backend = BackendApplicationServer(self.validator,
token_generator=self.inspect_client)
self.token_uri = 'http://example.com/path'
self.auth_uri = 'http://example.com/path?client_id=abc&response_type=token'
# should be base64 but no added value in this unittest
self.basicauth_client_creds = {"Authorization": "john:doe"}
self.basicauth_client_id = {"Authorization": "john:"}
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def set_client_id(self, client_id, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def basicauth_authenticate_client(self, request):
assert "Authorization" in request.headers
assert "john:doe" in request.headers["Authorization"]
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_client_id_authentication(self):
token_uri = 'http://example.com/path'
# authorization code grant
self.validator.authenticate_client.return_value = False
self.validator.authenticate_client_id.return_value = False
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=mock')
self.assertEqual(json.loads(body)['error'], 'invalid_client')
self.validator.authenticate_client_id.return_value = True
self.validator.authenticate_client.side_effect = self.set_client
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=mock')
self.assertIn('access_token', json.loads(body))
# implicit grant
auth_uri = 'http://example.com/path?client_id=abc&response_type=token'
self.assertRaises(ValueError, self.mobile.create_authorization_response,
auth_uri, scopes=['random'])
self.validator.validate_client_id.side_effect = self.set_client_id
h, _, s = self.mobile.create_authorization_response(auth_uri, scopes=['random'])
self.assertEqual(302, s)
self.assertIn('Location', h)
self.assertIn('access_token', get_fragment_credentials(h['Location']))
def test_basicauth_web(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
_, body, _ = self.web.create_token_response(
self.token_uri,
body='grant_type=authorization_code&code=mock',
headers=self.basicauth_client_creds
)
self.assertIn('access_token', json.loads(body))
def test_basicauth_legacy(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
_, body, _ = self.legacy.create_token_response(
self.token_uri,
body='grant_type=password&username=abc&password=secret',
headers=self.basicauth_client_creds
)
self.assertIn('access_token', json.loads(body))
def test_basicauth_backend(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
_, body, _ = self.backend.create_token_response(
self.token_uri,
body='grant_type=client_credentials',
headers=self.basicauth_client_creds
)
self.assertIn('access_token', json.loads(body))
def test_basicauth_revoke(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
# legacy or any other uses the same RevocationEndpoint
_, body, status = self.legacy.create_revocation_response(
self.token_uri,
body='token=foobar',
headers=self.basicauth_client_creds
)
self.assertEqual(status, 200, body)
def test_basicauth_introspect(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
# legacy or any other uses the same IntrospectEndpoint
_, body, status = self.legacy.create_introspect_response(
self.token_uri,
body='token=foobar',
headers=self.basicauth_client_creds
)
self.assertEqual(status, 200, body)
def test_custom_authentication(self):
token_uri = 'http://example.com/path'
# authorization code grant
self.assertRaises(NotImplementedError,
self.web.create_token_response, token_uri,
body='grant_type=authorization_code&code=mock')
# password grant
self.validator.authenticate_client.return_value = True
self.assertRaises(NotImplementedError,
self.legacy.create_token_response, token_uri,
body='grant_type=password&username=abc&password=secret')
# client credentials grant
self.validator.authenticate_client.return_value = True
self.assertRaises(NotImplementedError,
self.backend.create_token_response, token_uri,
body='grant_type=client_credentials')
|
ClientAuthenticationTest
|
python
|
numba__numba
|
numba/tests/test_dictobject.py
|
{
"start": 29450,
"end": 32483
}
|
class ____(MemoryLeakMixin, TestCase):
def test_basic(self):
d = Dict.empty(int32, float32)
# len
self.assertEqual(len(d), 0)
# setitems
d[1] = 1
d[2] = 2.3
d[3] = 3.4
self.assertEqual(len(d), 3)
# keys
self.assertEqual(list(d.keys()), [1, 2, 3])
# values
for x, y in zip(list(d.values()), [1, 2.3, 3.4]):
self.assertAlmostEqual(x, y, places=4)
# getitem
self.assertAlmostEqual(d[1], 1)
self.assertAlmostEqual(d[2], 2.3, places=4)
self.assertAlmostEqual(d[3], 3.4, places=4)
# delitem
del d[2]
self.assertEqual(len(d), 2)
# get
self.assertIsNone(d.get(2))
# setdefault
d.setdefault(2, 100)
d.setdefault(3, 200)
self.assertEqual(d[2], 100)
self.assertAlmostEqual(d[3], 3.4, places=4)
# update
d.update({4: 5, 5: 6})
self.assertAlmostEqual(d[4], 5)
self.assertAlmostEqual(d[5], 6)
# contains
self.assertTrue(4 in d)
# items
pyd = dict(d.items())
self.assertEqual(len(pyd), len(d))
# pop
self.assertAlmostEqual(d.pop(4), 5)
# popitem
nelem = len(d)
k, v = d.popitem()
self.assertEqual(len(d), nelem - 1)
self.assertTrue(k not in d)
# __eq__ & copy
copied = d.copy()
self.assertEqual(copied, d)
self.assertEqual(list(copied.items()), list(d.items()))
def test_copy_from_dict(self):
expect = {k: float(v) for k, v in zip(range(10), range(10, 20))}
nbd = Dict.empty(int32, float64)
for k, v in expect.items():
nbd[k] = v
got = dict(nbd)
self.assertEqual(got, expect)
def test_compiled(self):
@njit
def producer():
d = Dict.empty(int32, float64)
d[1] = 1.23
return d
@njit
def consumer(d):
return d[1]
d = producer()
val = consumer(d)
self.assertEqual(val, 1.23)
def test_gh7908(self):
d = Dict.empty(
key_type=types.Tuple([types.uint32,
types.uint32]),
value_type=int64)
d[(1, 1)] = 12345
self.assertEqual(d[(1, 1)], d.get((1, 1)))
def check_stringify(self, strfn, prefix=False):
nbd = Dict.empty(int32, int32)
d = {}
nbd[1] = 2
d[1] = 2
checker = self.assertIn if prefix else self.assertEqual
checker(strfn(d), strfn(nbd))
nbd[2] = 3
d[2] = 3
checker(strfn(d), strfn(nbd))
for i in range(10, 20):
nbd[i] = i + 1
d[i] = i + 1
checker(strfn(d), strfn(nbd))
if prefix:
self.assertTrue(strfn(nbd).startswith('DictType'))
def test_repr(self):
self.check_stringify(repr, prefix=True)
def test_str(self):
self.check_stringify(str)
|
TestTypedDict
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 817027,
"end": 817704
}
|
class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"blocked_user",
"blocked_user_name",
"blocked_user_resource_path",
"blocked_user_url",
)
blocked_user = sgqlc.types.Field("User", graphql_name="blockedUser")
blocked_user_name = sgqlc.types.Field(String, graphql_name="blockedUserName")
blocked_user_resource_path = sgqlc.types.Field(
URI, graphql_name="blockedUserResourcePath"
)
blocked_user_url = sgqlc.types.Field(URI, graphql_name="blockedUserUrl")
|
OrgBlockUserAuditEntry
|
python
|
falconry__falcon
|
examples/asgilook/asgilook/cache.py
|
{
"start": 17,
"end": 1675
}
|
class ____:
PREFIX = 'asgilook:'
INVALIDATE_ON = frozenset({'DELETE', 'POST', 'PUT'})
CACHE_HEADER = 'X-ASGILook-Cache'
TTL = 3600
def __init__(self, config):
self._config = config
self._redis = self._config.redis_from_url(self._config.redis_host)
async def _serialize_response(self, resp):
data = await resp.render_body()
return msgpack.packb([resp.content_type, data], use_bin_type=True)
def _deserialize_response(self, resp, data):
resp.content_type, resp.data = msgpack.unpackb(data, raw=False)
resp.complete = True
resp.context.cached = True
async def process_startup(self, scope, event):
await self._redis.ping()
async def process_shutdown(self, scope, event):
await self._redis.aclose()
async def process_request(self, req, resp):
resp.context.cached = False
if req.method in self.INVALIDATE_ON:
return
key = f'{self.PREFIX}/{req.path}'
data = await self._redis.get(key)
if data is not None:
self._deserialize_response(resp, data)
resp.set_header(self.CACHE_HEADER, 'Hit')
else:
resp.set_header(self.CACHE_HEADER, 'Miss')
async def process_response(self, req, resp, resource, req_succeeded):
if not req_succeeded:
return
key = f'{self.PREFIX}/{req.path}'
if req.method in self.INVALIDATE_ON:
await self._redis.delete(key)
elif not resp.context.cached:
data = await self._serialize_response(resp)
await self._redis.set(key, data, ex=self.TTL)
|
RedisCache
|
python
|
doocs__leetcode
|
solution/0500-0599/0522.Longest Uncommon Subsequence II/Solution.py
|
{
"start": 0,
"end": 526
}
|
class ____:
def findLUSlength(self, strs: List[str]) -> int:
def check(s: str, t: str):
i = j = 0
while i < len(s) and j < len(t):
if s[i] == t[j]:
i += 1
j += 1
return i == len(s)
ans = -1
for i, s in enumerate(strs):
for j, t in enumerate(strs):
if i != j and check(s, t):
break
else:
ans = max(ans, len(s))
return ans
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/models/authidentity.py
|
{
"start": 666,
"end": 2931
}
|
class ____(ReplicatedControlModel):
__relocation_scope__ = RelocationScope.Global
category = OutboxCategory.AUTH_IDENTITY_UPDATE
replication_version = 2
# NOTE: not a fk to sentry user
user = FlexibleForeignKey(settings.AUTH_USER_MODEL)
auth_provider = FlexibleForeignKey("sentry.AuthProvider")
ident = models.CharField(max_length=128)
data = models.JSONField(default=dict)
last_verified = models.DateTimeField(default=timezone.now)
last_synced = models.DateTimeField(default=timezone.now)
date_added = models.DateTimeField(default=timezone.now)
def outbox_region_names(self) -> Collection[str]:
return find_regions_for_orgs([self.auth_provider.organization_id])
def handle_async_replication(self, region_name: str, shard_identifier: int) -> None:
from sentry.auth.services.auth.serial import serialize_auth_identity
from sentry.hybridcloud.services.replica.service import region_replica_service
serialized = serialize_auth_identity(self)
region_replica_service.upsert_replicated_auth_identity(
auth_identity=serialized, region_name=region_name
)
@classmethod
def sanitize_relocation_json(
cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None
) -> None:
model_name = get_model_name(cls) if model_name is None else model_name
super().sanitize_relocation_json(json, sanitizer, model_name)
sanitizer.set_json(json, SanitizableField(model_name, "data"), {})
sanitizer.set_string(json, SanitizableField(model_name, "ident"))
class Meta:
app_label = "sentry"
db_table = "sentry_authidentity"
unique_together = (("auth_provider", "ident"), ("auth_provider", "user"))
indexes = [
models.Index(fields=["last_synced"], name="auth_identity_last_synced_idx"),
]
__repr__ = sane_repr("user_id", "auth_provider_id")
def __str__(self) -> str:
return self.ident
def get_audit_log_data(self):
return {"user_id": self.user_id, "data": self.data}
def get_display_name(self):
return self.user.get_display_name()
def get_label(self):
return self.user.get_label()
|
AuthIdentity
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py
|
{
"start": 20265,
"end": 22048
}
|
class ____(test.TestCase, PythonOpImpl):
# Check the gradients.
def _checkGrad(self, x, paddings, block_size):
assert 4 == x.ndim
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = self.space_to_batch(tf_x, paddings, block_size)
epsilon = 1e-5
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for space_to_batch of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size, pad_beg, pad_end):
block_size_sq = block_size * block_size
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
paddings = np.array(
[[pad_beg, pad_end], [pad_beg, pad_end]], dtype=np.int32)
self._checkGrad(x, paddings, block_size)
# Don't use very large numbers as dimensions here, as the result is a tensor
# with the cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
@test_util.run_deprecated_v1
def testSmall2(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(2, 4, 3, 2, block_size, pad_beg, pad_end)
@test_util.run_deprecated_v1
def testSmallPad1x1(self):
block_size = 2
pad_beg = 1
pad_end = 1
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
|
SpaceToBatchGradientTest
|
python
|
getsentry__sentry
|
src/sentry/charts/types.py
|
{
"start": 53,
"end": 1142
}
|
class ____(Enum):
"""
This enum defines the chart styles we can render.
This directly maps to the chartcuterie configuration [0] in the frontend
code. Be sure to keep these in sync when adding or removing types.
[0]: app/chartcuterie/config.tsx.
"""
SLACK_DISCOVER_TOTAL_PERIOD = "slack:discover.totalPeriod"
SLACK_DISCOVER_TOTAL_DAILY = "slack:discover.totalDaily"
SLACK_DISCOVER_TOP5_PERIOD = "slack:discover.top5Period"
SLACK_DISCOVER_TOP5_PERIOD_LINE = "slack:discover.top5PeriodLine"
SLACK_DISCOVER_TOP5_DAILY = "slack:discover.top5Daily"
SLACK_DISCOVER_PREVIOUS_PERIOD = "slack:discover.previousPeriod"
SLACK_METRIC_ALERT_EVENTS = "slack:metricAlert.events"
SLACK_METRIC_ALERT_SESSIONS = "slack:metricAlert.sessions"
SLACK_PERFORMANCE_ENDPOINT_REGRESSION = "slack:performance.endpointRegression"
SLACK_PERFORMANCE_FUNCTION_REGRESSION = "slack:performance.functionRegression"
SLACK_METRIC_DETECTOR_EVENTS = "slack:metricDetector.events"
SLACK_METRIC_DETECTOR_SESSIONS = "slack:metricDetector.sessions"
|
ChartType
|
python
|
tensorflow__tensorflow
|
tensorflow/python/debug/lib/common_test.py
|
{
"start": 974,
"end": 2361
}
|
class ____(test_util.TensorFlowTestCase):
@test_util.run_v1_only("Relies on tensor name, which is unavailable in TF2")
def testOnFeedOneFetch(self):
a = constant_op.constant(10.0, name="a")
b = constant_op.constant(20.0, name="b")
run_key = common.get_run_key({"a": a}, [b])
loaded = json.loads(run_key)
self.assertItemsEqual(["a:0"], loaded[0])
self.assertItemsEqual(["b:0"], loaded[1])
@test_util.run_v1_only("Relies on tensor name, which is unavailable in TF2")
def testGetRunKeyFlat(self):
a = constant_op.constant(10.0, name="a")
b = constant_op.constant(20.0, name="b")
run_key = common.get_run_key({"a": a}, [a, b])
loaded = json.loads(run_key)
self.assertItemsEqual(["a:0"], loaded[0])
self.assertItemsEqual(["a:0", "b:0"], loaded[1])
@test_util.run_v1_only("Relies on tensor name, which is unavailable in TF2")
def testGetRunKeyNestedFetches(self):
a = constant_op.constant(10.0, name="a")
b = constant_op.constant(20.0, name="b")
c = constant_op.constant(30.0, name="c")
d = constant_op.constant(30.0, name="d")
run_key = common.get_run_key(
{}, {"set1": [a, b], "set2": {"c": c, "d": d}})
loaded = json.loads(run_key)
self.assertItemsEqual([], loaded[0])
self.assertItemsEqual(["a:0", "b:0", "c:0", "d:0"], loaded[1])
if __name__ == "__main__":
googletest.main()
|
CommonTest
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/projects/test_admin_actions.py
|
{
"start": 330,
"end": 2826
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.owner = fixture.get(User)
cls.profile = fixture.get(UserProfile, user=cls.owner, banned=False)
cls.admin = fixture.get(User, is_staff=True, is_superuser=True)
cls.project = fixture.get(
Project,
main_language_project=None,
users=[cls.owner],
)
def setUp(self):
self.client.force_login(self.admin)
def test_project_ban_owner(self):
self.assertFalse(self.owner.profile.banned)
action_data = {
ACTION_CHECKBOX_NAME: [self.project.pk],
"action": "ban_owner",
"index": 0,
}
resp = self.client.post(
urls.reverse("admin:projects_project_changelist"),
action_data,
)
self.assertTrue(self.project.users.filter(profile__banned=True).exists())
self.assertFalse(self.project.users.filter(profile__banned=False).exists())
def test_project_ban_multiple_owners(self):
owner_b = fixture.get(User)
profile_b = fixture.get(UserProfile, user=owner_b, banned=False)
self.project.users.add(owner_b)
self.assertFalse(self.owner.profile.banned)
self.assertFalse(owner_b.profile.banned)
action_data = {
ACTION_CHECKBOX_NAME: [self.project.pk],
"action": "ban_owner",
"index": 0,
}
resp = self.client.post(
urls.reverse("admin:projects_project_changelist"),
action_data,
)
self.assertFalse(self.project.users.filter(profile__banned=True).exists())
self.assertEqual(self.project.users.filter(profile__banned=False).count(), 2)
@mock.patch("readthedocs.projects.admin.clean_project_resources")
def test_project_delete(self, clean_project_resources):
"""Test project and artifacts are removed."""
action_data = {
ACTION_CHECKBOX_NAME: [self.project.pk],
"action": "delete_selected",
"index": 0,
"post": "yes",
}
resp = self.client.post(
urls.reverse("admin:projects_project_changelist"),
action_data,
)
self.assertFalse(Project.objects.filter(pk=self.project.pk).exists())
clean_project_resources.assert_has_calls(
[
mock.call(
self.project,
),
]
)
|
ProjectAdminActionsTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 535177,
"end": 535649
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateCheckSuite"""
__schema__ = github_schema
__field_names__ = ("check_suite", "client_mutation_id")
check_suite = sgqlc.types.Field("CheckSuite", graphql_name="checkSuite")
"""The newly created check suite."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
CreateCheckSuitePayload
|
python
|
pytorch__pytorch
|
torch/_inductor/fx_passes/group_batch_fusion.py
|
{
"start": 49028,
"end": 49245
}
|
class ____(BatchPointwiseOpsPostGradFusion):
def __init__(self, **kwargs) -> None:
super().__init__(aten.tanh.default, **kwargs)
@register_fusion("batch_aten_sigmoid", pre_grad=False)
|
BatchTanhPostGradFusion
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1084040,
"end": 1084568
}
|
class ____(sgqlc.types.Type, Node):
"""A common weakness enumeration"""
__schema__ = github_schema
__field_names__ = ("cwe_id", "description", "name")
cwe_id = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cweId")
"""The id of the CWE"""
description = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="description")
"""A detailed description of this CWE"""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of this CWE"""
|
CWE
|
python
|
PyCQA__pylint
|
doc/data/messages/n/non-parent-init-called/bad.py
|
{
"start": 0,
"end": 77
}
|
class ____:
def __init__(self):
self.is_multicellular = True
|
Animal
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_dms.py
|
{
"start": 35185,
"end": 38911
}
|
class ____:
def mock_describe_replication_response(self, status: str):
return [
{
"ReplicationConfigIdentifier": "string",
"ReplicationConfigArn": "string",
"SourceEndpointArn": "string",
"TargetEndpointArn": "string",
"ReplicationType": "full-load",
"Status": status,
}
]
def mock_replication_response(self, status: str):
return {
"Replication": {
"ReplicationConfigIdentifier": "xxxx",
"ReplicationConfigArn": "xxxx",
"Status": status,
}
}
def test_arg_validation(self):
with pytest.raises(AirflowException):
DmsStartReplicationOperator(
task_id="start_replication",
replication_config_arn="XXXXXXXXXXXXXXX",
replication_start_type="cdc",
cdc_start_pos=1,
cdc_start_time="2024-01-01 00:00:00",
)
DmsStartReplicationOperator(
task_id="start_replication",
replication_config_arn="XXXXXXXXXXXXXXX",
replication_start_type="cdc",
cdc_start_pos=1,
)
DmsStartReplicationOperator(
task_id="start_replication",
replication_config_arn="XXXXXXXXXXXXXXX",
replication_start_type="cdc",
cdc_start_time="2024-01-01 00:00:00",
)
@mock.patch.object(DmsHook, "describe_replications")
@mock.patch.object(DmsHook, "start_replication")
def test_already_running(self, mock_replication, mock_describe):
mock_describe.return_value = self.mock_describe_replication_response("test")
op = DmsStartReplicationOperator(
task_id="start_replication",
replication_config_arn="XXXXXXXXXXXXXXX",
replication_start_type="cdc",
cdc_start_pos=1,
wait_for_completion=False,
deferrable=False,
)
op.execute({})
assert mock_replication.call_count == 0
mock_describe.return_value = self.mock_describe_replication_response("failed")
op.execute({})
mock_replication.return_value = self.mock_replication_response("running")
assert mock_replication.call_count == 1
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "get_waiter")
@mock.patch.object(DmsHook, "describe_replications")
def test_wait_for_completion(self, mock_describe, mock_waiter, mock_conn):
mock_describe.return_value = self.mock_describe_replication_response("stopped")
op = DmsStartReplicationOperator(
task_id="start_replication",
replication_config_arn="XXXXXXXXXXXXXXX",
replication_start_type="cdc",
cdc_start_pos=1,
wait_for_completion=True,
deferrable=False,
)
op.execute({})
mock_waiter.assert_called_with("replication_complete")
mock_waiter.assert_called_once()
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
def test_execute(self, mock_describe, mock_conn):
mock_describe.return_value = self.mock_describe_replication_response("stopped")
op = DmsStartReplicationOperator(
task_id="start_replication",
replication_config_arn="XXXXXXXXXXXXXXX",
replication_start_type="cdc",
cdc_start_pos=1,
wait_for_completion=False,
deferrable=False,
)
op.execute({})
assert mock_conn.start_replication.call_count == 1
|
TestDmsStartReplicationOperator
|
python
|
PrefectHQ__prefect
|
tests/cli/test_deploy.py
|
{
"start": 174102,
"end": 176990
}
|
class ____:
def test_save_user_inputs_no_existing_prefect_file(self):
prefect_file = Path("prefect.yaml")
prefect_file.unlink()
assert not prefect_file.exists()
invoke_and_assert(
command="deploy flows/hello.py:my_flow",
user_input=(
# Accept default deployment name
readchar.key.ENTER
+
# accept create work pool
readchar.key.ENTER
+
# choose process work pool
readchar.key.ENTER
+
# enter work pool name
"inflatable"
+ readchar.key.ENTER
# decline schedule
+ "n"
+ readchar.key.ENTER
+
# Decline remote storage
"n"
+ readchar.key.ENTER
# accept save user inputs
+ "y"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
(
"Would you like to save configuration for this deployment for"
" faster deployments in the future?"
),
"Deployment configuration saved to prefect.yaml",
],
)
assert prefect_file.exists()
with prefect_file.open(mode="r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 1
assert config["deployments"][0]["name"] == "default"
assert config["deployments"][0]["entrypoint"] == "flows/hello.py:my_flow"
assert config["deployments"][0]["schedules"] == []
assert config["deployments"][0]["work_pool"]["name"] == "inflatable"
def test_save_user_inputs_existing_prefect_file(self):
prefect_file = Path("prefect.yaml")
assert prefect_file.exists()
invoke_and_assert(
command="deploy flows/hello.py:my_flow",
user_input=(
# Accept default deployment name
readchar.key.ENTER
+
# accept create work pool
readchar.key.ENTER
+
# choose process work pool
readchar.key.ENTER
+
# enter work pool name
"inflatable"
+ readchar.key.ENTER
# decline schedule
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains="View Deployment in UI",
)
with prefect_file.open(mode="r") as f:
config = yaml.safe_load(f)
assert len(config["deployments"]) == 1
@pytest.mark.usefixtures("project_dir", "interactive_console", "work_pool")
|
TestSaveUserInputs
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-surveycto/source_surveycto/source.py
|
{
"start": 517,
"end": 1612
}
|
class ____(HttpStream, ABC):
transformer: TypeTransformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
def __init__(self, config: Mapping[str, Any], form_id, schema, **kwargs):
super().__init__()
self.config = config
self.schema = schema
self.server_name = config["server_name"]
self.form_id = form_id
self.start_date = config["start_date"]
# base64 encode username and password as auth token
user_name_password = f"{config['username']}:{config['password']}"
self.auth_token = self._base64_encode(user_name_password)
@property
def url_base(self) -> str:
return f"https://{self.server_name}.surveycto.com/" + "api/v2/forms/data/wide/json/"
def _base64_encode(self, string: str) -> str:
return base64.b64encode(string.encode("ascii")).decode("ascii")
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return {}
|
SurveyStream
|
python
|
falconry__falcon
|
tests/asgi/test_hello_asgi.py
|
{
"start": 2791,
"end": 3933
}
|
class ____:
class Emitter:
def __init__(self, value, divisor):
self._value = value
self._divisor = divisor
self._remainder = None
self.closed = False
async def close(self):
self.closed = True
def __aiter__(self):
if self._remainder is None:
quotient, self._remainder = divmod(self._value, self._divisor)
assert quotient >= 0
return self
async def __anext__(self):
if self._value == 0:
raise StopAsyncIteration
if self._value >= self._divisor:
self._value -= self._divisor
return f'{self._divisor}\n'.encode()
self._value = 0
return f'{self._remainder}\n'.encode()
def __init__(self):
self.stream = None
async def on_get(self, req, resp):
self.stream = None
value = req.get_param_as_int('value', default=10)
divisor = req.get_param_as_int('divisor', default=3)
self.stream = resp.stream = self.Emitter(value, divisor)
|
ClosingStreamResource
|
python
|
mlflow__mlflow
|
tests/models/test_signature.py
|
{
"start": 12530,
"end": 12662
}
|
class ____(rag_signatures.ChatCompletionRequest):
custom_input: CustomInput | None = None
@dataclass
|
FlexibleChatCompletionRequest
|
python
|
getsentry__sentry
|
tests/apidocs/endpoints/integration_platform/test_sentry_app_installations.py
|
{
"start": 291,
"end": 1231
}
|
class ____(APIDocsTestCase):
def setUp(self) -> None:
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Jessla", owner=None)
self.create_member(user=self.user, organization=self.org, role="owner")
self.sentry_app = self.create_sentry_app(
name="Tesla App", published=True, organization=self.org
)
self.install = SentryAppInstallation(
sentry_app=self.sentry_app, organization_id=self.org.id
)
self.install.save()
self.login_as(user=self.user)
self.url = reverse(
"sentry-api-0-sentry-app-installations",
kwargs={"organization_id_or_slug": self.org.slug},
)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
|
SentryAppInstallationDocsTest
|
python
|
charliermarsh__ruff
|
crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py
|
{
"start": 497,
"end": 527
}
|
class ____((Aaaa)):
...
|
Test
|
python
|
django__django
|
tests/auth_tests/test_login.py
|
{
"start": 147,
"end": 1056
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(username="testuser", password="password")
def setUp(self):
self.request = HttpRequest()
self.request.session = self.client.session
def test_user_login(self):
auth.login(self.request, self.user)
self.assertEqual(self.request.session[auth.SESSION_KEY], str(self.user.pk))
def test_inactive_user(self):
self.user.is_active = False
self.user.save(update_fields=["is_active"])
auth.login(self.request, self.user)
self.assertEqual(self.request.session[auth.SESSION_KEY], str(self.user.pk))
def test_without_user(self):
with self.assertRaisesMessage(
AttributeError,
"'NoneType' object has no attribute 'get_session_auth_hash'",
):
auth.login(self.request, None)
|
TestLogin
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/organization_group_search_view_visit.py
|
{
"start": 716,
"end": 1705
}
|
class ____(OrganizationEndpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ISSUES
permission_classes = (MemberPermission,)
def post(self, request: Request, organization: Organization, view_id: str) -> Response:
"""
Update the last_visited timestamp for a GroupSearchView for the current organization member.
"""
try:
view = GroupSearchView.objects.get(id=view_id, organization=organization)
except GroupSearchView.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
# Create or update the last_visited timestamp
GroupSearchViewLastVisited.objects.create_or_update(
organization=organization,
user_id=request.user.id,
group_search_view=view,
values={"last_visited": timezone.now()},
)
return Response(status=status.HTTP_204_NO_CONTENT)
|
OrganizationGroupSearchViewVisitEndpoint
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-aws/prefect_aws/observers/ecs.py
|
{
"start": 2622,
"end": 3050
}
|
class ____:
def __init__(self, *statuses: EcsTaskLastStatus):
self.statuses = statuses
def is_match(self, last_status: EcsTaskLastStatus) -> bool:
return not self.statuses or last_status in self.statuses
HandlerWithFilters = NamedTuple(
"HandlerWithFilters",
[
("handler", Union[EcsEventHandler, AsyncEcsEventHandler]),
("filters", EventHandlerFilters),
],
)
|
LastStatusFilter
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/basic_session_run_hooks_test.py
|
{
"start": 35825,
"end": 37629
}
|
class ____(test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
with variable_scope.variable_scope('foo', use_resource=True):
self.global_step = training_util.get_or_create_global_step()
self.train_op = training_util._increment_global_step(1)
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# saved
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
|
ResourceCheckpointSaverHookTest
|
python
|
django__django
|
tests/admin_views/tests.py
|
{
"start": 297094,
"end": 308491
}
|
class ____(AdminFieldExtractionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_readonly_get(self):
response = self.client.get(reverse("admin:admin_views_post_add"))
self.assertNotContains(response, 'name="posted"')
# 3 fields + 2 submit buttons + 5 inline management form fields, + 2
# hidden fields for inlines + 1 field for the inline + 2 empty form
# + 1 logout form.
self.assertContains(response, "<input", count=17)
self.assertContains(response, formats.localize(datetime.date.today()))
self.assertContains(response, "<label>Awesomeness level:</label>")
self.assertContains(response, "Very awesome.")
self.assertContains(response, "Unknown coolness.")
self.assertContains(response, "foo")
# Multiline text in a readonly field gets <br> tags
self.assertContains(response, "Multiline<br>test<br>string")
self.assertContains(
response,
'<div class="readonly">Multiline<br>html<br>content</div>',
html=True,
)
self.assertContains(response, "InlineMultiline<br>test<br>string")
self.assertContains(
response,
formats.localize(datetime.date.today() - datetime.timedelta(days=7)),
)
self.assertContains(response, '<div class="form-row field-coolness">')
self.assertContains(response, '<div class="form-row field-awesomeness_level">')
self.assertContains(response, '<div class="form-row field-posted">')
self.assertContains(response, '<div class="form-row field-value">')
self.assertContains(response, '<div class="form-row">')
self.assertContains(response, '<div class="help"', 3)
self.assertContains(
response,
'<div class="help" id="id_title_helptext"><div>Some help text for the '
"title (with Unicode ŠĐĆŽćžšđ)</div></div>",
html=True,
)
self.assertContains(
response,
'<div class="help" id="id_content_helptext"><div>Some help text for the '
"content (with Unicode ŠĐĆŽćžšđ)</div></div>",
html=True,
)
self.assertContains(
response,
'<div class="help"><div>Some help text for the date (with Unicode ŠĐĆŽćžšđ)'
"</div></div>",
html=True,
)
p = Post.objects.create(
title="I worked on readonly_fields", content="Its good stuff"
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
self.assertContains(response, "%d amount of cool" % p.pk)
def test_readonly_text_field(self):
p = Post.objects.create(
title="Readonly test",
content="test",
readonly_content="test\r\n\r\ntest\r\n\r\ntest\r\n\r\ntest",
)
Link.objects.create(
url="http://www.djangoproject.com",
post=p,
readonly_link_content="test\r\nlink",
)
response = self.client.get(
reverse("admin:admin_views_post_change", args=(p.pk,))
)
# Checking readonly field.
self.assertContains(response, "test<br><br>test<br><br>test<br><br>test")
# Checking readonly field in inline.
self.assertContains(response, "test<br>link")
def test_readonly_post(self):
data = {
"title": "Django Got Readonly Fields",
"content": "This is an incredible development.",
"link_set-TOTAL_FORMS": "1",
"link_set-INITIAL_FORMS": "0",
"link_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 1)
p = Post.objects.get()
self.assertEqual(p.posted, datetime.date.today())
data["posted"] = "10-8-1990" # some date that's not today
response = self.client.post(reverse("admin:admin_views_post_add"), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 2)
p = Post.objects.order_by("-id")[0]
self.assertEqual(p.posted, datetime.date.today())
def test_readonly_manytomany(self):
"Regression test for #13004"
response = self.client.get(reverse("admin:admin_views_pizza_add"))
self.assertEqual(response.status_code, 200)
def test_user_password_change_limited_queryset(self):
su = User.objects.filter(is_superuser=True)[0]
response = self.client.get(
reverse("admin2:auth_user_password_change", args=(su.pk,))
)
self.assertEqual(response.status_code, 404)
def test_change_form_renders_correct_null_choice_value(self):
"""
Regression test for #17911.
"""
choice = Choice.objects.create(choice=None)
response = self.client.get(
reverse("admin:admin_views_choice_change", args=(choice.pk,))
)
self.assertContains(
response, '<div class="readonly">No opinion</div>', html=True
)
def _test_readonly_foreignkey_links(self, admin_site):
"""
ForeignKey readonly fields render as links if the target model is
registered in admin.
"""
chapter = Chapter.objects.create(
title="Chapter 1",
content="content",
book=Book.objects.create(name="Book 1"),
)
language = Language.objects.create(iso="_40", name="Test")
obj = ReadOnlyRelatedField.objects.create(
chapter=chapter,
language=language,
user=self.superuser,
)
response = self.client.get(
reverse(
f"{admin_site}:admin_views_readonlyrelatedfield_change", args=(obj.pk,)
),
)
# Related ForeignKey object registered in admin.
user_url = reverse(f"{admin_site}:auth_user_change", args=(self.superuser.pk,))
self.assertContains(
response,
'<div class="readonly"><a href="%s">super</a></div>' % user_url,
html=True,
)
# Related ForeignKey with the string primary key registered in admin.
language_url = reverse(
f"{admin_site}:admin_views_language_change",
args=(quote(language.pk),),
)
self.assertContains(
response,
'<div class="readonly"><a href="%s">_40</a></div>' % language_url,
html=True,
)
# Related ForeignKey object not registered in admin.
self.assertContains(
response, '<div class="readonly">Chapter 1</div>', html=True
)
def test_readonly_foreignkey_links_default_admin_site(self):
self._test_readonly_foreignkey_links("admin")
def test_readonly_foreignkey_links_custom_admin_site(self):
self._test_readonly_foreignkey_links("namespaced_admin")
def test_readonly_manytomany_backwards_ref(self):
"""
Regression test for #16433 - backwards references for related objects
broke if the related field is read-only due to the help_text attribute
"""
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(reverse("admin:admin_views_topping_add"))
self.assertEqual(response.status_code, 200)
def test_readonly_manytomany_forwards_ref(self):
topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)
response = self.client.get(
reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
)
self.assertContains(response, "<label>Toppings:</label>", html=True)
self.assertContains(response, '<div class="readonly">Salami</div>', html=True)
def test_readonly_onetoone_backwards_ref(self):
"""
Can reference a reverse OneToOneField in ModelAdmin.readonly_fields.
"""
v1 = Villain.objects.create(name="Adam")
pl = Plot.objects.create(name="Test Plot", team_leader=v1, contact=v1)
pd = PlotDetails.objects.create(details="Brand New Plot", plot=pl)
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
pd_url = reverse("admin:admin_views_plotdetails_change", args=(pd.pk,))
self.assertEqual(field.contents(), '<a href="%s">Brand New Plot</a>' % pd_url)
# The reverse relation also works if the OneToOneField is null.
pd.plot = None
pd.save()
response = self.client.get(
reverse("admin:admin_views_plotproxy_change", args=(pl.pk,))
)
field = self.get_admin_readonly_field(response, "plotdetails")
self.assertEqual(field.contents(), "-") # default empty value
@skipUnlessDBFeature("supports_stored_generated_columns")
def test_readonly_unsaved_generated_field(self):
response = self.client.get(reverse("admin:admin_views_square_add"))
self.assertContains(response, '<div class="readonly">-</div>')
def test_readonly_field_overrides(self):
"""
Regression test for #22087 - ModelForm Meta overrides are ignored by
AdminReadonlyField
"""
p = FieldOverridePost.objects.create(title="Test Post", content="Test Content")
response = self.client.get(
reverse("admin:admin_views_fieldoverridepost_change", args=(p.pk,))
)
self.assertContains(
response,
'<div class="help"><div>Overridden help text for the date</div></div>',
html=True,
)
self.assertContains(
response,
'<label for="id_public">Overridden public label:</label>',
html=True,
)
self.assertNotContains(
response, "Some help text for the date (with Unicode ŠĐĆŽćžšđ)"
)
def test_correct_autoescaping(self):
"""
Make sure that non-field readonly elements are properly autoescaped
(#24461)
"""
section = Section.objects.create(name="<a>evil</a>")
response = self.client.get(
reverse("admin:admin_views_section_change", args=(section.pk,))
)
self.assertNotContains(response, "<a>evil</a>", status_code=200)
self.assertContains(response, "&lt;a&gt;evil&lt;/a&gt;", status_code=200)
def test_label_suffix_translated(self):
pizza = Pizza.objects.create(name="Americano")
url = reverse("admin:admin_views_pizza_change", args=(pizza.pk,))
with self.settings(LANGUAGE_CODE="fr"):
response = self.client.get(url)
self.assertContains(response, "<label>Toppings\u00a0:</label>", html=True)
@override_settings(ROOT_URLCONF="admin_views.urls")
|
ReadonlyTest
|
python
|
dask__dask
|
dask/tests/test_expr.py
|
{
"start": 618,
"end": 2306
}
|
class ____(Expr):
called_cached_property = False
_parameters = ["foo", "bar"]
@property
def baz(self):
return self.foo + self.bar
@functools.cached_property
def cached_property(self):
if MyExprCachedProperty.called_cached_property:
raise RuntimeError("No!")
MyExprCachedProperty.called_cached_property = True
return self.foo + self.bar
@pytest.mark.slow()
def test_pickle_cached_properties():
pytest.importorskip("distributed")
from distributed import Nanny
from distributed.utils_test import gen_cluster
@gen_cluster(client=True, Worker=Nanny, nthreads=[("", 1)])
async def test(c, s, a):
expr = MyExprCachedProperty(foo=1, bar=2)
for _ in range(10):
assert expr.baz == 3
assert expr.cached_property == 3
assert MyExprCachedProperty.called_cached_property is True
rt = pickle.loads(pickle.dumps(expr))
assert rt.cached_property == 3
assert MyExprCachedProperty.called_cached_property is True
# But this does
expr3 = MyExprCachedProperty(foo=1, bar=3)
with pytest.raises(RuntimeError):
expr3.cached_property
def f(expr):
# We want the cache to be part of the pickle, i.e. this is a
# different process such that the type is reset and the property can
# be accessed without side effects
assert MyExprCachedProperty.called_cached_property is False
assert expr.cached_property == 3
assert MyExprCachedProperty.called_cached_property is False
await c.submit(f, expr)
test()
|
MyExprCachedProperty
|
python
|
kamyu104__LeetCode-Solutions
|
Python/base-7.py
|
{
"start": 306,
"end": 613
}
|
class ____(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num < 0:
return '-' + self.convertToBase7(-num)
if num < 7:
return str(num)
return self.convertToBase7(num // 7) + str(num % 7)
|
Solution2
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/layers/advanced_activations.py
|
{
"start": 8717,
"end": 10985
}
|
class ____(Layer):
"""Softmax activation function.
Example without mask:
>>> inp = np.asarray([1., 2., 1.])
>>> layer = tf.keras.layers.Softmax()
>>> layer(inp).numpy()
array([0.21194157, 0.5761169 , 0.21194157], dtype=float32)
>>> mask = np.asarray([True, False, True], dtype=bool)
>>> layer(inp, mask).numpy()
array([0.5, 0. , 0.5], dtype=float32)
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
Call arguments:
inputs: The inputs, or logits to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. Defaults to `None`. The
mask specifies 1 to keep and 0 to mask.
Returns:
softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super(Softmax, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
def call(self, inputs, mask=None):
if mask is not None:
# Since mask is 1.0 for positions we want to keep and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -1e9 for masked positions.
adder = (1.0 - math_ops.cast(mask, inputs.dtype)) * (
_large_compatible_negative(inputs.dtype))
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return math_ops.exp(inputs - math_ops.reduce_logsumexp(
inputs, axis=self.axis, keepdims=True))
else:
return backend.softmax(inputs, axis=self.axis[0])
return backend.softmax(inputs, axis=self.axis)
def get_config(self):
config = {'axis': self.axis}
base_config = super(Softmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
|
Softmax
|
python
|
has2k1__plotnine
|
plotnine/scales/limits.py
|
{
"start": 3144,
"end": 3496
}
|
class ____(_lim):
"""
Set y-axis limits
Parameters
----------
*limits :
Min and max limits. Must be of size 2.
You can also pass two values e.g
`ylim(40, 100)`
Notes
-----
If the 2nd value of `limits` is less than
the first, a reversed scale will be created.
"""
aesthetic = "y"
|
ylim
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/resolved/base.py
|
{
"start": 1148,
"end": 8986
}
|
class ____:
"""Base class for making a class resolvable from yaml.
This framework is designed to allow complex nested objects to be resolved
from yaml documents. This allows for a single class to be instantiated from
either yaml or python without limiting the types of fields that can exist on
the python class.
Key Features:
- **Automatic yaml schema derivation**: A pydantic model is automatically generated from the class definition using its fields or `__init__` arguments and their annotations.
- **Jinja template resolution**: Fields in the yaml document may be templated strings, which are rendered from the available scope and may be arbitrary python objects.
- **Customizable resolution behavior**: Each field can customize how it is resolved from the yaml document using a :py:class:`~dagster.Resolver`.
Resolvable subclasses must be one of the following:
* pydantic model
* @dataclass
* plain class with an annotated `__init__`
* @record
Example:
.. code-block:: python
import datetime
from typing import Annotated
import dagster as dg
def resolve_timestamp(
context: dg.ResolutionContext,
raw_timestamp: str,
) -> datetime.datetime:
return datetime.datetime.fromisoformat(
context.resolve_value(raw_timestamp, as_type=str),
)
# the yaml field will be a string, which is then parsed into a datetime object
ResolvedTimestamp = Annotated[
datetime.datetime,
dg.Resolver(resolve_timestamp, model_field_type=str),
]
class MyClass(dg.Resolvable, dg.Model):
event: str
start_timestamp: ResolvedTimestamp
end_timestamp: ResolvedTimestamp
# python instantiation
in_python = MyClass(
event="test",
start_timestamp=datetime.datetime(2021, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
end_timestamp=datetime.datetime(2021, 1, 2, 0, 0, 0, tzinfo=datetime.timezone.utc),
)
# yaml instantiation
in_yaml = MyClass.resolve_from_yaml(
'''
event: test
start_timestamp: '{{ start_year }}-01-01T00:00:00Z'
end_timestamp: '{{ end_timestamp }}'
''',
scope={
# string templating
"start_year": "2021",
# object templating
"end_timestamp": in_python.end_timestamp,
},
)
assert in_python == in_yaml
"""
@classmethod
def model(cls) -> type[BaseModel]:
return derive_model_type(cls)
@classmethod
def resolve_from_model(cls, context: "ResolutionContext", model: BaseModel):
return cls(**resolve_fields(model, cls, context))
@classmethod
def resolve_from_yaml(
cls,
yaml: str,
*,
scope: Optional[Mapping[str, Any]] = None,
):
parsed_and_src_tree = try_parse_yaml_with_source_position(yaml)
model_cls = cls.model()
if parsed_and_src_tree:
model = _parse_and_populate_model_with_annotated_errors(
cls=model_cls,
obj_parse_root=parsed_and_src_tree,
obj_key_path_prefix=[],
)
else: # yaml parsed as None
model = model_cls()
context = ResolutionContext.default(
parsed_and_src_tree.source_position_tree if parsed_and_src_tree else None
)
if scope:
context = context.with_scope(**scope)
return cls.resolve_from_model(context, model)
@classmethod
def resolve_from_dict(cls, dictionary: dict[str, Any]):
# Convert dictionary to YAML string
# default_flow_style=False makes it use block style instead of inline
yaml_string = yaml.dump(
dictionary,
default_flow_style=False,
sort_keys=False, # Preserve dictionary order
indent=2, # Set indentation level
)
return cls.resolve_from_yaml(yaml_string)
# marker type for skipping kwargs and triggering defaults
# must be a string to make sure it is json serializable
_Unset: Final[str] = "__DAGSTER_UNSET_DEFAULT__"
def derive_model_type(
target_type: type[Resolvable],
) -> type[BaseModel]:
if target_type not in _DERIVED_MODEL_REGISTRY:
model_name = f"{target_type.__name__}Model"
model_fields: dict[
str, Any
] = {} # use Any to appease type checker when **-ing in to create_model
for name, annotation_info in _get_annotations(target_type).items():
field_resolver = _get_resolver(annotation_info.type, name)
field_name = field_resolver.model_field_name or name
field_type = field_resolver.model_field_type or annotation_info.type
field_infos = []
if annotation_info.field_info:
field_infos.append(annotation_info.field_info)
if annotation_info.has_default:
# if the annotation has a serializable default
# value, propagate it to the inner schema, otherwise
# use a marker value that will cause the kwarg
# to get omitted when we resolve fields in order
# to trigger the default on the target type
default_value = (
annotation_info.default
if type(annotation_info.default) in {int, float, str, bool, type(None)}
else _Unset
)
field_infos.append(
Field(
default=default_value,
description=field_resolver.description,
examples=field_resolver.examples,
),
)
elif field_resolver.description or field_resolver.examples:
field_infos.append(
Field(
description=field_resolver.description,
examples=field_resolver.examples,
)
)
# make all fields injectable
if field_type != str:
field_type = Union[field_type, str]
model_fields[field_name] = (
field_type,
FieldInfo.merge_field_infos(*field_infos),
)
try:
_DERIVED_MODEL_REGISTRY[target_type] = create_model(
model_name,
__base__=Model,
**model_fields,
)
except PydanticSchemaGenerationError as e:
raise ResolutionException(f"Unable to derive Model for {target_type}") from e
return _DERIVED_MODEL_REGISTRY[target_type]
def _is_implicitly_resolved_type(annotation):
if annotation in (int, float, str, bool, Any, type(None), list, dict):
return True
if safe_is_subclass(annotation, Enum):
return True
if safe_is_subclass(annotation, Resolvable):
# ensure valid Resolvable subclass
annotation.model()
return False
if safe_is_subclass(annotation, BaseModel):
_ensure_non_resolvable_model_compliance(annotation)
return True
origin = get_origin(annotation)
args = get_args(annotation)
if origin in (Union, UnionType, list, Sequence, tuple, dict, Mapping) and all(
_is_implicitly_resolved_type(arg) for arg in args
):
return True
if origin is Literal and all(_is_implicitly_resolved_type(type(arg)) for arg in args):
return True
return False
def _is_resolvable_type(annotation):
return _is_implicitly_resolved_type(annotation) or safe_is_subclass(annotation, Resolvable)
@record
|
Resolvable
|
python
|
great-expectations__great_expectations
|
great_expectations/core/serializer.py
|
{
"start": 910,
"end": 1709
}
|
class ____(abc.ABC):
"""Serializer interface.
Note: When mypy coverage is enhanced further, this Abstract class can be replaced with a Protocol.
""" # noqa: E501 # FIXME CoP
def __init__(self, schema: Schema) -> None:
"""
Args:
schema: Marshmallow schema defining raw serialized version of object.
"""
self.schema = schema
@abc.abstractmethod
def serialize(self, obj: AbstractConfig) -> dict:
"""Serialize to serializer specific data type.
Note, specific return type to be implemented in subclasses.
Args:
obj: Object to serialize.
Returns:
Representation of object in serializer specific data type.
"""
raise NotImplementedError
|
AbstractConfigSerializer
|
python
|
ray-project__ray
|
python/ray/train/v2/tests/util.py
|
{
"start": 3907,
"end": 4402
}
|
class ____(FailurePolicy):
def __init__(self, failure_config):
self._decision_queue = []
super().__init__(failure_config)
def make_decision(
self, training_failed_error: TrainingFailedError
) -> FailureDecision:
if self._decision_queue:
return self._decision_queue.pop(0)
return FailureDecision.NOOP
# === Test methods ===
def queue_decision(self, decision):
self._decision_queue.append(decision)
|
MockFailurePolicy
|
python
|
ray-project__ray
|
python/ray/serve/_private/utils.py
|
{
"start": 3003,
"end": 8384
}
|
class ____:
"""Group of custom encoders for common types that's not handled by FastAPI."""
@staticmethod
def encode_np_array(obj):
assert isinstance(obj, np.ndarray)
if obj.dtype.kind == "f": # floats
obj = obj.astype(float)
if obj.dtype.kind in {"i", "u"}: # signed and unsigned integers.
obj = obj.astype(int)
return obj.tolist()
@staticmethod
def encode_np_scaler(obj):
assert isinstance(obj, np.generic)
return obj.item()
@staticmethod
def encode_exception(obj):
assert isinstance(obj, Exception)
return str(obj)
@staticmethod
def encode_pandas_dataframe(obj):
assert isinstance(obj, pd.DataFrame)
return obj.to_dict(orient="records")
serve_encoders = {Exception: _ServeCustomEncoders.encode_exception}
if np is not None:
serve_encoders[np.ndarray] = _ServeCustomEncoders.encode_np_array
serve_encoders[np.generic] = _ServeCustomEncoders.encode_np_scaler
if pd is not None:
serve_encoders[pd.DataFrame] = _ServeCustomEncoders.encode_pandas_dataframe
@ray.remote(num_cpus=0)
def block_until_http_ready(
http_endpoint,
backoff_time_s=1,
check_ready=None,
timeout=HTTP_PROXY_TIMEOUT,
):
http_is_ready = False
start_time = time.time()
while not http_is_ready:
try:
resp = requests.get(http_endpoint)
assert resp.status_code == 200
if check_ready is None:
http_is_ready = True
else:
http_is_ready = check_ready(resp)
except Exception:
pass
if 0 < timeout < time.time() - start_time:
raise TimeoutError("HTTP proxy not ready after {} seconds.".format(timeout))
time.sleep(backoff_time_s)
def get_random_string(length: int = 8):
return get_random_alphanumeric_string(length)
def format_actor_name(actor_name, *modifiers):
name = actor_name
for modifier in modifiers:
name += "-{}".format(modifier)
return name
def ensure_serialization_context():
"""Ensure the serialization addons on registered, even when Ray has not
been started."""
ctx = StandaloneSerializationContext()
ray.util.serialization_addons.apply(ctx)
def msgpack_serialize(obj):
ctx = ray._private.worker.global_worker.get_serialization_context()
buffer = ctx.serialize(obj)
serialized = buffer.to_bytes()
return serialized
def msgpack_deserialize(data):
# todo: Ray does not provide a msgpack deserialization api.
try:
obj = MessagePackSerializer.loads(data[MESSAGE_PACK_OFFSET:], None)
except Exception:
raise
return obj
def merge_dict(dict1, dict2):
if dict1 is None and dict2 is None:
return None
if dict1 is None:
dict1 = dict()
if dict2 is None:
dict2 = dict()
result = dict()
for key in dict1.keys() | dict2.keys():
result[key] = sum([e.get(key, 0) for e in (dict1, dict2)])
return result
def parse_import_path(import_path: str):
"""
Takes in an import_path of form:
[subdirectory 1].[subdir 2]...[subdir n].[file name].[attribute name]
Parses this path and returns the module name (everything before the last
dot) and attribute name (everything after the last dot), such that the
attribute can be imported using "from module_name import attr_name".
"""
nodes = import_path.split(".")
if len(nodes) < 2:
raise ValueError(
f"Got {import_path} as import path. The import path "
f"should at least specify the file name and "
f"attribute name connected by a dot."
)
return ".".join(nodes[:-1]), nodes[-1]
def override_runtime_envs_except_env_vars(parent_env: Dict, child_env: Dict) -> Dict:
"""Creates a runtime_env dict by merging a parent and child environment.
This method is not destructive. It leaves the parent and child envs
the same.
The merge is a shallow update where the child environment inherits the
parent environment's settings. If the child environment specifies any
env settings, those settings take precedence over the parent.
- Note: env_vars are a special case. The child's env_vars are combined
with the parent.
Args:
parent_env: The environment to inherit settings from.
child_env: The environment with override settings.
Returns: A new dictionary containing the merged runtime_env settings.
Raises:
TypeError: If a dictionary is not passed in for parent_env or child_env.
"""
if not isinstance(parent_env, Dict):
raise TypeError(
f'Got unexpected type "{type(parent_env)}" for parent_env. '
"parent_env must be a dictionary."
)
if not isinstance(child_env, Dict):
raise TypeError(
f'Got unexpected type "{type(child_env)}" for child_env. '
"child_env must be a dictionary."
)
defaults = copy.deepcopy(parent_env)
overrides = copy.deepcopy(child_env)
default_env_vars = defaults.get("env_vars", {})
override_env_vars = overrides.get("env_vars", {})
defaults.update(overrides)
default_env_vars.update(override_env_vars)
defaults["env_vars"] = default_env_vars
return defaults
|
_ServeCustomEncoders
|
python
|
sympy__sympy
|
sympy/functions/special/elliptic_integrals.py
|
{
"start": 5661,
"end": 9806
}
|
class ____(DefinedFunction):
r"""
Called with two arguments $z$ and $m$, evaluates the
incomplete elliptic integral of the second kind, defined by
.. math:: E\left(z\middle| m\right) = \int_0^z \sqrt{1 - m \sin^2 t} dt
Called with a single argument $m$, evaluates the Legendre complete
elliptic integral of the second kind
.. math:: E(m) = E\left(\tfrac{\pi}{2}\middle| m\right)
Explanation
===========
The function $E(m)$ is a single-valued function on the complex
plane with branch cut along the interval $(1, \infty)$.
Note that our notation defines the incomplete elliptic integral
in terms of the parameter $m$ instead of the elliptic modulus
(eccentricity) $k$.
In this case, the parameter $m$ is defined as $m=k^2$.
Examples
========
>>> from sympy import elliptic_e, I
>>> from sympy.abc import z, m
>>> elliptic_e(z, m).series(z)
z + z**5*(-m**2/40 + m/30) - m*z**3/6 + O(z**6)
>>> elliptic_e(m).series(n=4)
pi/2 - pi*m/8 - 3*pi*m**2/128 - 5*pi*m**3/512 + O(m**4)
>>> elliptic_e(1 + I, 2 - I/2).n()
1.55203744279187 + 0.290764986058437*I
>>> elliptic_e(0)
pi/2
>>> elliptic_e(2.0 - I)
0.991052601328069 + 0.81879421395609*I
References
==========
.. [1] https://en.wikipedia.org/wiki/Elliptic_integrals
.. [2] https://functions.wolfram.com/EllipticIntegrals/EllipticE2
.. [3] https://functions.wolfram.com/EllipticIntegrals/EllipticE
"""
@classmethod
def eval(cls, m, z=None):
if z is not None:
z, m = m, z
k = 2*z/pi
if m.is_zero:
return z
if z.is_zero:
return S.Zero
elif k.is_integer:
return k*elliptic_e(m)
elif m in (S.Infinity, S.NegativeInfinity):
return S.ComplexInfinity
elif z.could_extract_minus_sign():
return -elliptic_e(-z, m)
else:
if m.is_zero:
return pi/2
elif m is S.One:
return S.One
elif m is S.Infinity:
return I*S.Infinity
elif m is S.NegativeInfinity:
return S.Infinity
elif m is S.ComplexInfinity:
return S.ComplexInfinity
def fdiff(self, argindex=1):
if len(self.args) == 2:
z, m = self.args
if argindex == 1:
return sqrt(1 - m*sin(z)**2)
elif argindex == 2:
return (elliptic_e(z, m) - elliptic_f(z, m))/(2*m)
else:
m = self.args[0]
if argindex == 1:
return (elliptic_e(m) - elliptic_k(m))/(2*m)
raise ArgumentIndexError(self, argindex)
def _eval_conjugate(self):
if len(self.args) == 2:
z, m = self.args
if (m.is_real and (m - 1).is_positive) is False:
return self.func(z.conjugate(), m.conjugate())
else:
m = self.args[0]
if (m.is_real and (m - 1).is_positive) is False:
return self.func(m.conjugate())
def _eval_nseries(self, x, n, logx, cdir=0):
from sympy.simplify import hyperexpand
if len(self.args) == 1:
return hyperexpand(self.rewrite(hyper)._eval_nseries(x, n=n, logx=logx))
return super()._eval_nseries(x, n=n, logx=logx)
def _eval_rewrite_as_hyper(self, *args, **kwargs):
if len(args) == 1:
m = args[0]
return (pi/2)*hyper((Rational(-1, 2), S.Half), (S.One,), m)
def _eval_rewrite_as_meijerg(self, *args, **kwargs):
if len(args) == 1:
m = args[0]
return -meijerg(((S.Half, Rational(3, 2)), []), \
((S.Zero,), (S.Zero,)), -m)/4
def _eval_rewrite_as_Integral(self, *args, **kwargs):
from sympy.integrals.integrals import Integral
z, m = (pi/2, self.args[0]) if len(self.args) == 1 else self.args
t = Dummy(uniquely_named_symbol('t', args).name)
return Integral(sqrt(1 - m*sin(t)**2), (t, 0, z))
|
elliptic_e
|
python
|
dateutil__dateutil
|
src/dateutil/tz/tz.py
|
{
"start": 10691,
"end": 28028
}
|
class ____(_tzinfo):
"""
This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
format timezone files to extract current and historical zone information.
:param fileobj:
This can be an opened file stream or a file name that the time zone
information can be read from.
:param filename:
This is an optional parameter specifying the source of the time zone
information in the event that ``fileobj`` is a file object. If omitted
and ``fileobj`` is a file stream, this parameter will be set either to
``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
See `Sources for Time Zone and Daylight Saving Time Data
<https://data.iana.org/time-zones/tz-link.html>`_ for more information.
Time zone files can be compiled from the `IANA Time Zone database files
<https://www.iana.org/time-zones>`_ with the `zic time zone compiler
<https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
.. note::
Only construct a ``tzfile`` directly if you have a specific timezone
file on disk that you want to read into a Python ``tzinfo`` object.
If you want to get a ``tzfile`` representing a specific IANA zone,
(e.g. ``'America/New_York'``), you should call
:func:`dateutil.tz.gettz` with the zone identifier.
**Examples:**
Using the US Eastern time zone as an example, we can see that a ``tzfile``
provides time zone information for the standard Daylight Saving offsets:
.. testsetup:: tzfile
from dateutil.tz import gettz
from datetime import datetime
.. doctest:: tzfile
>>> NYC = gettz('America/New_York')
>>> NYC
tzfile('/usr/share/zoneinfo/America/New_York')
>>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST
2016-01-03 00:00:00-05:00
>>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT
2016-07-07 00:00:00-04:00
The ``tzfile`` structure contains a full history of the time zone,
so historical dates will also have the right offsets. For example, before
the adoption of the UTC standards, New York used local solar mean time:
.. doctest:: tzfile
>>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT
1901-04-12 00:00:00-04:56
And during World War II, New York was on "Eastern War Time", which was a
state of permanent daylight saving time:
.. doctest:: tzfile
>>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT
1944-02-07 00:00:00-04:00
"""
def __init__(self, fileobj, filename=None):
super(tzfile, self).__init__()
file_opened_here = False
if isinstance(fileobj, string_types):
self._filename = fileobj
fileobj = open(fileobj, 'rb')
file_opened_here = True
elif filename is not None:
self._filename = filename
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = repr(fileobj)
if fileobj is not None:
if not file_opened_here:
fileobj = _nullcontext(fileobj)
with fileobj as file_stream:
tzobj = self._read_tzfile(file_stream)
self._set_tzdata(tzobj)
def _set_tzdata(self, tzobj):
""" Set the time zone data of this object from a _tzfile object """
# Copy the relevant attributes over as private attributes
for attr in _tzfile.attrs:
setattr(self, '_' + attr, getattr(tzobj, attr))
def _read_tzfile(self, fileobj):
out = _tzfile()
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4).decode() != "TZif":
raise ValueError("magic not found")
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
out.trans_list_utc = list(struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4)))
else:
out.trans_list_utc = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
out.trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
out.trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt).decode()
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now (but seek for correct file position)
if leapcnt:
fileobj.seek(leapcnt * 8, os.SEEK_CUR)
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# Build ttinfo list
out.ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
gmtoff = _get_supported_offset(gmtoff)
tti = _ttinfo()
tti.offset = gmtoff
tti.dstoffset = datetime.timedelta(0)
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
out.ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
out.ttinfo_std = None
out.ttinfo_dst = None
out.ttinfo_before = None
if out.ttinfo_list:
if not out.trans_list_utc:
out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
else:
for i in range(timecnt-1, -1, -1):
tti = out.trans_idx[i]
if not out.ttinfo_std and not tti.isdst:
out.ttinfo_std = tti
elif not out.ttinfo_dst and tti.isdst:
out.ttinfo_dst = tti
if out.ttinfo_std and out.ttinfo_dst:
break
else:
if out.ttinfo_dst and not out.ttinfo_std:
out.ttinfo_std = out.ttinfo_dst
for tti in out.ttinfo_list:
if not tti.isdst:
out.ttinfo_before = tti
break
else:
out.ttinfo_before = out.ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
lastdst = None
lastoffset = None
lastdstoffset = None
lastbaseoffset = None
out.trans_list = []
for i, tti in enumerate(out.trans_idx):
offset = tti.offset
dstoffset = 0
if lastdst is not None:
if tti.isdst:
if not lastdst:
dstoffset = offset - lastoffset
if not dstoffset and lastdstoffset:
dstoffset = lastdstoffset
tti.dstoffset = datetime.timedelta(seconds=dstoffset)
lastdstoffset = dstoffset
# If a time zone changes its base offset during a DST transition,
# then you need to adjust by the previous base offset to get the
# transition time in local time. Otherwise you use the current
# base offset. Ideally, I would have some mathematical proof of
# why this is true, but I haven't really thought about it enough.
baseoffset = offset - dstoffset
adjustment = baseoffset
if (lastbaseoffset is not None and baseoffset != lastbaseoffset
and tti.isdst != lastdst):
# The base DST has changed
adjustment = lastbaseoffset
lastdst = tti.isdst
lastoffset = offset
lastbaseoffset = baseoffset
out.trans_list.append(out.trans_list_utc[i] + adjustment)
out.trans_idx = tuple(out.trans_idx)
out.trans_list = tuple(out.trans_list)
out.trans_list_utc = tuple(out.trans_list_utc)
return out
def _find_last_transition(self, dt, in_utc=False):
# If there's no list, there are no transitions to find
if not self._trans_list:
return None
timestamp = _datetime_to_timestamp(dt)
# Find where the timestamp fits in the transition list - if the
# timestamp is a transition time, it's part of the "after" period.
trans_list = self._trans_list_utc if in_utc else self._trans_list
idx = bisect.bisect_right(trans_list, timestamp)
# We want to know when the previous transition was, so subtract off 1
return idx - 1
def _get_ttinfo(self, idx):
# For no list or after the last transition, default to _ttinfo_std
if idx is None or (idx + 1) >= len(self._trans_list):
return self._ttinfo_std
# If there is a list and the time is before it, return _ttinfo_before
if idx < 0:
return self._ttinfo_before
return self._trans_idx[idx]
def _find_ttinfo(self, dt):
idx = self._resolve_ambiguous_time(dt)
return self._get_ttinfo(idx)
def fromutc(self, dt):
"""
The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.
:param dt:
A :py:class:`datetime.datetime` object.
:raises TypeError:
Raised if ``dt`` is not a :py:class:`datetime.datetime` object.
:raises ValueError:
Raised if this is called with a ``dt`` which does not have this
``tzinfo`` attached.
:return:
Returns a :py:class:`datetime.datetime` object representing the
wall time in ``self``'s time zone.
"""
# These isinstance checks are in datetime.tzinfo, so we'll preserve
# them, even if we don't care about duck typing.
if not isinstance(dt, datetime.datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
# First treat UTC as wall time and get the transition we're in.
idx = self._find_last_transition(dt, in_utc=True)
tti = self._get_ttinfo(idx)
dt_out = dt + datetime.timedelta(seconds=tti.offset)
fold = self.is_ambiguous(dt_out, idx=idx)
return enfold(dt_out, fold=int(fold))
def is_ambiguous(self, dt, idx=None):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
if idx is None:
idx = self._find_last_transition(dt)
# Calculate the difference in offsets from current to previous
timestamp = _datetime_to_timestamp(dt)
tti = self._get_ttinfo(idx)
if idx is None or idx <= 0:
return False
od = self._get_ttinfo(idx - 1).offset - tti.offset
tt = self._trans_list[idx] # Transition time
return timestamp < tt + od
def _resolve_ambiguous_time(self, dt):
idx = self._find_last_transition(dt)
# If we have no transitions, return the index
_fold = self._fold(dt)
if idx is None or idx == 0:
return idx
# If it's ambiguous and we're in a fold, shift to a different index.
idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
return idx - idx_offset
def utcoffset(self, dt):
if dt is None:
return None
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if dt is None:
return None
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.dstoffset
@tzname_in_python2
def tzname(self, dt):
if not self._ttinfo_std or dt is None:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return NotImplemented
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
def __reduce__(self):
return self.__reduce_ex__(None)
def __reduce_ex__(self, protocol):
return (self.__class__, (None, self._filename), self.__dict__)
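# Hedged illustration, not part of the original tz source: a minimal sketch of
# the bisect_right lookup that _find_last_transition performs. The timestamps
# below are made up for demonstration only.
import bisect

_example_transitions = [100, 200, 300]  # hypothetical POSIX transition timestamps
for _ts in (50, 100, 250, 400):
    _idx = bisect.bisect_right(_example_transitions, _ts) - 1
    # -1 means "before the first transition"; a timestamp equal to a transition
    # lands in the "after" period, exactly as the method's comment describes.
    print(_ts, "->", _idx)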
|
tzfile
|
python
|
ray-project__ray
|
rllib/examples/envs/classes/multi_agent/footsies/game/constants.py
|
{
"start": 363,
"end": 560
}
|
class ____:
NONE: int = 0
LEFT: int = 1 << 0
RIGHT: int = 1 << 1
ATTACK: int = 1 << 2
LEFT_ATTACK: int = LEFT | ATTACK
RIGHT_ATTACK: int = RIGHT | ATTACK
@dataclass
|
ActionBits
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/trivial_pkg_with_valid_hash/package.py
|
{
"start": 217,
"end": 562
}
|
class ____(Package):
url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0"
version(
"1.0",
sha256="6ae8a75555209fd6c44157c0aed8016e763ff435a19cf186f76863140143ff72",
expand=False,
)
hashed_content = "test content"
def install(self, spec, prefix):
pass
|
TrivialPkgWithValidHash
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/virtual/dragonfly.py
|
{
"start": 779,
"end": 959
}
|
class ____(VirtualCollector):
# Note the _fact_class impl is actually the FreeBSDVirtual impl
_fact_class = FreeBSDVirtual
_platform = 'DragonFly'
|
DragonFlyVirtualCollector
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 979796,
"end": 980464
}
|
class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"as_code_owner",
"database_id",
"pull_request",
"requested_reviewer",
)
as_code_owner = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="asCodeOwner"
)
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
pull_request = sgqlc.types.Field(
sgqlc.types.non_null(PullRequest), graphql_name="pullRequest"
)
requested_reviewer = sgqlc.types.Field(
"RequestedReviewer", graphql_name="requestedReviewer"
)
|
ReviewRequest
|
python
|
Pylons__pyramid
|
tests/test_config/test_predicates.py
|
{
"start": 21566,
"end": 21957
}
|
class ____:
def __init__(self):
self.__text__ = 'custom predicate'
def classmethod_predicate(*args): # pragma: no cover
pass
classmethod_predicate.__text__ = 'classmethod predicate'
classmethod_predicate = classmethod(classmethod_predicate)
@classmethod
def classmethod_predicate_no_text(*args):
pass # pragma: no cover
|
DummyCustomPredicate
|
python
|
bokeh__bokeh
|
src/bokeh/models/formatters.py
|
{
"start": 10976,
"end": 11743
}
|
class ____(TickFormatter):
''' Display tick values from continuous ranges as powers
of some base.
Most often useful in conjunction with a ``LogTicker``.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
ticker = Nullable(Instance(Ticker), help="""
The corresponding ``LogTicker``, used to determine the correct
base to use. If unset, the formatter will use base 10 as a default.
""")
min_exponent = Int(0, help="""
Minimum exponent to format in scientific notation. If not zero
all ticks in range from base^-min_exponent to base^min_exponent
are displayed without exponential notation.
""")
|
LogTickFormatter
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/components_test.py
|
{
"start": 13465,
"end": 25874
}
|
class ____(DeltaGeneratorTestCase):
"""Test invocation of a custom component object."""
def setUp(self):
super().setUp()
self.test_component = components.declare_component("test", url=URL)
def test_only_json_args(self):
"""Test that component with only json args is marshalled correctly."""
self.test_component(foo="bar")
proto = self.get_delta_from_queue().new_element.component_instance
assert self.test_component.name == proto.component_name
self.assertJSONEqual(
{"foo": "bar", "key": None, "default": None}, proto.json_args
)
assert str(proto.special_args) == "[]"
def test_only_df_args(self):
"""Test that component with only dataframe args is marshalled correctly."""
raw_data = {
"First Name": ["Jason", "Molly"],
"Last Name": ["Miller", "Jacobson"],
"Age": [42, 52],
}
df = pd.DataFrame(raw_data, columns=["First Name", "Last Name", "Age"])
self.test_component(df=df)
proto = self.get_delta_from_queue().new_element.component_instance
assert self.test_component.name == proto.component_name
self.assertJSONEqual({"key": None, "default": None}, proto.json_args)
assert len(proto.special_args) == 1
assert _serialize_dataframe_arg("df", df) == proto.special_args[0]
def test_only_list_args(self):
"""Test that component with only list args is marshalled correctly."""
self.test_component(data=["foo", "bar", "baz"])
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual(
{"data": ["foo", "bar", "baz"], "key": None, "default": None},
proto.json_args,
)
assert str(proto.special_args) == "[]"
def test_no_args(self):
"""Test that component with no args is marshalled correctly."""
self.test_component()
proto = self.get_delta_from_queue().new_element.component_instance
assert self.test_component.name == proto.component_name
self.assertJSONEqual({"key": None, "default": None}, proto.json_args)
assert str(proto.special_args) == "[]"
def test_bytes_args(self):
self.test_component(foo=b"foo", bar=b"bar")
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None, "default": None}, proto.json_args)
assert len(proto.special_args) == 2
assert _serialize_bytes_arg("foo", b"foo") == proto.special_args[0]
assert _serialize_bytes_arg("bar", b"bar") == proto.special_args[1]
def test_mixed_args(self):
"""Test marshalling of a component with varied arg types."""
df = pd.DataFrame(
{
"First Name": ["Jason", "Molly"],
"Last Name": ["Miller", "Jacobson"],
"Age": [42, 52],
},
columns=["First Name", "Last Name", "Age"],
)
self.test_component(string_arg="string", df_arg=df, bytes_arg=b"bytes")
proto = self.get_delta_from_queue().new_element.component_instance
assert self.test_component.name == proto.component_name
self.assertJSONEqual(
{"string_arg": "string", "key": None, "default": None},
proto.json_args,
)
assert len(proto.special_args) == 2
assert _serialize_dataframe_arg("df_arg", df) == proto.special_args[0]
assert _serialize_bytes_arg("bytes_arg", b"bytes") == proto.special_args[1]
def test_duplicate_key(self):
"""Two components with the same `key` should throw DuplicateWidgetID exception"""
self.test_component(foo="bar", key="baz")
with pytest.raises(DuplicateWidgetID):
self.test_component(key="baz")
def test_key_sent_to_frontend(self):
"""We send the 'key' param to the frontend (even if it's None)."""
# Test a string key
self.test_component(key="baz")
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": "baz", "default": None}, proto.json_args)
# Test an empty key
self.test_component()
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None, "default": None}, proto.json_args)
def test_widget_id_with_key(self):
"""A component with a user-supplied `key` will have a stable widget ID
even when the component's other parameters change.
This is important because a component's iframe gets unmounted and remounted - wiping all its
internal state - when the component's ID changes. We want to be able to pass new data to a
component's frontend without causing a remount.
"""
# Create a component instance with a key and some custom data
self.test_component(key="key", some_data=345)
proto1 = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual(
{"key": "key", "default": None, "some_data": 345}, proto1.json_args
)
# Clear some ScriptRunCtx data so that we can re-register the same component
# without getting a DuplicateWidgetID error
self.script_run_ctx.widget_user_keys_this_run.clear()
self.script_run_ctx.widget_ids_this_run.clear()
# Create a second component instance with the same key, and different custom data
self.test_component(key="key", some_data=678, more_data="foo")
proto2 = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual(
{"key": "key", "default": None, "some_data": 678, "more_data": "foo"},
proto2.json_args,
)
# The two component instances should have the same ID, *despite having different
# data passed to them.*
assert proto1.id == proto2.id
def test_widget_id_without_key(self):
"""Like all other widget types, two component instances with different data parameters,
and without a specified `key`, will have different widget IDs.
"""
# Create a component instance without a key and some custom data
self.test_component(some_data=345)
proto1 = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual(
{"key": None, "default": None, "some_data": 345}, proto1.json_args
)
# Create a second component instance with different custom data
self.test_component(some_data=678)
proto2 = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual(
{"key": None, "default": None, "some_data": 678}, proto2.json_args
)
# The two component instances should have different IDs (just like any other widget would).
assert proto1.id != proto2.id
def test_simple_default(self):
"""Test the 'default' param with a JSON value."""
return_value = self.test_component(default="baz")
assert return_value == "baz"
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None, "default": "baz"}, proto.json_args)
def test_bytes_default(self):
"""Test the 'default' param with a bytes value."""
return_value = self.test_component(default=b"bytes")
assert return_value == b"bytes"
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None}, proto.json_args)
assert _serialize_bytes_arg("default", b"bytes") == proto.special_args[0]
def test_df_default(self):
"""Test the 'default' param with a DataFrame value."""
df = pd.DataFrame(
{
"First Name": ["Jason", "Molly"],
"Last Name": ["Miller", "Jacobson"],
"Age": [42, 52],
},
columns=["First Name", "Last Name", "Age"],
)
return_value = self.test_component(default=df)
assert df.equals(return_value), "df != return_value"
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": None}, proto.json_args)
assert _serialize_dataframe_arg("default", df) == proto.special_args[0]
def test_on_change_handler(self):
"""Test the 'on_change' callback param."""
# we use a list here so that we can update it in the lambda; we cannot assign a variable there.
callback_call_value = []
expected_element_value = "Called with foo"
def create_on_change_handler(some_arg: str):
return lambda: callback_call_value.append("Called with " + some_arg)
return_value = self.test_component(
key="key", default="baz", on_change=create_on_change_handler("foo")
)
assert return_value == "baz"
proto = self.get_delta_from_queue().new_element.component_instance
self.assertJSONEqual({"key": "key", "default": "baz"}, proto.json_args)
current_widget_states = self.script_run_ctx.session_state.get_widget_states()
new_widget_state = WidgetState()
# copy the custom components state and update the value
new_widget_state.CopyFrom(current_widget_states[0])
# update the widget's value so that the rerun will execute the callback
new_widget_state.json_value = '{"key": "key", "default": "baz2"}'
self.script_run_ctx.session_state.on_script_will_rerun(
WidgetStates(widgets=[new_widget_state])
)
assert callback_call_value[0] == expected_element_value
def assertJSONEqual(self, a, b):
"""Asserts that two JSON dicts are equal. If either arg is a string,
it will be first converted to a dict with json.loads()."""
# Ensure both objects are dicts.
dict_a = a if isinstance(a, dict) else json.loads(a)
dict_b = b if isinstance(b, dict) else json.loads(b)
assert dict_a == dict_b
def test_outside_form(self):
"""Test that form id is marshalled correctly outside of a form."""
self.test_component()
proto = self.get_delta_from_queue().new_element.component_instance
assert proto.form_id == ""
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form(self):
"""Test that form id is marshalled correctly inside of a form."""
with st.form("foo"):
self.test_component()
# 2 elements will be created: form block, widget
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
component_instance_proto = self.get_delta_from_queue(
1
).new_element.component_instance
assert component_instance_proto.form_id == form_proto.form.form_id
def test_tab_index(self):
"""Test that tab_index parameter is marshalled correctly."""
self.test_component(tab_index=-1, key="tab_index_neg1")
proto = self.get_delta_from_queue().new_element.component_instance
assert proto.tab_index == -1
self.test_component(tab_index=0, key="tab_index_0")
proto = self.get_delta_from_queue().new_element.component_instance
assert proto.tab_index == 0
self.test_component(tab_index=10, key="tab_index_10")
proto = self.get_delta_from_queue().new_element.component_instance
assert proto.tab_index == 10
# Test with tab_index = None (default)
# The tab_index field should not be set in the proto
self.test_component(key="tab_index_none")
proto = self.get_delta_from_queue().new_element.component_instance
assert not proto.HasField("tab_index")
def test_invalid_tab_index(self):
"""Test that invalid tab_index values raise StreamlitAPIException."""
with pytest.raises(StreamlitAPIException):
self.test_component(tab_index=-2, key="invalid_tab_index_1")
with pytest.raises(StreamlitAPIException):
self.test_component(tab_index="not_an_int", key="invalid_tab_index_2")
with pytest.raises(StreamlitAPIException):
self.test_component(tab_index=True, key="invalid_tab_index_3")
|
InvokeComponentTest
|
python
|
milvus-io__pymilvus
|
pymilvus/exceptions.py
|
{
"start": 3461,
"end": 3548
}
|
class ____(MilvusException):
"""Raise when one field is invalid"""
|
FieldTypeException
|
python
|
doocs__leetcode
|
solution/2300-2399/2361.Minimum Costs Using the Train Line/Solution.py
|
{
"start": 0,
"end": 471
}
|
class ____:
def minimumCosts(
self, regular: List[int], express: List[int], expressCost: int
) -> List[int]:
n = len(regular)
f = [0] * (n + 1)
g = [inf] * (n + 1)
cost = [0] * n
for i, (a, b) in enumerate(zip(regular, express), 1):
f[i] = min(f[i - 1] + a, g[i - 1] + a)
g[i] = min(f[i - 1] + expressCost + b, g[i - 1] + b)
cost[i - 1] = min(f[i], g[i])
return cost
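# Hedged companion check, not part of the original solution file: the same
# recurrence with rolling variables, verified on the sample input
# regular=[1,6,9,5], express=[5,2,3,10], expressCost=8 -> [1, 7, 14, 19].
from math import inf

def _minimum_costs(regular, express, express_cost):
    f, g, out = 0, inf, []
    for a, b in zip(regular, express):
        # stay on the regular line, or pay express_cost once to switch over
        f, g = min(f + a, g + a), min(f + express_cost + b, g + b)
        out.append(min(f, g))
    return out

assert _minimum_costs([1, 6, 9, 5], [5, 2, 3, 10], 8) == [1, 7, 14, 19]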
|
Solution
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/core.py
|
{
"start": 32903,
"end": 33527
}
|
class ____(TimeSeriesBaseModel, ORMBaseModel):
"""An ORM representation of log data."""
name: str = Field(default=..., description="The logger name.")
level: int = Field(default=..., description="The log level.")
message: str = Field(default=..., description="The log message.")
timestamp: DateTime = Field(default=..., description="The log timestamp.")
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run ID associated with the log."
)
task_run_id: Optional[UUID] = Field(
default=None, description="The task run ID associated with the log."
)
|
Log
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_color.py
|
{
"start": 13084,
"end": 13237
}
|
class ____(scale_color_cmap_d):
"""
Create color scales using Matplotlib colormaps
"""
_aesthetics = ["fill"]
@dataclass
|
scale_fill_cmap_d
|
python
|
huggingface__transformers
|
src/transformers/models/grounding_dino/image_processing_grounding_dino.py
|
{
"start": 2213,
"end": 2426
}
|
class ____(ExplicitEnum):
COCO_DETECTION = "coco_detection"
COCO_PANOPTIC = "coco_panoptic"
SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
|
AnnotationFormat
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/nodes.py
|
{
"start": 20936,
"end": 22215
}
|
class ____(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ("test", "expr1", "expr2")
test: Expr
expr1: Expr
expr2: t.Optional[Expr]
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
def args_as_const(
node: t.Union["_FilterTestCommon", "Call"], eval_ctx: t.Optional[EvalContext]
) -> t.Tuple[t.List[t.Any], t.Dict[t.Any, t.Any]]:
args = [x.as_const(eval_ctx) for x in node.args]
kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
if node.dyn_args is not None:
try:
args.extend(node.dyn_args.as_const(eval_ctx))
except Exception as e:
raise Impossible() from e
if node.dyn_kwargs is not None:
try:
kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
except Exception as e:
raise Impossible() from e
return args, kwargs
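# Hedged usage sketch, not part of the vendored nodes.py; it uses the
# standalone jinja2 package to show the inline-if expression that this node
# models, rendered end to end.
from jinja2 import Template

assert Template("{{ 'yes' if flag else 'no' }}").render(flag=True) == "yes"
assert Template("{{ 'yes' if flag else 'no' }}").render(flag=False) == "no"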
|
CondExpr
|
python
|
spyder-ide__spyder
|
spyder/plugins/ipythonconsole/widgets/control.py
|
{
"start": 3881,
"end": 5264
}
|
class ____(QTextEdit, BaseEditMixin):
"""
Subclass of QTextEdit with features from Spyder's mixins.BaseEditMixin to
use as the paging widget for IPython widgets
"""
QT_CLASS = QTextEdit
sig_visibility_changed = Signal(bool)
sig_show_find_widget_requested = Signal()
sig_focus_changed = Signal()
def __init__(self, parent=None):
QTextEdit.__init__(self, parent)
BaseEditMixin.__init__(self)
self.found_results = []
def showEvent(self, event):
"""Reimplement Qt Method"""
self.sig_visibility_changed.emit(True)
def keyPressEvent(self, event):
"""Reimplement Qt Method - Basic keypress event handler"""
event, text, key, ctrl, shift = restore_keyevent(event)
if key == Qt.Key_Slash and self.isVisible():
self.sig_show_find_widget_requested.emit()
else:
# Let the parent widget handle the key press event
QTextEdit.keyPressEvent(self, event)
def focusInEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.sig_focus_changed.emit()
return super().focusInEvent(event)
def focusOutEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.sig_focus_changed.emit()
return super().focusOutEvent(event)
|
PageControlWidget
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatter/legendgrouptitle/_font.py
|
{
"start": 233,
"end": 9927
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter.legendgrouptitle"
_path_str = "scatter.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter.legend
grouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
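# Hedged usage sketch, not part of the generated module; assumes plotly is
# installed. The nested-dict form below is the usual way this font object is
# reached from user code.
import plotly.graph_objects as go

fig = go.Figure(
    go.Scatter(
        y=[1, 2, 3],
        legendgroup="group-a",
        legendgrouptitle=dict(
            text="Group A",
            font=dict(family="Courier New, monospace", size=14, color="gray"),
        ),
    )
)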
|
Font
|
python
|
jina-ai__jina
|
tests/unit/orchestrate/deployments/test_deployments.py
|
{
"start": 3007,
"end": 3061
}
|
class ____(MyDummyExecutor):
pass
|
ChildDummyExecutor
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 561804,
"end": 562157
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteRepositoryRuleset"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id",)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
DeleteRepositoryRulesetPayload
|
python
|
walkccc__LeetCode
|
solutions/1682. Longest Palindromic Subsequence II/1682-2.py
|
{
"start": 0,
"end": 575
}
|
class ____:
def longestPalindromeSubseq(self, s: str) -> int:
n = len(s)
# dp[i][j][k] := the length of LPS(s[i..j]), where the previous letter is
# ('a' + k).
dp = [[[0] * 27 for _ in range(n)] for _ in range(n)]
for d in range(1, n):
for i in range(n - d):
for k in range(27):
j = i + d
if s[i] == s[j] and s[i] != chr(ord('a') + k):
dp[i][j][k] = dp[i + 1][j - 1][ord(s[i]) - ord('a')] + 2
else:
dp[i][j][k] = max(dp[i + 1][j][k], dp[i][j - 1][k])
return dp[0][n - 1][26]
|
Solution
|
python
|
sympy__sympy
|
sympy/matrices/expressions/diagonal.py
|
{
"start": 4183,
"end": 6328
}
|
class ____(MatrixExpr):
"""
Turn a vector into a diagonal matrix.
"""
def __new__(cls, vector):
vector = _sympify(vector)
obj = MatrixExpr.__new__(cls, vector)
shape = vector.shape
dim = shape[1] if shape[0] == 1 else shape[0]
if vector.shape[0] != 1:
obj._iscolumn = True
else:
obj._iscolumn = False
obj._shape = (dim, dim)
obj._vector = vector
return obj
@property
def shape(self):
return self._shape
def _entry(self, i, j, **kwargs):
if self._iscolumn:
result = self._vector._entry(i, 0, **kwargs)
else:
result = self._vector._entry(0, j, **kwargs)
if i != j:
result *= KroneckerDelta(i, j)
return result
def _eval_transpose(self):
return self
def as_explicit(self):
from sympy.matrices.dense import diag
return diag(*list(self._vector.as_explicit()))
def doit(self, **hints):
from sympy.assumptions import ask, Q
from sympy.matrices.expressions.matmul import MatMul
from sympy.matrices.expressions.transpose import Transpose
from sympy.matrices.dense import eye
from sympy.matrices.matrixbase import MatrixBase
vector = self._vector
# This accounts for shape (1, 1) and identity matrices, among others:
if ask(Q.diagonal(vector)):
return vector
if isinstance(vector, MatrixBase):
ret = eye(max(vector.shape))
for i in range(ret.shape[0]):
ret[i, i] = vector[i]
return type(vector)(ret)
if vector.is_MatMul:
matrices = [arg for arg in vector.args if arg.is_Matrix]
scalars = [arg for arg in vector.args if arg not in matrices]
if scalars:
return Mul.fromiter(scalars)*DiagMatrix(MatMul.fromiter(matrices).doit()).doit()
if isinstance(vector, Transpose):
vector = vector.arg
return DiagMatrix(vector)
def diagonalize_vector(vector):
return DiagMatrix(vector).doit()
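# Hedged usage sketch, not part of diagonal.py; assumes a SymPy install.
from sympy import Matrix, MatrixSymbol

diagonalize_vector(Matrix([1, 2, 3]))
# -> Matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) once doit() takes the explicit path
x = MatrixSymbol("x", 3, 1)
diagonalize_vector(x).shape
# -> (3, 3); the symbolic case stays wrapped, with off-diagonal entries
#    gated by KroneckerDelta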
|
DiagMatrix
|
python
|
nedbat__coveragepy
|
tests/test_arcs.py
|
{
"start": 65472,
"end": 66186
}
|
class ____(CoverageTest):
"""Tools like Jinja run code credited to non-Python files."""
def test_non_python_file(self) -> None:
# Make a code object with branches, and claim it came from an HTML file.
# With sysmon, this used to fail trying to parse the source. #2077
self.make_file("hello.html", "<h1>Hello!</h1>")
code = textwrap.dedent("""\
a = 1
while a:
c = 3
break
assert c == 5 - 2
""")
code_obj = compile(code, filename="hello.html", mode="exec")
cov = coverage.Coverage(branch=True, debug=["trace"])
with cov.collect():
exec(code_obj)
|
NonPythonFileTest
|
python
|
ray-project__ray
|
python/ray/serve/llm/__init__.py
|
{
"start": 1236,
"end": 1398
}
|
class ____(_CloudMirrorConfig):
"""The configuration for mirroring an LLM model from cloud storage."""
pass
@PublicAPI(stability="alpha")
|
CloudMirrorConfig
|
python
|
jina-ai__jina
|
jina/clients/http.py
|
{
"start": 266,
"end": 877
}
|
class ____(
HTTPBaseClient, PostMixin, ProfileMixin, MutateMixin, HealthCheckMixin
):
"""A client connecting to a Gateway using gRPC protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='http', asyncio=False, host='http://my.awesome.flow:1234'
) # returns HTTPClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
|
HTTPClient
|
python
|
yaml__pyyaml
|
lib/yaml/nodes.py
|
{
"start": 1385,
"end": 1440
}
|
class ____(CollectionNode):
id = 'mapping'
|
MappingNode
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-databricks/dagster_databricks/components/databricks_asset_bundle/configs.py
|
{
"start": 6042,
"end": 7860
}
|
class ____(DatabricksBaseTask[jobs.ConditionTask]):
@property
def task_type(self) -> str:
return "condition"
@property
def task_config_metadata(self) -> Mapping[str, Any]:
task_config_metadata = {}
condition_config = self.task_config["condition_task"]
task_config_metadata["left"] = condition_config.get("left", "")
task_config_metadata["op"] = condition_config.get("op", "EQUAL_TO")
task_config_metadata["right"] = condition_config.get("right", "")
return task_config_metadata
@classmethod
def from_job_task_config(cls, job_task_config: Mapping[str, Any]) -> "DatabricksConditionTask":
condition_task = job_task_config["condition_task"]
task_config = {"condition_task": condition_task}
# Condition tasks don't have traditional parameters
task_parameters = {}
return cls(
task_key=job_task_config["task_key"],
task_config=task_config,
task_parameters=task_parameters,
depends_on=parse_depends_on(job_task_config.get("depends_on", [])),
job_name=job_task_config["job_name"],
libraries=job_task_config.get("libraries", []),
)
@property
def needs_cluster(self) -> bool:
return False
@property
def submit_task_key(self) -> str:
return "condition_task"
def to_databricks_sdk_task(self) -> jobs.ConditionTask:
condition_config = self.task_config["condition_task"]
return jobs.ConditionTask(
left=condition_config.get("left", ""),
op=getattr(
jobs.ConditionTaskOp,
condition_config.get("op", "EQUAL_TO"),
),
right=condition_config.get("right", ""),
)
@record
|
DatabricksConditionTask
|
python
|
apache__airflow
|
task-sdk/tests/task_sdk/definitions/test_connection.py
|
{
"start": 7480,
"end": 9998
}
|
class ____:
def test_get_connection_secrets_backend(self, mock_supervisor_comms, tmp_path):
"""Tests getting a connection from secrets backend."""
path = tmp_path / "conn.env"
path.write_text("CONN_A=mysql://host_a")
with conf_vars(
{
(
"workers",
"secrets_backend",
): "airflow.secrets.local_filesystem.LocalFilesystemBackend",
("workers", "secrets_backend_kwargs"): f'{{"connections_file_path": "{path}"}}',
}
):
retrieved_conn = Connection.get(conn_id="CONN_A")
assert retrieved_conn is not None
assert retrieved_conn.conn_id == "CONN_A"
@mock.patch("airflow.secrets.environment_variables.EnvironmentVariablesBackend.get_connection")
def test_get_connection_env_var(self, mock_env_get, mock_supervisor_comms):
"""Tests getting a connection from environment variable."""
mock_env_get.return_value = Connection(conn_id="something", conn_type="some-type") # return None
Connection.get("something")
mock_env_get.assert_called_once_with(conn_id="something")
@conf_vars(
{
("workers", "secrets_backend"): "airflow.secrets.local_filesystem.LocalFilesystemBackend",
("workers", "secrets_backend_kwargs"): '{"connections_file_path": "/files/conn.json"}',
}
)
@mock.patch("airflow.secrets.local_filesystem.LocalFilesystemBackend.get_connection")
@mock.patch("airflow.secrets.environment_variables.EnvironmentVariablesBackend.get_connection")
def test_backend_fallback_to_env_var(self, mock_get_connection, mock_env_get, mock_supervisor_comms):
"""Tests if connection retrieval falls back to environment variable backend if not found in secrets backend."""
mock_get_connection.return_value = None
mock_env_get.return_value = Connection(conn_id="something", conn_type="some-type")
backends = initialize_secrets_backends(DEFAULT_SECRETS_SEARCH_PATH_WORKERS)
assert len(backends) == 3
backend_classes = [backend.__class__.__name__ for backend in backends]
assert "LocalFilesystemBackend" in backend_classes
conn = Connection.get(conn_id="something")
# mock_env is only called when LocalFilesystemBackend doesn't have it
mock_env_get.assert_called()
assert conn == Connection(conn_id="something", conn_type="some-type")
|
TestConnectionsFromSecrets
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/linkedin_oauth2/views.py
|
{
"start": 228,
"end": 1866
}
|
class ____(OAuth2Adapter):
provider_id = "linkedin_oauth2"
access_token_url = "https://www.linkedin.com/oauth/v2/accessToken" # nosec
authorize_url = "https://www.linkedin.com/oauth/v2/authorization"
profile_url = "https://api.linkedin.com/v2/me"
email_url = "https://api.linkedin.com/v2/emailAddress?q=members&projection=(elements*(handle~))" # noqa
access_token_method = "GET" # nosec
def complete_login(self, request, app, token, **kwargs):
extra_data = self.get_user_info(token)
return self.get_provider().sociallogin_from_response(request, extra_data)
def get_user_info(self, token):
fields = self.get_provider().get_profile_fields()
headers = {}
headers.update(self.get_provider().get_settings().get("HEADERS", {}))
headers["Authorization"] = " ".join(["Bearer", token.token])
info = {}
if app_settings.QUERY_EMAIL:
resp = (
get_adapter()
.get_requests_session()
.get(self.email_url, headers=headers)
)
# A failed response here is not a blocker; we can continue without the
# email information.
if resp.ok:
info = resp.json()
url = self.profile_url + "?projection=(%s)" % ",".join(fields)
resp = get_adapter().get_requests_session().get(url, headers=headers)
resp.raise_for_status()
info.update(resp.json())
return info
oauth2_login = OAuth2LoginView.adapter_view(LinkedInOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(LinkedInOAuth2Adapter)
|
LinkedInOAuth2Adapter
|
python
|
huggingface__transformers
|
src/transformers/models/fnet/modeling_fnet.py
|
{
"start": 19902,
"end": 23874
}
|
class ____(FNetPreTrainedModel):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "fnet.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
self.fnet = FNetModel(config)
self.cls = FNetPreTrainingHeads(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
next_sentence_label: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, FNetForPreTrainingOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, FNetForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
>>> model = FNetForPreTraining.from_pretrained("google/fnet-base")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.fnet(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return FNetForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
)
@auto_docstring
|
FNetForPreTraining
|
python
|
getsentry__sentry
|
src/sentry/services/eventstore/snuba/backend.py
|
{
"start": 1639,
"end": 30485
}
|
class ____(EventStorage):
"""
Eventstore backend backed by Snuba
"""
def get_events_snql(
self,
organization_id: int,
group_id: int,
start: datetime | None,
end: datetime | None,
conditions: Sequence[Condition],
orderby: Sequence[str],
limit: int = DEFAULT_LIMIT,
inner_limit: int | None = None,
offset: int = DEFAULT_OFFSET,
referrer: str = "eventstore.get_events_snql",
dataset: Dataset = Dataset.Events,
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
cols = self.__get_columns(dataset)
resolved_order_by = []
order_by_col_names: set[str] = set()
for order_field_alias in orderby:
if order_field_alias.startswith("-"):
direction = Direction.DESC
order_field_alias = order_field_alias[1:]
else:
direction = Direction.ASC
resolved_column_or_none = DATASETS[dataset].get(order_field_alias)
if resolved_column_or_none:
order_by_col_names.add(resolved_column_or_none)
# special-case handling for nullable column values and proper ordering based on direction
# null values are always last in the sort order regardless of Desc or Asc ordering
if order_field_alias == Columns.NUM_PROCESSING_ERRORS.value.alias:
resolved_order_by.append(
OrderBy(
Function("coalesce", [Column(resolved_column_or_none), 99999999]),
direction=direction,
)
)
elif order_field_alias == Columns.TRACE_SAMPLED.value.alias:
resolved_order_by.append(
OrderBy(
Function("coalesce", [Column(resolved_column_or_none), -1]),
direction=direction,
)
)
elif order_field_alias in (
Columns.PROFILE_ID.value.alias,
Columns.REPLAY_ID.value.alias,
):
resolved_order_by.append(
OrderBy(
Function(
"if",
[
Function("isNull", [Column(resolved_column_or_none)]),
0,
1,
],
),
direction=direction,
)
)
elif order_field_alias == Columns.TIMESTAMP.value.alias:
resolved_order_by.extend(
[
OrderBy(
Function("toStartOfDay", [Column("timestamp")]),
direction=direction,
),
OrderBy(
Column("timestamp"),
direction=direction,
),
]
)
else:
resolved_order_by.append(
OrderBy(Column(resolved_column_or_none), direction=direction)
)
start, end = _prepare_start_end(
start,
end,
organization_id,
[group_id],
)
match_entity = Entity(dataset.value)
event_filters = [
Condition(Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]), Op.GTE, start),
Condition(Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]), Op.LT, end),
] + list(conditions)
common_request_kwargs = {
"app_id": "eventstore",
"dataset": dataset.value,
"tenant_ids": tenant_ids or dict(),
}
common_query_kwargs = {
"select": [Column(col) for col in cols],
"orderby": resolved_order_by,
"limit": Limit(limit),
"offset": Offset(offset),
}
# If inner_limit is provided, first restrict the query to the most recent N rows,
# then apply the final ordering and pagination on top of that subquery.
if inner_limit and inner_limit > 0:
select_and_orderby_cols = set(cols) | order_by_col_names
inner_query = Query(
match=match_entity,
select=[Column(col) for col in select_and_orderby_cols],
where=event_filters,
orderby=[
OrderBy(
Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]),
direction=Direction.DESC,
),
OrderBy(
Column(DATASETS[dataset][Columns.EVENT_ID.value.alias]),
direction=Direction.DESC,
),
],
limit=Limit(inner_limit),
)
outer_query = Query(
**common_query_kwargs,
match=inner_query,
)
snql_request = Request(**common_request_kwargs, query=outer_query)
else:
snql_request = Request(
**common_request_kwargs,
query=Query(
**common_query_kwargs,
match=match_entity,
where=event_filters,
),
)
result = raw_snql_query(snql_request, referrer, use_cache=False)
if "error" not in result:
events = [self.__make_event(evt) for evt in result["data"]]
self.bind_nodes(events)
return events
return []
def get_events(
self,
filter: Filter,
orderby: Sequence[str] | None = None,
limit: int = DEFAULT_LIMIT,
offset: int = DEFAULT_OFFSET,
referrer: str = "eventstore.get_events",
dataset: Dataset = Dataset.Events,
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
"""
Get events from Snuba, with node data loaded.
"""
with sentry_sdk.start_span(op="eventstore.snuba.get_events"):
return self.__get_events(
filter,
orderby=orderby,
limit=limit,
offset=offset,
referrer=referrer,
should_bind_nodes=True,
dataset=dataset,
tenant_ids=tenant_ids,
)
def get_unfetched_events(
self,
filter: Filter,
orderby: Sequence[str] | None = None,
limit: int = DEFAULT_LIMIT,
offset: int = DEFAULT_OFFSET,
referrer: str = "eventstore.get_unfetched_events",
dataset: Dataset = Dataset.Events,
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
"""
Get events from Snuba, without node data loaded.
"""
return self.__get_events(
filter,
orderby=orderby,
limit=limit,
offset=offset,
referrer=referrer,
should_bind_nodes=False,
dataset=dataset,
tenant_ids=tenant_ids,
)
def __get_events(
self,
filter: Filter,
orderby: Sequence[str] | None = None,
limit: int = DEFAULT_LIMIT,
offset: int = DEFAULT_OFFSET,
referrer: str = "eventstore.get_unfetched_events",
should_bind_nodes: bool = False,
dataset: Dataset = Dataset.Events,
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
assert filter, "You must provide a filter"
cols = self.__get_columns(dataset)
orderby = orderby or DESC_ORDERING
# This is an optimization for the Group.filter_by_event_id query where we
# have a single event ID and want to check all accessible projects for a
# direct hit. In this case it's usually faster to go to nodestore first.
if (
filter.event_ids
and filter.project_ids
and len(filter.event_ids) * len(filter.project_ids) < min(limit, NODESTORE_LIMIT)
and offset == 0
and should_bind_nodes
):
event_list = [
Event(project_id=project_id, event_id=event_id)
for event_id in filter.event_ids
for project_id in filter.project_ids
]
self.bind_nodes(event_list)
# Extending date filters by +- 1s since events are second-resolution.
start = filter.start - timedelta(seconds=1) if filter.start else datetime(1970, 1, 1)
end = filter.end + timedelta(seconds=1) if filter.end else timezone.now()
start, end = start.replace(tzinfo=UTC), end.replace(tzinfo=UTC)
nodestore_events = [
event
for event in event_list
if len(event.data) and start <= event.datetime.replace(tzinfo=UTC) <= end
]
if nodestore_events:
event_ids = {event.event_id for event in nodestore_events}
project_ids = {event.project_id for event in nodestore_events}
start = min(event.datetime for event in nodestore_events)
end = max(event.datetime for event in nodestore_events) + timedelta(seconds=1)
result = snuba.aliased_query(
selected_columns=cols,
start=start,
end=end,
conditions=filter.conditions,
filter_keys={"project_id": project_ids, "event_id": event_ids},
orderby=orderby,
limit=len(nodestore_events),
offset=DEFAULT_OFFSET,
referrer=referrer,
dataset=dataset,
tenant_ids=tenant_ids,
)
if "error" not in result:
events = [self.__make_event(evt) for evt in result["data"]]
# Bind previously fetched node data
nodestore_dict = {
(e.event_id, e.project_id): e.data.data for e in nodestore_events
}
for event in events:
node_data = nodestore_dict[(event.event_id, event.project_id)]
event.data.bind_data(node_data)
return events
return []
result = snuba.aliased_query(
selected_columns=cols,
start=filter.start,
end=filter.end,
conditions=filter.conditions,
filter_keys=filter.filter_keys,
orderby=orderby,
limit=limit,
offset=offset,
referrer=referrer,
dataset=dataset,
tenant_ids=tenant_ids,
)
if "error" not in result:
events = [self.__make_event(evt) for evt in result["data"]]
if should_bind_nodes:
self.bind_nodes(events)
return events
return []
@overload
def get_event_by_id(
self,
project_id: int,
event_id: str,
group_id: int | None = None,
tenant_ids: Mapping[str, Any] | None = None,
occurrence_id: str | None = None,
*,
skip_transaction_groupevent: Literal[True],
) -> Event | None: ...
@overload
def get_event_by_id(
self,
project_id: int,
event_id: str,
group_id: int | None = None,
tenant_ids: Mapping[str, Any] | None = None,
occurrence_id: str | None = None,
*,
skip_transaction_groupevent: bool = False,
) -> Event | GroupEvent | None: ...
def get_event_by_id(
self,
project_id: int,
event_id: str,
group_id: int | None = None,
tenant_ids: Mapping[str, Any] | None = None,
occurrence_id: str | None = None,
*,
skip_transaction_groupevent: bool = False,
) -> Event | GroupEvent | None:
"""
Get an event given a project ID and event ID
Returns None if an event cannot be found
skip_transaction_groupevent: Temporary hack parameter to skip converting a transaction
event into a `GroupEvent`. Used as part of `post_process_group`.
"""
event_id = normalize_event_id(event_id)
if not event_id:
return None
event = Event(project_id=project_id, event_id=event_id)
# Return None if there was no data in nodestore
if len(event.data) == 0:
return None
if group_id is not None:
sentry_sdk.set_tag("nodestore.event_type", event.get_event_type())
if group_id is not None and (
event.get_event_type() == "error"
or (event.get_event_type() == "transaction" and skip_transaction_groupevent)
):
event.group_id = group_id
elif occurrence_id is not None and group_id is not None:
event.group_id = group_id
event._snuba_data = {
"event_id": event_id,
"group_id": group_id,
"project_id": project_id,
"timestamp": event.timestamp,
"occurrence_id": occurrence_id,
}
elif event.get_event_type() != "transaction" or group_id:
# Load group_id from Snuba if not a transaction
raw_query_kwargs = {}
if event.datetime > timezone.now() - timedelta(hours=1):
# XXX: This is a hack to bust the snuba cache. We want to avoid the case where
# we cache an empty result, since this can result in us failing to fetch new events
# in some cases.
raw_query_kwargs["conditions"] = [
[
"timestamp",
">",
datetime.fromtimestamp(random.randint(0, 1000000000)),
]
]
dataset = (
Dataset.IssuePlatform
if event.get_event_type() in ("transaction", "generic")
else Dataset.Events
)
try:
tenant_ids = tenant_ids or {"organization_id": event.project.organization_id}
filter_keys = {"project_id": [project_id], "event_id": [event_id]}
if group_id:
filter_keys["group_id"] = [group_id]
result = snuba.raw_query(
dataset=dataset,
selected_columns=self.__get_columns(dataset),
start=event.datetime,
end=event.datetime + timedelta(seconds=1),
filter_keys=filter_keys,
limit=1,
referrer="eventstore.backend.get_event_by_id_nodestore",
tenant_ids=tenant_ids,
**raw_query_kwargs,
)
except snuba.QueryOutsideRetentionError:
# This can happen due to races. We want to hide this from callers
# silently.
return None
# Return None if the event from Nodestore was not yet written to Snuba
if len(result["data"]) != 1:
logger.warning(
"eventstore.missing-snuba-event",
extra={
"project_id": project_id,
"event_id": event_id,
"group_id": group_id,
"event_datetime": event.datetime,
"event_timestamp": event.timestamp,
"nodestore_insert": event.data.get("nodestore_insert"),
"received": event.data.get("received"),
"len_data": len(result["data"]),
},
)
return None
event.group_id = result["data"][0]["group_id"]
# Inject the snuba data here to make sure any snuba columns are available
event._snuba_data = result["data"][0]
# Set passed group_id if not a transaction
if event.get_event_type() == "transaction" and not skip_transaction_groupevent and group_id:
logger.warning("eventstore.passed-group-id-for-transaction")
return event.for_group(Group.objects.get(id=group_id))
return event
def _get_dataset_for_event(self, event: Event | GroupEvent) -> Dataset:
if getattr(event, "occurrence", None) or event.get_event_type() == "generic":
return Dataset.IssuePlatform
elif event.get_event_type() == "transaction":
return Dataset.Transactions
else:
return Dataset.Discover
def get_adjacent_event_ids_snql(
self,
organization_id: int,
project_id: int,
group_id: int | None,
environments: Sequence[str],
event: Event | GroupEvent,
start: datetime | None = None,
end: datetime | None = None,
conditions: list[Any] | None = None,
) -> list[tuple[str, str] | None]:
"""
Utility function for grabbing an event's adjacent events,
which are the ones with the closest timestamps before and after.
This function is only used in project_event_details at the moment,
so its interface is tailored to that. We use SnQL, filtering on project_id
and toStartOfDay(timestamp) to efficiently scan our table
"""
dataset = self._get_dataset_for_event(event)
app_id = "eventstore"
referrer = "eventstore.get_next_or_prev_event_id_snql"
tenant_ids = {"organization_id": organization_id}
if not conditions:
conditions = []
def make_constant_conditions() -> list[Condition | Or]:
environment_conditions = []
if environments:
environment_conditions.append(Condition(Column("environment"), Op.IN, environments))
group_conditions = []
if group_id:
group_conditions.append(Condition(Column("group_id"), Op.EQ, group_id))
project_conditions = [Condition(Column("project_id"), Op.EQ, project_id)]
return [
*conditions,
*environment_conditions,
*group_conditions,
*project_conditions,
]
lower_bound = start or (event.datetime - timedelta(days=100))
upper_bound = end or (event.datetime + timedelta(days=100))
def make_prev_timestamp_conditions(
event: Event | GroupEvent,
) -> list[Condition | Or]:
return [
Condition(
Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]),
Op.GTE,
lower_bound,
),
Condition(
Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]),
Op.LT,
event.datetime + timedelta(seconds=1),
),
Or(
conditions=[
Condition(
Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]),
Op.LT,
event.datetime,
),
Condition(Column("event_id"), Op.LT, event.event_id),
],
),
]
def make_next_timestamp_conditions(
event: Event | GroupEvent,
) -> list[Condition | Or]:
return [
Condition(
Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]),
Op.LT,
upper_bound,
),
Condition(
Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]),
Op.GTE,
event.datetime,
),
Or(
conditions=[
Condition(Column("event_id"), Op.GT, event.event_id),
Condition(
Column(DATASETS[dataset][Columns.TIMESTAMP.value.alias]),
Op.GT,
event.datetime,
),
],
),
]
def make_request(is_prev: bool) -> Request:
order_by_direction = Direction.DESC if is_prev else Direction.ASC
conditions = make_constant_conditions()
conditions.extend(
make_prev_timestamp_conditions(event)
if is_prev
else make_next_timestamp_conditions(event)
)
return Request(
dataset=dataset.value,
app_id=app_id,
query=Query(
match=Entity(dataset.value),
select=[Column("event_id"), Column("project_id")],
where=conditions,
orderby=[
OrderBy(
Column("project_id"),
direction=order_by_direction,
),
OrderBy(
Function("toStartOfDay", [Column("timestamp")]),
direction=order_by_direction,
),
OrderBy(
Column("timestamp"),
direction=order_by_direction,
),
OrderBy(
Column("event_id"),
direction=order_by_direction,
),
],
limit=Limit(1),
),
tenant_ids=tenant_ids,
)
snql_request_prev = make_request(is_prev=True)
snql_request_next = make_request(is_prev=False)
bulk_snql_results = bulk_snuba_queries(
[snql_request_prev, snql_request_next], referrer=referrer
)
event_ids = [self.__get_event_id_from_result(result) for result in bulk_snql_results]
return event_ids
def get_adjacent_event_ids(
self, event: Event | GroupEvent | None, filter: Filter
) -> tuple[tuple[str, str] | None, tuple[str, str] | None]:
"""
Returns the (project_id, event_id) pairs of the events immediately before and
after the given event under the provided filter. Either element is None if no
such adjacent event is found.
"""
assert filter, "You must provide a filter"
if event is None:
return (None, None)
prev_filter = deepcopy(filter)
prev_filter.conditions = prev_filter.conditions or []
prev_filter.conditions.extend(get_before_event_condition(event))
if not prev_filter.start:
# We only store 90 days of data, add a few extra days just in case
prev_filter.start = event.datetime - timedelta(days=100)
# the previous event can have the same timestamp, add 1 second
# to the end condition since it uses a less than condition
prev_filter.end = event.datetime + timedelta(seconds=1)
prev_filter.orderby = DESC_ORDERING
next_filter = deepcopy(filter)
next_filter.conditions = next_filter.conditions or []
next_filter.conditions.extend(get_after_event_condition(event))
next_filter.start = event.datetime
if not next_filter.end:
next_filter.end = datetime.utcnow()
next_filter.orderby = ASC_ORDERING
dataset = self._get_dataset_for_event(event)
result = self.__get_event_ids_from_filters(
filters=(prev_filter, next_filter),
dataset=dataset,
tenant_ids={"organization_id": event.project.organization_id},
)
return result[0], result[1]
def __get_columns(self, dataset: Dataset) -> list[str]:
return [
col.value.event_name
for col in EventStorage.minimal_columns[dataset]
if col.value.event_name is not None
]
def __get_event_ids_from_filters(
self,
filters: tuple[Filter, Filter],
dataset: Dataset = Dataset.Discover,
tenant_ids: Mapping[str, Any] | None = None,
) -> list[tuple[str, str] | None]:
columns = [Columns.EVENT_ID.value.alias, Columns.PROJECT_ID.value.alias]
try:
# This query uses the discover dataset to enable
# getting events across both errors and transactions, which is
# required when doing pagination in discover
results = snuba.bulk_raw_query(
[
snuba.SnubaQueryParams(
**snuba.aliased_query_params(
selected_columns=copy(columns),
conditions=filter.conditions,
filter_keys=filter.filter_keys,
start=filter.start,
end=filter.end,
orderby=filter.orderby,
limit=1,
referrer="eventstore.get_next_or_prev_event_id",
dataset=dataset,
tenant_ids=tenant_ids,
)
)
for filter in filters
],
referrer="eventstore.get_next_or_prev_event_id",
)
except (snuba.QueryOutsideRetentionError, snuba.QueryOutsideGroupActivityError):
# This can happen when the date conditions for paging
# and the current event generate impossible conditions.
return [None for _ in filters]
return [self.__get_event_id_from_result(result) for result in results]
def __get_event_id_from_result(self, result: Mapping[str, Any]) -> tuple[str, str] | None:
if "error" in result or len(result["data"]) == 0:
return None
row = result["data"][0]
return (str(row["project_id"]), str(row["event_id"]))
def __make_event(self, snuba_data: Mapping[str, Any]) -> Event:
event_id_column = Columns.EVENT_ID.value.event_name
project_id_column = Columns.PROJECT_ID.value.event_name
if event_id_column is None or project_id_column is None:
raise ValueError("Event ID or Project ID column name is None")
event_id = snuba_data[event_id_column]
project_id = snuba_data[project_id_column]
return Event(event_id=event_id, project_id=project_id, snuba_data=snuba_data)
def get_unfetched_transactions(
self,
filter: Filter,
orderby: Sequence[str] | None = None,
limit: int = DEFAULT_LIMIT,
offset: int = DEFAULT_OFFSET,
referrer: str = "eventstore.get_unfetched_transactions",
tenant_ids: Mapping[str, Any] | None = None,
) -> list[Event]:
"""
Get transactions from Snuba, without node data loaded.
"""
assert filter, "You must provide a filter"
cols = self.__get_columns(Dataset.Transactions)
orderby = orderby or DESC_ORDERING
result = snuba.aliased_query(
selected_columns=cols,
start=filter.start,
end=filter.end,
conditions=filter.conditions,
filter_keys=filter.filter_keys,
orderby=orderby,
limit=limit,
offset=offset,
referrer=referrer,
dataset=Dataset.Transactions,
tenant_ids=tenant_ids,
)
if "error" not in result:
events = [self.__make_transaction(evt) for evt in result["data"]]
return events
return []
def __make_transaction(self, snuba_data: Mapping[str, Any]) -> Event:
event_id_column = Columns.EVENT_ID.value.event_name
project_id_column = Columns.PROJECT_ID.value.event_name
if event_id_column is None or project_id_column is None:
raise ValueError("Event ID or Project ID column name is None")
event_id = snuba_data[event_id_column]
project_id = snuba_data[project_id_column]
return Event(event_id=event_id, project_id=project_id, snuba_data=snuba_data)
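# Hedged, self-contained sketch (illustrative only, not part of this module):
# how a single Snuba bulk-query result is reduced to the (project_id, event_id)
# pair returned by get_adjacent_event_ids, mirroring the private
# __get_event_id_from_result helper above.
def example_event_id_from_result(result):
    if "error" in result or len(result["data"]) == 0:
        return None  # no adjacent event was found
    row = result["data"][0]
    return (str(row["project_id"]), str(row["event_id"]))

print(example_event_id_from_result({"data": [{"project_id": 42, "event_id": "ab12"}]}))  # ('42', 'ab12')
print(example_event_id_from_result({"data": []}))  # None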
|
SnubaEventStorage
|
python
|
conda__conda
|
tests/plugins/test_solvers.py
|
{
"start": 783,
"end": 885
}
|
class ____:
@plugins.hookimpl
def conda_solvers(self):
yield classic_solver
|
SolverPlugin
|
python
|
run-llama__llama_index
|
llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/types.py
|
{
"start": 3019,
"end": 3118
}
|
class ____(BaseVoiceAgentEvent):
delta: Union[str, bytes]
item_id: str
|
ConversationDeltaEvent
|
python
|
bottlepy__bottle
|
bottle.py
|
{
"start": 99825,
"end": 103358
}
|
class ____:
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
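# Hedged usage sketch (not part of the original bottle.py). Bottle exposes the
# class above publicly as ``ResourceManager``; the temp directory and file name
# below are invented for the example.
import os
import tempfile

from bottle import ResourceManager

rm = ResourceManager()
with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, "app.conf"), "w") as fp:
        fp.write("debug = true\n")
    rm.add_path(tmp + os.sep)          # search paths are normalized and kept in order
    print(rm.lookup("app.conf"))       # absolute path, cached according to `cachemode`
    print(rm.open("app.conf").read())  # -> debug = true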
|
ResourceManager
|
python
|
pypa__pip
|
src/pip/_vendor/urllib3/poolmanager.py
|
{
"start": 4791,
"end": 15549
}
|
class ____(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \\**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
proxy_config = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools)
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port, request_context=None):
"""
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ("scheme", "host", "port"):
request_context.pop(key, None)
if scheme == "http":
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
return pool_cls(host, port, **request_context)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self._merge_pool_kwargs(pool_kwargs)
request_context["scheme"] = scheme or "http"
if not port:
port = port_by_scheme.get(request_context["scheme"].lower(), 80)
request_context["port"] = port
request_context["host"] = host
return self.connection_from_context(request_context)
def connection_from_context(self, request_context):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context["scheme"].lower()
pool_key_constructor = self.key_fn_by_scheme.get(scheme)
if not pool_key_constructor:
raise URLSchemeUnknown(scheme)
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key, request_context=request_context)
def connection_from_pool_key(self, pool_key, request_context=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
scheme = request_context["scheme"]
host = request_context["host"]
port = request_context["port"]
pool = self._new_pool(scheme, host, port, request_context=request_context)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url, pool_kwargs=None):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(
u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
)
def _merge_pool_kwargs(self, override):
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
for key, value in override.items():
if value is None:
try:
del base_pool_kwargs[key]
except KeyError:
pass
else:
base_pool_kwargs[key] = value
return base_pool_kwargs
def _proxy_requires_url_absolute_form(self, parsed_url):
"""
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
"""
if self.proxy is None:
return False
return not connection_requires_http_tunnel(
self.proxy, self.proxy_config, parsed_url.scheme
)
def _validate_proxy_scheme_url_selection(self, url_scheme):
"""
Validates that were not attempting to do TLS in TLS connections on
Python2 or with unsupported SSL implementations.
"""
if self.proxy is None or url_scheme != "https":
return
if self.proxy.scheme != "https":
return
if six.PY2 and not self.proxy_config.use_forwarding_for_https:
raise ProxySchemeUnsupported(
"Contacting HTTPS destinations through HTTPS proxies "
"'via CONNECT tunnels' is not supported in Python 2"
)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
self._validate_proxy_scheme_url_selection(u.scheme)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw["assert_same_host"] = False
kw["redirect"] = False
if "headers" not in kw:
kw["headers"] = self.headers.copy()
if self._proxy_requires_url_absolute_form(u):
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
if response.status == 303:
# Change the method according to RFC 9110, Section 15.4.4.
method = "GET"
# And drop the body so nothing sensitive is transferred.
kw["body"] = None
kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change()
retries = kw.get("retries")
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if retries.remove_headers_on_redirect and not conn.is_same_host(
redirect_location
):
headers = list(six.iterkeys(kw["headers"]))
for header in headers:
if header.lower() in retries.remove_headers_on_redirect:
kw["headers"].pop(header, None)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
kw["retries"] = retries
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
response.drain_conn()
return self.urlopen(method, redirect_location, **kw)
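# Hedged usage sketch (not part of the vendored module). The public urllib3
# package exposes the class above as ``PoolManager``; the URLs are placeholders.
import urllib3

with urllib3.PoolManager(num_pools=2, headers={"User-Agent": "example/1.0"}) as manager:
    r1 = manager.request("GET", "https://example.org/")
    r2 = manager.request("GET", "https://example.org/about")
    # Both requests share one pool keyed by (scheme, host, port).
    print(r1.status, len(manager.pools))  # e.g. 200 1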
|
PoolManager
|
python
|
joerick__pyinstrument
|
pyinstrument/renderers/html.py
|
{
"start": 4120,
"end": 4956
}
|
class ____(FrameRenderer):
"""
The HTML takes a special form of JSON-encoded session, which includes
an unprocessed frame tree rather than a list of frame records. This
reduces the amount of parsing code that must be included in the
Typescript renderer.
"""
output_file_extension = "json"
def default_processors(self) -> ProcessorList:
return []
def render(self, session: Session) -> str:
session_json = session.to_json(include_frame_records=False)
session_json_str = json.dumps(session_json)
root_frame = session.root_frame()
root_frame = self.preprocess(root_frame)
frame_tree_json_str = root_frame.to_json_str() if root_frame else "null"
return '{"session": %s, "frame_tree": %s}' % (session_json_str, frame_tree_json_str)
|
JSONForHTMLRenderer
|
python
|
bokeh__bokeh
|
src/bokeh/models/annotations/legends.py
|
{
"start": 29719,
"end": 31584
}
|
class ____(BaseBar):
""" ``SizeBar`` is a visual indicator that allows you to gauge the size of radial glyphs,
like ``Circle`` or ``Ngon``, which essentially allows you to add a third dimension to
2D scatter plots.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
renderer = Either(GlyphRendererOf(RadialGlyph), Auto, default="auto", help="""
A reference to a radial glyph renderer or ``"auto"``.
If a plot only contains a single radial glyph renderer, then it's safe to
use the automatic mode (``"auto"``). If there are multiple radial glyph
renderers, the ``renderer`` property allows you to select the one to use with
this ``SizeBar``.
""")
# XXX keep Auto Tuple order, because of Either.transform()
bounds = Either(Auto, Tuple(Float, Float), default="auto", help="""
Allows to limit the range of displayed radii.
""")
glyph_line_props = Include(LineProps, prefix="glyph", help="""
The {prop} of the glyph.
""")
glyph_fill_props = Include(FillProps, prefix="glyph", help="""
The {prop} of the glyph.
""")
glyph_hatch_props = Include(HatchProps, prefix="glyph", help="""
The {prop} of the glyph.
""")
glyph_line_color = Override(default=None)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
SizeBar
|
python
|
apache__airflow
|
airflow-core/tests/unit/executors/test_local_executor.py
|
{
"start": 1606,
"end": 7731
}
|
class ____:
TEST_SUCCESS_COMMANDS = 5
def test_sentry_integration(self):
assert not LocalExecutor.sentry_integration
def test_is_local_default_value(self):
assert LocalExecutor.is_local
def test_serve_logs_default_value(self):
assert LocalExecutor.serve_logs
@skip_spawn_mp_start
@mock.patch("airflow.sdk.execution_time.supervisor.supervise")
def test_execution(self, mock_supervise):
success_tis = [
workloads.TaskInstance(
id=uuid7(),
dag_version_id=uuid7(),
task_id=f"success_{i}",
dag_id="mydag",
run_id="run1",
try_number=1,
state="queued",
pool_slots=1,
queue="default",
priority_weight=1,
map_index=-1,
start_date=timezone.utcnow(),
)
for i in range(self.TEST_SUCCESS_COMMANDS)
]
fail_ti = success_tis[0].model_copy(update={"id": uuid7(), "task_id": "failure"})
# We just mock both styles here, only one will be hit though
def fake_supervise(ti, **kwargs):
if ti.id == fail_ti.id:
raise RuntimeError("fake failure")
return 0
mock_supervise.side_effect = fake_supervise
executor = LocalExecutor(parallelism=2)
executor.start()
assert executor.result_queue.empty()
with spy_on(executor._spawn_worker) as spawn_worker:
for ti in success_tis:
executor.queue_workload(
workloads.ExecuteTask(
token="",
ti=ti,
dag_rel_path="some/path",
log_path=None,
bundle_info=dict(name="hi", version="hi"),
),
session=mock.MagicMock(spec=Session),
)
executor.queue_workload(
workloads.ExecuteTask(
token="",
ti=fail_ti,
dag_rel_path="some/path",
log_path=None,
bundle_info=dict(name="hi", version="hi"),
),
session=mock.MagicMock(spec=Session),
)
# Process queued workloads to trigger worker spawning
executor._process_workloads(list(executor.queued_tasks.values()))
executor.end()
expected = 2
# Depending on how quickly the tasks run, we might not need to create all the workers we could
assert 1 <= len(spawn_worker.calls) <= expected
# By that time Queues are already shutdown so we cannot check if they are empty
assert len(executor.running) == 0
assert executor._unread_messages.value == 0
for ti in success_tis:
assert executor.event_buffer[ti.key][0] == State.SUCCESS
assert executor.event_buffer[fail_ti.key][0] == State.FAILED
@mock.patch("airflow.executors.local_executor.LocalExecutor.sync")
@mock.patch("airflow.executors.base_executor.BaseExecutor.trigger_tasks")
@mock.patch("airflow.executors.base_executor.Stats.gauge")
def test_gauge_executor_metrics(self, mock_stats_gauge, mock_trigger_tasks, mock_sync):
executor = LocalExecutor()
executor.heartbeat()
calls = [
mock.call(
"executor.open_slots", value=mock.ANY, tags={"status": "open", "name": "LocalExecutor"}
),
mock.call(
"executor.queued_tasks", value=mock.ANY, tags={"status": "queued", "name": "LocalExecutor"}
),
mock.call(
"executor.running_tasks", value=mock.ANY, tags={"status": "running", "name": "LocalExecutor"}
),
]
mock_stats_gauge.assert_has_calls(calls)
@skip_if_force_lowest_dependencies_marker
@pytest.mark.execution_timeout(5)
def test_clean_stop_on_signal(self):
import signal
executor = LocalExecutor(parallelism=2)
executor.start()
# We want to ensure we start a worker process, as we now only create them on demand
executor._spawn_worker()
try:
os.kill(os.getpid(), signal.SIGINT)
except KeyboardInterrupt:
pass
finally:
executor.end()
@pytest.mark.parametrize(
("conf_values", "expected_server"),
[
(
{
("api", "base_url"): "http://test-server",
("core", "execution_api_server_url"): None,
},
"http://test-server/execution/",
),
(
{
("api", "base_url"): "http://test-server",
("core", "execution_api_server_url"): "http://custom-server/execution/",
},
"http://custom-server/execution/",
),
({}, "http://localhost:8080/execution/"),
({("api", "base_url"): "/"}, "http://localhost:8080/execution/"),
({("api", "base_url"): "/airflow/"}, "http://localhost:8080/airflow/execution/"),
],
ids=[
"base_url_fallback",
"custom_server",
"no_base_url_no_custom",
"base_url_no_custom",
"relative_base_url",
],
)
@mock.patch("airflow.sdk.execution_time.supervisor.supervise")
def test_execution_api_server_url_config(self, mock_supervise, conf_values, expected_server):
"""Test that execution_api_server_url is correctly configured with fallback"""
with conf_vars(conf_values):
_execute_work(log=mock.ANY, workload=mock.MagicMock())
mock_supervise.assert_called_with(
ti=mock.ANY,
dag_rel_path=mock.ANY,
bundle_info=mock.ANY,
token=mock.ANY,
server=expected_server,
log_path=mock.ANY,
)
|
TestLocalExecutor
|
python
|
davidhalter__jedi
|
jedi/api/keywords.py
|
{
"start": 336,
"end": 1192
}
|
class ____(AbstractArbitraryName):
api_type = 'keyword'
def py__doc__(self):
return imitate_pydoc(self.string_name)
def imitate_pydoc(string):
"""
It's not possible to get pydoc's documentation without starting the annoying
pager.
"""
if pydoc_topics is None:
return ''
h = pydoc.help
with suppress(KeyError):
# try to access symbols
string = h.symbols[string]
string, _, related = string.partition(' ')
def get_target(s):
return h.topics.get(s, h.keywords.get(s))
while isinstance(string, str):
string = get_target(string)
try:
# is a tuple now
label, related = string
except TypeError:
return ''
try:
return pydoc_topics[label].strip() if pydoc_topics else ''
except KeyError:
return ''
|
KeywordName
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 93250,
"end": 93326
}
|
class ____(Unaryop):
operation = operator.pos
_operator_repr = "+"
|
Pos
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 59335,
"end": 61882
}
|
class ____:
xlAddIn = 18 # from enum XlFileFormat
xlAddIn8 = 18 # from enum XlFileFormat
xlCSV = 6 # from enum XlFileFormat
xlCSVMSDOS = 24 # from enum XlFileFormat
xlCSVMac = 22 # from enum XlFileFormat
xlCSVWindows = 23 # from enum XlFileFormat
xlCurrentPlatformText = -4158 # from enum XlFileFormat
xlDBF2 = 7 # from enum XlFileFormat
xlDBF3 = 8 # from enum XlFileFormat
xlDBF4 = 11 # from enum XlFileFormat
xlDIF = 9 # from enum XlFileFormat
xlExcel12 = 50 # from enum XlFileFormat
xlExcel2 = 16 # from enum XlFileFormat
xlExcel2FarEast = 27 # from enum XlFileFormat
xlExcel3 = 29 # from enum XlFileFormat
xlExcel4 = 33 # from enum XlFileFormat
xlExcel4Workbook = 35 # from enum XlFileFormat
xlExcel5 = 39 # from enum XlFileFormat
xlExcel7 = 39 # from enum XlFileFormat
xlExcel8 = 56 # from enum XlFileFormat
xlExcel9795 = 43 # from enum XlFileFormat
xlHtml = 44 # from enum XlFileFormat
xlIntlAddIn = 26 # from enum XlFileFormat
xlIntlMacro = 25 # from enum XlFileFormat
xlOpenDocumentSpreadsheet = 60 # from enum XlFileFormat
xlOpenXMLAddIn = 55 # from enum XlFileFormat
xlOpenXMLTemplate = 54 # from enum XlFileFormat
xlOpenXMLTemplateMacroEnabled = 53 # from enum XlFileFormat
xlOpenXMLWorkbook = 51 # from enum XlFileFormat
xlOpenXMLWorkbookMacroEnabled = 52 # from enum XlFileFormat
xlSYLK = 2 # from enum XlFileFormat
xlTemplate = 17 # from enum XlFileFormat
xlTemplate8 = 17 # from enum XlFileFormat
xlTextMSDOS = 21 # from enum XlFileFormat
xlTextMac = 19 # from enum XlFileFormat
xlTextPrinter = 36 # from enum XlFileFormat
xlTextWindows = 20 # from enum XlFileFormat
xlUnicodeText = 42 # from enum XlFileFormat
xlWJ2WD1 = 14 # from enum XlFileFormat
xlWJ3 = 40 # from enum XlFileFormat
xlWJ3FJ3 = 41 # from enum XlFileFormat
xlWK1 = 5 # from enum XlFileFormat
xlWK1ALL = 31 # from enum XlFileFormat
xlWK1FMT = 30 # from enum XlFileFormat
xlWK3 = 15 # from enum XlFileFormat
xlWK3FM3 = 32 # from enum XlFileFormat
xlWK4 = 38 # from enum XlFileFormat
xlWKS = 4 # from enum XlFileFormat
xlWQ1 = 34 # from enum XlFileFormat
xlWebArchive = 45 # from enum XlFileFormat
xlWorkbookDefault = 51 # from enum XlFileFormat
xlWorkbookNormal = -4143 # from enum XlFileFormat
xlWorks2FarEast = 28 # from enum XlFileFormat
xlXMLSpreadsheet = 46 # from enum XlFileFormat
|
FileFormat
|
python
|
astropy__astropy
|
astropy/samp/web_profile.py
|
{
"start": 4228,
"end": 5739
}
|
class ____(ThreadingXMLRPCServer):
"""
XMLRPC server supporting the SAMP Web Profile.
"""
def __init__(
self,
addr,
log=None,
requestHandler=WebProfileRequestHandler,
logRequests=True,
allow_none=True,
encoding=None,
):
self.clients = []
ThreadingXMLRPCServer.__init__(
self, addr, log, requestHandler, logRequests, allow_none, encoding
)
def add_client(self, client_id):
self.clients.append(client_id)
def remove_client(self, client_id):
try:
self.clients.remove(client_id)
except ValueError:
# No warning here because this method gets called for all clients,
# not just web clients, and we expect it to fail for non-web
# clients.
pass
def web_profile_text_dialog(request, queue):
samp_name = "unknown"
if isinstance(request[0], str):
# To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
text = f"""A Web application which declares to be
Name: {samp_name}
Origin: {request[2]}
is requesting to be registered with the SAMP Hub.
Pay attention that if you permit its registration, such
application will acquire all current user privileges, like
file read/write.
Do you give your consent? [yes|no]"""
print(text)
answer = input(">>> ")
queue.put(answer.lower() in ["yes", "y"])
|
WebProfileXMLRPCServer
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/processors/detector.py
|
{
"start": 9333,
"end": 18689
}
|
class ____(NamedTuple):
events_with_occurrences: list[tuple[GroupEvent, int]]
error_events: list[GroupEvent]
events_missing_detectors: list[GroupEvent]
def _split_events_by_occurrence(
event_list: list[GroupEvent],
) -> _SplitEvents:
events_with_occurrences: list[tuple[GroupEvent, int]] = []
error_events: list[GroupEvent] = [] # only error events don't have occurrences
events_missing_detectors: list[GroupEvent] = []
for event in event_list:
issue_occurrence = event.occurrence
if issue_occurrence is None:
assert event.group.issue_type.slug == ErrorGroupType.slug
error_events.append(event)
elif detector_id := issue_occurrence.evidence_data.get("detector_id"):
events_with_occurrences.append((event, detector_id))
else:
events_missing_detectors.append(event)
return _SplitEvents(
events_with_occurrences,
error_events,
events_missing_detectors,
)
def _create_event_detector_map(
detectors: BaseQuerySet[Detector],
key_event_map: dict[int, list[GroupEvent]],
detector_key_extractor: Callable[[Detector], int],
) -> tuple[dict[str, Detector], set[int]]:
result: dict[str, Detector] = {}
# used to track existing keys (detector_id or project_id) to log missing keys
keys = set()
for detector in detectors:
key = detector_key_extractor(detector)
keys.add(key)
detector_events = key_event_map[key]
result.update({event.event_id: detector for event in detector_events})
return result, keys
def create_issue_platform_payload(result: DetectorEvaluationResult, detector_type: str) -> None:
occurrence, status_change = None, None
if isinstance(result.result, IssueOccurrence):
occurrence = result.result
payload_type = PayloadType.OCCURRENCE
metrics.incr(
"workflow_engine.issue_platform.payload.sent.occurrence",
tags={"detector_type": detector_type},
sample_rate=1,
)
else:
status_change = result.result
payload_type = PayloadType.STATUS_CHANGE
metrics.incr(
"workflow_engine.issue_platform.payload.sent.status_change",
tags={"detector_type": detector_type},
sample_rate=1,
)
produce_occurrence_to_kafka(
payload_type=payload_type,
occurrence=occurrence,
status_change=status_change,
event_data=result.event_data,
)
@sentry_sdk.trace
def process_detectors[T](
data_packet: DataPacket[T], detectors: list[Detector]
) -> list[tuple[Detector, dict[DetectorGroupKey, DetectorEvaluationResult]]]:
results: list[tuple[Detector, dict[DetectorGroupKey, DetectorEvaluationResult]]] = []
for detector in detectors:
handler = detector.detector_handler
if not handler:
continue
metrics.incr(
"workflow_engine.process_detector",
tags={"detector_type": detector.type},
)
with metrics.timer(
"workflow_engine.process_detectors.evaluate", tags={"detector_type": detector.type}
):
detector_results = handler.evaluate(data_packet)
for result in detector_results.values():
logger_extra = {
"detector": detector.id,
"detector_type": detector.type,
"evaluation_data": data_packet.packet,
"result": result,
}
if result.result is not None:
if isinstance(result.result, IssueOccurrence):
metrics.incr(
"workflow_engine.process_detector.triggered",
tags={"detector_type": detector.type},
)
logger.info(
"detector_triggered",
extra=logger_extra,
)
else:
metrics.incr(
"workflow_engine.process_detector.resolved",
tags={"detector_type": detector.type},
)
logger.info(
"detector_resolved",
extra=logger_extra,
)
create_issue_platform_payload(result, detector.type)
if detector_results:
results.append((detector, detector_results))
return results
def associate_new_group_with_detector(group: Group, detector_id: int | None = None) -> bool:
"""
Associate a new Group with its Detector in the database.
If the Group is an error, it can be associated without a detector ID.
Return whether the group was associated.
"""
if detector_id is None:
# For error Groups, we know there is a Detector and we can find it by project.
if group.type == ErrorGroupType.type_id:
if not options.get("workflow_engine.associate_error_detectors", False):
return False
detector_id = Detector.get_error_detector_for_project(group.project.id).id
else:
metrics.incr(
"workflow_engine.associate_new_group_with_detector",
tags={"group_type": group.type, "result": "failure"},
)
logger.warning(
"associate_new_group_with_detector_failed",
extra={
"group_id": group.id,
"group_type": group.type,
},
)
return False
# Check if the detector exists. If not, create DetectorGroup with null detector_id
# to make it clear that we were associated with a detector that no longer exists.
if not Detector.objects.filter(id=detector_id).exists():
metrics.incr(
"workflow_engine.associate_new_group_with_detector",
tags={"group_type": group.type, "result": "detector_missing"},
)
logger.warning(
"associate_new_group_with_detector_detector_missing",
extra={
"group_id": group.id,
"group_type": group.type,
"detector_id": detector_id,
},
)
DetectorGroup.objects.get_or_create(
detector_id=None,
group_id=group.id,
)
return True
DetectorGroup.objects.get_or_create(
detector_id=detector_id,
group_id=group.id,
)
metrics.incr(
"workflow_engine.associate_new_group_with_detector",
tags={"group_type": group.type, "result": "success"},
)
return True
def ensure_association_with_detector(group: Group, detector_id: int | None = None) -> bool:
"""
Ensure a Group has a DetectorGroup association, creating it if missing.
Backdates date_added to group.first_seen for gradual backfill of existing groups.
"""
if not options.get("workflow_engine.ensure_detector_association"):
return False
# Common case: it exists, we verify and move on.
if DetectorGroup.objects.filter(group_id=group.id).exists():
return True
# Association is missing, determine the detector_id if not provided
if detector_id is None:
# For error Groups, we know there is a Detector and we can find it by project.
if group.type == ErrorGroupType.type_id:
try:
detector_id = Detector.get_error_detector_for_project(group.project.id).id
except Detector.DoesNotExist:
logger.warning(
"ensure_association_with_detector_detector_not_found",
extra={
"group_id": group.id,
"group_type": group.type,
"project_id": group.project.id,
},
)
return False
else:
return False
else:
# Check if the explicitly provided detector exists. If not, create DetectorGroup
# with null detector_id to make it clear that we were associated with a detector
# that no longer exists.
if not Detector.objects.filter(id=detector_id).exists():
detector_group, created = DetectorGroup.objects.get_or_create(
group_id=group.id,
defaults={"detector_id": None},
)
if created:
# Backdate the date_added to match the group's first_seen
DetectorGroup.objects.filter(id=detector_group.id).update(
date_added=group.first_seen
)
metrics.incr(
"workflow_engine.ensure_association_with_detector.created",
tags={"group_type": group.type},
)
return True
detector_group, created = DetectorGroup.objects.get_or_create(
group_id=group.id,
defaults={"detector_id": detector_id},
)
if created:
# Backdate the date_added to match the group's first_seen
DetectorGroup.objects.filter(id=detector_group.id).update(date_added=group.first_seen)
metrics.incr(
"workflow_engine.ensure_association_with_detector.created",
tags={"group_type": group.type},
)
return True
|
_SplitEvents
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 33747,
"end": 34283
}
|
class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_key: str):
"""Airbyte Source for Persistiq.
Documentation can be found at https://docs.airbyte.com/integrations/sources/persistiq
Args:
name (str): The name of the source.
api_key (str): PersistIq API Key. See the docs for more information on where to find that key.
"""
self.api_key = check.str_param(api_key, "api_key")
super().__init__("Persistiq", name)
|
PersistiqSource
|
python
|
huggingface__transformers
|
src/transformers/models/whisper/modeling_whisper.py
|
{
"start": 23996,
"end": 30116
}
|
class ____(WhisperPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`WhisperEncoderLayer`].
Args:
config: WhisperConfig
"""
def __init__(self, config: WhisperConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.num_mel_bins = config.num_mel_bins
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_source_positions
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim)
self.embed_positions.requires_grad_(False)
self.layers = nn.ModuleList([WhisperEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def get_input_embeddings(self) -> nn.Module:
return self.conv1
def set_input_embeddings(self, value: nn.Module):
self.conv1 = value
def forward(
self,
input_features,
attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`):
Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
attention_mask (`torch.Tensor`, *optional*):
Whisper does not support masking of the `input_features`; this argument is preserved for compatibility,
but it is not used. By default, silence in the input log-mel spectrogram is ignored.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0]
if input_features.shape[-1] != expected_seq_length:
raise ValueError(
f"Whisper expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}."
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
inputs_embeds = nn.functional.gelu(self.conv1(input_features))
inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
inputs_embeds = inputs_embeds.permute(0, 2, 1)
all_positions = torch.arange(self.embed_positions.num_embeddings, device=inputs_embeds.device)
hidden_states = inputs_embeds + self.embed_positions(all_positions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
None,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
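# Hedged sketch (not part of the original file): producing the `input_features`
# expected by the forward pass above. "openai/whisper-tiny" is only an example
# checkpoint name.
import numpy as np
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-tiny")
raw_audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = feature_extractor(raw_audio, sampling_rate=16000, return_tensors="pt")
# The extractor pads/truncates to 30 seconds of audio, so the mel features have
# length 3000, matching expected_seq_length = max_source_positions * conv1.stride[0] * conv2.stride[0].
print(inputs.input_features.shape)  # torch.Size([1, 80, 3000])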
|
WhisperEncoder
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/test/steps/python_connectors.py
|
{
"start": 8129,
"end": 8923
}
|
class ____(PytestStep):
"""A step to run the connector unit tests with Pytest."""
title = "Unit tests"
test_directory_name = "unit_tests"
common_test_dependencies = ["pytest-cov==4.1.0"]
MINIMUM_COVERAGE_FOR_CERTIFIED_CONNECTORS = 90
@property
def default_params(self) -> STEP_PARAMS:
"""Make sure the coverage computation is run for the unit tests.
Returns:
dict: The default pytest options.
"""
coverage_options = {"--cov": [self.context.connector.technical_name.replace("-", "_")]}
if self.context.connector.support_level == "certified":
coverage_options["--cov-fail-under"] = [str(self.MINIMUM_COVERAGE_FOR_CERTIFIED_CONNECTORS)]
return super().default_params | coverage_options
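# Hedged, self-contained sketch of the dict-union used in `default_params`
# above: keys from the right-hand mapping win on conflict (Python 3.9+).
# The option values below are invented for illustration.
base_params = {"--durations": ["3"], "--cov": ["old_package"]}
coverage_options = {"--cov": ["source_example"], "--cov-fail-under": ["90"]}
merged = base_params | coverage_options
print(merged)  # {'--durations': ['3'], '--cov': ['source_example'], '--cov-fail-under': ['90']}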
|
UnitTests
|
python
|
huggingface__transformers
|
src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py
|
{
"start": 12845,
"end": 13863
}
|
class ____(nn.Module):
def __init__(self, config: VitPoseBackboneConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([VitPoseBackboneLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
# Ignore copy
def forward(
self,
hidden_states: torch.Tensor,
dataset_index: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
) -> BaseModelOutput:
all_hidden_states = [hidden_states] if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, dataset_index)
if all_hidden_states is not None:
all_hidden_states.append(hidden_states)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=tuple(all_hidden_states) if all_hidden_states else None,
)
@auto_docstring
|
VitPoseBackboneEncoder
|
python
|
sympy__sympy
|
sympy/logic/boolalg.py
|
{
"start": 36165,
"end": 36804
}
|
class ____(BooleanFunction):
"""
Logical XNOR function.
Returns False if an odd number of the arguments are True and the rest are
False.
Returns True if an even number of the arguments are True and the rest are
False.
Examples
========
>>> from sympy.logic.boolalg import Xnor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Xnor(True, False)
False
>>> Xnor(True, True)
True
>>> Xnor(True, False, True, True, False)
False
>>> Xnor(True, False, True, False)
True
"""
@classmethod
def eval(cls, *args):
return Not(Xor(*args))
|
Xnor
|