| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
| string (1 value) | string (346 values) | string (6-201 chars) | dict | string (21 chars-2.38M) | string (1-96 chars) |
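Each record below lists its scalar fields on one line, followed by the `source` snippet in which the class name has been masked as `____`; the `target` field holds the masked name. As a minimal sketch of how a record could be consumed (assuming it is already loaded as a plain Python dict; no particular loading library is implied), the mask can be filled back in like this:

```python
# Minimal sketch: restore the masked class name in one record.
# Assumes the record is a plain dict with the columns listed above; the
# example values are taken from the django__django row further down.
def restore_class_name(record: dict) -> str:
    """Return the source snippet with the first '____' replaced by the target."""
    return record["source"].replace("____", record["target"], 1)


example = {
    "language": "python",
    "repo": "django__django",
    "path": "tests/multiple_database/routers.py",
    "class_span": {"start": 1721, "end": 1864},
    "source": 'class ____:\n    def db_for_write(self, model, **hints):\n        return "writer"\n',
    "target": "WriteRouter",
}

print(restore_class_name(example))
# class WriteRouter:
#     def db_for_write(self, model, **hints):
#         return "writer"
```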
---
language: python | repo: pallets__itsdangerous | path: src/itsdangerous/exc.py | class_span: {"start": 882, "end": 1619}
source:
class ____(BadSignature):
"""Raised if a time-based signature is invalid. This is a subclass
of :class:`BadSignature`.
"""
def __init__(
self,
message: str,
payload: t.Any | None = None,
date_signed: datetime | None = None,
):
super().__init__(message, payload)
#: If the signature expired this exposes the date of when the
#: signature was created. This can be helpful in order to
#: tell the user how long a link has been gone stale.
#:
#: .. versionchanged:: 2.0
#: The datetime value is timezone-aware rather than naive.
#:
#: .. versionadded:: 0.14
self.date_signed = date_signed
target: BadTimeSignature
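For context, a small hedged usage sketch of how this exception surfaces through itsdangerous' `TimestampSigner`; the secret key and payload are illustrative:

```python
import time

from itsdangerous import SignatureExpired, TimestampSigner

signer = TimestampSigner("secret-key")  # illustrative secret
token = signer.sign("hello")
time.sleep(2)

try:
    # The token is now older than max_age, so unsign() raises
    # SignatureExpired, a subclass of the class shown above.
    signer.unsign(token, max_age=1)
except SignatureExpired as exc:
    print("signed at:", exc.date_signed)  # the attribute set in __init__ above
```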
---
language: python | repo: run-llama__llama_index | path: llama-index-integrations/selectors/llama-index-selectors-notdiamond/llama_index/selectors/notdiamond/base.py | class_span: {"start": 523, "end": 987}
source:
class ____(SelectorResult):
"""A single selection of a choice provided by Not Diamond."""
class Config:
arbitrary_types_allowed = True
session_id: str
llm: LLMConfig
@classmethod
def from_selector_result(
cls, selector_result: SelectorResult, session_id: str, best_llm: LLMConfig
) -> "NotDiamondSelectorResult":
return cls(session_id=session_id, llm=best_llm, **selector_result.dict())
target: NotDiamondSelectorResult

---
language: python | repo: gevent__gevent | path: src/greentest/3.10/test_socket.py | class_span: {"start": 165097, "end": 165395}
source:
class ____(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
target: RecvmsgIntoRFC3542AncillaryUDP6Test

---
language: python | repo: walkccc__LeetCode | path: solutions/338. Counting Bits/338.py | class_span: {"start": 0, "end": 240}
source:
class ____:
def countBits(self, n: int) -> list[int]:
# f(i) := i's number of 1s in bitmask
# f(i) = f(i / 2) + i % 2
ans = [0] * (n + 1)
for i in range(1, n + 1):
ans[i] = ans[i // 2] + (i & 1)
return ans
target: Solution
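A quick standalone check of the recurrence used in the snippet above (the bit count of `i` equals the bit count of `i // 2` plus the lowest bit of `i`):

```python
# Standalone re-statement of the recurrence from the record above, checked
# against Python's built-in bit counting for the first few integers.
def count_bits(n: int) -> list[int]:
    ans = [0] * (n + 1)
    for i in range(1, n + 1):
        ans[i] = ans[i // 2] + (i & 1)
    return ans


assert count_bits(5) == [0, 1, 1, 2, 1, 2]
assert all(v == bin(i).count("1") for i, v in enumerate(count_bits(64)))
```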
---
language: python | repo: getsentry__sentry | path: tests/sentry/preprod/api/models/test_project_preprod_build_details_models.py | class_span: {"start": 412, "end": 9094}
source:
class ____(TestCase):
def test_to_size_info_none_input(self):
"""Test to_size_info returns None when given None input."""
result = to_size_info([])
assert result is None
def test_to_size_info_pending_state(self):
"""Test to_size_info returns SizeInfoPending for PENDING state."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.PENDING
)
result = to_size_info(list([size_metrics]))
assert isinstance(result, SizeInfoPending)
assert result.state == PreprodArtifactSizeMetrics.SizeAnalysisState.PENDING
def test_to_size_info_processing_state(self):
"""Test to_size_info returns SizeInfoProcessing for PROCESSING state."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.PROCESSING
)
result = to_size_info(list([size_metrics]))
assert isinstance(result, SizeInfoProcessing)
assert result.state == PreprodArtifactSizeMetrics.SizeAnalysisState.PROCESSING
def test_to_size_info_completed_state(self):
"""Test to_size_info returns SizeInfoCompleted for COMPLETED state."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
max_install_size=1024000,
max_download_size=512000,
)
result = to_size_info(list([size_metrics]))
assert isinstance(result, SizeInfoCompleted)
assert result.state == PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED
assert result.install_size_bytes == 1024000
assert result.download_size_bytes == 512000
def test_to_size_info_completed_state_with_multiple_metrics(self):
"""Test to_size_info returns SizeInfoCompleted for COMPLETED state with multiple metrics."""
size_metrics = [
PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
max_install_size=1024000,
max_download_size=512000,
),
PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
max_install_size=512000,
max_download_size=256000,
),
]
result = to_size_info(size_metrics)
assert isinstance(result, SizeInfoCompleted)
assert result.install_size_bytes == 1024000
assert result.download_size_bytes == 512000
assert len(result.size_metrics) == 2
assert (
result.size_metrics[0].metrics_artifact_type
== PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT
)
assert result.size_metrics[0].install_size_bytes == 1024000
assert result.size_metrics[0].download_size_bytes == 512000
assert (
result.size_metrics[1].metrics_artifact_type
== PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT
)
assert result.size_metrics[1].install_size_bytes == 512000
assert result.size_metrics[1].download_size_bytes == 256000
def test_to_size_info_failed_state(self):
"""Test to_size_info returns SizeInfoFailed for FAILED state."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED,
error_code=PreprodArtifactSizeMetrics.ErrorCode.TIMEOUT,
error_message="Analysis timed out after 30 minutes",
)
result = to_size_info(list([size_metrics]))
assert isinstance(result, SizeInfoFailed)
assert result.state == PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED
assert result.error_code == PreprodArtifactSizeMetrics.ErrorCode.TIMEOUT
assert result.error_message == "Analysis timed out after 30 minutes"
def test_to_size_info_failed_state_with_different_error_codes(self):
"""Test to_size_info handles different error codes correctly."""
error_cases = [
(PreprodArtifactSizeMetrics.ErrorCode.UNKNOWN, "Unknown error occurred"),
(
PreprodArtifactSizeMetrics.ErrorCode.UNSUPPORTED_ARTIFACT,
"Artifact type not supported",
),
(PreprodArtifactSizeMetrics.ErrorCode.PROCESSING_ERROR, "Processing failed"),
]
for error_code, error_message in error_cases:
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED,
error_code=error_code,
error_message=error_message,
)
result = to_size_info(list([size_metrics]))
assert isinstance(result, SizeInfoFailed)
assert result.error_code == error_code
assert result.error_message == error_message
def test_to_size_info_completed_with_zero_sizes(self):
"""Test to_size_info handles completed state with zero sizes."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
max_install_size=0,
max_download_size=0,
)
result = to_size_info(list([size_metrics]))
assert isinstance(result, SizeInfoCompleted)
assert result.install_size_bytes == 0
assert result.download_size_bytes == 0
def test_to_size_info_completed_with_large_sizes(self):
"""Test to_size_info handles completed state with large file sizes."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
max_install_size=5000000000, # ~5GB
max_download_size=2000000000, # ~2GB
)
result = to_size_info(list([size_metrics]))
assert isinstance(result, SizeInfoCompleted)
assert result.install_size_bytes == 5000000000
assert result.download_size_bytes == 2000000000
def test_to_size_info_invalid_state_raises_error(self):
"""Test to_size_info raises ValueError for unknown state."""
size_metrics = PreprodArtifactSizeMetrics(state=999) # Invalid state
with pytest.raises(ValueError, match="Unknown SizeAnalysisState 999"):
to_size_info(list([size_metrics]))
def test_to_size_info_completed_state_missing_size_fields(self):
"""Test to_size_info raises ValueError when COMPLETED state has None size fields."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
max_install_size=None,
max_download_size=None,
)
with pytest.raises(
ValueError, match="COMPLETED state requires both max_install_size and max_download_size"
):
to_size_info(list([size_metrics]))
def test_to_size_info_failed_state_no_error_code(self):
"""Test to_size_info raises ValueError when FAILED state has only error_code."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED,
error_code=None,
error_message="Processing failed",
)
with pytest.raises(
ValueError, match="FAILED state requires both error_code and error_message"
):
to_size_info(list([size_metrics]))
def test_to_size_info_failed_state_no_error_message(self):
"""Test to_size_info raises ValueError when FAILED state has only error_message."""
size_metrics = PreprodArtifactSizeMetrics(
state=PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED,
error_code=PreprodArtifactSizeMetrics.ErrorCode.PROCESSING_ERROR,
error_message=None,
)
with pytest.raises(
ValueError, match="FAILED state requires both error_code and error_message"
):
to_size_info(list([size_metrics]))
target: TestToSizeInfo

---
language: python | repo: sphinx-doc__sphinx | path: sphinx/ext/autosummary/__init__.py | class_span: {"start": 26862, "end": 32087}
source:
class ____(SphinxRole):
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
def run(self) -> tuple[list[Node], list[system_message]]:
pyobj_role = self.env.domains.python_domain.role('obj')
assert pyobj_role is not None
objects, errors = pyobj_role(
'obj',
self.rawtext,
self.text,
self.lineno,
self.inliner,
self.options,
self.content,
)
if errors:
return objects, errors
assert len(objects) == 1
pending_xref = cast('addnodes.pending_xref', objects[0])
try:
# try to import object by name
prefixes = get_import_prefixes_from_env(self.env)
name = pending_xref['reftarget']
prefixes = [
prefix
for prefix in prefixes
if prefix is None
or not (name.startswith(f'{prefix}.') or name == prefix)
]
import_by_name(name, prefixes)
except ImportExceptionGroup:
literal = cast('nodes.literal', pending_xref[0])
objects[0] = nodes.emphasis(
self.rawtext, literal.astext(), classes=literal['classes']
)
return objects, errors
def get_rst_suffix(app: Sphinx) -> str | None:
def get_supported_format(suffix: str) -> tuple[str, ...]:
parser_class = app.registry.get_source_parsers().get(suffix.removeprefix('.'))
if parser_class is None:
return ('restructuredtext',)
return parser_class.supported
suffix = None
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
return None
def process_generate_options(app: Sphinx) -> None:
genfiles = app.config.autosummary_generate
if genfiles is True:
env = app.env
genfiles = [
str(env.doc2path(x, base=False))
for x in env.found_docs
if env.doc2path(x).is_file()
]
elif genfiles is False:
pass
else:
ext = list(app.config.source_suffix)
genfiles = [
genfile + (ext[0] if not genfile.endswith(tuple(ext)) else '')
for genfile in genfiles
]
for entry in genfiles[:]:
if not (app.srcdir / entry).is_file():
logger.warning(__('autosummary_generate: file not found: %s'), entry)
genfiles.remove(entry)
if not genfiles:
return
suffix = get_rst_suffix(app)
if suffix is None:
logger.warning(
__(
'autosummary generates .rst files internally. '
'But your source_suffix does not contain .rst. Skipped.'
)
)
return
from sphinx.ext.autosummary.generate import generate_autosummary_docs
imported_members = app.config.autosummary_imported_members
with mock(app.config.autosummary_mock_imports):
generate_autosummary_docs(
genfiles,
suffix=suffix,
base_path=app.srcdir,
app=app,
imported_members=imported_members,
overwrite=app.config.autosummary_generate_overwrite,
encoding=app.config.source_encoding,
)
def setup(app: Sphinx) -> ExtensionMetadata:
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(
autosummary_toc,
html=(autosummary_toc_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop),
)
app.add_node(
autosummary_table,
html=(autosummary_table_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop),
)
app.add_directive('autosummary', Autosummary)
app.add_role('autolink', AutoLink())
app.connect('builder-inited', process_generate_options)
app.add_config_value('autosummary_context', {}, 'env', types=frozenset({dict}))
app.add_config_value(
'autosummary_filename_map', {}, 'html', types=frozenset({dict})
)
app.add_config_value(
'autosummary_generate', True, 'env', types=frozenset({bool, list})
)
app.add_config_value(
'autosummary_generate_overwrite', True, '', types=frozenset({bool})
)
app.add_config_value(
'autosummary_mock_imports',
lambda config: config.autodoc_mock_imports,
'env',
types=frozenset({list, tuple}),
)
app.add_config_value(
'autosummary_imported_members', False, '', types=frozenset({bool})
)
app.add_config_value(
'autosummary_ignore_module_all', True, 'env', types=frozenset({bool})
)
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
target: AutoLink
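A hedged sketch of the Sphinx configuration that exercises the `setup()` registration shown above; the values simply mirror the config keys registered there, and the project layout is assumed:

```python
# conf.py (sketch): enabling autosummary so the autolink role and the
# autosummary directive registered in setup() above become available.
extensions = [
    "sphinx.ext.autosummary",  # setup() also pulls in sphinx.ext.autodoc
]

# Config values registered in setup() above, shown at their registered
# defaults only to illustrate where they are consumed.
autosummary_generate = True           # generate stub pages on builder-inited
autosummary_generate_overwrite = True
autosummary_imported_members = False
```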
---
language: python | repo: cython__cython | path: Cython/Compiler/FlowControl.py | class_span: {"start": 543, "end": 997}
source:
class ____(ExprNodes.ExprNode):
# Used for declaring assignments of a specified type without a known entry.
def __init__(self, type, may_be_none=None, pos=None):
super().__init__(pos)
self.type = type
self._may_be_none = may_be_none
def may_be_none(self):
return self._may_be_none != False
# Fake rhs to silence "unused variable" warning
fake_rhs_expr = TypedExprNode(PyrexTypes.unspecified_type)
target: TypedExprNode

---
language: python | repo: ray-project__ray | path: python/ray/serve/tests/test_config_files/test_dag/dir/subdir/a/add_and_sub.py | class_span: {"start": 120, "end": 263}
source:
class ____(str, Enum):
ADD = "ADD"
SUBTRACT = "SUB"
@serve.deployment(
ray_actor_options={
"num_cpus": 0.1,
}
)
target: Operation
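A short hedged note on the `str`/`Enum` mixin used above: members compare equal to their plain string values, which is convenient for serialized request payloads. The class is re-declared here so the snippet is self-contained:

```python
from enum import Enum


class Operation(str, Enum):  # same shape as the record above
    ADD = "ADD"
    SUBTRACT = "SUB"


assert Operation.ADD == "ADD"                  # str mixin: equal to the raw string
assert Operation("SUB") is Operation.SUBTRACT  # lookup by value still works
```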
---
language: python | repo: pytorch__pytorch | path: test/inductor/test_ordered_set.py | class_span: {"start": 54672, "end": 54924}
source:
class ____(TestCopying, TestCase):
def setUp(self):
super().setUp()
self.OrderedSet = OrderedSet([((1, 2), (3, 4))])
del TestCopying
# ==============================================================================
target: TestCopyingNested
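The `del TestCopying` line above follows a common unittest idiom: concrete subclasses keep a reference to the base through their MRO, while deleting the module-level name stops the test loader from collecting the base class itself. A hedged, self-contained illustration of the pattern:

```python
import copy
import unittest


class TestCopyingBase(unittest.TestCase):
    value = None  # concrete subclasses supply the object to copy

    def test_copy_roundtrip(self):
        assert copy.copy(self.value) == self.value


class TestCopyingNestedTuple(TestCopyingBase):
    value = ((1, 2), (3, 4))


# Remove the module-level name so discovery only runs the concrete subclass;
# TestCopyingNestedTuple still works because it holds a reference to the base.
del TestCopyingBase

if __name__ == "__main__":
    unittest.main()
```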
---
language: python | repo: google__pytype | path: pytype/tests/test_typing_methods1.py | class_span: {"start": 141, "end": 11514}
source:
class ____(test_base.BaseTest):
"""Tests for typing.py."""
def _check_call(self, t, expr): # pylint: disable=invalid-name
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import {type}
def f() -> {type}: ...
""".format(type=t),
)
indented_expr = textwrap.dedent(expr).replace("\n", "\n" + " " * 8)
self.Check(
f"""
import foo
x = foo.f()
{indented_expr}
""",
pythonpath=[d.path],
)
def test_text(self):
self._check_call("Text", "x.upper()")
def test_supportsabs(self):
self._check_call("SupportsAbs", "abs(x)")
def test_supportsround(self):
self._check_call("SupportsRound", "round(x)")
def test_supportsint(self):
self._check_call("SupportsInt", "int(x); int(3)")
def test_supportsfloat(self):
self._check_call("SupportsFloat", "float(x); float(3.14)")
def test_supportscomplex(self):
self._check_call("SupportsComplex", "complex(x); complex(3j)")
def test_reversible(self):
self._check_call("Reversible", "reversed(x)")
def test_hashable(self):
self._check_call("Hashable", "hash(x)")
def test_sized(self):
self._check_call("Sized", "len(x)")
def test_iterator(self):
self._check_call("Iterator", "next(x)")
def test_iterable(self):
self._check_call("Iterable", "next(iter(x))")
def test_container(self):
self._check_call("Container", "42 in x")
def test_io(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import IO
def f() -> IO[str]: ...
""",
)
ty = self.Infer(
"""
import foo
x = foo.f()
with x as fi:
fi.read()
for b in x: pass
a = x.fileno()
x.flush()
b = x.isatty()
c = x.read()
d = x.read(30)
e = x.readable()
f = x.readline()
g = x.readlines()
h = x.seek(0)
i = x.seek(0, 1)
j = x.seekable()
k = x.tell()
x.truncate(10)
m = x.writable()
x.write("foo")
x.writelines(["foo", "bar"])
x.close()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import IO, List
fi = ... # type: IO[str]
a = ... # type: int
b = ... # type: bool
c = ... # type: str
d = ... # type: str
e = ... # type: bool
f = ... # type: str
g = ... # type: List[str]
h = ... # type: int
i = ... # type: int
j = ... # type: bool
k = ... # type: int
m = ... # type: bool
x = ... # type: IO[str]
""",
)
def test_binary_io(self):
self._check_call("BinaryIO", "x.read(10).upper()")
def test_text_io(self):
self._check_call("TextIO", "x.read(10).upper()")
def test_sequence_and_tuple(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Sequence, Tuple
def seq() -> Sequence[str]: ...
def tpl() -> Tuple[str]: ...
""",
)
ty = self.Infer(
"""
import foo
for seq in [foo.seq(), foo.tpl()]:
a = seq[0]
seq[0:10]
b = seq.index("foo")
c = seq.count("foo")
d = "foo" in seq
e = iter(seq)
f = reversed(seq)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Iterator, List, Sequence, Tuple, Union
seq = ... # type: Union[Sequence[str], Tuple[str]]
a = ... # type: str
b = ... # type: int
c = ... # type: int
d = ... # type: bool
e = ... # type: Union[Iterator[str], tupleiterator[str]]
f = ... # type: reversed[str]
""",
)
def test_mutablesequence_and_list(self):
# TODO(b/63407497): Enabling --strict-parameter-checks leads to a
# wrong-arg-types error on line 10.
self.options.tweak(strict_parameter_checks=False)
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, MutableSequence
def seq() -> MutableSequence[str]: ...
def lst() -> List[str]: ...
""",
)
ty = self.Infer(
"""
import foo
for seq in [foo.seq(), foo.lst()]:
seq[0] = 3
del seq[0]
a = seq.append(3)
c = seq.insert(3, "foo")
d = seq.reverse()
e = seq.pop()
f = seq.pop(4)
g = seq.remove("foo")
seq[0:5] = [1,2,3]
b = seq.extend([1,2,3])
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Iterator, List, Sequence, Union
# TODO(b/159065400): Should be List[Union[int, str]]
seq = ... # type: Union[list, typing.MutableSequence[Union[int, str]]]
a = ... # type: None
b = ... # type: None
c = ... # type: None
d = ... # type: None
e = ... # type: Union[int, str]
f = ... # type: Union[int, str]
g = ... # type: None
""",
)
def test_deque(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Deque
def deq() -> Deque[int]: ...
""",
)
ty = self.Infer(
"""
import foo
q = foo.deq()
q[0] = 3
del q[3]
a = q.append(3)
al = q.appendleft(2)
b = q.extend([1,2])
bl = q.extendleft([3,4])
c = q.pop()
cl = q.popleft()
d = q.rotate(3)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Deque
q = ... # type: Deque[int]
a = ... # type: None
al = ... # type: None
b = ... # type: None
bl = ... # type: None
c = ... # type: int
cl = ... # type: int
d = ... # type: None
""",
)
def test_mutablemapping(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import MutableMapping, TypeVar
K = TypeVar("K")
V = TypeVar("V")
class MyDict(MutableMapping[K, V]): ...
def f() -> MyDict[str, int]: ...
""",
)
ty = self.Infer(
"""
import foo
m = foo.f()
m.clear()
m[3j] = 3.14
del m["foo"]
a = m.pop("bar", 3j)
b = m.popitem()
c = m.setdefault("baz", 3j)
m.update({4j: 2.1})
m.update([(1, 2), (3, 4)])
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple, Union
import foo
m = ... # type: foo.MyDict[Union[complex, int, str], Union[complex, float, int]]
a = ... # type: Union[complex, float, int]
b = ... # type: Tuple[Union[complex, str], Union[float, int]]
c = ... # type: Union[complex, float, int]
""",
)
def test_dict_and_defaultdict(self):
# Sanity checks. (Default)Dict is just MutableMapping, which is tested above
self._check_call("DefaultDict", "x[42j]")
self._check_call("Dict", "x[42j]")
def test_abstractset(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import AbstractSet
def f() -> AbstractSet[str]: ...
""",
)
ty = self.Infer(
"""
import foo
x = foo.f()
a = "bar" in x
b = x & x
c = x | x
d = x - x
e = x ^ x
f = x.isdisjoint([1,2,3])
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import AbstractSet
x = ... # type: AbstractSet[str]
a = ... # type: bool
b = ... # type: AbstractSet[str]
c = ... # type: AbstractSet[str]
d = ... # type: AbstractSet[str]
e = ... # type: AbstractSet[str]
f = ... # type: bool
""",
)
def test_frozenset(self):
# Sanity check. FrozenSet is just AbstractSet, tested above.
self._check_call("FrozenSet", "3 in x")
def test_mutableset(self):
# TODO(b/63407497): Enabling --strict-parameter-checks leads to a
# wrong-arg-types error on line 8.
self.options.tweak(strict_parameter_checks=False)
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import MutableSet
def f() -> MutableSet[str]: ...
""",
)
ty = self.Infer(
"""
import foo
x = foo.f()
x.add(1)
a = x.pop()
x.discard(2)
x.clear()
x.add(3j)
x.remove(3j)
b = x & {1,2,3}
c = x | {1,2,3}
d = x ^ {1,2,3}
e = 3 in x
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import MutableSet, Union
a = ... # type: Union[int, str]
# TODO(b/159067449): We do a clear() after adding "int".
# Why does "int" still appear for b?
b = ... # type: MutableSet[Union[complex, int, str]]
c = ... # type: MutableSet[Union[complex, int, str]]
d = ... # type: MutableSet[Union[complex, int, str]]
e = ... # type: bool
x = ... # type: MutableSet[Union[complex, int, str]]
""",
)
def test_set(self):
# Sanity check. Set is just MutableSet, tested above.
self._check_call("Set", "x.add(3)")
def test_generator(self):
self._check_call(
"Generator",
"""
next(x)
x.send(42)
x.throw(Exception())
x.close()
""",
)
def test_pattern_and_match(self):
# Basic pattern sanity check.
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Pattern
def f() -> Pattern[str]: ...
""",
)
ty = self.Infer(
"""
import foo
pattern = foo.f()
m1 = pattern.search("foo")
pattern.match("foo")
pattern.split("foo")
pattern.findall("foo")[0]
list(pattern.finditer("foo"))[0]
pattern.sub("x", "x")
pattern.subn("x", "x")
assert m1
a = m1.pos
b = m1.endpos
c = m1.group(0)
d = m1.start()
e = m1.end()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
import re
from typing import Pattern
a: int
b: int
c: str
d: int
e: int
m1: re.Match[str] | None
pattern: Pattern[str]
""",
)
if __name__ == "__main__":
test_base.main()
target: TypingMethodsTest

---
language: python | repo: jmcnamara__XlsxWriter | path: xlsxwriter/test/comparison/test_chart_data_labels49.py | class_span: {"start": 315, "end": 1846}
source:
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels49.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [59202176, 60966784]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": True,
"position": "outside_end",
"custom": [{"value": "=Sheet1!$B$1"}],
},
}
)
chart.add_series(
{
"values": "=Sheet1!$B$1:$B$5",
"data_labels": {
"value": True,
"position": "inside_base",
"custom": [{"value": "=Sheet1!$B$2"}],
},
}
)
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
target: TestCompareXLSXFiles

---
language: python | repo: pandas-dev__pandas | path: scripts/tests/test_validate_docstrings.py | class_span: {"start": 10406, "end": 10791}
source:
class ____:
@pytest.mark.parametrize(
"name", ["pandas.Series.str.isdecimal", "pandas.Series.str.islower"]
)
def test_encode_content_write_to_file(self, name) -> None:
# GH25466
docstr = validate_docstrings.PandasDocstring(name).validate_pep8()
# the list of pep8 errors should be empty
assert not list(docstr)
target: TestPandasDocstringClass

---
language: python | repo: great-expectations__great_expectations | path: contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_iowa_zip.py | class_span: {"start": 727, "end": 1719}
source:
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_iowa_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_iowa_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
target: ColumnValuesToBeValidIowaZip

---
language: python | repo: getsentry__sentry | path: tests/sentry/uptime/autodetect/test_ranking.py | class_span: {"start": 632, "end": 4610}
source:
class ____(UptimeTestCase):
def assert_project_count(
self, project: Project, count: int | None, expiry: int | None
) -> int | None:
key = build_org_projects_key(project.organization)
cluster = get_cluster()
if count is None:
assert not cluster.zscore(key, str(project.id))
return None
else:
assert int(float(str(cluster.zscore(key, str(project.id))))) == count
return self.check_expiry(key, expiry)
def assert_url_count(
self, project: Project, url: str, count: int | None, expiry: int | None
) -> int | None:
key = get_project_base_url_rank_key(project)
cluster = get_cluster()
if count is None:
assert cluster.zscore(key, url) is None
return None
else:
assert cluster.zscore(key, url) == count
return self.check_expiry(key, expiry)
def check_expiry(self, key: str, expiry: int | None) -> int:
cluster = get_cluster()
ttl = cluster.ttl(key)
if expiry is None:
assert ttl > 0
else:
assert ttl == expiry
return ttl
def test(self) -> None:
project_2 = self.create_project()
url_1 = "https://sentry.io"
url_2 = "https://sentry.sentry.io"
add_base_url_to_rank(self.project, url_1)
project_1_expiry = self.assert_project_count(self.project, 1, None)
self.assert_project_count(project_2, None, -2)
project_1_url_expiry = self.assert_url_count(self.project, url_1, 1, None)
self.assert_url_count(project_2, url_1, None, -2)
add_base_url_to_rank(self.project, url_1)
self.assert_project_count(self.project, 2, project_1_expiry)
self.assert_project_count(project_2, None, -2)
self.assert_url_count(self.project, url_1, 2, project_1_url_expiry)
self.assert_url_count(project_2, url_1, None, -2)
add_base_url_to_rank(self.project, url_2)
self.assert_project_count(self.project, 3, project_1_expiry)
self.assert_project_count(project_2, None, -2)
self.assert_url_count(self.project, url_1, 2, project_1_url_expiry)
self.assert_url_count(self.project, url_2, 1, project_1_url_expiry)
self.assert_url_count(project_2, url_1, None, -2)
self.assert_url_count(project_2, url_2, None, -2)
add_base_url_to_rank(project_2, url_2)
self.assert_project_count(self.project, 3, project_1_expiry)
self.assert_project_count(project_2, 1, None)
self.assert_url_count(self.project, url_1, 2, project_1_url_expiry)
self.assert_url_count(self.project, url_2, 1, project_1_url_expiry)
project_2_url_expiry = self.assert_url_count(project_2, url_1, None, None)
self.assert_url_count(project_2, url_2, 1, project_2_url_expiry)
def test_trim(self) -> None:
with (
mock.patch("sentry.uptime.autodetect.ranking.RANKED_TRIM_CHANCE", new=1),
mock.patch("sentry.uptime.autodetect.ranking.RANKED_MAX_SIZE", new=2),
):
key = get_project_base_url_rank_key(self.project)
url_1 = "https://sentry.io"
url_2 = "https://sentry.sentry.io"
url_3 = "https://santry.sentry.io"
cluster = get_cluster()
add_base_url_to_rank(self.project, url_1)
add_base_url_to_rank(self.project, url_1)
add_base_url_to_rank(self.project, url_1)
assert cluster.zrange(key, 0, -1) == [url_1]
add_base_url_to_rank(self.project, url_2)
add_base_url_to_rank(self.project, url_2)
assert cluster.zrange(key, 0, -1) == [url_2, url_1]
# Since we're trimming immediately, this url will be immediately dropped since it's seen one time
add_base_url_to_rank(self.project, url_3)
assert cluster.zrange(key, 0, -1) == [url_2, url_1]
target: AddBaseUrlToRankTest

---
language: python | repo: cython__cython | path: Cython/Compiler/ExprNodes.py | class_span: {"start": 212942, "end": 216999}
source:
class ____(BufferIndexNode):
is_memview_index = True
is_buffer_access = False
def analyse_types(self, env, getting=True):
# memoryviewslice indexing or slicing
from . import MemoryView
self.is_pythran_mode = has_np_pythran(env)
indices = self.indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
if not getting:
self.writable_needed = True
if self.base.is_name or self.base.is_attribute:
self.base.entry.type.writable_needed = True
self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
axes = []
index_type = PyrexTypes.c_py_ssize_t_type
new_indices = []
if len(indices) - len(newaxes) > self.base.type.ndim:
self.type = error_type
error(indices[self.base.type.ndim].pos,
"Too many indices specified for type %s" % self.base.type)
return self
axis_idx = 0
for i, index in enumerate(indices):
index = index.analyse_types(env)
if index.is_none:
self.is_memview_slice = True
new_indices.append(index)
axes.append(('direct', 'strided'))
continue
access, packing = self.base.type.axes[axis_idx]
axis_idx += 1
if index.is_slice:
self.is_memview_slice = True
if index.step.is_none:
axes.append((access, packing))
else:
axes.append((access, 'strided'))
# Coerce start, stop and step to temps of the right type
for attr in ('start', 'stop', 'step'):
value = getattr(index, attr)
if not value.is_none:
value = value.coerce_to(index_type, env)
#value = value.coerce_to_temp(env)
setattr(index, attr, value)
new_indices.append(value)
elif index.type.is_int or index.type.is_pyobject:
if index.type.is_pyobject:
performance_hint(index.pos, "Index should be typed for more efficient access", env)
self.is_memview_index = True
index = index.coerce_to(index_type, env)
indices[i] = index
new_indices.append(index)
else:
self.type = error_type
error(index.pos, "Invalid index for memoryview specified, type %s" % index.type)
return self
### FIXME: replace by MemoryViewSliceNode if is_memview_slice ?
self.is_memview_index = self.is_memview_index and not self.is_memview_slice
self.indices = new_indices
# All indices with all start/stop/step for slices.
# We need to keep this around.
self.original_indices = indices
self.nogil = env.nogil
node = self.analyse_operation(env, getting, axes)
node.wrap_in_nonecheck_node(env)
return node
def analyse_operation(self, env, getting, axes):
self.none_error_message = "Cannot index None memoryview slice"
self.analyse_buffer_index(env, getting)
return self
def analyse_broadcast_operation(self, rhs):
"""
Support broadcasting for slice assignment.
E.g.
m_2d[...] = m_1d # or,
m_1d[...] = m_2d # if the leading dimension has extent 1
"""
if self.type.is_memoryviewslice:
lhs = self
if lhs.is_memview_broadcast or rhs.is_memview_broadcast:
lhs.is_memview_broadcast = True
rhs.is_memview_broadcast = True
def analyse_as_memview_scalar_assignment(self, rhs):
lhs = self.analyse_assignment(rhs)
if lhs:
rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
return lhs
return self
target: MemoryViewIndexNode

---
language: python | repo: django__django | path: tests/multiple_database/routers.py | class_span: {"start": 1721, "end": 1864}
source:
class ____:
# A router that only expresses an opinion on writes
def db_for_write(self, model, **hints):
return "writer"
target: WriteRouter
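A hedged sketch of how such a write-only router is wired up in Django settings. Because the class defines only `db_for_write`, Django treats it as having no opinion for reads, which then fall through to later routers or the `default` alias; the database names here are illustrative:

```python
# settings.py (sketch)
DATABASES = {
    "default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "default.sqlite3"},
    "writer": {"ENGINE": "django.db.backends.sqlite3", "NAME": "writer.sqlite3"},
}

# Routers are consulted in order; this one answers only for writes.
# The dotted path assumes the module is importable as multiple_database.routers.
DATABASE_ROUTERS = ["multiple_database.routers.WriteRouter"]
```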
---
language: python | repo: streamlit__streamlit | path: lib/tests/streamlit/elements/button_group_test.py | class_span: {"start": 13366, "end": 45263}
source:
class ____(DeltaGeneratorTestCase):
@parameterized.expand(
[
(
st.feedback,
("thumbs",),
None,
[":material/thumb_up:", ":material/thumb_down:"],
"content_icon",
ButtonGroupProto.Style.BORDERLESS,
False,
),
(
st.pills,
("label", ["a", "b", "c"]),
{"help": "Test help param"},
["a", "b", "c"],
"content",
ButtonGroupProto.Style.PILLS,
True,
),
(
lambda *args, **kwargs: ButtonGroupMixin._internal_button_group(
st._main, *args, **kwargs
),
(["a", "b", "c"],),
None,
["a", "b", "c"],
"content",
ButtonGroupProto.Style.SEGMENTED_CONTROL,
False,
),
]
)
def test_proto_population(
self,
command: Callable[..., None],
command_args: tuple[Any, ...],
command_kwargs: dict[str, Any] | None,
expected_options: list[str],
option_field: str,
style: ButtonGroupProto.Style,
test_label: bool,
):
if command_kwargs is None:
command_kwargs = {}
command(*command_args, **command_kwargs)
delta = self.get_delta_from_queue().new_element.button_group
assert [
getattr(option, option_field) for option in delta.options
] == expected_options
assert delta.default == []
assert delta.click_mode == ButtonGroupProto.ClickMode.SINGLE_SELECT
assert delta.disabled is False
assert delta.form_id == ""
assert (
delta.selection_visualization
== ButtonGroupProto.SelectionVisualization.ONLY_SELECTED
)
assert delta.style == style
if test_label:
assert delta.label == command_args[0]
assert (
delta.label_visibility.value
is LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
@parameterized.expand(
get_command_matrix([("string_key",), (0,), (None,)], with_st_feedback=True)
)
def test_key_types(self, command: Callable[..., None], key: str | int | None):
"""Test that the key argument can be passed as expected."""
# use options that is compatible with all commands including st.feedback
command("thumbs", key=key)
delta = self.get_delta_from_queue().new_element.button_group
assert delta.id.endswith(f"-{key}")
@parameterized.expand(
[
(st.feedback, ("thumbs",)),
(
st.feedback,
("thumbs",),
{"default": 1},
1,
),
(st.feedback, ("stars",), {"default": 2}, 2),
(st.feedback, ("faces",), {"default": 3}, 3),
(st.pills, ("label", ["a", "b", "c"])),
(st.pills, ("label", ["a", "b", "c"]), {"default": "b"}, "b"),
(
lambda *args, **kwargs: ButtonGroupMixin._internal_button_group(
st._main, *args, **kwargs
),
(["a", "b", "c"],),
{"default": "b"},
"b",
),
(
st.pills,
("label", ["a", "b", "c"]),
{"default": "b", "selection_mode": "multi"},
["b"],
),
(
lambda *args, **kwargs: ButtonGroupMixin._internal_button_group(
st._main, *args, **kwargs
),
(["a", "b", "c"],),
{"default": "b", "selection_mode": "multi"},
["b"],
),
]
)
def test_default_return_value(
self,
command: Callable[..., Any],
command_args: tuple[Any, ...],
command_kwargs: dict | None = None,
expected_default: str | None = None,
):
if command_kwargs is None:
command_kwargs = {}
res = command(*command_args, **command_kwargs)
assert res == expected_default
@parameterized.expand(
[
(st.feedback, ("thumbs",)),
(st.pills, ("label", ["a", "b", "c"])),
]
)
def test_disabled(self, command: Callable, command_args: tuple[Any, ...]):
command(*command_args, disabled=True)
delta = self.get_delta_from_queue().new_element.button_group
assert delta.disabled is True
@parameterized.expand(
[
(st.segmented_control),
(st.pills),
]
)
def test_includes_label_in_id(self, command: Callable):
command(label="label 1", options=["a", "b", "c"])
button_group_1 = self.get_delta_from_queue().new_element.button_group
command(label="label 2", options=["a", "b", "c"])
button_group_2 = self.get_delta_from_queue().new_element.button_group
assert button_group_1.id != button_group_2.id
@parameterized.expand(
get_command_matrix(
[
((),),
([],),
(np.array([]),),
(pd.Series(np.array([])),),
(set(),),
]
)
)
def test_no_options(self, command: Callable[..., None], options: Any):
"""Test that it handles no options."""
command(options)
c = self.get_delta_from_queue().new_element.button_group
assert c.default[:] == []
assert [option.content for option in c.options] == []
@parameterized.expand(
get_command_matrix(
[
(("m", "f"), ["m", "f"]),
(["male", "female"], ["male", "female"]),
(np.array(["m", "f"]), ["m", "f"]),
(pd.Series(np.array(["male", "female"])), ["male", "female"]),
(pd.DataFrame({"options": ["male", "female"]}), ["male", "female"]),
(
pd.DataFrame(
data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]], columns=["a", "b", "c"]
).columns,
["a", "b", "c"],
),
]
)
)
def test_various_option_types(
self,
command: Callable[..., None],
options: Any,
proto_options: list[str],
):
"""Test that it supports different types of options."""
command(options)
c = self.get_delta_from_queue().new_element.button_group
assert c.default[:] == []
assert [option.content for option in c.options] == proto_options
@parameterized.expand(
get_command_matrix(
[
(
pd.Series(np.array(["green", "blue", "red", "yellow", "brown"])),
["yellow"],
["green", "blue", "red", "yellow", "brown"],
[3],
),
(
np.array(["green", "blue", "red", "yellow", "brown"]),
["green", "red"],
["green", "blue", "red", "yellow", "brown"],
[0, 2],
),
(
("green", "blue", "red", "yellow", "brown"),
["blue"],
["green", "blue", "red", "yellow", "brown"],
[1],
),
(
["green", "blue", "red", "yellow", "brown"],
["brown"],
["green", "blue", "red", "yellow", "brown"],
[4],
),
(
pd.DataFrame({"col1": ["male", "female"], "col2": ["15", "10"]}),
["male", "female"],
["male", "female"],
[0, 1],
),
]
)
)
def test_various_option_types_with_defaults(
self,
command: Callable[..., None],
options: Any,
defaults: Any,
proto_options: list[str],
expected_defaults: list[int],
):
"""Test that it supports different types of options and works with defaults."""
command(options, default=defaults, selection_mode="multi")
c = self.get_delta_from_queue().new_element.button_group
assert [option.content for option in c.options] == proto_options
assert c.default[:] == expected_defaults
@parameterized.expand(
get_command_matrix(
[
(("Tea", "Water"), [1, 2]),
# the lambda returns a generator that needs to be fresh
# for every test run:
(lambda: (i for i in ("Tea", "Water")), [1, 2]),
(np.array(["Coffee", "Tea"]), [0, 1]),
(pd.Series(np.array(["Coffee", "Tea"])), [0, 1]),
("Coffee", [0]),
]
)
)
def test_default_types(
self, command: Callable[..., None], defaults: Any, expected: list[Any]
):
if callable(defaults):
defaults = defaults()
command(["Coffee", "Tea", "Water"], default=defaults, selection_mode="multi")
c = self.get_delta_from_queue().new_element.button_group
assert c.default[:] == expected
assert [option.content for option in c.options] == ["Coffee", "Tea", "Water"]
@parameterized.expand(
get_command_matrix([(None, []), ([], []), (["Tea", "Water"], [1, 2])])
)
def test_defaults_for_multi(
self, command: Callable[..., None], defaults: Any, expected: list[Any]
):
"""Test that valid default can be passed as expected."""
command(
["Coffee", "Tea", "Water"],
default=defaults,
selection_mode="multi",
)
c = self.get_delta_from_queue().new_element.button_group
assert c.default[:] == expected
assert [option.content for option in c.options] == ["Coffee", "Tea", "Water"]
@parameterized.expand(
get_command_matrix([(None, []), ([], []), (["Tea"], [1]), ("Coffee", [0])])
)
def test_default_for_singleselect(
self, command: Callable[..., None], defaults: Any, expected: list[Any]
):
"""Test that valid default can be passed as expected and that the default can be
a list or single value."""
command(
["Coffee", "Tea", "Water"],
default=defaults,
selection_mode="single",
)
c = self.get_delta_from_queue().new_element.button_group
assert c.default[:] == expected
assert [option.content for option in c.options] == ["Coffee", "Tea", "Water"]
@parameterized.expand(get_command_matrix([]))
def test_default_for_single_select_must_be_single_value(
self, command: Callable[..., None]
):
"""Test that passing multiple values as default for single select raises an
exception."""
with pytest.raises(StreamlitAPIException) as exception:
command(
["Coffee", "Tea", "Water"],
default=["Coffee", "Tea"],
selection_mode="single",
)
assert (
str(exception.value)
== "The default argument to `st.pills` must be a single value when "
"`selection_mode='single'`."
)
@parameterized.expand(
get_command_matrix(
[
(["Tea", "Vodka", None], StreamlitAPIException),
([1, 2], StreamlitAPIException),
]
)
)
def test_invalid_defaults(
self, command: Callable[..., None], defaults: list, expected: type[Exception]
):
"""Test that invalid default trigger the expected exception."""
with pytest.raises(expected):
command(["Coffee", "Tea", "Water"], default=defaults)
@parameterized.expand(
get_command_matrix(
[([":material/thumb_up:", ":material/thumb_down:", "foo", 0],)]
)
)
def test_format_func_is_applied(
self,
command: Callable[..., None],
options: list[str],
):
"""Test that format_func is applied to the options; since we add '!' its not a
valid icon anymore."""
command(options, format_func=lambda x: f"{x}!")
c = self.get_delta_from_queue().new_element.button_group
for index, option in enumerate(options):
assert c.options[index].content == f"{option}!"
@parameterized.expand(
[
(st.feedback, ("thumbs",)),
(st.pills, ("label", ["a", "b", "c"])),
]
)
def test_on_change_is_registered(
self,
command: Callable[..., None],
command_args: tuple[str, ...],
):
command(*command_args, on_change=lambda x: x)
ctx = get_script_run_ctx()
assert ctx is not None
session_state = ctx.session_state._state
widget_id = session_state.get_widget_states()[0].id
metadata = session_state._new_widget_state.widget_metadata.get(widget_id)
assert metadata is not None
assert metadata.callback is not None
@parameterized.expand(get_command_matrix([]))
def test_option_starting_with_icon(self, command: Callable[..., None]):
command(
[
"☕ Coffee",
"🍵 Tea",
":material/zoom_in: Water",
"Earth",
":material/zoom_out:",
]
)
c = self.get_delta_from_queue().new_element.button_group
assert c.default == []
assert [option.content for option in c.options] == [
"Coffee",
"Tea",
"Water",
"Earth",
"",
]
assert [option.content_icon for option in c.options] == [
"☕",
"🍵",
":material/zoom_in:",
"",
":material/zoom_out:",
]
@parameterized.expand(
get_command_matrix(
[
("no-icon Coffee",),
("",),
(":material/foo: Water",),
(":material/thumb_up Tea",),
]
)
)
def test_invalid_icons_are_not_set_to_content_icon_field(
self, command: Callable[..., None], option: str
):
command([option])
proto = self.get_delta_from_queue().new_element.button_group
for proto_option in proto.options:
assert proto_option.content_icon == ""
assert proto_option.content == option
@parameterized.expand(get_command_matrix([], with_st_feedback=True))
def test_outside_form(self, command: Callable[..., None]):
"""Test that form id is marshalled correctly outside of a form."""
# pass an option that is valid for st.feedback and also the other button_group
# commands
command("thumbs")
proto = self.get_delta_from_queue().new_element.button_group
assert proto.form_id == ""
@parameterized.expand(get_command_matrix([], with_st_feedback=True))
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form(self, command: Callable[..., None]):
"""Test that form id is marshalled correctly inside of a form."""
with st.form("form"):
# pass an option that is valid for st.feedback and also the other button_group
# commands
command("thumbs")
# 2 elements will be created: form block, widget
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
proto = self.get_delta_from_queue(1).new_element.button_group
assert proto.form_id == form_proto.form.form_id
@parameterized.expand(get_command_matrix([]))
def test_inside_column(self, command: Callable[..., None]):
"""Test that button group commands work correctly inside of a column."""
col1, _ = st.columns(2)
with col1:
command(["bar", "baz"])
all_deltas = self.get_all_deltas_from_queue()
# 4 elements will be created: 1 horizontal block, 2 columns, 1 widget
assert len(all_deltas) == 4
proto = self.get_delta_from_queue().new_element.button_group
assert proto.default == []
assert [option.content for option in proto.options] == ["bar", "baz"]
def test_inside_column_feedback(self):
"""Test that st.feedback works correctly inside of a column."""
col1, _ = st.columns(2)
with col1:
st.feedback("thumbs")
all_deltas = self.get_all_deltas_from_queue()
# 4 elements will be created: 1 horizontal block, 2 columns, 1 widget
assert len(all_deltas) == 4
proto = self.get_delta_from_queue().new_element.button_group
assert proto.default == []
assert [option.content_icon for option in proto.options] == [
":material/thumb_up:",
":material/thumb_down:",
]
@parameterized.expand(get_command_matrix([]))
def test_default_string(self, command: Callable[..., None]):
"""Test if works when the default value is not a list."""
arg_options = ["some str", 123, None, {}]
proto_options = ["some str", "123", "None", "{}"]
command(
arg_options,
default="some str",
)
c = self.get_delta_from_queue().new_element.button_group
assert c.default[:] == [0]
assert [option.content for option in c.options] == proto_options
@parameterized.expand(get_command_matrix([]))
def test_invalid_selection_mode(self, command: Callable[..., None]):
"""Test that passing an invalid selection_mode raises an exception."""
with pytest.raises(StreamlitAPIException) as exception:
command(["a", "b"], selection_mode="foo")
assert (
str(exception.value)
== "The selection_mode argument must be one of ['single', 'multi']. "
"The argument passed was 'foo'."
)
@parameterized.expand(get_command_matrix([]))
def test_widget_state_changed_via_session_state_for_single_select(
self, command: Callable[..., Any]
):
st.session_state.command_key = "stars"
val = command(["thumbs", "stars"], key="command_key")
assert val == "stars"
@parameterized.expand(get_command_matrix([]))
def test_widget_state_changed_via_session_state_for_multi_select(
self, command: Callable[..., Any]
):
st.session_state.command_key = ["stars"]
val = command(["thumbs", "stars"], key="command_key", selection_mode="multi")
assert val == ["stars"]
@parameterized.expand(get_command_matrix([]))
def test_button_group_with_width(self, command: Callable[..., None]):
"""Test button group widgets with different width types."""
test_cases = [
(500, WidthConfigFields.PIXEL_WIDTH.value, "pixel_width", 500),
("stretch", WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
("content", WidthConfigFields.USE_CONTENT.value, "use_content", True),
]
for width_value, expected_width_spec, field_name, field_value in test_cases:
with self.subTest(width_value=width_value):
command(["a", "b", "c"], width=width_value)
el = self.get_delta_from_queue().new_element
assert el.button_group.options[0].content == "a"
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, field_name) == field_value
@parameterized.expand(get_command_matrix([]))
def test_button_group_with_invalid_width(self, command: Callable[..., None]):
"""Test button group widgets with invalid width values."""
test_cases = [
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-100,
"Invalid width value: -100. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
100.5,
"Invalid width value: 100.5. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
for width_value, expected_error_message in test_cases:
with self.subTest(width_value=width_value):
with pytest.raises(StreamlitAPIException) as exc:
command(["a", "b", "c"], width=width_value)
assert str(exc.value) == expected_error_message
@parameterized.expand(get_command_matrix([]))
def test_button_group_default_width(self, command: Callable[..., None]):
"""Test that button group widgets default to content width."""
command(["a", "b", "c"])
el = self.get_delta_from_queue().new_element
assert el.button_group.options[0].content == "a"
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_invalid_style(self):
"""Test internal button_group command does not accept invalid style."""
with pytest.raises(StreamlitAPIException) as exception:
ButtonGroupMixin._internal_button_group(
st._main, ["a", "b", "c"], style="foo"
)
assert (
str(exception.value) == "The style argument must be one of "
"['borderless', 'pills', 'segmented_control']. "
"The argument passed was 'foo'."
)
@parameterized.expand(
[
(st.feedback, ("thumbs",), "feedback"),
(st.pills, ("label", ["a", "b", "c"]), "pills"),
(st.segmented_control, ("label", ["a", "b", "c"]), "segmented_control"),
]
)
def test_duplicate_element_id_error_message(
self, command: Callable, command_args: tuple[Any, ...], element_name: str
):
with pytest.raises(StreamlitAPIException) as exception:
# Call two times to trigger the error:
command(*command_args)
command(*command_args)
# Make sure the correct name is used in the error message
assert element_name in str(exception.value)
def test_stable_id_with_key_segmented_control(self):
"""Test that the widget ID is stable for segmented_control when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params (keep whitelisted kwargs stable)
st.segmented_control(
label="Label 1",
key="segmented_control_key",
help="Help 1",
disabled=False,
width="content",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
default="a",
# Whitelisted args:
options=["a", "b", "c"],
selection_mode="single",
format_func=lambda x: x.capitalize(),
)
proto1 = self.get_delta_from_queue().new_element.button_group
id1 = proto1.id
# Second render with different non-whitelisted params but same key
st.segmented_control(
label="Label 2",
key="segmented_control_key",
help="Help 2",
disabled=True,
width="stretch",
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
default="b",
# Whitelisted args:
options=["a", "b", "c"],
selection_mode="single",
format_func=lambda x: x.capitalize(),
)
proto2 = self.get_delta_from_queue().new_element.button_group
id2 = proto2.id
assert id1 == id2
@parameterized.expand(
[
("options", ["a", "b"], ["x", "y"]),
("selection_mode", "single", "multi"),
("format_func", lambda x: x.capitalize(), lambda x: x.lower()),
]
)
def test_whitelisted_stable_key_kwargs_segmented_control(
self, kwarg_name: str, value1: object, value2: object
):
"""Test that the widget ID changes for segmented_control when a whitelisted kwarg changes even when the key
is provided.
"""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
base_kwargs: dict[str, object] = {
"label": "Label",
"key": "segmented_control_key_1",
"options": ["a", "b", "c"],
"selection_mode": "single",
}
# Apply first value for the whitelisted kwarg
base_kwargs[kwarg_name] = value1
st.segmented_control(**base_kwargs) # type: ignore[arg-type]
proto1 = self.get_delta_from_queue().new_element.button_group
id1 = proto1.id
# Apply second value for the whitelisted kwarg
base_kwargs[kwarg_name] = value2
st.segmented_control(**base_kwargs) # type: ignore[arg-type]
proto2 = self.get_delta_from_queue().new_element.button_group
id2 = proto2.id
assert id1 != id2
def test_stable_id_with_key_feedback(self):
"""Test that the widget ID is stable for feedback when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params (keep whitelisted kwargs stable)
st.feedback(
key="feedback_key",
disabled=False,
width="content",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
default=0,
# Whitelisted args:
options="thumbs",
)
proto1 = self.get_delta_from_queue().new_element.button_group
id1 = proto1.id
# Second render with different non-whitelisted params but same key
st.feedback(
key="feedback_key",
disabled=True,
width="stretch",
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
default=1,
# Whitelisted args:
options="thumbs",
)
proto2 = self.get_delta_from_queue().new_element.button_group
id2 = proto2.id
assert id1 == id2
@parameterized.expand(
[
("options", "thumbs", "faces"),
]
)
def test_whitelisted_stable_key_kwargs_feedback(
self, _kwarg_name: str, value1: object, value2: object
):
"""Test that the widget ID changes for feedback when a whitelisted kwarg
changes even when the key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
base_kwargs: dict[str, object] = {
"key": "feedback_key_1",
}
# Apply first value for the whitelisted kwarg
st.feedback(value1, **base_kwargs) # type: ignore[arg-type]
proto1 = self.get_delta_from_queue().new_element.button_group
id1 = proto1.id
# Apply second value for the whitelisted kwarg
st.feedback(value2, **base_kwargs) # type: ignore[arg-type]
proto2 = self.get_delta_from_queue().new_element.button_group
id2 = proto2.id
assert id1 != id2
def test_stable_id_with_key_pills(self):
"""Test that the widget ID is stable for pills when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params (keep whitelisted kwargs stable)
st.pills(
label="Label 1",
key="pills_key",
help="Help 1",
disabled=False,
width="content",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
default="a",
# Whitelisted args:
options=["a", "b", "c"],
selection_mode="single",
format_func=lambda x: x.capitalize(),
)
proto1 = self.get_delta_from_queue().new_element.button_group
id1 = proto1.id
# Second render with different non-whitelisted params but same key
st.pills(
label="Label 2",
key="pills_key",
help="Help 2",
disabled=True,
width="stretch",
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
default="b",
# Whitelisted args:
options=["a", "b", "c"],
selection_mode="single",
format_func=lambda x: x.capitalize(),
)
proto2 = self.get_delta_from_queue().new_element.button_group
id2 = proto2.id
assert id1 == id2
@parameterized.expand(
[
("options", ["a", "b"], ["x", "y"]),
("selection_mode", "single", "multi"),
("format_func", lambda x: x.capitalize(), lambda x: x.lower()),
]
)
def test_whitelisted_stable_key_kwargs_pills(
self, kwarg_name: str, value1: object, value2: object
):
"""Test that the widget ID changes for pills when a whitelisted kwarg changes even when the key
is provided.
"""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
base_kwargs: dict[str, object] = {
"label": "Label",
"key": "pills_key_1",
"options": ["a", "b", "c"],
"selection_mode": "single",
}
# Apply first value for the whitelisted kwarg
base_kwargs[kwarg_name] = value1
st.pills(**base_kwargs) # type: ignore[arg-type]
proto1 = self.get_delta_from_queue().new_element.button_group
id1 = proto1.id
# Apply second value for the whitelisted kwarg
base_kwargs[kwarg_name] = value2
st.pills(**base_kwargs) # type: ignore[arg-type]
proto2 = self.get_delta_from_queue().new_element.button_group
id2 = proto2.id
assert id1 != id2
target: ButtonGroupCommandTests

---
language: python | repo: scrapy__scrapy | path: tests/test_downloadermiddleware_httpauth.py | class_span: {"start": 483, "end": 737}
source:
class ____:
def setup_method(self):
self.spider = LegacySpider("foo")
def test_auth(self):
mw = HttpAuthMiddleware()
with pytest.raises(AttributeError):
mw.spider_opened(self.spider)
target: TestHttpAuthMiddlewareLegacy

---
language: python | repo: sqlalchemy__sqlalchemy | path: lib/sqlalchemy/sql/selectable.py | class_span: {"start": 76282, "end": 77063}
source:
class ____(NamedTuple):
required_label_name: Optional[str]
"""
string label name, if non-None, must be rendered as a
label, i.e. "AS <name>"
"""
proxy_key: Optional[str]
"""
proxy_key that is to be part of the result map for this
col. this is also the key in a fromclause.c or
select.selected_columns collection
"""
fallback_label_name: Optional[str]
"""
name that can be used to render an "AS <name>" when
we have to render a label even though
required_label_name was not given
"""
column: Union[ColumnElement[Any], TextClause]
"""
the ColumnElement itself
"""
repeated: bool
"""
True if this is a duplicate of a previous column
in the list of columns
"""
|
_ColumnsPlusNames
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/filters.py
|
{
"start": 6527,
"end": 8721
}
|
class ____(ModelFilterSet):
"""
Filtering and sorting for the project version listing page.
This is used from the project versions list view page to provide filtering
and sorting to the version list and search UI. It is normally instantiated
with an included queryset, which provides user project authorization.
"""
VISIBILITY_HIDDEN = "hidden"
VISIBILITY_VISIBLE = "visible"
VISIBILITY_CHOICES = (
("hidden", _("Hidden versions")),
("visible", _("Visible versions")),
)
PRIVACY_CHOICES = (
("public", _("Public versions")),
("private", _("Private versions")),
)
# Attribute filter fields
slug = FilteredModelChoiceFilter(
label=_("Version"),
empty_label=_("All versions"),
to_field_name="slug",
queryset_method="get_version_queryset",
method="get_version",
label_attribute="verbose_name",
)
privacy = ChoiceFilter(
field_name="privacy_level",
label=_("Privacy"),
choices=PRIVACY_CHOICES,
empty_label=_("Any"),
)
# This field looks better as ``visibility=hidden`` than it does
# ``hidden=true``, otherwise we could use a BooleanFilter instance here
# instead
visibility = ChoiceFilter(
field_name="hidden",
label=_("Visibility"),
choices=VISIBILITY_CHOICES,
method="get_visibility",
empty_label=_("Any"),
)
sort = VersionSortOrderingFilter(
field_name="sort",
label=_("Sort by"),
)
def __init__(self, *args, project=None, **kwargs):
self.project = project
super().__init__(*args, **kwargs)
def get_version(self, queryset, field_name, version):
return queryset.filter(slug=version.slug)
def get_version_queryset(self):
# This query is passed in at instantiation
return self.queryset
def get_visibility(self, queryset, field_name, value):
if value == self.VISIBILITY_HIDDEN:
return queryset.filter(hidden=True)
if value == self.VISIBILITY_VISIBLE:
return queryset.filter(hidden=False)
return queryset
|
ProjectVersionListFilterSet
|
python
|
getsentry__sentry
|
src/sentry/audit_log/events.py
|
{
"start": 3831,
"end": 4221
}
|
class ____(AuditLogEvent):
def __init__(self) -> None:
super().__init__(event_id=10, name="ORG_ADD", api_name="org.create")
def render(self, audit_log_entry: AuditLogEntry) -> str:
if channel := audit_log_entry.data.get("channel"):
return f"created the organization with {channel} integration"
return "created the organization"
|
OrgAddAuditLogEvent
|
python
|
getsentry__sentry
|
src/sentry/release_health/base.py
|
{
"start": 4360,
"end": 5059
}
|
class ____(TypedDict, total=False):
adoption: float | None
sessions_adoption: float | None
total_users_24h: int | None
total_project_users_24h: int | None
total_sessions_24h: int | None
total_project_sessions_24h: int | None
total_sessions: int | None
total_users: int | None
has_health_data: bool
sessions_crashed: int
crash_free_users: float | None
crash_free_sessions: float | None
sessions_errored: int
duration_p50: float | None
duration_p90: float | None
stats: Mapping[StatsPeriod, ReleaseHealthStats]
sessions_unhandled: int
unhandled_session_rate: float | None
unhandled_user_rate: float | None
|
ReleaseHealthOverview
|
python
|
Textualize__textual
|
docs/blog/snippets/2022-12-07-responsive-app-background-task/blocking02.py
|
{
"start": 260,
"end": 460
}
|
class ____(Widget):
def on_click(self) -> None:
self.styles.background = Color(
randint(1, 255),
randint(1, 255),
randint(1, 255),
)
|
ColourChanger
|
python
|
doocs__leetcode
|
solution/0700-0799/0732.My Calendar III/Solution.py
|
{
"start": 1666,
"end": 2020
}
|
class ____:
def __init__(self):
self.tree = SegmentTree()
def book(self, start: int, end: int) -> int:
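# Assuming the (elided) SegmentTree is 1-indexed, the half-open booking
# [start, end) maps to the closed range [start + 1, end]; querying the full
# domain then returns the maximum number of overlapping bookings.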
self.tree.modify(start + 1, end, 1)
return self.tree.query(1, int(1e9 + 1))
# Your MyCalendarThree object will be instantiated and called as such:
# obj = MyCalendarThree()
# param_1 = obj.book(start,end)
|
MyCalendarThree
|
python
|
pytorch__pytorch
|
test/nn/test_module_hooks.py
|
{
"start": 4388,
"end": 4615
}
|
class ____:
def __init__(self, inp):
self.input = inp
def __enter__(self, *args, **kwargs):
self.input.append(2)
def __exit__(self, *args, **kwargs):
self.input.append(-1)
|
DummyContextManager
|
python
|
eventlet__eventlet
|
eventlet/websocket.py
|
{
"start": 1788,
"end": 14024
}
|
class ____:
"""Wraps a websocket handler function in a WSGI application.
Use it like this::
@websocket.WebSocketWSGI
def my_handler(ws):
from_browser = ws.wait()
ws.send("from server")
The single argument to the function will be an instance of
:class:`WebSocket`. To close the socket, simply return from the
function. Note that the server will log the websocket request at
the time of closure.
An optional argument max_frame_length can be given, which will set the
maximum incoming *uncompressed* payload length of a frame. By default, this
is set to 8MiB. Note that excessive values here might create a DOS attack
vector.
"""
def __init__(self, handler, max_frame_length=DEFAULT_MAX_FRAME_LENGTH):
self.handler = handler
self.protocol_version = None
self.support_legacy_versions = True
self.supported_protocols = []
self.origin_checker = None
self.max_frame_length = max_frame_length
@classmethod
def configured(cls,
handler=None,
supported_protocols=None,
origin_checker=None,
support_legacy_versions=False):
def decorator(handler):
inst = cls(handler)
inst.support_legacy_versions = support_legacy_versions
inst.origin_checker = origin_checker
if supported_protocols:
inst.supported_protocols = supported_protocols
return inst
if handler is None:
return decorator
return decorator(handler)
def __call__(self, environ, start_response):
http_connection_parts = [
part.strip()
for part in environ.get('HTTP_CONNECTION', '').lower().split(',')]
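# The Connection header may carry several tokens (e.g. 'keep-alive, Upgrade'),
# so split it and look for 'upgrade' rather than comparing the whole value.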
if not ('upgrade' in http_connection_parts and
environ.get('HTTP_UPGRADE', '').lower() == 'websocket'):
# need to check a few more things here for true compliance
start_response('400 Bad Request', [('Connection', 'close')])
return []
try:
if 'HTTP_SEC_WEBSOCKET_VERSION' in environ:
ws = self._handle_hybi_request(environ)
elif self.support_legacy_versions:
ws = self._handle_legacy_request(environ)
else:
raise BadRequest()
except BadRequest as e:
status = e.status
body = e.body or b''
headers = e.headers or []
start_response(status,
[('Connection', 'close'), ] + headers)
return [body]
# We're ready to switch protocols; if running under Eventlet
# (this is not always the case) then flag the connection as
# idle to play well with a graceful stop
if 'eventlet.set_idle' in environ:
environ['eventlet.set_idle']()
try:
self.handler(ws)
except OSError as e:
if get_errno(e) not in ACCEPTABLE_CLIENT_ERRORS:
raise
# Make sure we send the closing frame
ws._send_closing_frame(True)
# use this undocumented feature of eventlet.wsgi to ensure that it
# doesn't barf on the fact that we didn't call start_response
wsgi.WSGI_LOCAL.already_handled = True
return []
def _handle_legacy_request(self, environ):
if 'eventlet.input' in environ:
sock = environ['eventlet.input'].get_socket()
elif 'gunicorn.socket' in environ:
sock = environ['gunicorn.socket']
else:
raise Exception('No eventlet.input or gunicorn.socket present in environ.')
if 'HTTP_SEC_WEBSOCKET_KEY1' in environ:
self.protocol_version = 76
if 'HTTP_SEC_WEBSOCKET_KEY2' not in environ:
raise BadRequest()
else:
self.protocol_version = 75
if self.protocol_version == 76:
key1 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY1'])
key2 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY2'])
# There's no content-length header in the request, but it has 8
# bytes of data.
environ['wsgi.input'].content_length = 8
key3 = environ['wsgi.input'].read(8)
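# Hixie-76 handshake: pack the two decoded key numbers as big-endian 32-bit
# integers, append the 8-byte request body, and echo back the MD5 digest of
# that as the 16-byte challenge response.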
key = struct.pack(">II", key1, key2) + key3
response = md5(key).digest()
# Start building the response
scheme = 'ws'
if environ.get('wsgi.url_scheme') == 'https':
scheme = 'wss'
location = '%s://%s%s%s' % (
scheme,
environ.get('HTTP_HOST'),
environ.get('SCRIPT_NAME'),
environ.get('PATH_INFO')
)
qs = environ.get('QUERY_STRING')
if qs is not None:
location += '?' + qs
if self.protocol_version == 75:
handshake_reply = (
b"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
b"Upgrade: WebSocket\r\n"
b"Connection: Upgrade\r\n"
b"WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n"
b"WebSocket-Location: " + location.encode() + b"\r\n\r\n"
)
elif self.protocol_version == 76:
handshake_reply = (
b"HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
b"Upgrade: WebSocket\r\n"
b"Connection: Upgrade\r\n"
b"Sec-WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n"
b"Sec-WebSocket-Protocol: " +
environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default').encode() + b"\r\n"
b"Sec-WebSocket-Location: " + location.encode() + b"\r\n"
b"\r\n" + response
)
else: # pragma NO COVER
raise ValueError("Unknown WebSocket protocol version.")
sock.sendall(handshake_reply)
return WebSocket(sock, environ, self.protocol_version)
def _parse_extension_header(self, header):
if header is None:
return None
res = {}
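# Parses e.g. 'permessage-deflate; client_max_window_bits, other-ext' into
# {'permessage-deflate': [{'client_max_window_bits': True}], 'other-ext': [{}]}.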
for ext in header.split(","):
parts = ext.split(";")
config = {}
for part in parts[1:]:
key_val = part.split("=")
if len(key_val) == 1:
config[key_val[0].strip().lower()] = True
else:
config[key_val[0].strip().lower()] = key_val[1].strip().strip('"').lower()
res.setdefault(parts[0].strip().lower(), []).append(config)
return res
def _negotiate_permessage_deflate(self, extensions):
if not extensions:
return None
deflate = extensions.get("permessage-deflate")
if deflate is None:
return None
for config in deflate:
# We'll evaluate each config in the client's preferred order and pick
# the first that we can support.
want_config = {
# These are bool options, we can support both
"server_no_context_takeover": config.get("server_no_context_takeover", False),
"client_no_context_takeover": config.get("client_no_context_takeover", False)
}
# These are either bool OR int options. True means the client can accept a value
# for the option, a number means the client wants that specific value.
max_wbits = min(zlib.MAX_WBITS, 15)
mwb = config.get("server_max_window_bits")
if mwb is not None:
if mwb is True:
want_config["server_max_window_bits"] = max_wbits
else:
want_config["server_max_window_bits"] = \
int(config.get("server_max_window_bits", max_wbits))
if not (8 <= want_config["server_max_window_bits"] <= 15):
continue
mwb = config.get("client_max_window_bits")
if mwb is not None:
if mwb is True:
want_config["client_max_window_bits"] = max_wbits
else:
want_config["client_max_window_bits"] = \
int(config.get("client_max_window_bits", max_wbits))
if not (8 <= want_config["client_max_window_bits"] <= 15):
continue
return want_config
return None
def _format_extension_header(self, parsed_extensions):
if not parsed_extensions:
return None
parts = []
for name, config in parsed_extensions.items():
ext_parts = [name.encode()]
for key, value in config.items():
if value is False:
pass
elif value is True:
ext_parts.append(key.encode())
else:
ext_parts.append(("%s=%s" % (key, str(value))).encode())
parts.append(b"; ".join(ext_parts))
return b", ".join(parts)
def _handle_hybi_request(self, environ):
if 'eventlet.input' in environ:
sock = environ['eventlet.input'].get_socket()
elif 'gunicorn.socket' in environ:
sock = environ['gunicorn.socket']
else:
raise Exception('No eventlet.input or gunicorn.socket present in environ.')
hybi_version = environ['HTTP_SEC_WEBSOCKET_VERSION']
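# Per RFC 6455, an unsupported client version gets a 426 Upgrade Required
# response that advertises the versions this server does support.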
if hybi_version not in ('8', '13', ):
raise BadRequest(status='426 Upgrade Required',
headers=[('Sec-WebSocket-Version', '8, 13')])
self.protocol_version = int(hybi_version)
if 'HTTP_SEC_WEBSOCKET_KEY' not in environ:
# That's bad.
raise BadRequest()
origin = environ.get(
'HTTP_ORIGIN',
(environ.get('HTTP_SEC_WEBSOCKET_ORIGIN', '')
if self.protocol_version <= 8 else ''))
if self.origin_checker is not None:
if not self.origin_checker(environ.get('HTTP_HOST'), origin):
raise BadRequest(status='403 Forbidden')
protocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', None)
negotiated_protocol = None
if protocols:
for p in (i.strip() for i in protocols.split(',')):
if p in self.supported_protocols:
negotiated_protocol = p
break
key = environ['HTTP_SEC_WEBSOCKET_KEY']
response = base64.b64encode(sha1(key.encode() + PROTOCOL_GUID).digest())
handshake_reply = [b"HTTP/1.1 101 Switching Protocols",
b"Upgrade: websocket",
b"Connection: Upgrade",
b"Sec-WebSocket-Accept: " + response]
if negotiated_protocol:
handshake_reply.append(b"Sec-WebSocket-Protocol: " + negotiated_protocol.encode())
parsed_extensions = {}
extensions = self._parse_extension_header(environ.get("HTTP_SEC_WEBSOCKET_EXTENSIONS"))
deflate = self._negotiate_permessage_deflate(extensions)
if deflate is not None:
parsed_extensions["permessage-deflate"] = deflate
formatted_ext = self._format_extension_header(parsed_extensions)
if formatted_ext is not None:
handshake_reply.append(b"Sec-WebSocket-Extensions: " + formatted_ext)
sock.sendall(b'\r\n'.join(handshake_reply) + b'\r\n\r\n')
return RFC6455WebSocket(sock, environ, self.protocol_version,
protocol=negotiated_protocol,
extensions=parsed_extensions,
max_frame_length=self.max_frame_length)
def _extract_number(self, value):
"""
Utility function which, given a string like 'g98sd 5[]221@1', will
return 9852211. Used to parse the Sec-WebSocket-Key headers.
"""
out = ""
spaces = 0
for char in value:
if char in string.digits:
out += char
elif char == " ":
spaces += 1
return int(out) // spaces
|
WebSocketWSGI
|
python
|
pallets__werkzeug
|
src/werkzeug/datastructures/structures.py
|
{
"start": 4212,
"end": 18773
}
|
class ____(TypeConversionDict[K, V]):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
.. versionchanged:: 3.1
Implement ``|`` and ``|=`` operators.
"""
def __init__(
self,
mapping: (
MultiDict[K, V]
| cabc.Mapping[K, V | list[V] | tuple[V, ...] | set[V]]
| cabc.Iterable[tuple[K, V]]
| None
) = None,
) -> None:
if mapping is None:
super().__init__()
elif isinstance(mapping, MultiDict):
super().__init__((k, vs[:]) for k, vs in mapping.lists()) # type: ignore[misc]
elif isinstance(mapping, cabc.Mapping):
tmp = {}
for key, value in mapping.items():
if isinstance(value, (list, tuple, set)):
value = list(value)
if not value:
continue
else:
value = [value]
tmp[key] = value
super().__init__(tmp) # type: ignore[arg-type]
else:
tmp = {}
for key, value in mapping:
tmp.setdefault(key, []).append(value)
super().__init__(tmp) # type: ignore[arg-type]
def __getstate__(self) -> t.Any:
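# Pickle the full value lists so that every value for a key survives a
# copy/pickle round-trip, not just the first one.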
return dict(self.lists())
def __setstate__(self, value: t.Any) -> None:
super().clear()
super().update(value)
def __iter__(self) -> cabc.Iterator[K]:
# https://github.com/python/cpython/issues/87412
# If __iter__ is not overridden, Python uses a fast path for dict(md),
# taking the data directly and getting lists of values, rather than
# calling __getitem__ and getting only the first value.
return super().__iter__()
def __getitem__(self, key: K) -> V:
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
lst = super().__getitem__(key)
if len(lst) > 0: # type: ignore[arg-type]
return lst[0] # type: ignore[index,no-any-return]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key: K, value: V) -> None:
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
super().__setitem__(key, [value]) # type: ignore[assignment]
def add(self, key: K, value: V) -> None:
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
super().setdefault(key, []).append(value) # type: ignore[arg-type,attr-defined]
@t.overload
def getlist(self, key: K) -> list[V]: ...
@t.overload
def getlist(self, key: K, type: cabc.Callable[[V], T]) -> list[T]: ...
def getlist(
self, key: K, type: cabc.Callable[[V], T] | None = None
) -> list[V] | list[T]:
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just like `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: Callable to convert each value. If a ``ValueError`` or
``TypeError`` is raised, the value is omitted.
:return: a :class:`list` of all the values for the key.
.. versionchanged:: 3.1
Catches ``TypeError`` in addition to ``ValueError``.
"""
try:
rv: list[V] = super().__getitem__(key) # type: ignore[assignment]
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except (ValueError, TypeError):
pass
return result
def setlist(self, key: K, new_list: cabc.Iterable[V]) -> None:
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
super().__setitem__(key, list(new_list)) # type: ignore[assignment]
@t.overload
def setdefault(self, key: K) -> None: ...
@t.overload
def setdefault(self, key: K, default: V) -> V: ...
def setdefault(self, key: K, default: V | None = None) -> V | None:
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default # type: ignore[assignment]
return self[key]
def setlistdefault(
self, key: K, default_list: cabc.Iterable[V] | None = None
) -> list[V]:
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default_list: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
super().__setitem__(key, list(default_list or ())) # type: ignore[assignment]
return super().__getitem__(key) # type: ignore[return-value]
def items(self, multi: bool = False) -> cabc.Iterable[tuple[K, V]]: # type: ignore[override]
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
values: list[V]
for key, values in super().items(): # type: ignore[assignment]
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def lists(self) -> cabc.Iterable[tuple[K, list[V]]]:
"""Return a iterator of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
values: list[V]
for key, values in super().items(): # type: ignore[assignment]
yield key, list(values)
def values(self) -> cabc.Iterable[V]: # type: ignore[override]
"""Returns an iterator of the first value on every key's value list."""
values: list[V]
for values in super().values(): # type: ignore[assignment]
yield values[0]
def listvalues(self) -> cabc.Iterable[list[V]]:
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return super().values() # type: ignore[return-value]
def copy(self) -> te.Self:
"""Return a shallow copy of this object."""
return self.__class__(self)
def deepcopy(self, memo: t.Any = None) -> te.Self:
"""Return a deep copy of this object."""
return self.__class__(deepcopy(self.to_dict(flat=False), memo))
@t.overload
def to_dict(self) -> dict[K, V]: ...
@t.overload
def to_dict(self, flat: t.Literal[False]) -> dict[K, list[V]]: ...
def to_dict(self, flat: bool = True) -> dict[K, V] | dict[K, list[V]]:
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(self.items())
return dict(self.lists())
def update( # type: ignore[override]
self,
mapping: (
MultiDict[K, V]
| cabc.Mapping[K, V | list[V] | tuple[V, ...] | set[V]]
| cabc.Iterable[tuple[K, V]]
),
) -> None:
"""update() extends rather than replaces existing key lists:
>>> a = MultiDict({'x': 1})
>>> b = MultiDict({'x': 2, 'y': 3})
>>> a.update(b)
>>> a
MultiDict([('y', 3), ('x', 1), ('x', 2)])
If the value list for a key in ``other_dict`` is empty, no new values
will be added to the dict and the key will not be created:
>>> x = {'empty_list': []}
>>> y = MultiDict()
>>> y.update(x)
>>> y
MultiDict([])
"""
for key, value in iter_multi_items(mapping):
self.add(key, value)
def __or__( # type: ignore[override]
self, other: cabc.Mapping[K, V | list[V] | tuple[V, ...] | set[V]]
) -> MultiDict[K, V]:
if not isinstance(other, cabc.Mapping):
return NotImplemented
rv = self.copy()
rv.update(other)
return rv
def __ior__( # type: ignore[override]
self,
other: (
cabc.Mapping[K, V | list[V] | tuple[V, ...] | set[V]]
| cabc.Iterable[tuple[K, V]]
),
) -> te.Self:
if not isinstance(other, (cabc.Mapping, cabc.Iterable)):
return NotImplemented
self.update(other)
return self
@t.overload
def pop(self, key: K) -> V: ...
@t.overload
def pop(self, key: K, default: V) -> V: ...
@t.overload
def pop(self, key: K, default: T) -> V | T: ...
def pop(
self,
key: K,
default: V | T = _missing, # type: ignore[assignment]
) -> V | T:
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
lst: list[V]
try:
lst = super().pop(key) # type: ignore[assignment]
if len(lst) == 0:
raise exceptions.BadRequestKeyError(key)
return lst[0]
except KeyError:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(key) from None
def popitem(self) -> tuple[K, V]:
"""Pop an item from the dict."""
item: tuple[K, list[V]]
try:
item = super().popitem() # type: ignore[assignment]
if len(item[1]) == 0:
raise exceptions.BadRequestKeyError(item[0])
return item[0], item[1][0]
except KeyError as e:
raise exceptions.BadRequestKeyError(e.args[0]) from None
def poplist(self, key: K) -> list[V]:
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
If the key no longer exists, a list is returned instead of
raising an error.
"""
return super().pop(key, []) # type: ignore[return-value]
def popitemlist(self) -> tuple[K, list[V]]:
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return super().popitem() # type: ignore[return-value]
except KeyError as e:
raise exceptions.BadRequestKeyError(e.args[0]) from None
def __copy__(self) -> te.Self:
return self.copy()
def __deepcopy__(self, memo: t.Any) -> te.Self:
return self.deepcopy(memo=memo)
def __repr__(self) -> str:
return f"{type(self).__name__}({list(self.items(multi=True))!r})"
|
MultiDict
|
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/logging.py
|
{
"start": 9418,
"end": 10498
}
|
class ____(_BaseHandler):
"""
A logging handler that records breadcrumbs for each log record.
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not self._can_record(record):
return
sentry_sdk.add_breadcrumb(
self._breadcrumb_from_record(record), hint={"log_record": record}
)
def _breadcrumb_from_record(self, record):
# type: (LogRecord) -> Dict[str, Any]
return {
"type": "log",
"level": self._logging_to_event_level(record),
"category": record.name,
"message": record.message,
"timestamp": datetime.fromtimestamp(record.created, timezone.utc),
"data": self._extra_from_record(record),
}
|
BreadcrumbHandler
|
python
|
huggingface__transformers
|
tests/utils/test_convert_slow_tokenizer.py
|
{
"start": 194,
"end": 245
}
|
class ____:
vocab_file: str
|
FakeOriginalTokenizer
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_textbox15.py
|
{
"start": 315,
"end": 899
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox15.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox(
"E9", "This is some text", {"align": {"horizontal": "center"}}
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
tensorflow__tensorflow
|
tensorflow/python/checkpoint/benchmarks_test.py
|
{
"start": 1197,
"end": 1421
}
|
class ____(base.Trackable):
def _serialize_to_tensors(self):
return {base.VARIABLE_VALUE_KEY: array_ops.ones([])}
def _restore_from_tensors(self, restored_tensors):
return control_flow_ops.no_op()
|
_TrivialRestore
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-pages/source_facebook_pages/components.py
|
{
"start": 717,
"end": 2490
}
|
class ____(NoAuth):
config: Config
page_id: Union[InterpolatedString, str]
access_token: Union[InterpolatedString, str]
def __post_init__(self, parameters: Mapping[str, Any]):
self._page_id = InterpolatedString.create(self.page_id, parameters=parameters).eval(self.config)
self._access_token = InterpolatedString.create(self.access_token, parameters=parameters).eval(self.config)
def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
"""Attach the page access token to params to authenticate on the HTTP request"""
page_access_token = self.generate_page_access_token()
request.prepare_url(url=request.url, params={"access_token": page_access_token})
return request
def generate_page_access_token(self) -> str:
# We are expecting to receive a User access token from the config. To access
# the Pages API we need to generate a Page access token. Page access tokens
# can be generated from another Page access token (with the same page ID),
# so if the user manually sets a Page access token instead of a User access
# token, that is not a problem unless it has the wrong page ID.
# https://developers.facebook.com/docs/pages/access-tokens#get-a-page-access-token
try:
r = requests.get(
f"https://graph.facebook.com/{self._page_id}", params={"fields": "access_token", "access_token": self._access_token}
)
if r.status_code != HTTPStatus.OK:
raise HTTPError(r.text)
return r.json().get("access_token")
except Exception as e:
raise Exception(f"Error while generating page access token: {e}") from e
@dataclass
|
AuthenticatorFacebookPageAccessToken
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1alpha1_storage_version.py
|
{
"start": 383,
"end": 7932
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'object',
'status': 'V1alpha1StorageVersionStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1StorageVersion - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1alpha1StorageVersion. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1StorageVersion. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1StorageVersion.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1StorageVersion. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1alpha1StorageVersion. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1StorageVersion. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1StorageVersion.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1StorageVersion. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1StorageVersion. # noqa: E501
:return: The metadata of this V1alpha1StorageVersion. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1StorageVersion.
:param metadata: The metadata of this V1alpha1StorageVersion. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1alpha1StorageVersion. # noqa: E501
Spec is an empty spec. It is here to comply with Kubernetes API style. # noqa: E501
:return: The spec of this V1alpha1StorageVersion. # noqa: E501
:rtype: object
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1alpha1StorageVersion.
Spec is an empty spec. It is here to comply with Kubernetes API style. # noqa: E501
:param spec: The spec of this V1alpha1StorageVersion. # noqa: E501
:type: object
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this V1alpha1StorageVersion. # noqa: E501
:return: The status of this V1alpha1StorageVersion. # noqa: E501
:rtype: V1alpha1StorageVersionStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1alpha1StorageVersion.
:param status: The status of this V1alpha1StorageVersion. # noqa: E501
:type: V1alpha1StorageVersionStatus
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
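# Recursively convert nested OpenAPI models (and lists/dicts of them) into
# plain dicts.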
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1StorageVersion):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1StorageVersion):
return True
return self.to_dict() != other.to_dict()
|
V1alpha1StorageVersion
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/frame_methods.py
|
{
"start": 23533,
"end": 24381
}
|
class ____:
def setup(self):
rng = np.random.default_rng()
self.df = DataFrame(rng.uniform(size=(1_000_000, 10)))
idx = rng.choice(range(1_000_000), size=1_000_000, replace=False)
self.df_random = DataFrame(self.df, index=idx)
idx = rng.choice(range(1_000_000), size=100_000, replace=False)
cols = rng.choice(range(10), size=2, replace=False)
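# df_sample uses a subset of the large frame's row and column labels, so
# update() has to align on both axes before assigning.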
self.df_sample = DataFrame(
rng.uniform(size=(100_000, 2)), index=idx, columns=cols
)
def time_to_update_big_frame_small_arg(self):
self.df.update(self.df_sample)
def time_to_update_random_indices(self):
self.df_random.update(self.df_sample)
def time_to_update_small_frame_big_arg(self):
self.df_sample.update(self.df)
from .pandas_vb_common import setup # noqa: F401 isort:skip
|
Update
|
python
|
getsentry__sentry
|
src/sentry/notifications/notifications/strategies/owner_recipient_strategy.py
|
{
"start": 134,
"end": 223
}
|
class ____(RoleBasedRecipientStrategy):
role = roles.get_top_dog()
|
OwnerRecipientStrategy
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/integrations/pandera/example.py
|
{
"start": 454,
"end": 1172
}
|
class ____(pa.DataFrameModel):
"""Open/close prices for one or more stocks by day."""
name: Series[str] = pa.Field(description="Ticker symbol of stock")
date: Series[str] = pa.Field(description="Date of prices")
open: Series[float] = pa.Field(ge=0, description="Price at market open")
close: Series[float] = pa.Field(ge=0, description="Price at market close")
@op(out=Out(dagster_type=pandera_schema_to_dagster_type(StockPrices)))
def apple_stock_prices_dirty():
prices = pd.DataFrame(APPLE_STOCK_PRICES)
i = random.choice(prices.index)
prices.loc[i, "open"] = pd.NA
prices.loc[i, "close"] = pd.NA
return prices
@job
def stocks_job():
apple_stock_prices_dirty()
|
StockPrices
|
python
|
falconry__falcon
|
falcon/_typing.py
|
{
"start": 6409,
"end": 6729
}
|
class ____(Protocol[_ReqT, _RespT]):
"""WSGI Middleware with response handler."""
def process_response(
self,
req: _ReqT,
resp: _RespT,
resource: Resource | None,
req_succeeded: bool,
) -> None: ...
# ASGI lifespan middleware interface
|
WsgiMiddlewareWithProcessResponse
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_alloy_db.py
|
{
"start": 56208,
"end": 64455
}
|
class ____:
def setup_method(self):
self.operator = AlloyDBCreateUserOperator(
task_id=TEST_TASK_ID,
user_id=TEST_USER_ID,
user_configuration=TEST_USER,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
gcp_conn_id=TEST_GCP_CONN_ID,
request_id=TEST_REQUEST_ID,
validate_request=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
def test_init(self):
assert self.operator.user_id == TEST_USER_ID
assert self.operator.user_configuration == TEST_USER
assert self.operator.cluster_id == TEST_CLUSTER_ID
def test_template_fields(self):
expected_template_fields = {
"cluster_id",
"user_id",
"user_configuration",
} | set(AlloyDBWriteBaseOperator.template_fields)
assert set(AlloyDBCreateUserOperator.template_fields) == expected_template_fields
@mock.patch(CREATE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_get_user_not_found(self, mock_hook, mock_log):
mock_get_user = mock_hook.return_value.get_user
mock_get_user.side_effect = NotFound("Not found")
result = self.operator._get_user()
mock_get_user.assert_called_once_with(
cluster_id=TEST_CLUSTER_ID,
user_id=TEST_USER_ID,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
)
mock_log.info.assert_has_calls(
[
call("Checking if the user %s exists already...", TEST_USER_ID),
call("The user %s does not exist yet.", TEST_USER_ID),
]
)
assert result is None
@mock.patch(CREATE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_get_user_exception(self, mock_hook, mock_log):
mock_get_user = mock_hook.return_value.get_user
mock_get_user.side_effect = Exception("Test exception")
with pytest.raises(AirflowException):
self.operator._get_user()
mock_get_user.assert_called_once_with(
cluster_id=TEST_CLUSTER_ID,
user_id=TEST_USER_ID,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
)
mock_log.info.assert_called_once_with("Checking if the user %s exists already...", TEST_USER_ID)
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.User.to_dict"))
@mock.patch(CREATE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_get_user(self, mock_hook, mock_log, mock_to_dict):
mock_get_user = mock_hook.return_value.get_user
mock_user = mock_get_user.return_value
expected_result = mock_to_dict.return_value
result = self.operator._get_user()
mock_get_user.assert_called_once_with(
user_id=TEST_USER_ID,
cluster_id=TEST_CLUSTER_ID,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
)
mock_log.info.assert_has_calls(
[
call("Checking if the user %s exists already...", TEST_USER_ID),
call("AlloyDB user %s already exists in the cluster %s.", TEST_USER_ID, TEST_CLUSTER_ID),
]
)
mock_to_dict.assert_called_once_with(mock_user)
assert result == expected_result
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.User.to_dict"))
@mock.patch(CREATE_USER_OPERATOR_PATH.format("_get_user"))
@mock.patch(CREATE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute(self, mock_hook, mock_log, mock_get_user, mock_to_dict):
mock_get_user.return_value = None
mock_create_user = mock_hook.return_value.create_user
mock_user = mock_create_user.return_value
expected_result = mock_to_dict.return_value
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
mock_log.info.assert_has_calls(
[
call("Creating an AlloyDB user."),
call("AlloyDB user %s was successfully created.", TEST_USER_ID),
]
)
mock_get_user.assert_called_once()
mock_create_user.assert_called_once_with(
user_id=TEST_USER_ID,
cluster_id=TEST_CLUSTER_ID,
user=TEST_USER,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_to_dict.assert_called_once_with(mock_user)
assert result == expected_result
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.User.to_dict"))
@mock.patch(CREATE_USER_OPERATOR_PATH.format("_get_user"))
@mock.patch(CREATE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_validate_request(self, mock_hook, mock_log, mock_get_user, mock_to_dict):
mock_get_user.return_value = None
mock_create_user = mock_hook.return_value.create_user
mock_context = mock.MagicMock()
self.operator.validate_request = True
result = self.operator.execute(context=mock_context)
mock_log.info.assert_called_once_with("Validating a Create AlloyDB user request.")
mock_get_user.assert_called_once()
mock_create_user.assert_called_once_with(
user_id=TEST_USER_ID,
cluster_id=TEST_CLUSTER_ID,
user=TEST_USER,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
request_id=TEST_REQUEST_ID,
validate_only=True,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert not mock_to_dict.called
assert result is None
@mock.patch(CREATE_USER_OPERATOR_PATH.format("_get_user"))
@mock.patch(CREATE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_already_exists(self, mock_hook, mock_log, mock_get_user):
expected_result = mock_get_user.return_value
mock_create_user = mock_hook.return_value.create_user
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
assert not mock_log.info.called
mock_get_user.assert_called_once()
assert not mock_create_user.called
assert result == expected_result
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.User.to_dict"))
@mock.patch(CREATE_USER_OPERATOR_PATH.format("_get_user"))
@mock.patch(CREATE_USER_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_exception(self, mock_hook, mock_log, mock_get_user, mock_to_dict):
mock_get_user.return_value = None
mock_create_user = mock_hook.return_value.create_user
mock_create_user.side_effect = Exception()
mock_context = mock.MagicMock()
with pytest.raises(AirflowException):
self.operator.execute(context=mock_context)
mock_log.info.assert_called_once_with("Creating an AlloyDB user.")
mock_get_user.assert_called_once()
mock_create_user.assert_called_once_with(
user_id=TEST_USER_ID,
cluster_id=TEST_CLUSTER_ID,
user=TEST_INSTANCE,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert not mock_to_dict.called
|
TestAlloyDBCreateUserOperator
|
python
|
kamyu104__LeetCode-Solutions
|
Python/clone-graph.py
|
{
"start": 143,
"end": 848
}
|
class ____(object):
# @param node, a undirected graph node
# @return a undirected graph node
def cloneGraph(self, node):
if node is None:
return None
cloned_node = UndirectedGraphNode(node.label)
cloned, queue = {node:cloned_node}, [node]
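# 'cloned' maps each original node to its copy and doubles as the visited set.
# Note that list.pop() removes from the end, so this traversal is effectively
# an iterative DFS even though the variable is named 'queue'.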
while queue:
current = queue.pop()
for neighbor in current.neighbors:
if neighbor not in cloned:
queue.append(neighbor)
cloned_neighbor = UndirectedGraphNode(neighbor.label)
cloned[neighbor] = cloned_neighbor
cloned[current].neighbors.append(cloned[neighbor])
return cloned[node]
|
Solution
|
python
|
openai__openai-python
|
src/openai/types/beta/threads/image_url_content_block.py
|
{
"start": 230,
"end": 365
}
|
class ____(BaseModel):
image_url: ImageURL
type: Literal["image_url"]
"""The type of the content part."""
|
ImageURLContentBlock
|
python
|
dagster-io__dagster
|
helm/dagster/schema/schema/charts/dagster_user_deployments/subschema/user_deployments.py
|
{
"start": 127,
"end": 344
}
|
class ____(BaseModel):
enabled: bool
ReadinessProbeWithEnabled = create_model(
"ReadinessProbeWithEnabled", __base__=(kubernetes.ReadinessProbe), enabled=(bool, ...)
)
|
UserDeploymentIncludeConfigInLaunchedRuns
|
python
|
astropy__astropy
|
astropy/modeling/projections.py
|
{
"start": 20988,
"end": 21445
}
|
class ____(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
|
Pix2Sky_CylindricalEqualArea
|
python
|
scipy__scipy
|
scipy/interpolate/tests/test_interpolate.py
|
{
"start": 822,
"end": 1017
}
|
class ____:
def test_interp2d(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x+0.5*y)
with assert_raises(NotImplementedError):
interp2d(x, y, z)
|
TestInterp2D
|
python
|
scipy__scipy
|
scipy/special/tests/test_iv_ratio.py
|
{
"start": 288,
"end": 5612
}
|
class ____:
@pytest.mark.parametrize('v,x,r', [
(0.5, 0.16666666666666666, 0.16514041292462933),
(0.5, 0.3333333333333333, 0.32151273753163434),
(0.5, 0.5, 0.46211715726000974),
(0.5, 0.6666666666666666, 0.5827829453479101),
(0.5, 0.8333333333333335, 0.6822617902381698),
(1, 0.3380952380952381, 0.1666773049170313),
(1, 0.7083333333333333, 0.33366443586989925),
(1, 1.1666666666666667, 0.5023355231537423),
(1, 1.8666666666666665, 0.674616572252164),
(1, 3.560606060606061, 0.844207659503163),
(2.34, 0.7975238095238094, 0.16704903081553285),
(2.34, 1.7133333333333334, 0.3360215931268845),
(2.34, 2.953333333333333, 0.50681909317803),
(2.34, 5.0826666666666656, 0.6755252698800679),
(2.34, 10.869696969696973, 0.8379351104498762),
(56.789, 19.46575238095238, 0.1667020505391409),
(56.789, 42.55008333333333, 0.33353809996933026),
(56.789, 75.552, 0.5003932381177826),
(56.789, 135.76026666666667, 0.6670528221946127),
(56.789, 307.8642424242425, 0.8334999441460798),
])
def test_against_reference_values(self, v, x, r):
"""The reference values are computed using mpmath as follows.
from mpmath import mp
mp.dps = 100
def iv_ratio_mp(v, x):
return mp.besseli(v, x) / mp.besseli(v - 1, x)
def _sample(n, *, v):
'''Return n positive real numbers x such that iv_ratio(v, x) are
roughly evenly spaced over (0, 1). The formula is taken from [1].
[1] Banerjee A., Dhillon, I. S., Ghosh, J., Sra, S. (2005).
"Clustering on the Unit Hypersphere using von Mises-Fisher
Distributions." Journal of Machine Learning Research,
6(46):1345-1382.
'''
r = np.arange(1, n+1) / (n+1)
return r * (2*v-r*r) / (1-r*r)
for v in (0.5, 1, 2.34, 56.789):
xs = _sample(5, v=v)
for x in xs:
print(f"({v}, {x}, {float(iv_ratio_mp(v,x))}),")
"""
assert_allclose(iv_ratio(v, x), r, rtol=4e-16, atol=0)
@pytest.mark.parametrize('v,x,r', [
(1, np.inf, 1),
(np.inf, 1, 0),
])
def test_inf(self, v, x, r):
"""If exactly one of v or x is inf and the other is within domain,
should return 0 or 1 accordingly."""
assert_equal(iv_ratio(v, x), r)
@pytest.mark.parametrize('v', [0.49, -np.inf, np.nan, np.inf])
@pytest.mark.parametrize('x', [-np.finfo(float).smallest_normal,
-np.finfo(float).smallest_subnormal,
-np.inf, np.nan, np.inf])
def test_nan(self, v, x):
"""If at least one argument is out of domain, or if v = x = inf,
the function should return nan."""
assert_equal(iv_ratio(v, x), np.nan)
@pytest.mark.parametrize('v', [0.5, 1, np.finfo(float).max, np.inf])
def test_zero_x(self, v):
"""If x is +/-0.0, return x to ensure iv_ratio is an odd function."""
assert_equal(iv_ratio(v, 0.0), 0.0)
assert_equal(iv_ratio(v, -0.0), -0.0)
@pytest.mark.parametrize('v,x', [
(1, np.finfo(float).smallest_normal),
(1, np.finfo(float).smallest_subnormal),
(1, np.finfo(float).smallest_subnormal*2),
(1e20, 123),
(np.finfo(float).max, 1),
(np.finfo(float).max, np.sqrt(np.finfo(float).max)),
])
def test_tiny_x(self, v, x):
"""If x is much less than v, the bounds
x / (v - 0.5 + sqrt(x**2 + (v+0.5)**2))  <=  R  <=  x / (v - 1 + sqrt(x**2 + (v+1)**2))
collapses to R ~= x/2v. Test against this asymptotic expression.
"""
assert_equal(iv_ratio(v, x), (0.5*x)/v)
@pytest.mark.parametrize('v,x', [
(1, 1e16),
(1e20, 1e40),
(np.sqrt(np.finfo(float).max), np.finfo(float).max),
])
def test_huge_x(self, v, x):
"""If x is much greater than v, the bounds
x / (v - 0.5 + sqrt(x**2 + (v+0.5)**2))  <=  R  <=  x / (v - 0.5 + sqrt(x**2 + (v-0.5)**2))
collapses to R ~= 1. Test against this asymptotic expression.
"""
assert_equal(iv_ratio(v, x), 1.0)
@pytest.mark.parametrize('v,x', [
(np.finfo(float).max, np.finfo(float).max),
(np.finfo(float).max / 3, np.finfo(float).max),
(np.finfo(float).max, np.finfo(float).max / 3),
])
def test_huge_v_x(self, v, x):
"""If both x and v are very large, the bounds
x / (v - 0.5 + sqrt(x**2 + (v+0.5)**2))  <=  R  <=  x / (v - 1 + sqrt(x**2 + (v+1)**2))
collapses to R ~= x/(v+sqrt(x**2+v**2)). Test against this asymptotic
expression, and in particular that no numerical overflow occurs during
intermediate calculations.
"""
t = x / v
expected = t / (1 + np.hypot(1, t))
assert_allclose(iv_ratio(v, x), expected, rtol=4e-16, atol=0)
|
TestIvRatio
|
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/operators/python.py
|
{
"start": 14888,
"end": 23845
}
|
class ____(PythonOperator, metaclass=ABCMeta):
BASE_SERIALIZABLE_CONTEXT_KEYS = {
"ds",
"ds_nodash",
"expanded_ti_count",
"inlets",
"outlets",
"run_id",
"task_instance_key_str",
"test_mode",
"ts",
"ts_nodash",
"ts_nodash_with_tz",
# The following should be removed when Airflow 2 support is dropped.
"next_ds",
"next_ds_nodash",
"prev_ds",
"prev_ds_nodash",
"tomorrow_ds",
"tomorrow_ds_nodash",
"yesterday_ds",
"yesterday_ds_nodash",
}
if AIRFLOW_V_3_0_PLUS:
BASE_SERIALIZABLE_CONTEXT_KEYS.add("task_reschedule_count")
PENDULUM_SERIALIZABLE_CONTEXT_KEYS = {
"data_interval_end",
"data_interval_start",
"logical_date",
"prev_data_interval_end_success",
"prev_data_interval_start_success",
"prev_start_date_success",
"prev_end_date_success",
# The following should be removed when Airflow 2 support is dropped.
"execution_date",
"next_execution_date",
"prev_execution_date",
"prev_execution_date_success",
}
AIRFLOW_SERIALIZABLE_CONTEXT_KEYS = {
"macros",
"conf",
"dag",
"dag_run",
"task",
"params",
"triggering_asset_events",
# The following should be removed when Airflow 2 support is dropped.
"triggering_dataset_events",
}
def __init__(
self,
*,
python_callable: Callable,
serializer: _SerializerTypeDef | None = None,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
string_args: Iterable[str] | None = None,
templates_dict: dict | None = None,
templates_exts: list[str] | None = None,
expect_airflow: bool = True,
skip_on_exit_code: int | Container[int] | None = None,
env_vars: dict[str, str] | None = None,
inherit_env: bool = True,
**kwargs,
):
if (
not isinstance(python_callable, types.FunctionType)
or isinstance(python_callable, types.LambdaType)
and python_callable.__name__ == "<lambda>"
):
raise ValueError(f"{type(self).__name__} only supports functions for python_callable arg")
if inspect.isgeneratorfunction(python_callable):
raise ValueError(f"{type(self).__name__} does not support using 'yield' in python_callable")
super().__init__(
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
templates_dict=templates_dict,
templates_exts=templates_exts,
**kwargs,
)
self.string_args = string_args or []
serializer = serializer or "pickle"
if serializer not in _SERIALIZERS:
msg = (
f"Unsupported serializer {serializer!r}. Expected one of {', '.join(map(repr, _SERIALIZERS))}"
)
raise AirflowException(msg)
self.pickling_library = _SERIALIZERS[serializer]
self.serializer: _SerializerTypeDef = serializer
self.expect_airflow = expect_airflow
self.skip_on_exit_code = (
skip_on_exit_code
if isinstance(skip_on_exit_code, Container)
else [skip_on_exit_code]
if skip_on_exit_code is not None
else []
)
self.env_vars = env_vars
self.inherit_env = inherit_env
@abstractmethod
def _iter_serializable_context_keys(self):
pass
def execute(self, context: Context) -> Any:
serializable_keys = set(self._iter_serializable_context_keys())
new = {k: v for k, v in context.items() if k in serializable_keys}
serializable_context = cast("Context", new)
return super().execute(context=serializable_context)
def get_python_source(self):
"""Return the source of self.python_callable."""
return textwrap.dedent(inspect.getsource(self.python_callable))
def _write_args(self, file: Path):
def resolve_proxies(obj):
"""Recursively replaces lazy_object_proxy.Proxy instances with their resolved values."""
if isinstance(obj, lazy_object_proxy.Proxy):
return obj.__wrapped__ # force evaluation
if isinstance(obj, dict):
return {k: resolve_proxies(v) for k, v in obj.items()}
if isinstance(obj, list):
return [resolve_proxies(v) for v in obj]
return obj
if self.op_args or self.op_kwargs:
self.log.info("Use %r as serializer.", self.serializer)
file.write_bytes(
self.pickling_library.dumps({"args": self.op_args, "kwargs": resolve_proxies(self.op_kwargs)})
)
def _write_string_args(self, file: Path):
file.write_text("\n".join(map(str, self.string_args)))
def _read_result(self, path: Path):
if path.stat().st_size == 0:
return None
try:
return self.pickling_library.loads(path.read_bytes())
except ValueError as value_error:
raise DeserializingResultError() from value_error
def __deepcopy__(self, memo):
# module objects can't be copied _at all__
memo[id(self.pickling_library)] = self.pickling_library
return super().__deepcopy__(memo)
def _execute_python_callable_in_subprocess(self, python_path: Path):
with TemporaryDirectory(prefix="venv-call") as tmp:
tmp_dir = Path(tmp)
op_kwargs: dict[str, Any] = dict(self.op_kwargs)
if self.templates_dict:
op_kwargs["templates_dict"] = self.templates_dict
input_path = tmp_dir / "script.in"
output_path = tmp_dir / "script.out"
string_args_path = tmp_dir / "string_args.txt"
script_path = tmp_dir / "script.py"
termination_log_path = tmp_dir / "termination.log"
airflow_context_path = tmp_dir / "airflow_context.json"
self._write_args(input_path)
self._write_string_args(string_args_path)
jinja_context = {
"op_args": self.op_args,
"op_kwargs": op_kwargs,
"expect_airflow": self.expect_airflow,
"pickling_library": self.serializer,
"python_callable": self.python_callable.__name__,
"python_callable_source": self.get_python_source(),
}
if inspect.getfile(self.python_callable) == self.dag.fileloc:
jinja_context["modified_dag_module_name"] = get_unique_dag_module_name(self.dag.fileloc)
write_python_script(
jinja_context=jinja_context,
filename=os.fspath(script_path),
render_template_as_native_obj=self.dag.render_template_as_native_obj,
)
env_vars = dict(os.environ) if self.inherit_env else {}
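# Re-add the supervisor file descriptor explicitly so it is propagated to the
# subprocess even when inherit_env is False.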
if fd := os.getenv("__AIRFLOW_SUPERVISOR_FD"):
env_vars["__AIRFLOW_SUPERVISOR_FD"] = fd
if self.env_vars:
env_vars.update(self.env_vars)
try:
cmd: list[str] = [
os.fspath(python_path),
os.fspath(script_path),
os.fspath(input_path),
os.fspath(output_path),
os.fspath(string_args_path),
os.fspath(termination_log_path),
os.fspath(airflow_context_path),
]
_execute_in_subprocess(
cmd=cmd,
env=env_vars,
)
except subprocess.CalledProcessError as e:
if e.returncode in self.skip_on_exit_code:
raise AirflowSkipException(f"Process exited with code {e.returncode}. Skipping.")
if termination_log_path.exists() and termination_log_path.stat().st_size > 0:
error_msg = f"Process returned non-zero exit status {e.returncode}.\n"
with open(termination_log_path) as file:
error_msg += file.read()
raise AirflowException(error_msg) from None
raise
if 0 in self.skip_on_exit_code:
raise AirflowSkipException("Process exited with code 0. Skipping.")
return self._read_result(output_path)
def determine_kwargs(self, context: Mapping[str, Any]) -> Mapping[str, Any]:
keyword_params = KeywordParameters.determine(self.python_callable, self.op_args, context)
if AIRFLOW_V_3_0_PLUS:
return keyword_params.unpacking()
return keyword_params.serializing() # type: ignore[attr-defined]
|
_BasePythonVirtualenvOperator
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/processors/delayed_workflow.py
|
{
"start": 8180,
"end": 8829
}
|
class ____:
"""
Parameters used when querying Snuba for a UniqueConditionQuery.
"""
group_ids: set[GroupId] = field(default_factory=set)
timestamp: datetime | None = None
def update(self, group_ids: set[GroupId], timestamp: datetime | None) -> None:
"""
Use the latest timestamp for a set of group IDs with the same Snuba query.
We will query backwards in time from this point.
"""
self.group_ids.update(group_ids)
if timestamp is not None:
self.timestamp = timestamp if self.timestamp is None else max(timestamp, self.timestamp)
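# Illustrative trace (added note, not part of the original source): calling
# update({1, 2}, t1) and then update({3}, t2) with t2 > t1 leaves
# group_ids == {1, 2, 3} and timestamp == t2, i.e. the most recent timestamp wins.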
@dataclass(frozen=True)
|
GroupQueryParams
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/runnables/utils.py
|
{
"start": 16990,
"end": 17764
}
|
class ____(NamedTuple):
"""Field that can be configured by the user with multiple default values."""
id: str
"""The unique identifier of the field."""
options: Mapping[str, Any]
"""The options for the field."""
default: Sequence[str]
"""The default values for the field."""
name: str | None = None
"""The name of the field. """
description: str | None = None
"""The description of the field. """
is_shared: bool = False
"""Whether the field is shared."""
@override
def __hash__(self) -> int:
return hash((self.id, tuple(self.options.keys()), tuple(self.default)))
AnyConfigurableField = (
ConfigurableField | ConfigurableFieldSingleOption | ConfigurableFieldMultiOption
)
|
ConfigurableFieldMultiOption
|
python
|
google__pytype
|
pytype/tests/test_overriding.py
|
{
"start": 66,
"end": 27883
}
|
class ____(test_base.BaseTest):
"""Tests for overridden and overriding methods signature match."""
# Positional-or-keyword -> positional-or-keyword, same name or underscore.
def test_positional_or_keyword_match(self):
self.Check("""
class Foo:
def f(self, a: int, b: str) -> None:
pass
class Bar(Foo):
def f(self, a: int, b: str = "", c: int = 1, *, d: int = 2) -> None:
pass
""")
def test_positional_or_keyword_underscore_match(self):
self.Check("""
class Foo:
def f(self, a: int, _: str) -> None:
pass
class Bar(Foo):
def f(self, _: int, b: str) -> None:
pass
""")
# Positional-or-keyword -> positional-or-keyword, same name or underscore.
def test_positional_or_keyword_name_mismatch(self):
# We don't report it as an error, as this is a very common practice
# in the absence of positional-only parameters.
self.Check("""
class Foo:
def f(self, a: int) -> None:
pass
class Bar(Foo):
def f(self, b: int) -> None:
pass
""")
# Positional-or-keyword -> positional-or-keyword, same name or underscore.
def test_positional_or_keyword_name_and_type_mismatch(self):
# We don't report an error, since we have already disregarded the name
# mismatch and now cannot be sure a param with the same name is the same
# parameter for type checking purposes.
self.Check("""
class Foo:
def f(self, a: int, b: str) -> None:
pass
class Bar(Foo):
def f(self, b: int, c: int) -> None:
pass
""")
# Positional-or-keyword -> positional-or-keyword, same name or underscore.
def test_positional_or_keyword_name_and_count_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, a: int, b: int) -> None:
pass
class Bar(Foo):
def f(self, b: int) -> None: # signature-mismatch
pass
class Baz(Foo):
def f(self, b: int, c:int, d: int) -> None: # signature-mismatch
pass
""")
# Positional-or-keyword -> positional-or-keyword, same name.
def test_positional_or_keyword_to_keyword_only_mismatch(self):
errors = self.CheckWithErrors("""
class Foo:
def f(self, a: int) -> None:
pass
class Bar(Foo):
def f(self, *, a: int) -> None: # signature-mismatch[e]
pass
""")
self.assertErrorSequences(
errors,
{
"e": [
"Overriding method signature mismatch",
"Base signature: ",
"Subclass signature: ",
"Not enough positional parameters in overriding method.",
]
},
)
# Keyword-only -> Positional-or-keyword or keyword-only, same name
def test_keyword_only_match(self):
self.Check("""
class Foo:
def f(self, *, a: int, b: int, c: int = 0) -> None:
pass
class Bar(Foo):
def f(self, a: int, *, b: int, c: int = 0, d: int = 1) -> None:
pass
""")
# Keyword-only -> Positional-or-keyword or keyword-only, same name
def test_keyword_only_name_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, *, a: int) -> None:
pass
class Bar(Foo):
def f(self, *, b: int) -> None: # signature-mismatch
pass
""")
def test_keyword_only_name_mismatch_twice(self):
self.CheckWithErrors("""
class Foo:
def f(self, *, a: int) -> None:
pass
def g(self, *, c: int) -> None:
pass
class Bar(Foo):
def f(self, *, b: int) -> None: # signature-mismatch
pass
def g(self, *, d: int) -> None: # signature-mismatch
pass
""")
# Keyword-only -> Positional-or-keyword or keyword-only, same name
def test_keyword_only_count_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, *, a: int, b: int) -> None:
pass
class Bar(Foo):
def f(self, *, a: int) -> None: # signature-mismatch
pass
""")
# Non-default -> non-default
def test_default_to_non_default_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, a: int = 0) -> None:
pass
class Bar(Foo):
def f(self, a: int) -> None: # signature-mismatch
pass
""")
# Default or missing -> default with the same value
def test_default_to_default_match(self):
self.Check("""
class Foo:
def f(self, a: int = 0, *, c: int = 2) -> None:
pass
class Bar(Foo):
def f(self, a: int = 0, b: int = 1, * , c: int = 2, d: int = 3) -> None:
pass
""")
# Default or missing -> default with a different value
def test_keyword_default_value_mismatch(self):
errors = self.CheckWithErrors("""
class Foo:
def f(self, *, t: int = 0) -> None:
pass
class Bar(Foo):
def f(self, *, t: int = 1) -> None: # signature-mismatch[e]
pass
""")
self.assertErrorSequences(errors, {"e": ["t: int = 0", "t: int = 1"]})
def test_default_value_imported_class(self):
with self.DepTree([(
"foo.py",
"""
class Foo:
def f(self, x: int = 0):
pass
""",
)]):
self.Check("""
import foo
class Bar(foo.Foo):
def f(self, x: int = 0):
pass
""")
def test_partial_annotations(self):
self.Check("""
class Foo:
def f(self, t, g: int) -> str:
return ""
class Bar(Foo):
def f(self, t: int, g: int):
pass
""")
def test_parameter_type_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, t: int) -> None:
pass
class Bar(Foo):
def f(self, t: str) -> None: # signature-mismatch
pass
""")
def test_return_type_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self) -> int:
return 0
class Bar(Foo):
def f(self) -> str: # signature-mismatch
return ''
""")
def test_none_return_type_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self) -> None:
pass
class Bar(Foo):
def f(self) -> str: # signature-mismatch
return ''
""")
def test_return_type_matches_empty(self):
with self.DepTree([(
"foo.py",
"""
class Foo:
def f(self):
raise NotImplementedError()
""",
)]):
self.Check("""
import foo
class Bar(foo.Foo):
def f(self) -> None:
pass
""")
def test_pytdclass_signature_match(self):
self.Check("""
class Foo(list):
def clear(self) -> None:
pass
""")
def test_pytdclass_parameter_type_mismatch(self):
errors = self.CheckWithErrors("""
class Foo(list):
def clear(self, x: int) -> None: # signature-mismatch[e]
pass
""")
self.assertErrorSequences(errors, {"e": ["list.clear(self)"]})
def test_pytdclass_return_type_mismatch(self):
self.CheckWithErrors("""
class Foo(list):
def clear(self) -> str: # signature-mismatch
return ""
""")
def test_pytdclass_default_value_match(self):
self.Check("""
import unittest
class A(unittest.case.TestCase):
def assertDictEqual(self, d1, d2, msg=None):
pass
""")
def test_pytdclass_default_value_mismatch(self):
self.Check("""
import unittest
class A(unittest.case.TestCase):
def assertDictEqual(self, d1, d2, msg=""):
pass
""")
def test_subclass_subclass_signature_match(self):
self.Check("""
class Foo:
def f(self, t: int) -> None:
pass
class Bar(Foo):
pass
class Baz(Bar):
def f(self, t: int) -> None:
pass
""")
def test_subclass_subclass_parameter_type_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, t: int) -> None:
pass
class Bar(Foo):
pass
class Baz(Bar):
def f(self, t: str) -> None: # signature-mismatch
pass
""")
def test_keyword_type_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, *, t: int) -> None:
pass
class Bar(Foo):
def f(self, *, t: str) -> None: # signature-mismatch
pass
""")
def test_keyword_to_positional_type_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, *, t: int) -> None:
pass
class Bar(Foo):
def f(self, t: str) -> None: # signature-mismatch
pass
""")
def test_subclass_parameter_type_match(self):
self.Check("""
class A:
pass
class B(A):
pass
class Foo:
def f(self, t: B) -> None:
pass
class Bar(Foo):
def f(self, t: A) -> None:
pass
""")
def test_subclass_parameter_type_mismatch(self):
self.CheckWithErrors("""
class A:
pass
class B(A):
pass
class Foo:
def f(self, t: A) -> None:
pass
class Bar(Foo):
def f(self, t: B) -> None: # signature-mismatch
pass
""")
def test_subclass_return_type_match(self):
self.Check("""
class A:
pass
class B(A):
pass
class Foo:
def f(self, t) -> A:
return A()
class Bar(Foo):
def f(self, t) -> B:
return B()
""")
def test_subclass_return_type_mismatch(self):
self.CheckWithErrors("""
class A:
pass
class B(A):
pass
class Foo:
def f(self, t) -> B:
return B()
class Bar(Foo):
def f(self, t) -> A: # signature-mismatch
return A()
""")
def test_multiple_inheritance_parameter_type_match(self):
self.Check("""
class A:
pass
class B(A):
pass
class C(A):
pass
class Foo:
def f(self, t: B) -> None:
pass
class Bar:
def f(self, t: C) -> None:
pass
class Baz(Foo, Bar):
def f(self, t: A) -> None:
pass
""")
def test_multiple_inheritance_parameter_type_mismatch(self):
self.CheckWithErrors("""
class A:
pass
class B(A):
pass
class C(B):
pass
class Foo:
def f(self, t: A) -> None:
pass
class Bar:
def f(self, t: C) -> None:
pass
class Baz(Foo, Bar):
def f(self, t: B) -> None: # signature-mismatch
pass
""")
def test_multiple_inheritance_return_type_match(self):
self.Check("""
class A:
pass
class B:
pass
class C(A, B):
pass
class Foo:
def f(self, t) -> A:
return A()
class Bar:
def f(self, t) -> B:
return B()
class Baz(Foo, Bar):
def f(self, t) -> C:
return C()
""")
def test_multiple_inheritance_return_type_mismatch(self):
self.CheckWithErrors("""
class A:
pass
class B(A):
pass
class C(B):
pass
class Foo:
def f(self, t) -> A:
return C()
class Bar:
def f(self, t) -> C:
return C()
class Baz(Foo, Bar):
def f(self, t) -> B: # signature-mismatch
return C()
""")
# If the method is defined in several base classes, but not in the class
# itself, then the first signature by MRO should match all other signatures.
# Note that mismatch errors are reported on the class definition and not on
# the method that triggers an error.
def test_multiple_inheritance_base_parameter_type_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, a: int) -> None:
pass
class Bar(Foo):
pass
class Baz:
def f(self, a: int, b: int) -> None:
pass
class Qux(Bar, Baz): # signature-mismatch
pass
""")
def test_multiple_inheritance_signature_order(self):
self.Check("""
class Base:
def f(self):
pass
class Parent1(Base):
pass
class Parent2(Base):
def f(self, a=None):
pass
class Child(Parent1, Parent2):
pass
""")
def test_multiple_inheritance_override_dunder_method(self):
self.Check("""
class Parent1:
pass
class Parent2:
def __eq__(self, other, extra=None):
return False
class Child(Parent1, Parent2):
pass
""")
def test_generic_type_match(self):
self.Check("""
from typing import Callable, Sequence
class A:
pass
class B(A):
pass
class Foo:
def f(self, t: Callable[[A], B]) -> Sequence[Callable[[B], A]]:
return []
class Bar(Foo):
def f(self, t: Callable[[B], A]) -> Sequence[Callable[[A], B]]:
return []
""")
def test_covariant_generic_parameter_type_mismatch(self):
self.CheckWithErrors("""
from typing import Sequence, Iterable
class A:
pass
class B(A):
pass
class Foo:
def f(self, t: Iterable[A]) -> None:
pass
class Bar(Foo):
def f(self, t: Iterable[B]) -> None: # signature-mismatch
pass
""")
def test_contravariant_generic_parameter_type_mismatch(self):
self.CheckWithErrors("""
from typing import Callable
class A:
pass
class B(A):
pass
class Foo:
def f(self, t: Callable[[B], None]) -> None:
pass
class Bar(Foo):
def f(self, t: Callable[[A], None]) -> None: # signature-mismatch
pass
""")
def test_covariant_generic_return_type_mismatch(self):
self.CheckWithErrors("""
from typing import Sequence
class A:
pass
class B(A):
pass
class Foo:
def f(self, t) -> Sequence[B]:
return [B()]
class Bar(Foo):
def f(self, t) -> Sequence[A]: # signature-mismatch
return [A()]
""")
def test_subclass_of_generic_for_builtin_types(self):
self.CheckWithErrors("""
from typing import Generic, TypeVar
T = TypeVar('T')
class A(Generic[T]):
def f(self, t: T) -> None:
pass
def g(self, t: int) -> None:
pass
class B(A[int]):
def f(self, t: str) -> None: # signature-mismatch
pass
def g(self, t: str) -> None: # signature-mismatch
pass
class C(A[list]):
def f(self, t: list) -> None:
pass
def g(self, t: int) -> None:
pass
""")
def test_subclass_of_generic_for_simple_types(self):
self.CheckWithErrors("""
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[T, U]):
def f(self, t: T) -> U:
pass
class Y:
pass
class X(Y):
pass
class B(A[X, Y]):
def f(self, t: X) -> Y:
return Y()
class C(A[X, Y]):
def f(self, t: Y) -> X:
return X()
class D(A[Y, X]):
def f(self, t: X) -> X: # signature-mismatch
return X()
class E(A[Y, X]):
def f(self, t: Y) -> Y: # signature-mismatch
return Y()
""")
def test_subclass_of_generic_for_bound_types(self):
self.CheckWithErrors("""
from typing import Generic, TypeVar
class X:
pass
T = TypeVar('T', bound=X)
class A(Generic[T]):
def f(self, t: T) -> T:
return T()
class Y(X):
pass
class B(A[Y]):
def f(self, t: Y) -> Y:
return Y()
class C(A[Y]):
def f(self, t: X) -> Y:
return Y()
class D(A[Y]):
def f(self, t: Y) -> X: # signature-mismatch
return X()
""")
def test_subclass_of_generic_match_for_generic_types(self):
self.Check("""
from typing import Generic, List, Sequence, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[T, U]):
def f(self, t: List[T]) -> Sequence[U]:
return []
class X:
pass
class Y:
pass
class B(A[X, Y]):
def f(self, t: Sequence[X]) -> List[Y]:
return []
class Z(X):
pass
class C(A[Z, X]):
def f(self, t: List[X]) -> Sequence[Z]:
return []
""")
def test_subclass_of_generic_mismatch_for_generic_types(self):
self.CheckWithErrors("""
from typing import Generic, List, Sequence, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[T, U]):
def f(self, t: Sequence[T]) -> List[U]:
return []
class X:
pass
class Y:
pass
class B(A[X, Y]):
def f(self, t: List[X]) -> List[Y]: # signature-mismatch
return []
class C(A[X, Y]):
def f(self, t: Sequence[X]) -> Sequence[Y]: # signature-mismatch
return []
class Z(X):
pass
class D(A[X, Z]):
def f(self, t: Sequence[Z]) -> List[Z]: # signature-mismatch
return []
class E(A[X, Z]):
def f(self, t: Sequence[X]) -> List[X]: # signature-mismatch
return []
""")
def test_nested_generic_types(self):
self.CheckWithErrors("""
from typing import Callable, Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
class A(Generic[T, U]):
def f(self, t: Callable[[T], U]) -> None:
pass
class Super:
pass
class Sub(Super):
pass
class B(A[Sub, Super]):
def f(self, t: Callable[[Sub], Super]) -> None:
pass
class C(A[Sub, Super]):
def f(self, t: Callable[[Super], Super]) -> None: # signature-mismatch
pass
class D(A[Sub, Super]):
def f(self, t: Callable[[Sub], Sub]) -> None: # signature-mismatch
pass
""")
def test_nested_generic_types2(self):
self.CheckWithErrors("""
from typing import Callable, Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V') # not in the class template
class A(Generic[T, U]):
def f(self, t: Callable[[T, Callable[[T], V]], U]) -> None:
pass
class Super:
pass
class Sub(Super):
pass
class B(Generic[T], A[Sub, T]):
pass
class C(B[Super]):
def f(self, t: Callable[[Sub, Callable[[Sub], V]], Super]) -> None:
pass
class D(B[Super]):
def f(self, t: Callable[[Sub, Callable[[Super], Sub]], Super]) -> None:
pass
class E(B[Super]):
def f(self, t: Callable[[Super, Callable[[Sub], V]], Super]) -> None: # signature-mismatch
pass
class F(Generic[T], B[T]):
def f(self, t: Callable[[Sub, Callable[[Sub], V]], T]) -> None:
pass
class G(Generic[T], B[T]):
def f(self, t: Callable[[Sub, Callable[[Super], Super]], T]) -> None:
pass
class H(Generic[T], B[T]):
def f(self, t: Callable[[Super, Callable[[Sub], V]], T]) -> None: # signature-mismatch
pass
""")
def test_subclass_of_generic_for_renamed_type_parameters(self):
self.Check("""
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[T]):
def f(self, t: T) -> None:
pass
class B(Generic[U], A[U]):
pass
class X:
pass
class C(B[X]):
def f(self, t: X) -> None:
pass
""")
def test_subclass_of_generic_for_renamed_type_parameters2(self):
self.CheckWithErrors("""
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[T, U]):
def f(self, t: T) -> U:
return U()
class X:
pass
class B(Generic[T], A[X, T]):
pass
class Y:
pass
class C(B[Y]):
def f(self, t: X) -> Y:
return Y()
class D(B[Y]):
def f(self, t: X) -> X: # signature-mismatch
return X()
""")
def test_subclass_of_generic_for_generic_method(self):
self.CheckWithErrors("""
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class A(Generic[T]):
def f(self, t: T, u: U) -> U:
return U()
class Y:
pass
class X(Y):
pass
class B(A[X]):
def f(self, t: X, u: U) -> U:
return U()
class C(A[X]):
def f(self, t: Y, u: U) -> U:
return U()
class D(A[Y]):
def f(self, t: X, u: U) -> U: # signature-mismatch
return U()
""")
def test_varargs_match(self):
self.Check("""
class Foo:
def f(self, a: int, b: int) -> None:
pass
class Bar(Foo):
def f(self, a: int, *args: int) -> None:
pass
""")
def test_varargs_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, a: int, b: str) -> None:
pass
class Bar(Foo):
def f(self, a: int, *args: int) -> None: # signature-mismatch
pass
""")
def test_varargs_count_match(self):
self.Check("""
class Foo:
def f(self, a: int) -> None:
pass
class Bar(Foo):
def f(self, a: int, *args: str) -> None:
pass
""")
def test_pytd_varargs_not_annotated(self):
with self.DepTree([(
"foo.py",
"""
class Foo:
def f(self, *args):
pass
""",
)]):
self.Check("""
import foo
class Bar(foo.Foo):
def f(self, x: int):
pass
""")
def test_kwargs_match(self):
self.Check("""
class Foo:
def f(self, a: int, *, b: int, c: int) -> None:
pass
class Bar(Foo):
def f(self, a: int, **kwargs: int) -> None:
pass
""")
def test_kwargs_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, a: int, *, b: str) -> None:
pass
class Bar(Foo):
def f(self, a: int, **kwargs: int) -> None: # signature-mismatch
pass
""")
def test_kwargs_count_match(self):
self.Check("""
class Foo:
def f(self, a: int, *, b: int) -> None:
pass
class Bar(Foo):
def f(self, a: int, *, b: int, **kwargs: str) -> None:
pass
""")
def test_default_value_to_varargs(self):
self.Check("""
class Foo:
def call(self, x: str, y: int = 0) -> None:
pass
class Bar(Foo):
def call(self, x, *args) -> None:
pass
""")
def test_default_value_to_kwargs(self):
self.Check("""
class Foo:
def call(self, x: int, *, y: int, z: int = 0) -> None:
pass
class Bar(Foo):
def call(self, x: int, **kwargs) -> None:
pass
""")
def test_class_and_static_methods(self):
self.Check("""
class Foo:
def f(self, a: int) -> None:
pass
class Bar:
@classmethod
def f(cls, b: str) -> None:
pass
class Baz:
@staticmethod
def f(c: list) -> None:
pass
""")
def test_self_name(self):
self.Check("""
class Foo:
def f(self, a: int) -> None:
pass
class Bar(Foo):
def f(this, self: int) -> None:
pass
""")
def test_keyword_only_double_underscore_name_mismatch(self):
# Names with two leading underscores are mangled by Python.
# See https://peps.python.org/pep-0008/#method-names-and-instance-variables.
self.CheckWithErrors("""
class Foo:
def f(self, *, __a: int) -> None:
pass
class Bar(Foo):
def f(self, *, __a: int) -> None: # signature-mismatch
pass
""")
# Positional-only -> Positional-only or positional-or-keyword, any name.
def test_positional_only_match(self):
self.Check("""
class Foo:
def f(self, a: int, b: str, c: int = 0, /) -> None:
pass
class Bar(Foo):
def f(self, d: int, / , e: str, f: int = 0, g: int = 1) -> None:
pass
""")
# Positional-only -> Positional-only or positional-or-keyword, any name.
def test_positional_only_to_keyword_only(self):
self.CheckWithErrors("""
class Foo:
def f(self, a: int, /) -> None:
pass
class Bar(Foo):
def f(self, * , a: int) -> None: # signature-mismatch
pass
""")
# Positional-or-keyword -> positional-only.
def test_positional_or_keyword_to_positional_only_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, a: int) -> None:
pass
class Bar(Foo):
def f(self, a: int, /) -> None: # signature-mismatch
pass
""")
# Keyword-only -> Positional-or-keyword or keyword-only, same name.
def test_keyword_only_to_positional_only_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, *, a: int) -> None:
pass
class Bar(Foo):
def f(self, a: int, /) -> None: # signature-mismatch
pass
""")
# Keyword-only -> Positional-only, same name.
def test_keyword_only_to_positional_only_count_mismatch(self):
self.CheckWithErrors("""
class Foo:
def f(self, *, a: int) -> None:
pass
class Bar(Foo):
def f(self, a: int, /) -> None: # signature-mismatch
pass
""")
def test_callable_multiple_inheritance(self):
self.Check("""
from typing import Callable
class Foo:
def __call__(self, x: int, *, y: str):
pass
class Bar(Callable, Foo):
pass
""")
def test_async(self):
with self.DepTree([(
"foo.py",
"""
class Foo:
async def f(self) -> int:
return 0
def g(self) -> int:
return 0
""",
)]):
self.CheckWithErrors("""
import foo
class Good(foo.Foo):
async def f(self) -> int:
return 0
class Bad(foo.Foo):
async def f(self) -> str: # signature-mismatch
return ''
# Test that we catch the non-async/async mismatch even without a
# return annotation.
async def g(self): # signature-mismatch
return 0
""")
def test_disable(self):
self.Check("""
class Foo:
def f(self, x) -> int:
return 0
class Bar(Foo):
def f( # pytype: disable=signature-mismatch
self, x) -> str:
return "0"
class Baz(Foo):
def f(
self, x) -> str: # pytype: disable=signature-mismatch
return "0"
class Qux(Foo):
def f(
self, # pytype: disable=signature-mismatch
x) -> str:
return "0"
""")
def test_noreturn(self):
# NoReturn is the bottom type, so it is considered a subtype of int (and all
# other types).
self.Check("""
from typing import NoReturn
class A:
def f(self) -> int:
return 0
class B(A):
def f(self) -> NoReturn:
raise ValueError()
""")
|
OverridingTest
|
python
|
MongoEngine__mongoengine
|
tests/test_signals.py
|
{
"start": 97,
"end": 17748
}
|
class ____(unittest.TestCase):
"""
Testing signals before/after saving and deleting.
"""
def get_signal_output(self, fn, *args, **kwargs):
# Flush any existing signal output
global signal_output
signal_output = []
fn(*args, **kwargs)
return signal_output
def setUp(self):
connect(db="mongoenginetest")
class Author(Document):
# Make the id deterministic for easier testing
id = SequenceField(primary_key=True)
name = StringField()
def __unicode__(self):
return self.name
@classmethod
def pre_init(cls, sender, document, *args, **kwargs):
signal_output.append("pre_init signal, %s" % cls.__name__)
signal_output.append(kwargs["values"])
@classmethod
def post_init(cls, sender, document, **kwargs):
signal_output.append(
"post_init signal, %s, document._created = %s"
% (document, document._created)
)
@classmethod
def pre_save(cls, sender, document, **kwargs):
signal_output.append("pre_save signal, %s" % document)
signal_output.append(kwargs)
@classmethod
def pre_save_post_validation(cls, sender, document, **kwargs):
signal_output.append("pre_save_post_validation signal, %s" % document)
if kwargs.pop("created", False):
signal_output.append("Is created")
else:
signal_output.append("Is updated")
signal_output.append(kwargs)
@classmethod
def post_save(cls, sender, document, **kwargs):
dirty_keys = list(document._delta()[0].keys()) + list(
document._delta()[1].keys()
)
signal_output.append("post_save signal, %s" % document)
signal_output.append("post_save dirty keys, %s" % dirty_keys)
if kwargs.pop("created", False):
signal_output.append("Is created")
else:
signal_output.append("Is updated")
signal_output.append(kwargs)
@classmethod
def pre_delete(cls, sender, document, **kwargs):
signal_output.append("pre_delete signal, %s" % document)
signal_output.append(kwargs)
@classmethod
def post_delete(cls, sender, document, **kwargs):
signal_output.append("post_delete signal, %s" % document)
signal_output.append(kwargs)
@classmethod
def pre_bulk_insert(cls, sender, documents, **kwargs):
signal_output.append("pre_bulk_insert signal, %s" % documents)
signal_output.append(kwargs)
@classmethod
def post_bulk_insert(cls, sender, documents, **kwargs):
signal_output.append("post_bulk_insert signal, %s" % documents)
if kwargs.pop("loaded", False):
signal_output.append("Is loaded")
else:
signal_output.append("Not loaded")
signal_output.append(kwargs)
self.Author = Author
Author.drop_collection()
Author.id.set_next_value(0)
class Another(Document):
name = StringField()
def __unicode__(self):
return self.name
@classmethod
def pre_delete(cls, sender, document, **kwargs):
signal_output.append("pre_delete signal, %s" % document)
signal_output.append(kwargs)
@classmethod
def post_delete(cls, sender, document, **kwargs):
signal_output.append("post_delete signal, %s" % document)
signal_output.append(kwargs)
self.Another = Another
Another.drop_collection()
class ExplicitId(Document):
id = IntField(primary_key=True)
@classmethod
def post_save(cls, sender, document, **kwargs):
if "created" in kwargs:
if kwargs["created"]:
signal_output.append("Is created")
else:
signal_output.append("Is updated")
self.ExplicitId = ExplicitId
ExplicitId.drop_collection()
class Post(Document):
title = StringField()
content = StringField()
active = BooleanField(default=False)
def __unicode__(self):
return self.title
@classmethod
def pre_bulk_insert(cls, sender, documents, **kwargs):
signal_output.append(
"pre_bulk_insert signal, %s"
% [
(doc, {"active": documents[n].active})
for n, doc in enumerate(documents)
]
)
# make changes here, this is just an example -
# it could be anything that needs pre-validation or look-ups before bulk inserting
for document in documents:
if not document.active:
document.active = True
signal_output.append(kwargs)
@classmethod
def post_bulk_insert(cls, sender, documents, **kwargs):
signal_output.append(
"post_bulk_insert signal, %s"
% [
(doc, {"active": documents[n].active})
for n, doc in enumerate(documents)
]
)
if kwargs.pop("loaded", False):
signal_output.append("Is loaded")
else:
signal_output.append("Not loaded")
signal_output.append(kwargs)
self.Post = Post
Post.drop_collection()
# Save up the number of connected signals so that we can check at the
# end that all the signals we register get properly unregistered
self.pre_signals = (
len(signals.pre_init.receivers),
len(signals.post_init.receivers),
len(signals.pre_save.receivers),
len(signals.pre_save_post_validation.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
len(signals.pre_bulk_insert.receivers),
len(signals.post_bulk_insert.receivers),
)
signals.pre_init.connect(Author.pre_init, sender=Author)
signals.post_init.connect(Author.post_init, sender=Author)
signals.pre_save.connect(Author.pre_save, sender=Author)
signals.pre_save_post_validation.connect(
Author.pre_save_post_validation, sender=Author
)
signals.post_save.connect(Author.post_save, sender=Author)
signals.pre_delete.connect(Author.pre_delete, sender=Author)
signals.post_delete.connect(Author.post_delete, sender=Author)
signals.pre_bulk_insert.connect(Author.pre_bulk_insert, sender=Author)
signals.post_bulk_insert.connect(Author.post_bulk_insert, sender=Author)
signals.pre_delete.connect(Another.pre_delete, sender=Another)
signals.post_delete.connect(Another.post_delete, sender=Another)
signals.post_save.connect(ExplicitId.post_save, sender=ExplicitId)
signals.pre_bulk_insert.connect(Post.pre_bulk_insert, sender=Post)
signals.post_bulk_insert.connect(Post.post_bulk_insert, sender=Post)
def tearDown(self):
signals.pre_init.disconnect(self.Author.pre_init)
signals.post_init.disconnect(self.Author.post_init)
signals.post_delete.disconnect(self.Author.post_delete)
signals.pre_delete.disconnect(self.Author.pre_delete)
signals.post_save.disconnect(self.Author.post_save)
signals.pre_save_post_validation.disconnect(
self.Author.pre_save_post_validation
)
signals.pre_save.disconnect(self.Author.pre_save)
signals.pre_bulk_insert.disconnect(self.Author.pre_bulk_insert)
signals.post_bulk_insert.disconnect(self.Author.post_bulk_insert)
signals.post_delete.disconnect(self.Another.post_delete)
signals.pre_delete.disconnect(self.Another.pre_delete)
signals.post_save.disconnect(self.ExplicitId.post_save)
signals.pre_bulk_insert.disconnect(self.Post.pre_bulk_insert)
signals.post_bulk_insert.disconnect(self.Post.post_bulk_insert)
# Check that all our signals got disconnected properly.
post_signals = (
len(signals.pre_init.receivers),
len(signals.post_init.receivers),
len(signals.pre_save.receivers),
len(signals.pre_save_post_validation.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
len(signals.pre_bulk_insert.receivers),
len(signals.post_bulk_insert.receivers),
)
self.ExplicitId.objects.delete()
# Note that there is a chance that the following assert fails in case
# some receivers (possibly created in other tests)
# get garbage collected (https://pythonhosted.org/blinker/#blinker.base.Signal.connect)
assert self.pre_signals == post_signals
def test_model_signals(self):
"""Model saves should throw some signals."""
def create_author():
self.Author(name="Bill Shakespeare")
def bulk_create_author_with_load():
a1 = self.Author(name="Bill Shakespeare")
self.Author.objects.insert([a1], load_bulk=True)
def bulk_create_author_without_load():
a1 = self.Author(name="Bill Shakespeare")
self.Author.objects.insert([a1], load_bulk=False)
def load_existing_author():
a = self.Author(name="Bill Shakespeare")
a.save()
self.get_signal_output(lambda: None) # eliminate signal output
_ = self.Author.objects(name="Bill Shakespeare")[0]
assert self.get_signal_output(create_author) == [
"pre_init signal, Author",
{"name": "Bill Shakespeare"},
"post_init signal, Bill Shakespeare, document._created = True",
]
a1 = self.Author(name="Bill Shakespeare")
assert self.get_signal_output(a1.save) == [
"pre_save signal, Bill Shakespeare",
{},
"pre_save_post_validation signal, Bill Shakespeare",
"Is created",
{},
"post_save signal, Bill Shakespeare",
"post_save dirty keys, ['name']",
"Is created",
{},
]
a1.reload()
a1.name = "William Shakespeare"
assert self.get_signal_output(a1.save) == [
"pre_save signal, William Shakespeare",
{},
"pre_save_post_validation signal, William Shakespeare",
"Is updated",
{},
"post_save signal, William Shakespeare",
"post_save dirty keys, ['name']",
"Is updated",
{},
]
assert self.get_signal_output(a1.delete) == [
"pre_delete signal, William Shakespeare",
{},
"post_delete signal, William Shakespeare",
{},
]
assert self.get_signal_output(load_existing_author) == [
"pre_init signal, Author",
{"id": 2, "name": "Bill Shakespeare"},
"post_init signal, Bill Shakespeare, document._created = False",
]
assert self.get_signal_output(bulk_create_author_with_load) == [
"pre_init signal, Author",
{"name": "Bill Shakespeare"},
"post_init signal, Bill Shakespeare, document._created = True",
"pre_bulk_insert signal, [<Author: Bill Shakespeare>]",
{},
"pre_init signal, Author",
{"id": 3, "name": "Bill Shakespeare"},
"post_init signal, Bill Shakespeare, document._created = False",
"post_bulk_insert signal, [<Author: Bill Shakespeare>]",
"Is loaded",
{},
]
assert self.get_signal_output(bulk_create_author_without_load) == [
"pre_init signal, Author",
{"name": "Bill Shakespeare"},
"post_init signal, Bill Shakespeare, document._created = True",
"pre_bulk_insert signal, [<Author: Bill Shakespeare>]",
{},
"post_bulk_insert signal, [<Author: Bill Shakespeare>]",
"Not loaded",
{},
]
def test_signal_kwargs(self):
"""Make sure signal_kwargs is passed to signals calls."""
def live_and_let_die():
a = self.Author(name="Bill Shakespeare")
a.save(signal_kwargs={"live": True, "die": False})
a.delete(signal_kwargs={"live": False, "die": True})
assert self.get_signal_output(live_and_let_die) == [
"pre_init signal, Author",
{"name": "Bill Shakespeare"},
"post_init signal, Bill Shakespeare, document._created = True",
"pre_save signal, Bill Shakespeare",
{"die": False, "live": True},
"pre_save_post_validation signal, Bill Shakespeare",
"Is created",
{"die": False, "live": True},
"post_save signal, Bill Shakespeare",
"post_save dirty keys, ['name']",
"Is created",
{"die": False, "live": True},
"pre_delete signal, Bill Shakespeare",
{"die": True, "live": False},
"post_delete signal, Bill Shakespeare",
{"die": True, "live": False},
]
def bulk_create_author():
a1 = self.Author(name="Bill Shakespeare")
self.Author.objects.insert([a1], signal_kwargs={"key": True})
assert self.get_signal_output(bulk_create_author) == [
"pre_init signal, Author",
{"name": "Bill Shakespeare"},
"post_init signal, Bill Shakespeare, document._created = True",
"pre_bulk_insert signal, [<Author: Bill Shakespeare>]",
{"key": True},
"pre_init signal, Author",
{"id": 2, "name": "Bill Shakespeare"},
"post_init signal, Bill Shakespeare, document._created = False",
"post_bulk_insert signal, [<Author: Bill Shakespeare>]",
"Is loaded",
{"key": True},
]
def test_queryset_delete_signals(self):
"""Queryset delete should throw some signals."""
self.Another(name="Bill Shakespeare").save()
assert self.get_signal_output(self.Another.objects.delete) == [
"pre_delete signal, Bill Shakespeare",
{},
"post_delete signal, Bill Shakespeare",
{},
]
def test_signals_with_explicit_doc_ids(self):
"""Model saves must have a created flag the first time."""
ei = self.ExplicitId(id=123)
# post save must receive the created flag, even if there's already
# an object id present
assert self.get_signal_output(ei.save) == ["Is created"]
# second time, it must be an update
assert self.get_signal_output(ei.save) == ["Is updated"]
def test_signals_with_switch_collection(self):
ei = self.ExplicitId(id=123)
ei.switch_collection("explicit__1")
assert self.get_signal_output(ei.save) == ["Is created"]
ei.switch_collection("explicit__1")
assert self.get_signal_output(ei.save) == ["Is updated"]
ei.switch_collection("explicit__1", keep_created=False)
assert self.get_signal_output(ei.save) == ["Is created"]
ei.switch_collection("explicit__1", keep_created=False)
assert self.get_signal_output(ei.save) == ["Is created"]
def test_signals_with_switch_db(self):
connect("mongoenginetest")
register_connection("testdb-1", "mongoenginetest2")
ei = self.ExplicitId(id=123)
ei.switch_db("testdb-1")
assert self.get_signal_output(ei.save) == ["Is created"]
ei.switch_db("testdb-1")
assert self.get_signal_output(ei.save) == ["Is updated"]
ei.switch_db("testdb-1", keep_created=False)
assert self.get_signal_output(ei.save) == ["Is created"]
ei.switch_db("testdb-1", keep_created=False)
assert self.get_signal_output(ei.save) == ["Is created"]
def test_signals_bulk_insert(self):
def bulk_set_active_post():
posts = [
self.Post(title="Post 1"),
self.Post(title="Post 2"),
self.Post(title="Post 3"),
]
self.Post.objects.insert(posts)
results = self.get_signal_output(bulk_set_active_post)
assert results == [
"pre_bulk_insert signal, [(<Post: Post 1>, {'active': False}), (<Post: Post 2>, {'active': False}), (<Post: Post 3>, {'active': False})]",
{},
"post_bulk_insert signal, [(<Post: Post 1>, {'active': True}), (<Post: Post 2>, {'active': True}), (<Post: Post 3>, {'active': True})]",
"Is loaded",
{},
]
if __name__ == "__main__":
unittest.main()
|
TestSignal
|
python
|
jamielennox__requests-mock
|
requests_mock/mocker.py
|
{
"start": 2147,
"end": 9163
}
|
class ____(object):
"""A wrapper around common mocking functions.
Automate the process of mocking the requests library. This will keep the
same general options available and prevent repeating code.
"""
_PROXY_FUNCS = {
'last_request',
'add_matcher',
'request_history',
'called',
'called_once',
'call_count',
'reset',
}
case_sensitive = False
"""case_sensitive handles a backwards incompatible bug. The URL used to
match against our matchers, and the URL saved in request_history, is always
lowercased. This is incorrect, as it reports inaccurate history to the user
and doesn't allow case-sensitive path matching.
Unfortunately fixing this change is backwards incompatible in the 1.X
series as people may rely on this behaviour. To work around this you can
globally set:
requests_mock.mock.case_sensitive = True
or for pytest set in your configuration:
[pytest]
requests_mock_case_sensitive = True
which will prevent the lowercasing and return case-sensitive URL and query
information.
This will become the default in a 2.X release. See bug: #1584008.
"""
def __init__(self, session=None, **kwargs):
if session and not isinstance(session, requests.Session):
raise TypeError("Only a requests.Session object can be mocked")
self._mock_target = session or requests.Session
self.case_sensitive = kwargs.pop('case_sensitive', self.case_sensitive)
self._adapter = (
kwargs.pop('adapter', None) or
adapter.Adapter(case_sensitive=self.case_sensitive)
)
self._json_encoder = kwargs.pop('json_encoder', None)
self.real_http = kwargs.pop('real_http', False)
self._last_send = None
if kwargs:
raise TypeError('Unexpected Arguments: %s' % ', '.join(kwargs))
def start(self):
"""Start mocking requests.
Install the adapter and the wrappers required to intercept requests.
"""
if self._last_send:
raise RuntimeError('Mocker has already been started')
# backup last `send` for restoration on `self.stop`
self._last_send = self._mock_target.send
self._last_get_adapter = self._mock_target.get_adapter
def _fake_get_adapter(session, url):
return self._adapter
def _fake_send(session, request, **kwargs):
# NOTE(phodge): we need to use a threading lock here in case there
# are multiple threads running - one thread could restore the
# original get_adapter() just as a second thread is about to
# execute _original_send() below
with threading_rlock(timeout=10):
# mock get_adapter
#
# NOTE(phodge): requests.Session.send() is actually
# reentrant due to how it resolves redirects with nested
# calls to send(), however the reentry occurs _after_ the
# call to self.get_adapter(), so it doesn't matter that we
# will restore _last_get_adapter before a nested send() has
# completed as long as we monkeypatch get_adapter() each
# time immediately before calling original send() like we
# are doing here.
_set_method(session, "get_adapter", _fake_get_adapter)
# NOTE(jamielennox): self._last_send vs _original_send. Whilst
# it seems like here we would use _last_send there is the
# possibility that the user has messed up and is somehow
# nesting their mockers. If we call last_send at this point
# then we end up calling this function again and the outer
# level adapter ends up winning. All we really care about here
# is that our adapter is in place before calling send so we
# always jump directly to the real function so that our most
# recently patched send call ends up putting in the most recent
# adapter. It feels funny, but it works.
try:
return _original_send(session, request, **kwargs)
except exceptions.NoMockAddress:
if not self.real_http:
raise
except adapter._RunRealHTTP:
# this mocker wants you to run the request through the real
# requests library rather than the mocking. Let it.
pass
finally:
# restore get_adapter
_set_method(session, "get_adapter", self._last_get_adapter)
# if we are here it means we must run the real http request
# Or, with nested mocks, to the parent mock, that is why we use
# _last_send here instead of _original_send
if isinstance(self._mock_target, type):
return self._last_send(session, request, **kwargs)
else:
return self._last_send(request, **kwargs)
_set_method(self._mock_target, "send", _fake_send)
def stop(self):
"""Stop mocking requests.
This should have no impact if mocking has not been started.
When nesting mockers, make sure to stop the innermost first.
"""
if self._last_send:
self._mock_target.send = self._last_send
self._last_send = None
# for familiarity with MagicMock
def reset_mock(self):
self.reset()
def __getattr__(self, name):
if name in self._PROXY_FUNCS:
try:
return getattr(self._adapter, name)
except AttributeError:
pass
raise AttributeError(name)
def register_uri(self, *args, **kwargs):
# you can pass real_http here, but it's private to pass direct to the
# adapter, because if you pass direct to the adapter you'll see the exc
kwargs['_real_http'] = kwargs.pop('real_http', False)
kwargs.setdefault('json_encoder', self._json_encoder)
return self._adapter.register_uri(*args, **kwargs)
def request(self, *args, **kwargs):
return self.register_uri(*args, **kwargs)
def get(self, *args, **kwargs):
return self.request(GET, *args, **kwargs)
def options(self, *args, **kwargs):
return self.request(OPTIONS, *args, **kwargs)
def head(self, *args, **kwargs):
return self.request(HEAD, *args, **kwargs)
def post(self, *args, **kwargs):
return self.request(POST, *args, **kwargs)
def put(self, *args, **kwargs):
return self.request(PUT, *args, **kwargs)
def patch(self, *args, **kwargs):
return self.request(PATCH, *args, **kwargs)
def delete(self, *args, **kwargs):
return self.request(DELETE, *args, **kwargs)
|
MockerCore
|
python
|
getsentry__sentry
|
tests/sentry/api/test_authentication.py
|
{
"start": 10837,
"end": 12360
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.auth = OrgAuthTokenAuthentication()
self.org = self.create_organization(owner=self.user)
self.token = "sntrys_abc123_xyz"
self.org_auth_token = self.create_org_auth_token(
name="Test Token 1",
token_hashed=hash_token(self.token),
organization_id=self.org.id,
token_last_characters="xyz",
scope_list=[],
date_last_used=None,
)
def test_authenticate(self) -> None:
request = _drf_request()
request.META["HTTP_AUTHORIZATION"] = f"Bearer {self.token}"
result = self.auth.authenticate(request)
assert result is not None
user, auth = result
assert user.is_anonymous
assert AuthenticatedToken.from_token(auth) == AuthenticatedToken.from_token(
self.org_auth_token
)
def test_no_match(self) -> None:
request = _drf_request()
request.META["HTTP_AUTHORIZATION"] = "Bearer sntrys_abc"
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
def test_inactive_key(self) -> None:
self.org_auth_token.update(date_deactivated=datetime.now(UTC))
request = _drf_request()
request.META["HTTP_AUTHORIZATION"] = f"Bearer {self.token}"
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
@control_silo_test
|
TestOrgAuthTokenAuthentication
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-fivetran/dagster_fivetran/translator.py
|
{
"start": 10449,
"end": 10804
}
|
class ____(NamespacedMetadataSet):
connector_id: Optional[str] = None
connector_name: Optional[str] = None
destination_id: Optional[str] = None
destination_schema_name: Optional[str] = None
destination_table_name: Optional[str] = None
@classmethod
def namespace(cls) -> str:
return "dagster-fivetran"
|
FivetranMetadataSet
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/v1/input_lib.py
|
{
"start": 14666,
"end": 16259
}
|
class ____(object):
"""Iterator for a single tensor-returning callable."""
def __init__(self, fn, worker, devices):
self._fn = fn
self._worker = worker
self._devices = devices
def get_next(self, device, name=None):
"""Get next element for the given device from the callable."""
del device, name
with ops.device(self._worker):
return self._fn()
def get_next_as_list(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return data_list
def get_next_as_optional_list(self):
with ops.device(self._worker):
data_list = [
optional_ops.Optional.from_value(self._fn()) for _ in self._devices
]
return data_list
def initialize(self):
# TODO(petebu) Should this throw an exception instead?
return []
def _create_iterators_per_worker(worker_datasets, input_workers, options=None):
"""Create a multidevice iterator on each of the workers."""
assert isinstance(input_workers, input_lib.InputWorkers)
assert len(worker_datasets) == len(input_workers.worker_devices)
iterators = []
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
worker_devices = input_workers.compute_devices_for_worker(i)
iterator = _SingleWorkerDatasetIterator(
worker_datasets[i], # pylint: disable=protected-access
worker,
worker_devices,
options)
iterators.append(iterator)
return iterators
|
_SingleWorkerCallableIterator
|
python
|
walkccc__LeetCode
|
solutions/2221. Find Triangular Sum of an Array/2221.py
|
{
"start": 0,
"end": 202
}
|
class ____:
def triangularSum(self, nums: list[int]) -> int:
for sz in range(len(nums), 0, -1):
for i in range(sz - 1):
nums[i] = (nums[i] + nums[i + 1]) % 10
return nums[0]
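# Worked example (added note, not part of the original solution):
# [1, 2, 3, 4, 5] -> [3, 5, 7, 9] -> [8, 2, 6] -> [0, 8] -> [8],
# so triangularSum([1, 2, 3, 4, 5]) returns 8 (each step sums adjacent pairs mod 10).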
|
Solution
|
python
|
pytorch__pytorch
|
test/distributed/checkpoint/test_pg_transport.py
|
{
"start": 6933,
"end": 7646
}
|
class ____(MultiProcContinuousTest):
world_size = 8
timeout: timedelta = timedelta(seconds=20)
@classmethod
def backend_str(cls) -> Optional[str]:
return "gloo"
@classmethod
def device_type(cls) -> str:
return "cpu"
@property
def device(self) -> torch.device:
return torch.device(self.device_type())
def test_pg_transport(self) -> None:
_test_pg_transport(self, self.device)
def test_pg_transport_with_mixed_content(self) -> None:
_test_pg_transport_with_mixed_content(self, self.device)
def test_pg_transport_with_sharded_tensor(self) -> None:
_test_pg_transport_with_sharded_tensor(self, self.device)
|
PgTransportCPU
|
python
|
getsentry__sentry
|
tests/sentry/api/serializers/test_tagvalue.py
|
{
"start": 277,
"end": 1434
}
|
class ____(TestCase):
def test_with_user(self) -> None:
user = self.create_user()
tagvalue = TagValue(
key="sentry:user",
value="username:ted",
times_seen=1,
first_seen=datetime(2018, 1, 1),
last_seen=datetime(2018, 1, 1),
)
result = serialize(tagvalue, user)
assert result["key"] == "user"
assert result["value"] == "username:ted"
assert result["name"] == "ted"
assert result["query"] == 'user.username:"ted"'
def test_release(self) -> None:
user = self.create_user()
tagvalue = TagValue(
key="sentry:release",
value="df84bccbb23ca15f2868be1f2a5f7c7a6464fadd",
times_seen=1,
first_seen=datetime(2018, 1, 1),
last_seen=datetime(2018, 1, 1),
)
result = serialize(tagvalue, user)
assert result["key"] == "release"
assert result["value"] == "df84bccbb23ca15f2868be1f2a5f7c7a6464fadd"
assert result["name"] == "df84bccbb23ca15f2868be1f2a5f7c7a6464fadd"
assert "query" not in result
|
TagValueSerializerTest
|
python
|
pypa__pip
|
src/pip/_vendor/rich/containers.py
|
{
"start": 1678,
"end": 5502
}
|
class ____:
"""A list subclass which can render to the console."""
def __init__(self, lines: Iterable["Text"] = ()) -> None:
self._lines: List["Text"] = list(lines)
def __repr__(self) -> str:
return f"Lines({self._lines!r})"
def __iter__(self) -> Iterator["Text"]:
return iter(self._lines)
@overload
def __getitem__(self, index: int) -> "Text":
...
@overload
def __getitem__(self, index: slice) -> List["Text"]:
...
def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]:
return self._lines[index]
def __setitem__(self, index: int, value: "Text") -> "Lines":
self._lines[index] = value
return self
def __len__(self) -> int:
return self._lines.__len__()
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
"""Console render method to insert line-breaks."""
yield from self._lines
def append(self, line: "Text") -> None:
self._lines.append(line)
def extend(self, lines: Iterable["Text"]) -> None:
self._lines.extend(lines)
def pop(self, index: int = -1) -> "Text":
return self._lines.pop(index)
def justify(
self,
console: "Console",
width: int,
justify: "JustifyMethod" = "left",
overflow: "OverflowMethod" = "fold",
) -> None:
"""Justify and overflow text to a given width.
Args:
console (Console): Console instance.
width (int): Number of cells available per line.
justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left".
overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold".
"""
from .text import Text
if justify == "left":
for line in self._lines:
line.truncate(width, overflow=overflow, pad=True)
elif justify == "center":
for line in self._lines:
line.rstrip()
line.truncate(width, overflow=overflow)
line.pad_left((width - cell_len(line.plain)) // 2)
line.pad_right(width - cell_len(line.plain))
elif justify == "right":
for line in self._lines:
line.rstrip()
line.truncate(width, overflow=overflow)
line.pad_left(width - cell_len(line.plain))
elif justify == "full":
for line_index, line in enumerate(self._lines):
if line_index == len(self._lines) - 1:
break
words = line.split(" ")
words_size = sum(cell_len(word.plain) for word in words)
num_spaces = len(words) - 1
spaces = [1 for _ in range(num_spaces)]
index = 0
if spaces:
while words_size + num_spaces < width:
spaces[len(spaces) - index - 1] += 1
num_spaces += 1
index = (index + 1) % len(spaces)
tokens: List[Text] = []
for index, (word, next_word) in enumerate(
zip_longest(words, words[1:])
):
tokens.append(word)
if index < len(spaces):
style = word.get_style_at_offset(console, -1)
next_style = next_word.get_style_at_offset(console, 0)
space_style = style if style == next_style else line.style
tokens.append(Text(" " * spaces[index], style=space_style))
self[line_index] = Text("").join(tokens)
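# Illustrative usage sketch (added note, not part of the original source; assumes
# the standalone rich package rather than pip's vendored copy):
#   from rich.console import Console
#   from rich.text import Text
#   lines = Lines([Text("hello world"), Text("hi")])
#   lines.justify(Console(), width=20, justify="center")
# After the call each line is truncated and padded to the requested width.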
|
Lines
|
python
|
bottlepy__bottle
|
bottle.py
|
{
"start": 7967,
"end": 8080
}
|
class ____(RouteError):
""" The route parser found something not supported by this router. """
|
RouteSyntaxError
|
python
|
huggingface__transformers
|
src/transformers/models/patchtst/modeling_patchtst.py
|
{
"start": 23614,
"end": 25165
}
|
class ____(PreTrainedModel):
config: PatchTSTConfig
base_model_prefix = "model"
main_input_name = "past_values"
input_modalities = ("time",)
supports_gradient_checkpointing = False
@torch.no_grad()
def _init_weights(self, module: nn.Module):
"""
Initialize weights
"""
if isinstance(module, PatchTSTPositionalEncoding):
# get the number of patches
num_patches = (
max(self.config.context_length, self.config.patch_length) - self.config.patch_length
) // self.config.patch_stride + 1
# initialize cls_token
if self.config.use_cls_token:
init.normal_(module.cls_token, std=0.02)
num_patches += 1
# initialize positional encoding
init.copy_(module.position_enc, module._init_pe(self.config, num_patches))
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, PatchTSTBatchNorm):
init.zeros_(module.batchnorm.bias)
init.ones_(module.batchnorm.weight)
elif isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=self.config.init_std)
if module.bias is not None:
init.zeros_(module.bias)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (PatchTSTEncoder)):
module.gradient_checkpointing = value
|
PatchTSTPreTrainedModel
|
python
|
ansible__ansible
|
lib/ansible/plugins/lookup/list.py
|
{
"start": 865,
"end": 1074
}
|
class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
if not isinstance(terms, Sequence):
raise AnsibleError("with_list expects a list")
return terms
|
LookupModule
|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/objects.py
|
{
"start": 42251,
"end": 43353
}
|
class ____(ObjectBaseModel):
"""An ORM representation of a block document reference."""
parent_block_document_id: UUID = Field(
default=..., description="ID of block document the reference is nested within"
)
parent_block_document: Optional[BlockDocument] = Field(
default=None, description="The block document the reference is nested within"
)
reference_block_document_id: UUID = Field(
default=..., description="ID of the nested block document"
)
reference_block_document: Optional[BlockDocument] = Field(
default=None, description="The nested block document"
)
name: str = Field(
default=..., description="The name that the reference is nested under"
)
@model_validator(mode="before")
@classmethod
def validate_parent_and_ref_are_different(cls, values: Any) -> Any:
if isinstance(values, dict):
return validate_parent_and_ref_diff(values) # pyright: ignore[reportUnknownVariableType, reportUnknownArgumentType] unable to narrow dict type
return values
|
BlockDocumentReference
|
python
|
django__django
|
django/contrib/gis/feeds.py
|
{
"start": 5455,
"end": 5994
}
|
class ____(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
This allows users to define `geometry(obj)` and/or `item_geometry(item)`
methods on their own subclasses so that geo-referenced information may be
placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {"geometry": self._get_dynamic_attr("geometry", obj)}
def item_extra_kwargs(self, item):
return {"geometry": self._get_dynamic_attr("item_geometry", item)}
|
Feed
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/demos/boring_classes.py
|
{
"start": 8913,
"end": 9602
}
|
class ____(LightningModule):
"""
.. warning:: This is meant for testing/debugging and is experimental.
"""
def __init__(self, out_dim: int = 10, learning_rate: float = 0.02):
super().__init__()
self.l1 = torch.nn.Linear(32, out_dim)
self.learning_rate = learning_rate
def forward(self, x: Tensor) -> Tensor:
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch: Any, batch_nb: int) -> STEP_OUTPUT:
x = batch
x = self(x)
return x.sum()
def configure_optimizers(self) -> torch.optim.Optimizer:
return torch.optim.Adam(self.parameters(), lr=self.learning_rate)
|
DemoModel
|
python
|
streamlit__streamlit
|
lib/streamlit/elements/widgets/checkbox.py
|
{
"start": 1596,
"end": 1830
}
|
class ____:
value: bool
def serialize(self, v: bool) -> bool:
return bool(v)
def deserialize(self, ui_value: bool | None) -> bool:
return bool(ui_value if ui_value is not None else self.value)
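# Behavior note (added, not part of the original source): serialize coerces any
# truthy/falsy widget value to a bool, and deserialize falls back to the stored
# default when the frontend has not reported a value yet (ui_value is None).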
|
CheckboxSerde
|
python
|
joerick__pyinstrument
|
pyinstrument/renderers/speedscope.py
|
{
"start": 949,
"end": 1229
}
|
class ____:
"""
Data class to store speedscope's concept of an "event", which
corresponds to opening or closing stack frames as functions or
methods are entered or exited.
"""
type: SpeedscopeEventType
at: float
frame: int
@dataclass
|
SpeedscopeEvent
|
python
|
scrapy__scrapy
|
tests/AsyncCrawlerRunner/custom_loop_same.py
|
{
"start": 265,
"end": 792
}
|
class ____(Spider):
name = "no_request"
custom_settings = {
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": "uvloop.Loop",
}
async def start(self):
return
yield
@deferred_f_from_coro_f
async def main(reactor):
configure_logging()
runner = AsyncCrawlerRunner()
await runner.crawl(NoRequestsSpider)
install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor", "uvloop.Loop")
react(main)
|
NoRequestsSpider
|
python
|
aio-libs__aiohttp
|
aiohttp/payload.py
|
{
"start": 1505,
"end": 1889
}
|
class ____:
def __init__(self, type: Any, *, order: Order = Order.normal) -> None:
self.type = type
self.order = order
def __call__(self, factory: type["Payload"]) -> type["Payload"]:
register_payload(factory, self.type, order=self.order)
return factory
PayloadType = type["Payload"]
_PayloadRegistryItem = tuple[PayloadType, Any]
|
payload_type
|
python
|
run-llama__llama_index
|
llama-index-integrations/graph_stores/llama-index-graph-stores-ApertureDB/llama_index/graph_stores/ApertureDB/property_graph.py
|
{
"start": 2541,
"end": 14657
}
|
class ____(PropertyGraphStore):
"""
ApertureDB graph store.
Args:
config (dict): Configuration for the graph store.
**kwargs: Additional keyword arguments.
"""
flat_metadata: bool = True
@property
def client(self) -> Any:
"""Get client."""
return self._client
def __init__(self, *args, **kwargs) -> None:
try:
from aperturedb.CommonLibrary import create_connector, execute_query
from aperturedb.Query import QueryBuilder
except ImportError:
raise ImportError(
"ApertureDB is not installed. Please install it using "
"'pip install --upgrade aperturedb'"
)
self._client = create_connector()
global query_executor
query_executor = execute_query
global query_builder
query_builder = QueryBuilder
def get_rel_map(
self,
subjs: List[LabelledNode],
depth: int = 2,
limit: int = 30,
ignore_rels: Optional[List[str]] = None,
) -> List[Triplet]:
"""Get depth-aware rel map."""
if subjs is None or len(subjs) == 0:
return []
if depth <= 0:
return []
rel_map = []
ignore_rels = ignore_rels or []
for s in subjs:
query = [
query_builder.find_command(
oclass=s.label,
params={
"_ref": 1,
"constraints": {UNIQUEID_PROPERTY: ["==", s.id]},
"results": {"all_properties": True, "limit": limit},
},
)
]
for i in range(1, 2):
query.extend(
[
{
"FindEntity": {
"_ref": i + 1,
"is_connected_to": {"ref": i, "direction": "out"},
"results": {"all_properties": True, "limit": limit},
}
},
{
"FindConnection": {
"src": i,
"results": {"all_properties": True, "limit": limit},
}
},
]
)
result, response, _ = query_executor(
self._client,
query,
)
assert result == 0, response
adjacent_nodes = []
if "entities" in response[0]["FindEntity"]:
for entity in response[0]["FindEntity"]["entities"]:
for c, ce in zip(
response[1]["FindEntity"]["entities"],
response[2]["FindConnection"]["connections"],
):
if ce[UNIQUEID_PROPERTY] in ignore_rels:
continue
source = EntityNode(
name=entity[UNIQUEID_PROPERTY],
label=entity["label"],
properties=entity,
)
target = EntityNode(
name=c[UNIQUEID_PROPERTY],
label=c["label"],
properties=c,
)
relation = Relation(
source_id=c[UNIQUEID_PROPERTY],
target_id=c[UNIQUEID_PROPERTY],
label=ce[UNIQUEID_PROPERTY],
)
adjacent_nodes.append(target)
rel_map.append([source, relation, target])
rel_map.extend(self.get_rel_map(adjacent_nodes, depth - 1))
return rel_map
def delete(
self,
entity_names: Optional[List[str]] = None,
relation_names: Optional[List[str]] = None,
properties: Optional[dict] = None,
ids: Optional[List[str]] = None,
) -> None:
"""Delete nodes."""
if ids and len(ids) > 0:
query = query_for_ids("DeleteEntity", [id.capitalize() for id in ids])
result, response, _ = query_executor(
self._client,
query,
)
assert result == 0, response
if properties and len(properties) > 0:
query = query_for_properties("DeleteEntity", properties)
result, response, _ = query_executor(
self._client,
query,
)
assert result == 0, response
if entity_names and len(entity_names) > 0:
for name in entity_names:
query = [
{
"DeleteEntity": {
"with_class": name,
"constraints": {"_uniqueid": ["!=", "0.0.0"]},
}
}
]
result, response, _ = query_executor(
self._client,
query,
)
assert result == 0, response
if relation_names and len(relation_names) > 0:
for relation_name in set(relation_names):
query = [
{
"DeleteConnection": {
"with_class": relation_name,
"constraints": {"_uniqueid": ["!=", "0.0.0"]},
}
}
]
result, response, _ = query_executor(
self._client,
query,
)
assert result == 0, response
def get(
self, properties: Optional[dict] = None, ids: Optional[List[str]] = None
) -> List[LabelledNode]:
entities = []
if ids and len(ids) > 0:
query = query_for_ids("FindEntity", ids)
result, response, _ = query_executor(
self._client,
query,
)
assert result == 0, response
entities.extend(response[0]["FindEntity"].get("entities", []))
elif properties and len(properties) > 0:
query = query_for_properties("FindEntity", properties)
result, response, _ = query_executor(
self._client,
query,
)
assert result == 0, response
entities.extend(response[0]["FindEntity"].get("entities", []))
else:
query = [
{
"FindEntity": {
"results": {"all_properties": True, "limit": BATCHSIZE}
}
}
]
result, response, _ = query_executor(
self._client,
query,
)
assert result == 0, response
entities.extend(response[0]["FindEntity"].get("entities", []))
response = []
if len(entities) > 0:
for e in entities:
if e["label"] == "text_chunk":
node = ChunkNode(
properties={
"_node_content": e["node_content"],
"_node_type": e["node_type"],
},
text=e["text"],
id=e[UNIQUEID_PROPERTY],
)
else:
node = EntityNode(
label=e["label"], properties=e, name=e[UNIQUEID_PROPERTY]
)
response.append(node)
return response
def get_triplets(
self, entity_names=None, relation_names=None, properties=None, ids=None
):
raise NotImplementedError("get_triplets is not implemented")
def structured_query(
self, query: str, param_map: Optional[Dict[str, Any]] = None
) -> Any:
query = [{query: param_map}]
blobs = []
result, response, _ = query_executor(self._client, query, blobs)
assert result == 0, response
return response
def upsert_nodes(self, nodes: List[EntityNode]) -> List[str]:
ids = []
data = []
for node in nodes:
# TODO: nodes can be of type EntityNode or ChunkNode
properties = node.properties
id = node.id.capitalize()
if isinstance(node, ChunkNode):
sane_props = {
"text": node.text,
}
for k, v in node.properties.items():
if k.startswith("_"):
sane_props[k[1:]] = v
properties = sane_props
entity = get_entity(self._client, node.label, id)
combined_properties = properties | {
UNIQUEID_PROPERTY: id,
"label": node.label,
}
command = None
if entity is None:
command = {
"AddEntity": {
"class": node.label,
"if_not_found": {UNIQUEID_PROPERTY: ["==", id]},
"properties": combined_properties,
}
}
else:
to_update, to_delete = changed(entity, combined_properties)
if len(to_update) > 0 or len(to_delete) > 0:
command = {
"UpdateEntity": {
"constraints": {UNIQUEID_PROPERTY: ["==", id]},
"properties": to_update,
"remove_props": to_delete,
}
}
if command is not None:
query = [command]
blobs = []
result, response, _ = query_executor(self._client, query, blobs)
assert result == 0, response
data.append((query, blobs))
ids.append(id)
return ids
def upsert_relations(self, relations: List[Relation]) -> None:
"""Upsert relations."""
ids = []
for i, r in enumerate(relations):
query = [
{
"FindEntity": {
"constraints": {
UNIQUEID_PROPERTY: ["==", r.source_id.capitalize()]
},
"_ref": 1,
}
},
{
"FindEntity": {
"constraints": {
UNIQUEID_PROPERTY: ["==", r.target_id.capitalize()]
},
"_ref": 2,
}
},
{
"AddConnection": {
"class": r.label,
"src": 1,
"dst": 2,
"properties": r.properties
| {
UNIQUEID_PROPERTY: f"{r.id}",
"src_id": r.source_id.capitalize(),
"dst_id": r.target_id.capitalize(),
},
"if_not_found": {
UNIQUEID_PROPERTY: ["==", f"{r.id}"],
"src_id": ["==", r.source_id.capitalize()],
"dst_id": ["==", r.target_id.capitalize()],
},
}
},
]
result, response, _ = query_executor(
self._client, query, success_statuses=[0, 2]
)
assert result == 0, response
ids.append(r.id)
return ids
def vector_query(self, query, **kwargs):
raise NotImplementedError("vector_query is not implemented")
|
ApertureDBGraphStore
|
python
|
ansible__ansible
|
test/units/module_utils/basic/test_run_command.py
|
{
"start": 5765,
"end": 6971
}
|
class ____:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_bad_regex(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command('foo', prompt_regex='[pP)assword:')
assert rc_am.fail_json.called
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_no_match(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello')
(rc, stdout, stderr) = rc_am.run_command('foo', prompt_regex='[pP]assword:')
assert rc == 0
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_match_wo_data(self, mocker, rc_am):
rc_am._subprocess._output = {mocker.sentinel.stdout:
SpecialBytesIO(b'Authentication required!\nEnter password: ',
fh=mocker.sentinel.stdout),
mocker.sentinel.stderr:
SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
(rc, stdout, stderr) = rc_am.run_command('foo', prompt_regex=r'[pP]assword:', data=None)
assert rc == 257
|
TestRunCommandPrompt
|
python
|
pallets__werkzeug
|
src/werkzeug/routing/exceptions.py
|
{
"start": 4580,
"end": 4846
}
|
class ____(Exception):
__slots__ = ("have_match_for", "websocket_mismatch")
def __init__(self, have_match_for: set[str], websocket_mismatch: bool) -> None:
self.have_match_for = have_match_for
self.websocket_mismatch = websocket_mismatch
|
NoMatch
|
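Illustrative aside (not part of the dataset row above): a minimal sketch of raising and inspecting the exception shown, assuming the class is named NoMatch as in the target column.
# Hypothetical usage; attribute names come directly from the snippet.
try:
    raise NoMatch(have_match_for={"GET", "HEAD"}, websocket_mismatch=False)
except NoMatch as exc:
    # Downstream routing code can report which methods would have matched
    # and whether the failure was a websocket/HTTP mismatch.
    print(sorted(exc.have_match_for), exc.websocket_mismatch)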
python
|
pypa__pip
|
src/pip/_vendor/rich/_win32_console.py
|
{
"start": 758,
"end": 1544
}
|
class ____(NamedTuple):
"""Coordinates in the Windows Console API are (y, x), not (x, y).
This class is intended to prevent that confusion.
Rows and columns are indexed from 0.
This class can be used in place of wintypes._COORD in arguments and argtypes.
"""
row: int
col: int
@classmethod
def from_param(cls, value: "WindowsCoordinates") -> COORD:
"""Converts a WindowsCoordinates into a wintypes _COORD structure.
This classmethod is internally called by ctypes to perform the conversion.
Args:
value (WindowsCoordinates): The input coordinates to convert.
Returns:
wintypes._COORD: The converted coordinates struct.
"""
return COORD(value.col, value.row)
|
WindowsCoordinates
|
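Illustrative aside (not part of the dataset row above): a minimal sketch of the (row, col) ordering the named tuple enforces, assuming the class is named WindowsCoordinates as in the target column; the from_param hook only matters on Windows, where ctypes builds the wintypes._COORD.
# Hypothetical usage; no Windows APIs are called here.
cursor = WindowsCoordinates(row=2, col=10)
assert (cursor.row, cursor.col) == (2, 10)
# On Windows, passing `cursor` where a COORD argtype is declared would invoke
# WindowsCoordinates.from_param(cursor) and produce COORD(10, 2), i.e. (x, y).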
python
|
zarr-developers__zarr-python
|
tests/test_group.py
|
{
"start": 60519,
"end": 60904
}
|
class ____:
def test_from_dict_extra_fields(self):
data = {
"attributes": {"key": "value"},
"_nczarr_superblock": {"version": "2.0.0"},
"zarr_format": 2,
}
result = GroupMetadata.from_dict(data)
expected = GroupMetadata(attributes={"key": "value"}, zarr_format=2)
assert result == expected
|
TestGroupMetadata
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/string/test_indexing.py
|
{
"start": 5268,
"end": 7850
}
|
class ____:
@pytest.mark.parametrize(
"in_slice,expected",
[
# error: Slice index must be an integer or None
(pd.IndexSlice[::-1], "yxdcb"),
(pd.IndexSlice["b":"y":-1], ""), # type: ignore[misc]
(pd.IndexSlice["b"::-1], "b"), # type: ignore[misc]
(pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice[:"y":-1], "y"), # type: ignore[misc]
(pd.IndexSlice["y"::-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice["y"::-4], "yb"), # type: ignore[misc]
# absent labels
(pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice[:"a":-2], "ydb"), # type: ignore[misc]
(pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice["z"::-3], "yc"), # type: ignore[misc]
(pd.IndexSlice["m"::-1], "dcb"), # type: ignore[misc]
(pd.IndexSlice[:"m":-1], "yx"), # type: ignore[misc]
(pd.IndexSlice["a":"a":-1], ""), # type: ignore[misc]
(pd.IndexSlice["z":"z":-1], ""), # type: ignore[misc]
(pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc]
],
)
def test_slice_locs_negative_step(self, in_slice, expected, any_string_dtype):
index = Index(list("bcdxy"), dtype=any_string_dtype)
s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step)
result = index[s_start : s_stop : in_slice.step]
expected = Index(list(expected), dtype=any_string_dtype)
tm.assert_index_equal(result, expected)
def test_slice_locs_negative_step_oob(self, any_string_dtype):
index = Index(list("bcdxy"), dtype=any_string_dtype)
result = index[-10:5:1]
tm.assert_index_equal(result, index)
result = index[4:-10:-1]
expected = Index(list("yxdcb"), dtype=any_string_dtype)
tm.assert_index_equal(result, expected)
def test_slice_locs_dup(self, any_string_dtype):
index = Index(["a", "a", "b", "c", "d", "d"], dtype=any_string_dtype)
assert index.slice_locs("a", "d") == (0, 6)
assert index.slice_locs(end="d") == (0, 6)
assert index.slice_locs("a", "c") == (0, 4)
assert index.slice_locs("b", "d") == (2, 6)
index2 = index[::-1]
assert index2.slice_locs("d", "a") == (0, 6)
assert index2.slice_locs(end="a") == (0, 6)
assert index2.slice_locs("d", "b") == (0, 4)
assert index2.slice_locs("c", "a") == (2, 6)
|
TestSliceLocs
|
python
|
PyCQA__pylint
|
tests/functional/r/regression/regression_4891.py
|
{
"start": 97,
"end": 348
}
|
class ____:
'''
class docstring
'''
def __init__(self):
self.data = {}
def process(self):
'''
another method is responsible for putting "static_key"
'''
copy.copy(self.data['static_key'])
|
MyData
|
python
|
getsentry__sentry
|
src/sentry/seer/similarity/utils.py
|
{
"start": 3644,
"end": 23471
}
|
class ____(TypedDict):
frame_count: int
html_frame_count: int # for a temporary metric
has_no_filename: bool # for a temporary metric
found_non_snipped_context_line: bool
def get_stacktrace_string(data: dict[str, Any]) -> str:
"""Format a stacktrace string from the grouping information."""
app_hash = get_path(data, "app", "hash")
app_component = get_path(data, "app", "component", "values")
system_hash = get_path(data, "system", "hash")
system_component = get_path(data, "system", "component", "values")
if not (app_hash or system_hash):
return ""
# Get the data used for grouping
if app_hash:
exceptions = app_component
else:
exceptions = system_component
# Handle chained exceptions
if exceptions and exceptions[0].get("id") == "chained_exception":
exceptions = exceptions[0].get("values")
metrics.distribution("seer.grouping.exceptions.length", len(exceptions))
frame_metrics: FramesMetrics = {
"frame_count": 0,
"html_frame_count": 0, # for a temporary metric
"has_no_filename": False, # for a temporary metric
"found_non_snipped_context_line": False,
}
result_parts = []
# Reverse the list of exceptions in order to prioritize the outermost/most recent ones in cases
# where there are chained exceptions and we end up truncating
# Limit the number of chained exceptions
for exception in reversed(exceptions[-MAX_EXCEPTION_COUNT:]):
exception_type = exception.get("id")
if not exception.get("contributes") or exception_type not in [
"exception",
"threads",
"stacktrace",
]:
continue
exc_type, exc_value, frame_strings, frame_metrics = process_exception_frames(
exception, frame_metrics
)
# Only exceptions have the type and value properties, so we don't need to handle the threads
# case here
header = f"{exc_type}: {exc_value}\n" if exception["id"] == "exception" else ""
result_parts.append((header, frame_strings))
return generate_stacktrace_string(result_parts, frame_metrics)
def generate_stacktrace_string(
result_parts: Sequence[tuple[str, list[str]]],
frame_metrics: FramesMetrics,
) -> str:
stacktrace_str = ""
final_frame_count = 0
for header, frame_strings in result_parts:
# For performance reasons, if the entire stacktrace is made of minified frames, restrict the
# result to include only the first 20 frames, since minified frames are significantly more
# token-dense than non-minified ones
if not frame_metrics["found_non_snipped_context_line"]:
frame_strings = _discard_excess_frames(
frame_strings, FULLY_MINIFIED_STACKTRACE_MAX_FRAME_COUNT, final_frame_count
)
final_frame_count += len(frame_strings)
stacktrace_str += header + "".join(frame_strings)
metrics.incr(
"seer.grouping.html_in_stacktrace",
sample_rate=options.get("seer.similarity.metrics_sample_rate"),
tags={
"html_frames": (
"none"
if frame_metrics["html_frame_count"] == 0
else "all" if frame_metrics["html_frame_count"] == final_frame_count else "some"
)
},
)
# Return empty stacktrace for events with no header, only one frame and no filename
# since this is too little info to group on
if frame_metrics["has_no_filename"] and len(result_parts) == 1:
header, frames = result_parts[0][0], result_parts[0][1]
if header == "" and len(frames) == 1:
stacktrace_str = ""
return stacktrace_str.strip()
def process_exception_frames(
exception: dict[str, Any], frame_metrics: FramesMetrics
) -> tuple[str, str, list[str], FramesMetrics]:
# For each exception, extract its type, value, and up to limit number of stacktrace frames
exc_type, exc_value = "", ""
frame_strings: list[str] = []
exception_type = exception.get("id")
if exception_type == "stacktrace":
frame_strings, frame_metrics = _process_frames(exception.get("values", []), frame_metrics)
else:
for exception_value in exception.get("values", []):
if exception_value.get("id") == "type":
exc_type = _get_value_if_exists(exception_value)
elif exception_value.get("id") == "value":
exc_value = _get_value_if_exists(exception_value)
elif (
exception_value.get("id") == "stacktrace"
and frame_metrics["frame_count"] < MAX_FRAME_COUNT
):
frame_strings, frame_metrics = _process_frames(
exception_value["values"], frame_metrics
)
return exc_type, exc_value, frame_strings, frame_metrics
def _process_frames(
frames: list[dict[str, Any]], frame_metrics: FramesMetrics
) -> tuple[list[str], FramesMetrics]:
frame_strings = []
contributing_frames = [
frame for frame in frames if frame.get("id") == "frame" and frame.get("contributes")
]
contributing_frames = _discard_excess_frames(
contributing_frames, MAX_FRAME_COUNT, frame_metrics["frame_count"]
)
frame_metrics["frame_count"] += len(contributing_frames)
for frame in contributing_frames:
frame_dict = extract_values_from_frame_values(frame.get("values", []))
filename = extract_filename(frame_dict) or "None"
if not _is_snipped_context_line(frame_dict["context_line"]):
frame_metrics["found_non_snipped_context_line"] = True
if not frame_dict["filename"]:
frame_metrics["has_no_filename"] = True
# Not an exhaustive list of tests we could run to detect HTML, but this is only
# meant to be a temporary, quick-and-dirty metric
# TODO: Don't let this, and the metric below, hang around forever. It's only to
# help us get a sense of whether it's worthwhile trying to more accurately
# detect, and then exclude, frames containing HTML
if frame_dict["filename"].endswith("html") or "<html>" in frame_dict["context_line"]:
frame_metrics["html_frame_count"] += 1
if is_base64_encoded_frame(frame_dict):
continue
frame_strings.append(
f' File "{filename}", function {frame_dict["function"]}\n {frame_dict["context_line"]}\n'
)
return frame_strings, frame_metrics
def extract_values_from_frame_values(values: Sequence[Mapping[str, Any]]) -> dict[str, Any]:
frame_dict = {"filename": "", "function": "", "context_line": "", "module": ""}
for frame_values in values:
if frame_values.get("id") in frame_dict:
frame_dict[frame_values["id"]] = _get_value_if_exists(frame_values)
return frame_dict
def extract_filename(frame_dict: Mapping[str, Any]) -> str:
"""
    Extract the filename from the frame dictionary, falling back to the module if the filename is not present.
"""
filename = frame_dict["filename"]
if filename in IGNORED_FILENAMES:
filename = ""
if filename == "" and frame_dict["module"] != "":
filename = frame_dict["module"]
return filename
def is_base64_encoded_frame(frame_dict: Mapping[str, Any]) -> bool:
# We want to skip frames with base64 encoded filenames since they can be large
# and not contain any usable information
base64_encoded = False
for base64_prefix in BASE64_ENCODED_PREFIXES:
if frame_dict["filename"].startswith(base64_prefix):
base64_encoded = True
break
return base64_encoded
def event_content_has_stacktrace(event: GroupEvent | Event) -> bool:
# If an event has no stacktrace, there's no data for Seer to analyze, so no point in making the
# API call. If we ever start analyzing message-only events, we'll need to add `event.title in
# PLACEHOLDER_EVENT_TITLES` to this check.
exception_stacktrace = get_path(event.data, "exception", "values", -1, "stacktrace", "frames")
threads_stacktrace = get_path(event.data, "threads", "values", -1, "stacktrace", "frames")
only_stacktrace = get_path(event.data, "stacktrace", "frames")
return exception_stacktrace or threads_stacktrace or only_stacktrace
def record_did_call_seer_metric(
event: Event, *, call_made: bool, blocker: str, training_mode: bool = False
) -> None:
metrics.incr(
"grouping.similarity.did_call_seer",
sample_rate=options.get("seer.similarity.metrics_sample_rate"),
tags={
"call_made": call_made,
"blocker": blocker,
"platform": event.platform,
"training_mode": training_mode,
},
)
def stacktrace_exceeds_limits(
event: Event | GroupEvent,
variants: dict[str, BaseVariant],
referrer: ReferrerOptions,
) -> bool:
"""
Check if a stacktrace exceeds length limits for Seer similarity analysis.
This checks both frame count and token count limits to determine if the stacktrace
is too long to send to Seer. Different platforms have different filtering behaviors:
- Platforms in EVENT_PLATFORMS_BYPASSING_FRAME_COUNT_CHECK bypass all checks
- Other platforms are checked against MAX_FRAME_COUNT and max_token_count limits
"""
platform: str = event.platform or "unknown"
shared_tags = {"referrer": referrer.value, "platform": platform}
contributing_variant, contributing_component = get_contributing_variant_and_component(variants)
# Ideally we're calling this function after we already know the event both has a stacktrace and
# is using it for grouping (in which case none of the below conditions should apply), but still
# worth checking that we have enough information to answer the question just in case
if (
# Fingerprint, checksum, fallback variants
not isinstance(contributing_variant, ComponentVariant)
# Security violations, log-message-based grouping
or contributing_variant.variant_name == "default"
# Any ComponentVariant will have this, but this reassures mypy
or not contributing_component
# Exception-message-based grouping
or not hasattr(contributing_component, "frame_counts")
):
# We don't bother to collect a metric on this outcome, because we shouldn't have called the
# function in the first place
return False
# Certain platforms were backfilled before we added this filter, so to keep new events matching
# with the existing data, we turn off the filter for them (instead their stacktraces will be
# truncated)
if platform in EVENT_PLATFORMS_BYPASSING_FRAME_COUNT_CHECK:
metrics.incr(
"grouping.similarity.stacktrace_length_filter",
sample_rate=options.get("seer.similarity.metrics_sample_rate"),
tags={**shared_tags, "outcome": "bypass"},
)
report_token_count_metric(event, variants, "bypass")
return False
stacktrace_type = "in_app" if contributing_variant.variant_name == "app" else "system"
key = f"{stacktrace_type}_contributing_frames"
shared_tags["stacktrace_type"] = stacktrace_type
if contributing_component.frame_counts[key] > MAX_FRAME_COUNT:
metrics.incr(
"grouping.similarity.stacktrace_length_filter",
sample_rate=options.get("seer.similarity.metrics_sample_rate"),
tags={**shared_tags, "outcome": "block_frames"},
)
report_token_count_metric(event, variants, "block_frames")
return True
# For platforms that filter by frame count, also check token count
token_count = get_token_count(event, variants, platform)
max_token_count = options.get("seer.similarity.max_token_count")
if token_count > max_token_count:
metrics.incr(
"grouping.similarity.stacktrace_length_filter",
sample_rate=options.get("seer.similarity.metrics_sample_rate"),
tags={**shared_tags, "outcome": "block_tokens"},
)
report_token_count_metric(event, variants, "block_tokens", token_count)
return True
metrics.incr(
"grouping.similarity.stacktrace_length_filter",
sample_rate=options.get("seer.similarity.metrics_sample_rate"),
tags={**shared_tags, "outcome": "pass"},
)
report_token_count_metric(event, variants, "pass")
return False
def killswitch_enabled(
project_id: int | None,
referrer: ReferrerOptions,
event: Event | None = None,
) -> bool:
"""
Check both the global and similarity-specific Seer killswitches.
"""
is_ingest = referrer == ReferrerOptions.INGEST
logger_prefix = f"grouping.similarity.{referrer.value}"
logger_extra = {"event_id": event.event_id if event else None, "project_id": project_id}
if options.get("seer.global-killswitch.enabled"):
logger.warning(
f"{logger_prefix}.seer_global_killswitch_enabled", # noqa
extra=logger_extra,
)
# When it's ingest, `event` will always be defined - the second check is purely for mypy
if is_ingest and event:
record_did_call_seer_metric(event, call_made=False, blocker="global-killswitch")
return True
if options.get("seer.similarity-killswitch.enabled"):
logger.warning(
f"{logger_prefix}.seer_similarity_killswitch_enabled", # noqa
extra=logger_extra,
)
if is_ingest and event:
record_did_call_seer_metric(event, call_made=False, blocker="similarity-killswitch")
return True
if killswitch_matches_context(
"seer.similarity.grouping_killswitch_projects", {"project_id": project_id}
):
logger.warning(
f"{logger_prefix}.seer_similarity_project_killswitch_enabled", # noqa
extra=logger_extra,
)
if is_ingest and event:
record_did_call_seer_metric(event, call_made=False, blocker="project-killswitch")
return True
return False
def filter_null_from_string(string: str) -> str:
"""
Filter out null bytes from string so that it can be saved in records table.
"""
return string.replace("\x00", "")
T = TypeVar("T", dict[str, Any], str)
def _discard_excess_frames(frames: list[T], max_frames: int, current_frame_count: int) -> list[T]:
if current_frame_count >= max_frames:
return []
# If adding in all of the new frames would put us over the limit, truncate the list
if current_frame_count + len(frames) > max_frames:
remaining_frames_allowed = max_frames - current_frame_count
# Pull from the end of the list, since those frames are the most recent
frames = frames[-remaining_frames_allowed:]
return frames
def _is_snipped_context_line(context_line: str) -> bool:
# This check is implicitly restricted to JS (and friends) events by the fact that the `{snip]`
# is only added in the JS processor. See
# https://github.com/getsentry/sentry/blob/d077a5bb7e13a5927794b35d9ae667a4f181feb7/src/sentry/lang/javascript/utils.py#L72-L77.
return context_line.startswith("{snip}") or context_line.endswith("{snip}")
def project_is_seer_eligible(project: Project) -> bool:
"""
Return True if the project hasn't already been backfilled and the feature is enabled in the region.
"""
is_backfill_completed = project.get_option("sentry:similarity_backfill_completed")
is_region_enabled = options.get("similarity.new_project_seer_grouping.enabled")
return not is_backfill_completed and is_region_enabled
def set_default_project_autofix_automation_tuning(
organization: Organization, project: Project
) -> None:
org_default_autofix_automation_tuning = organization.get_option(
"sentry:default_autofix_automation_tuning"
)
if org_default_autofix_automation_tuning and org_default_autofix_automation_tuning != "off":
project.update_option(
"sentry:default_autofix_automation_tuning", org_default_autofix_automation_tuning
)
def set_default_project_seer_scanner_automation(
organization: Organization, project: Project
) -> None:
org_default_seer_scanner_automation = organization.get_option(
"sentry:default_seer_scanner_automation"
)
if org_default_seer_scanner_automation:
project.update_option(
"sentry:default_seer_scanner_automation", org_default_seer_scanner_automation
)
def report_token_count_metric(
event: Event | GroupEvent,
variants: dict[str, BaseVariant],
outcome: str,
token_count: int | None = None,
) -> None:
"""
Calculate token count and report metrics for stacktrace token analysis.
This function is gated by the 'seer.similarity.token_count_metrics_enabled' option
and will do nothing if disabled.
Args:
event: A Sentry Event object containing stack trace data
variants: Optional pre-calculated grouping variants to avoid recalculation
outcome: The frame check outcome ("pass", "block", "bypass")
"""
if not options.get("seer.similarity.token_count_metrics_enabled", False):
return
platform = event.platform or "unknown"
if token_count is None:
token_count = get_token_count(event, variants, platform)
metrics.distribution(
"grouping.similarity.token_count",
token_count,
sample_rate=options.get("seer.similarity.metrics_sample_rate"),
tags={
"platform": platform,
"frame_check_outcome": outcome,
},
)
def get_token_count(
event: Event | GroupEvent, variants: dict[str, BaseVariant], platform: str
) -> int:
"""
Count the number of tokens in the stack trace of an event.
    The stacktrace string should already be cached on the event; it is only calculated here if needed.
Args:
event: A Sentry Event object containing stack trace data
variants: Pre-calculated grouping variants to avoid recalculation
platform: The platform of the event (e.g., "python", "java")
Returns:
The number of tokens in the stack trace text
"""
with metrics.timer(
"grouping.similarity.get_token_count",
sample_rate=options.get("seer.similarity.metrics_sample_rate"),
tags={"platform": platform},
) as timer_tags:
try:
stacktrace_text = event.data.get("stacktrace_string")
timer_tags["has_content"] = False
timer_tags["source"] = "cached_stacktrace_string"
if stacktrace_text is None:
timer_tags["source"] = "generated_stacktrace_string"
if not variants:
timer_tags["source"] = "no_variants"
return 0
stacktrace_text = get_stacktrace_string(
get_grouping_info_from_variants_legacy(variants)
)
if stacktrace_text:
timer_tags["has_content"] = True
encoding = get_tokenizer().encode(stacktrace_text)
return len(encoding.ids)
timer_tags["source"] = "no_stacktrace_string"
return 0
except Exception as e:
timer_tags["error"] = True
logger.exception("Error calculating token count")
sentry_sdk.capture_exception(
e,
tags={
"event_id": getattr(event, "event_id", None),
"project_id": getattr(event, "project_id", None),
"platform": platform,
},
)
return 0
|
FramesMetrics
|
python
|
getsentry__sentry
|
src/sentry/testutils/helpers/notifications.py
|
{
"start": 739,
"end": 2027
}
|
class ____(BaseNotification):
group: Group
template_path = ""
metrics_key = "dummy"
reference = None
def get_subject(self, context: Mapping[str, Any] | None = None) -> str:
return "My Subject"
def determine_recipients(self) -> list[Actor]:
return []
def build_attachment_title(self, *args) -> str:
return "My Title"
def get_title_link(self, *args):
return None
def get_notification_title(
self, provider: ExternalProviders, context: Mapping[str, Any] | None = None
) -> str:
return "Notification Title"
def record_notification_sent(self, *args):
pass
def build_notification_footer(self, *args) -> str:
return ""
def get_participants(self):
return []
def get_message_actions(
self, recipient: Actor, provider: ExternalProviders
) -> Sequence[MessageAction]:
zombo_link = MessageAction(
name="Go to Zombo.com",
style="primary",
url="http://zombo.com",
)
sentry_link = MessageAction(
name="Sentry Link",
label="Go to Sentry",
style="primary",
url="http://sentry.io",
)
return [zombo_link, sentry_link]
|
DummyNotification
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/ai.py
|
{
"start": 2305,
"end": 2869
}
|
class ____(ABC):
"""Abstract base class for input types."""
@classmethod
def matches(cls, user_input: str) -> bool:
"""Whether the user input matches this input type."""
raise NotImplementedError
@classmethod
def get_context(cls, user_input: str) -> str:
"""Fetches context from the user input, to be passed to AI tools."""
raise NotImplementedError
@classmethod
def additional_allowed_tools(cls) -> list[str]:
"""Additional allowed tools to be passed to AI tools."""
return []
|
InputType
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1alpha1_volume_attributes_class.py
|
{
"start": 383,
"end": 9717
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'driver_name': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'parameters': 'dict(str, str)'
}
attribute_map = {
'api_version': 'apiVersion',
'driver_name': 'driverName',
'kind': 'kind',
'metadata': 'metadata',
'parameters': 'parameters'
}
def __init__(self, api_version=None, driver_name=None, kind=None, metadata=None, parameters=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1VolumeAttributesClass - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._driver_name = None
self._kind = None
self._metadata = None
self._parameters = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.driver_name = driver_name
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if parameters is not None:
self.parameters = parameters
@property
def api_version(self):
"""Gets the api_version of this V1alpha1VolumeAttributesClass. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1VolumeAttributesClass. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1VolumeAttributesClass.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1VolumeAttributesClass. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def driver_name(self):
"""Gets the driver_name of this V1alpha1VolumeAttributesClass. # noqa: E501
Name of the CSI driver This field is immutable. # noqa: E501
:return: The driver_name of this V1alpha1VolumeAttributesClass. # noqa: E501
:rtype: str
"""
return self._driver_name
@driver_name.setter
def driver_name(self, driver_name):
"""Sets the driver_name of this V1alpha1VolumeAttributesClass.
Name of the CSI driver This field is immutable. # noqa: E501
:param driver_name: The driver_name of this V1alpha1VolumeAttributesClass. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver_name is None: # noqa: E501
raise ValueError("Invalid value for `driver_name`, must not be `None`") # noqa: E501
self._driver_name = driver_name
@property
def kind(self):
"""Gets the kind of this V1alpha1VolumeAttributesClass. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1VolumeAttributesClass. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1VolumeAttributesClass.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1VolumeAttributesClass. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1VolumeAttributesClass. # noqa: E501
:return: The metadata of this V1alpha1VolumeAttributesClass. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1VolumeAttributesClass.
:param metadata: The metadata of this V1alpha1VolumeAttributesClass. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def parameters(self):
"""Gets the parameters of this V1alpha1VolumeAttributesClass. # noqa: E501
parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass. This field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field. # noqa: E501
:return: The parameters of this V1alpha1VolumeAttributesClass. # noqa: E501
:rtype: dict(str, str)
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1alpha1VolumeAttributesClass.
parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass. This field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field. # noqa: E501
:param parameters: The parameters of this V1alpha1VolumeAttributesClass. # noqa: E501
:type: dict(str, str)
"""
self._parameters = parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1VolumeAttributesClass):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1VolumeAttributesClass):
return True
return self.to_dict() != other.to_dict()
|
V1alpha1VolumeAttributesClass
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-palindromic-subsequences.py
|
{
"start": 1028,
"end": 1649
}
|
class ____(object):
def countPalindromes(self, s):
"""
:type s: str
:rtype: int
"""
MOD = 10**9+7
result = 0
for i in xrange(10):
for j in xrange(10):
pattern = "%s%s*%s%s" % (i, j, j, i)
dp = [0]*(5+1)
dp[0] = 1
for k in xrange(len(s)):
for l in reversed(xrange(5)):
if pattern[l] == '*' or pattern[l] == s[k]:
dp[l+1] = (dp[l+1]+dp[l])%MOD
result = (result+dp[5])%MOD
return result
|
Solution2
|
python
|
huggingface__transformers
|
src/transformers/models/xglm/modeling_xglm.py
|
{
"start": 4301,
"end": 11443
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: Optional[float] = 0.0,
is_decoder: Optional[bool] = False,
bias: Optional[bool] = True,
layer_idx: Optional[bool] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, src_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, src_len, -1, self.head_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = torch.max(
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
if attn_weights.dtype == torch.float16:
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16)
else:
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights has to be reshaped
            # twice and reused in the steps that follow
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
|
XGLMAttention
|
python
|
huggingface__transformers
|
tests/models/auto/test_image_processing_auto.py
|
{
"start": 1244,
"end": 12857
}
|
class ____(unittest.TestCase):
def setUp(self):
transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def test_image_processor_from_model_shortcut(self):
config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
self.assertIsInstance(config, CLIPImageProcessor)
def test_image_processor_from_local_directory_from_key(self):
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
config = AutoImageProcessor.from_pretrained(tmpdirname)
self.assertIsInstance(config, CLIPImageProcessor)
def test_image_processor_from_local_directory_from_feature_extractor_key(self):
# Ensure we can load the image processor from the feature extractor config
# Though we don't have any `CLIPFeatureExtractor` class, we can't be sure that
# there are no models in the hub serialized with `processor_type=CLIPFeatureExtractor`
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
config = AutoImageProcessor.from_pretrained(tmpdirname)
self.assertIsInstance(config, CLIPImageProcessor)
def test_image_processor_from_new_filename(self):
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
config = AutoImageProcessor.from_pretrained(tmpdirname)
self.assertIsInstance(config, CLIPImageProcessor)
def test_image_processor_from_local_directory_from_config(self):
with tempfile.TemporaryDirectory() as tmpdirname:
model_config = CLIPConfig()
# Create a dummy config file with image_processor_type
processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
config_dict.pop("image_processor_type")
config = CLIPImageProcessor(**config_dict)
# save in new folder
model_config.save_pretrained(tmpdirname)
config.save_pretrained(tmpdirname)
config = AutoImageProcessor.from_pretrained(tmpdirname)
# make sure private variable is not incorrectly saved
dict_as_saved = json.loads(config.to_json_string())
self.assertTrue("_processor_class" not in dict_as_saved)
self.assertIsInstance(config, CLIPImageProcessor)
def test_image_processor_from_local_file(self):
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
open(processor_tmpfile, "w"),
)
config = AutoImageProcessor.from_pretrained(processor_tmpfile)
self.assertIsInstance(config, CLIPImageProcessor)
def test_repo_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
):
_ = AutoImageProcessor.from_pretrained("clip-base")
def test_revision_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
):
_ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def test_image_processor_not_found(self):
with self.assertRaisesRegex(
EnvironmentError,
"Can't load image processor for 'hf-internal-testing/config-no-model'.",
):
_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
@require_vision
@require_torchvision
def test_use_fast_selection(self):
checkpoint = "hf-internal-testing/tiny-random-vit"
# TODO: @yoni, change in v4.48 (when use_fast set to True by default)
# Slow image processor is selected by default
image_processor = AutoImageProcessor.from_pretrained(checkpoint)
self.assertIsInstance(image_processor, ViTImageProcessor)
# Fast image processor is selected when use_fast=True
image_processor = AutoImageProcessor.from_pretrained(checkpoint, use_fast=True)
self.assertIsInstance(image_processor, ViTImageProcessorFast)
# Slow image processor is selected when use_fast=False
image_processor = AutoImageProcessor.from_pretrained(checkpoint, use_fast=False)
self.assertIsInstance(image_processor, ViTImageProcessor)
def test_from_pretrained_dynamic_image_processor(self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError):
image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError):
image_processor = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
)
image_processor = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
)
self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
# Test the dynamic module is loaded only once.
reloaded_image_processor = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
)
self.assertIs(image_processor.__class__, reloaded_image_processor.__class__)
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(tmp_dir)
reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_processor.py"))) # Assert we saved custom code
self.assertEqual(
reloaded_image_processor.auto_map["AutoImageProcessor"], "image_processor.NewImageProcessor"
)
self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
# Test the dynamic module is reloaded if we force it.
reloaded_image_processor = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True, force_download=True
)
self.assertIsNot(image_processor.__class__, reloaded_image_processor.__class__)
def test_new_image_processor_registration(self):
try:
AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError):
AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)
with tempfile.TemporaryDirectory() as tmpdirname:
processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
config_tmpfile = Path(tmpdirname) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
open(processor_tmpfile, "w"),
)
json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
image_processor = CustomImageProcessor.from_pretrained(tmpdirname)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(tmp_dir)
new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
self.assertIsInstance(new_image_processor, CustomImageProcessor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def test_from_pretrained_dynamic_image_processor_conflict(self):
class NewImageProcessor(CLIPImageProcessor):
is_local = True
try:
AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, NewImageProcessor)
# If remote code is not set, the default is to use local
image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
image_processor = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
)
self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
image_processor = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
)
self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
self.assertTrue(not hasattr(image_processor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
|
AutoImageProcessorTest
|
python
|
getsentry__sentry
|
src/sentry/integrations/api/serializers/models/external_actor.py
|
{
"start": 549,
"end": 711
}
|
class ____(ExternalActorResponseOptional):
id: str
provider: str
externalName: str
integrationId: str
@register(ExternalActor)
|
ExternalActorResponse
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_grpc/types.py
|
{
"start": 26388,
"end": 26964
}
|
class ____(
NamedTuple(
"_ShutdownServerResult",
[("success", bool), ("serializable_error_info", Optional[SerializableErrorInfo])],
)
):
def __new__(cls, success: bool, serializable_error_info: Optional[SerializableErrorInfo]):
return super().__new__(
cls,
success=check.bool_param(success, "success"),
serializable_error_info=check.opt_inst_param(
serializable_error_info, "serializable_error_info", SerializableErrorInfo
),
)
@whitelist_for_serdes
|
ShutdownServerResult
|
python
|
mlflow__mlflow
|
mlflow/genai/labeling/stores.py
|
{
"start": 876,
"end": 1438
}
|
class ____(MlflowException):
"""Exception thrown when building a labeling store with an unsupported URI"""
def __init__(self, unsupported_uri: str, supported_uri_schemes: list[str]) -> None:
message = (
f"Labeling functionality is unavailable; got unsupported URI"
f" '{unsupported_uri}' for labeling data storage. Supported URI schemes are:"
f" {supported_uri_schemes}."
)
super().__init__(message)
self.supported_uri_schemes = supported_uri_schemes
|
UnsupportedLabelingStoreURIException
|
python
|
google__pytype
|
pytype/tools/traces/source_test.py
|
{
"start": 89,
"end": 187
}
|
class ____:
def __init__(self, name, line):
self.name = name
self.line = line
|
_FakeOpcode
|
python
|
RaRe-Technologies__gensim
|
gensim/test/test_probability_estimation.py
|
{
"start": 504,
"end": 3052
}
|
class ____:
class ProbabilityEstimationBase(unittest.TestCase):
texts = [
['human', 'interface', 'computer'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees']
]
dictionary = None
def build_segmented_topics(self):
# Suppose the segmented topics from s_one_pre are:
token2id = self.dictionary.token2id
computer_id = token2id['computer']
system_id = token2id['system']
user_id = token2id['user']
graph_id = token2id['graph']
self.segmented_topics = [
[
(system_id, graph_id),
(computer_id, graph_id),
(computer_id, system_id)
], [
(computer_id, graph_id),
(user_id, graph_id),
(user_id, computer_id)
]
]
self.computer_id = computer_id
self.system_id = system_id
self.user_id = user_id
self.graph_id = graph_id
def setup_dictionary(self):
raise NotImplementedError
def setUp(self):
self.setup_dictionary()
self.corpus = [self.dictionary.doc2bow(text) for text in self.texts]
self.build_segmented_topics()
def test_p_boolean_document(self):
"""Test p_boolean_document()"""
accumulator = probability_estimation.p_boolean_document(
self.corpus, self.segmented_topics)
obtained = accumulator.index_to_dict()
expected = {
self.graph_id: {5},
self.user_id: {1, 3},
self.system_id: {1, 2},
self.computer_id: {0}
}
self.assertEqual(expected, obtained)
def test_p_boolean_sliding_window(self):
"""Test p_boolean_sliding_window()"""
# Test with window size as 2. window_id is zero indexed.
accumulator = probability_estimation.p_boolean_sliding_window(
self.texts, self.segmented_topics, self.dictionary, 2)
self.assertEqual(1, accumulator[self.computer_id])
self.assertEqual(3, accumulator[self.user_id])
self.assertEqual(1, accumulator[self.graph_id])
self.assertEqual(4, accumulator[self.system_id])
|
BaseTestCases
|
python
|
ansible__ansible
|
lib/ansible/_internal/_yaml/_constructor.py
|
{
"start": 1128,
"end": 7758
}
|
class ____(_BaseConstructor):
"""Ansible constructor which supports Ansible custom behavior such as `Origin` tagging, but no Ansible-specific YAML tags."""
name: t.Any # provided by the YAML parser, which retrieves it from the stream
def __init__(self, origin: Origin, trusted_as_template: bool) -> None:
if not origin.line_num:
origin = origin.replace(line_num=1)
self._origin = origin
self._trusted_as_template = trusted_as_template
self._duplicate_key_mode = C.config.get_config_value('DUPLICATE_YAML_DICT_KEY')
super().__init__()
@property
def trusted_as_template(self) -> bool:
return self._trusted_as_template
def construct_yaml_map(self, node):
data = self._node_position_info(node).tag({}) # always an ordered dictionary on py3.7+
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
# Delegate to built-in implementation to construct the mapping.
# This is done before checking for duplicates to leverage existing error checking on the input node.
mapping = super().construct_mapping(node, deep)
keys = set()
# Now that the node is known to be a valid mapping, handle any duplicate keys.
for key_node, _value_node in node.value:
if (key := self.construct_object(key_node, deep=deep)) in keys:
msg = f'Found duplicate mapping key {key!r}.'
if self._duplicate_key_mode == 'error':
raise AnsibleConstructorError(problem=msg, problem_mark=key_node.start_mark)
if self._duplicate_key_mode == 'warn':
display.warning(msg=msg, obj=key, help_text='Using last defined value only.')
keys.add(key)
return mapping
def construct_yaml_int(self, node):
value = super().construct_yaml_int(node)
return self._node_position_info(node).tag(value)
def construct_yaml_float(self, node):
value = super().construct_yaml_float(node)
return self._node_position_info(node).tag(value)
def construct_yaml_timestamp(self, node):
value = super().construct_yaml_timestamp(node)
return self._node_position_info(node).tag(value)
def construct_yaml_omap(self, node):
origin = self._node_position_info(node)
display.deprecated(
msg='Use of the YAML `!!omap` tag is deprecated.',
version='2.23',
obj=origin,
help_text='Use a standard mapping instead, as key order is always preserved.',
)
items = list(super().construct_yaml_omap(node))[0]
items = [origin.tag(item) for item in items]
yield origin.tag(items)
def construct_yaml_pairs(self, node):
origin = self._node_position_info(node)
display.deprecated(
msg='Use of the YAML `!!pairs` tag is deprecated.',
version='2.23',
obj=origin,
help_text='Use a standard mapping instead.',
)
items = list(super().construct_yaml_pairs(node))[0]
items = [origin.tag(item) for item in items]
yield origin.tag(items)
def construct_yaml_str(self, node: ScalarNode) -> str:
# Override the default string handling function
# to always return unicode objects
# DTFIX-FUTURE: is this to_text conversion still necessary under Py3?
value = to_text(self.construct_scalar(node))
tags: list[AnsibleDatatagBase] = [self._node_position_info(node)]
if self.trusted_as_template:
# NB: since we're not context aware, this will happily add trust to dictionary keys; this is actually necessary for
# certain backward compat scenarios, though might be accomplished in other ways if we wanted to avoid trusting keys in
# the general scenario
tags.append(_TRUSTED_AS_TEMPLATE)
return AnsibleTagHelper.tag(value, tags)
def construct_yaml_binary(self, node):
value = super().construct_yaml_binary(node)
return AnsibleTagHelper.tag(value, self._node_position_info(node))
def construct_yaml_set(self, node):
data = AnsibleTagHelper.tag(set(), self._node_position_info(node))
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_yaml_seq(self, node):
data = self._node_position_info(node).tag([])
yield data
data.extend(self.construct_sequence(node))
def _resolve_and_construct_object(self, node):
# use a copied node to avoid mutating existing node and tripping the recursion check in construct_object
copied_node = copy.copy(node)
# repeat implicit resolution process to determine the proper tag for the value in the unsafe node
copied_node.tag = t.cast(BaseResolver, self).resolve(type(node), node.value, (True, False))
# re-entrant call using the correct tag
# non-deferred construction of hierarchical nodes so the result is a fully realized object, and so our stateful unsafe propagation behavior works
return self.construct_object(copied_node, deep=True)
def _node_position_info(self, node) -> Origin:
# the line number where the previous token has ended (plus empty lines)
# Add one so that the first line is line 1 rather than line 0
return self._origin.replace(line_num=node.start_mark.line + self._origin.line_num, col_num=node.start_mark.column + 1)
@classmethod
def _register_constructors(cls) -> None:
constructors: dict[str, t.Callable] = {
'tag:yaml.org,2002:binary': cls.construct_yaml_binary,
'tag:yaml.org,2002:float': cls.construct_yaml_float,
'tag:yaml.org,2002:int': cls.construct_yaml_int,
'tag:yaml.org,2002:map': cls.construct_yaml_map,
'tag:yaml.org,2002:omap': cls.construct_yaml_omap,
'tag:yaml.org,2002:pairs': cls.construct_yaml_pairs,
'tag:yaml.org,2002:python/dict': cls.construct_yaml_map,
'tag:yaml.org,2002:python/unicode': cls.construct_yaml_str,
'tag:yaml.org,2002:seq': cls.construct_yaml_seq,
'tag:yaml.org,2002:set': cls.construct_yaml_set,
'tag:yaml.org,2002:str': cls.construct_yaml_str,
'tag:yaml.org,2002:timestamp': cls.construct_yaml_timestamp,
}
for tag, constructor in constructors.items():
cls.add_constructor(tag, constructor)
|
AnsibleInstrumentedConstructor
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-unequal-adjacent-groups-subsequence-ii.py
|
{
"start": 1734,
"end": 2409
}
|
class ____(object):
def getWordsInLongestSubsequence(self, n, words, groups):
"""
:type n: int
:type words: List[str]
:type groups: List[int]
:rtype: List[str]
"""
def check(s1, s2):
return len(s1) == len(s2) and sum(a != b for a, b in itertools.izip(s1, s2)) == 1
dp = [[] for _ in xrange(n)]
for i in xrange(n):
for j in xrange(i):
if groups[i] != groups[j] and check(words[j], words[i]) and len(dp[j]) > len(dp[i]):
dp[i] = dp[j]
dp[i] = dp[i]+[i]
return map(lambda x: words[x], max(dp, key=lambda x: len(x)))
|
Solution3
|
python
|
coleifer__peewee
|
tests/signals.py
|
{
"start": 226,
"end": 283
}
|
class ____(BaseSignalModel):
b = TextField(default='')
|
B
|
python
|
mlflow__mlflow
|
mlflow/utils/autologging_utils/client.py
|
{
"start": 1663,
"end": 3178
}
|
class ____:
"""
Represents a collection of operations on one or more MLflow Runs, such as run creation
or metric logging.
"""
def __init__(self, operation_futures):
self._operation_futures = operation_futures
def await_completion(self):
"""
Blocks on completion of the MLflow Run operations.
"""
failed_operations = []
for future in self._operation_futures:
try:
future.result()
except Exception as e:
failed_operations.append(e)
if len(failed_operations) > 0:
raise MlflowException(
message=(
"The following failures occurred while performing one or more logging"
f" operations: {failed_operations}"
)
)
# Define a threadpool for use across `MlflowAutologgingQueueingClient` instances to ensure that
# `MlflowAutologgingQueueingClient` instances can be pickled (ThreadPoolExecutor objects are not
# pickleable and therefore cannot be assigned as instance attributes).
#
# We limit the number of threads used for run operations, using at most 8 threads or 2 * the number
# of CPU cores available on the system (whichever is smaller)
num_cpus = os.cpu_count() or 4
num_logging_workers = min(num_cpus * 2, 8)
_AUTOLOGGING_QUEUEING_CLIENT_THREAD_POOL = ThreadPoolExecutor(
max_workers=num_logging_workers,
thread_name_prefix="MlflowAutologgingQueueingClient",
)
|
RunOperations
|
python
|
doocs__leetcode
|
solution/2200-2299/2276.Count Integers in Intervals/Solution.py
|
{
"start": 0,
"end": 269
}
|
class ____:
__slots__ = ("left", "right", "l", "r", "mid", "v", "add")
def __init__(self, l, r):
self.left = None
self.right = None
self.l = l
self.r = r
self.mid = (l + r) // 2
self.v = 0
self.add = 0
|
Node
|
python
|
PyCQA__pylint
|
tests/functional/r/regression_02/regression_4982.py
|
{
"start": 440,
"end": 541
}
|
class ____(subclass):
"""Create a class from the __subclasses__ attribute of another class"""
|
Another
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/chartsheet/test_initialisation.py
|
{
"start": 303,
"end": 848
}
|
class ____(unittest.TestCase):
"""
Test initialisation of the Chartsheet class and call a method.
"""
def setUp(self):
self.fh = StringIO()
self.chartsheet = Chartsheet()
self.chartsheet._set_filehandle(self.fh)
def test_xml_declaration(self):
"""Test Chartsheet xml_declaration()"""
self.chartsheet._xml_declaration()
exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestInitialisation
|
python
|
numpy__numpy
|
numpy/_core/tests/test_numerictypes.py
|
{
"start": 8023,
"end": 8215
}
|
class ____(ReadValuesPlain):
"""Check the values of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
|
TestReadValuesPlainMultiple
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/redefined_slots_in_subclass.py
|
{
"start": 172,
"end": 210
}
|
class ____(Grandparent):
pass
|
Parent
|
python
|
realpython__materials
|
hashtable/01_hashtable_prototype/06_get_the_key_value_pairs/hashtable.py
|
{
"start": 107,
"end": 1014
}
|
class ____:
def __init__(self, capacity):
self.pairs = capacity * [None]
def __len__(self):
return len(self.pairs)
def __delitem__(self, key):
if key in self:
self.pairs[self._index(key)] = None
else:
raise KeyError(key)
def __setitem__(self, key, value):
self.pairs[self._index(key)] = Pair(key, value)
def __getitem__(self, key):
pair = self.pairs[self._index(key)]
if pair is None:
raise KeyError(key)
return pair.value
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def _index(self, key):
return hash(key) % len(self)
|
HashTable
|
python
|
keras-team__keras
|
keras/src/ops/numpy.py
|
{
"start": 56685,
"end": 57579
}
|
class ____(Operation):
def call(self, x, y):
return backend.numpy.right_shift(x, y)
def compute_output_spec(self, x, y):
if isinstance(y, int):
dtype = x.dtype
else:
dtype = dtypes.result_type(x.dtype, y.dtype)
return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.right_shift", "keras.ops.numpy.right_shift"])
def right_shift(x, y):
"""Shift the bits of an integer to the right.
Bits are shifted to the right `y`. Because the internal representation of
numbers is in binary format, this operation is equivalent to dividing `x` by
`2**y`.
Args:
x: Input integer tensor.
y: Input integer tensor.
Returns:
Result tensor.
"""
if any_symbolic_tensors((x, y)):
return RightShift().symbolic_call(x, y)
return backend.numpy.right_shift(x, y)
|
RightShift
|
python
|
great-expectations__great_expectations
|
tests/integration/fluent/test_snowflake_datasource.py
|
{
"start": 2380,
"end": 5204
}
|
class ____:
"""Test column expectations for Snowflake datasources"""
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=TEST_TABLE_NAME.lower()),
],
data=pd.DataFrame({"unquoted_lower_col": ["test_value"]}),
)
def test_unquoted_lower_col(self, batch_for_datasource):
"""Test Snowflake column expectation for unquoted_lower_col"""
_run_column_expectation_test(batch_for_datasource, "snowflake", "unquoted_lower_col")
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=TEST_TABLE_NAME.lower()),
],
data=pd.DataFrame({"UNQUOTED_UPPER_COL": ["test_value"]}),
)
def test_unquoted_upper_col(self, batch_for_datasource):
"""Test Snowflake column expectation for unquoted_upper_col"""
_run_column_expectation_test(batch_for_datasource, "snowflake", "unquoted_upper_col")
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=TEST_TABLE_NAME.lower()),
],
data=pd.DataFrame({'"quoted_lower_col"': ["test_value"]}),
)
def test_quoted_lower_col(self, batch_for_datasource):
"""Test Snowflake column expectation for quoted_lower_col"""
_run_column_expectation_test(batch_for_datasource, "snowflake", '"quoted_lower_col"')
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=TEST_TABLE_NAME.lower()),
],
data=pd.DataFrame({'"QUOTED_UPPER_COL"': ["test_value"]}),
)
def test_quoted_upper_col(self, batch_for_datasource):
"""Test Snowflake column expectation for quoted_upper_col"""
_run_column_expectation_test(batch_for_datasource, "snowflake", '"QUOTED_UPPER_COL"')
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=TEST_TABLE_NAME.lower()),
],
data=pd.DataFrame({'"quotedMixed"': ["test_value"]}),
)
def test_quotedmixed(self, batch_for_datasource):
"""Test Snowflake column expectation for quotedMixed"""
_run_column_expectation_test(batch_for_datasource, "snowflake", '"quotedMixed"')
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=TEST_TABLE_NAME.lower()),
],
data=pd.DataFrame({'"quoted.w.dots"': ["test_value"]}),
)
def test_quoted_w_dots(self, batch_for_datasource):
"""Test Snowflake column expectation for quoted.w.dots"""
_run_column_expectation_test(batch_for_datasource, "snowflake", '"quoted.w.dots"')
|
TestSnowflakeColumnExpectations
|
python
|
encode__django-rest-framework
|
tests/test_fields.py
|
{
"start": 56033,
"end": 56690
}
|
class ____(FieldValues):
"""
Valid and invalid values for `TimeField`.
"""
valid_inputs = {
'13:00': datetime.time(13, 00),
datetime.time(13, 00): datetime.time(13, 00),
}
invalid_inputs = {
'abc': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
'99:99': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
}
outputs = {
datetime.time(13, 0): '13:00:00',
datetime.time(0, 0): '00:00:00',
'00:00:00': '00:00:00',
None: None,
'': None,
}
field = serializers.TimeField()
|
TestTimeField
|
python
|
django__django
|
tests/utils_tests/test_lazyobject.py
|
{
"start": 13655,
"end": 15552
}
|
class ____(TestCase):
"""
Regression test for pickling a SimpleLazyObject wrapping a model (#25389).
Also covers other classes with a custom __reduce__ method.
"""
def test_pickle_with_reduce(self):
"""
Test in a fairly synthetic setting.
"""
# Test every pickle protocol available
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
lazy_objs = [
SimpleLazyObject(lambda: BaseBaz()),
SimpleLazyObject(lambda: Baz(1)),
SimpleLazyObject(lambda: BazProxy(Baz(2))),
]
for obj in lazy_objs:
pickled = pickle.dumps(obj, protocol)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, obj)
self.assertEqual(unpickled.baz, "right")
def test_pickle_model(self):
"""
Test on an actual model, based on the report in #25426.
"""
category = Category.objects.create(name="thing1")
CategoryInfo.objects.create(category=category)
# Test every pickle protocol available
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
lazy_category = SimpleLazyObject(lambda: category)
# Test both if we accessed a field on the model and if we didn't.
lazy_category.categoryinfo
lazy_category_2 = SimpleLazyObject(lambda: category)
with warnings.catch_warnings(record=True) as recorded:
self.assertEqual(
pickle.loads(pickle.dumps(lazy_category, protocol)), category
)
self.assertEqual(
pickle.loads(pickle.dumps(lazy_category_2, protocol)), category
)
# Assert that there were no warnings.
self.assertEqual(len(recorded), 0)
|
SimpleLazyObjectPickleTestCase
|