| language (string, 1 class) | repo (string, 346 values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
python
|
django__django
|
django/utils/datastructures.py
|
{
"start": 10290,
"end": 10863
}
|
class ____:
"""
Wrap a dict, allowing deferred access to a sub-dict under a given key.
The value at ``deferred_key`` must itself be a dict. Accessing
``DeferredSubDict(parent_dict, deferred_key)[key]`` retrieves
``parent_dict[deferred_key][key]`` at access time, so updates to
the parent dict are reflected.
"""
def __init__(self, parent_dict, deferred_key):
self.parent_dict = parent_dict
self.deferred_key = deferred_key
def __getitem__(self, key):
return self.parent_dict[self.deferred_key][key]
|
DeferredSubDict
|
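A minimal usage sketch of the wrapper above (names come from the snippet and its target, DeferredSubDict; the dict contents are invented for illustration):
settings = {"databases": {"default": {"ENGINE": "sqlite3"}}}
sub = DeferredSubDict(settings, "databases")
print(sub["default"])                                  # {'ENGINE': 'sqlite3'}
settings["databases"]["replica"] = {"ENGINE": "postgres"}
print(sub["replica"])                                  # later additions to the parent dict are visible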
python
|
h5py__h5py
|
h5py/tests/test_dataset.py
|
{
"start": 24005,
"end": 24626
}
|
class ____(BaseDataset):
"""
Feature: Datasets created from an existing named type
"""
def test_named(self):
""" Named type object works and links the dataset to type """
name = make_name("type")
self.f[name] = np.dtype('f8')
dt = self.f[name]
dset = self.f.create_dataset(make_name("x"), (100,), dtype=dt)
self.assertEqual(dset.dtype, np.dtype('f8'))
self.assertEqual(dset.id.get_type(), dt.id)
self.assertTrue(dset.id.get_type().committed())
@ut.skipIf('gzip' not in h5py.filters.encode, "DEFLATE is not installed")
|
TestCreateNamedType
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/filters.py
|
{
"start": 5723,
"end": 6527
}
|
class ____(ModelFilterSet):
"""
Project list filter set for project list view.
This filter set enables list view sorting using a custom filter, and
provides search-as-you-type lookup filter as well.
"""
slug = FilteredModelChoiceFilter(
label=_("Project"),
empty_label=_("All projects"),
to_field_name="slug",
queryset_method="get_project_queryset",
method="get_project",
label_attribute="name",
)
sort = ProjectSortOrderingFilter(
field_name="sort",
label=_("Sort by"),
)
def get_project_queryset(self):
return Project.objects.for_user(user=self.request.user)
def get_project(self, queryset, field_name, project):
return queryset.filter(slug=project.slug)
|
ProjectListFilterSet
|
python
|
miyuchina__mistletoe
|
mistletoe/token.py
|
{
"start": 577,
"end": 2992
}
|
class ____:
"""
Base token class.
`Token` has two subclasses:
* `block_token.BlockToken`, for all block level tokens. A block level token
is text which occupies the entire horizontal width of the "page" and is
offset for the surrounding sibling block with line breaks.
* `span_token.SpanToken`, for all span-level (or inline-level) tokens.
A span-level token appears inside the flow of the text lines without any
surrounding line break.
Custom ``__repr__`` methods in subclasses: The default ``__repr__``
implementation outputs the number of child tokens (from the attribute
``children``) if applicable, and the ``content`` attribute if applicable.
If any additional attributes should be included in the ``__repr__`` output,
this can be specified by setting the class attribute ``repr_attributes``
to a tuple containing the attribute names to be output.
"""
repr_attributes = ()
def __repr__(self):
output = "<{}.{}".format(
self.__class__.__module__,
self.__class__.__name__
)
if self.children is not None:
count = len(self.children)
if count == 1:
output += " with 1 child"
else:
output += " with {} children".format(count)
if "content" in vars(self):
output += " content=" + _short_repr(self.content)
for attrname in self.repr_attributes:
attrvalue = getattr(self, attrname)
output += " {}={}".format(attrname, _short_repr(attrvalue))
output += " at {:#x}>".format(id(self))
return output
@property
def parent(self) -> Optional['Token']:
"""Returns the parent token, if there is any."""
return getattr(self, '_parent', None)
@property
def children(self) -> Optional[Iterable['Token']]:
"""
Returns the child (nested) tokens.
Returns `None` if the token is a leaf token.
"""
return getattr(self, '_children', None)
@children.setter
def children(self, value: Iterable['Token']):
""""
Sets new child (nested) tokens.
Passed tokens are iterated and their ``parent`` property is set to
this token.
"""
self._children = value
if value:
for child in value:
child._parent = self
|
Token
|
python
|
tqdm__tqdm
|
tqdm/rich.py
|
{
"start": 461,
"end": 1376
}
|
class ____(ProgressColumn):
"""Renders completed/total, e.g. '0.5/2.3 G'."""
def __init__(self, unit_scale=False, unit_divisor=1000):
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
super().__init__()
def render(self, task):
"""Calculate common unit for completed and total."""
completed = int(task.completed)
total = int(task.total)
if self.unit_scale:
unit, suffix = filesize.pick_unit_and_suffix(
total,
["", "K", "M", "G", "T", "P", "E", "Z", "Y"],
self.unit_divisor,
)
else:
unit, suffix = filesize.pick_unit_and_suffix(total, [""], 1)
precision = 0 if unit == 1 else 1
return Text(
f"{completed/unit:,.{precision}f}/{total/unit:,.{precision}f} {suffix}",
style="progress.download")
|
FractionColumn
|
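A hedged usage sketch for the column above (target name FractionColumn): it can be plugged into a rich Progress bar alongside rich's stock columns. The rich.progress imports are standard rich API; the byte counts are invented for illustration.
from rich.progress import Progress, BarColumn

with Progress(BarColumn(), FractionColumn(unit_scale=True)) as progress:
    task = progress.add_task("download", total=2_300_000_000)
    progress.update(task, completed=500_000_000)       # renders roughly "0.5/2.3 G"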
python
|
getsentry__sentry
|
tests/sentry/post_process_forwarder/test_post_process_forwarder.py
|
{
"start": 1696,
"end": 6250
}
|
class ____(TestCase):
def _get_producer(self, cluster_name: str) -> Producer:
conf = settings.KAFKA_CLUSTERS[cluster_name]["common"]
return Producer(conf)
def setUp(self) -> None:
super().setUp()
self.consumer_and_topic_suffix = uuid.uuid4().hex
self.events_topic = f"events-{self.consumer_and_topic_suffix}"
self.commit_log_topic = f"events-commit-{self.consumer_and_topic_suffix}"
self.override_settings_cm = override_settings(
KAFKA_TOPIC_OVERRIDES={
"events": self.events_topic,
"transactions": self.events_topic,
},
)
self.override_settings_cm.__enter__()
cluster_options = kafka_config.get_kafka_admin_cluster_options(
"default", {"allow.auto.create.topics": "true"}
)
self.admin_client = AdminClient(cluster_options)
wait_for_topics(self.admin_client, [self.events_topic, self.commit_log_topic])
def tearDown(self) -> None:
super().tearDown()
self.override_settings_cm.__exit__(None, None, None)
self.admin_client.delete_topics([self.events_topic, self.commit_log_topic])
metrics._metrics_backend = None
def get_test_stream_processor(
self, mode: str, consumer_group: str, synchronize_commit_group: str
) -> StreamProcessor[KafkaPayload]:
return get_stream_processor(
consumer_name="post-process-forwarder-errors",
consumer_args=[f"--mode={mode}"],
topic=self.events_topic,
synchronize_commit_log_topic=self.commit_log_topic,
synchronize_commit_group=synchronize_commit_group,
cluster=None,
group_id=consumer_group,
auto_offset_reset="earliest",
strict_offset_reset=False,
join_timeout=None,
max_poll_interval_ms=None,
enable_dlq=False,
healthcheck_file_path=None,
enforce_schema=True,
)
def run_post_process_forwarder_streaming_consumer(self, ppf_mode: str) -> None:
consumer_group = f"consumer-{self.consumer_and_topic_suffix}"
synchronize_commit_group = f"sync-consumer-{self.consumer_and_topic_suffix}"
events_producer = self._get_producer("default")
commit_log_producer = self._get_producer("default")
message = json.dumps(kafka_message_payload()).encode()
import sentry.consumers
importlib.reload(sentry.consumers)
processor = self.get_test_stream_processor(
mode=ppf_mode,
consumer_group=consumer_group,
synchronize_commit_group=synchronize_commit_group,
)
# produce message to the events topic
events_producer.produce(self.events_topic, message)
assert events_producer.flush(5) == 0, "events producer did not successfully flush queue"
# Move the committed offset forward for our synchronizing group.
commit_log_producer.produce(
self.commit_log_topic,
key=f"{self.events_topic}:0:{synchronize_commit_group}".encode(),
value=b'{"orig_message_ts": 123456, "offset": 1}',
)
assert (
commit_log_producer.flush(5) == 0
), "snuba-commit-log producer did not successfully flush queue"
with patch("sentry.eventstream.kafka.dispatch.dispatch_post_process_group_task") as mock:
# Run the loop for sometime
for _ in range(3):
processor._run_once()
time.sleep(1)
# Verify that the task gets called once
mock.assert_called_once_with(
event_id="fe0ee9a2bc3b415497bad68aaf70dc7f",
project_id=1,
group_id=43,
primary_hash="311ee66a5b8e697929804ceb1c456ffe",
is_new=False,
is_regression=None,
queue="post_process_errors",
is_new_group_environment=False,
group_states=None,
occurrence_id=None,
eventstream_type=EventStreamEventType.Error.value,
)
processor.signal_shutdown()
processor.run()
def test_multithreaded_post_process_forwarder(self) -> None:
self.run_post_process_forwarder_streaming_consumer(ppf_mode="multithreaded")
def test_multiprocess_post_process_forwarder(self) -> None:
self.run_post_process_forwarder_streaming_consumer(ppf_mode="multiprocess")
|
PostProcessForwarderTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/island-perimeter.py
|
{
"start": 51,
"end": 996
}
|
class ____(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
count, repeat = 0, 0
for i in xrange(len(grid)):
for j in xrange(len(grid[i])):
if grid[i][j] == 1:
count += 1
if i != 0 and grid[i - 1][j] == 1:
repeat += 1
if j != 0 and grid[i][j - 1] == 1:
repeat += 1
return 4*count - 2*repeat
# Since there are no lakes, every pair of neighbour cells with different values is part of the perimeter
# (more precisely, the edge between them is). So just count the differing pairs, both horizontally and vertically
# (for the latter I simply transpose the grid).
def islandPerimeter2(self, grid):
return sum(sum(map(operator.ne, [0] + row, row + [0])) for row in grid + map(list, zip(*grid)))
|
Solution
|
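A worked example of the counting argument above, restated in Python 3 (the snippet itself is Python 2, hence xrange). The grid below has 7 land cells and 6 shared edges, so the perimeter is 4*7 - 2*6 = 16:
grid = [[0, 1, 0, 0],
        [1, 1, 1, 0],
        [0, 1, 0, 0],
        [1, 1, 0, 0]]
count = sum(cell for row in grid for cell in row)                        # 7 land cells
repeat = sum(grid[i][j] and grid[i - 1][j]
             for i in range(1, len(grid)) for j in range(len(grid[0])))  # vertical neighbours
repeat += sum(grid[i][j] and grid[i][j - 1]
              for i in range(len(grid)) for j in range(1, len(grid[0]))) # horizontal neighbours
print(4 * count - 2 * repeat)                                            # 16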
python
|
pytest-dev__pytest-django
|
pytest_django/plugin.py
|
{
"start": 25051,
"end": 25454
}
|
class ____:
def __init__(self, db_blocker: DjangoDbBlocker) -> None:
self._db_blocker = db_blocker
def __enter__(self) -> None:
pass
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: types.TracebackType | None,
) -> None:
self._db_blocker.restore()
|
_DatabaseBlockerContextManager
|
python
|
kamyu104__LeetCode-Solutions
|
Python/flatten-binary-tree-to-linked-list.py
|
{
"start": 661,
"end": 998
}
|
class ____(object):
list_head = None
# @param root, a tree node
# @return nothing, do it in place
def flatten(self, root):
if root:
self.flatten(root.right)
self.flatten(root.left)
root.right = self.list_head
root.left = None
self.list_head = root
|
Solution2
|
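A small usage sketch for the reversed-preorder flattening above (target name Solution2). TreeNode is the standard LeetCode definition, re-declared here only for illustration:
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(1, TreeNode(2, TreeNode(3), TreeNode(4)), TreeNode(5, right=TreeNode(6)))
Solution2().flatten(root)
node, vals = root, []
while node:                      # walk the right pointers of the flattened list
    vals.append(node.val)
    node = node.right
print(vals)                      # [1, 2, 3, 4, 5, 6] -- preorder of the original tree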
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_slugs.py
|
{
"start": 559,
"end": 1157
}
|
class ____(util.MdCase):
"""Test Unicode encoded slugs."""
extension = ['markdown.extensions.toc']
extension_configs = {
'markdown.extensions.toc': {
"slugify": slugs.slugify(case="lower", percent_encode=True)
}
}
def test_slug(self):
"""Test the slug output."""
self.check_markdown(
r'# Testing unicode-slugs_headers ±♠Ωℑ with encoding',
r'<h1 id="testing-unicode-slugs_headers-%CF%89%E2%84%91-with-encoding">'
'Testing unicode-slugs_headers ±♠Ωℑ with encoding</h1>'
)
|
TestUslugifyEncoded
|
python
|
django__django
|
tests/annotations/models.py
|
{
"start": 1052,
"end": 1129
}
|
class ____(Store):
chain = models.CharField(max_length=255)
|
DepartmentStore
|
python
|
huggingface__transformers
|
tests/repo_utils/test_tests_fetcher.py
|
{
"start": 2116,
"end": 7514
}
|
class ____:
'''
This is the docstring.
'''
This is the code. It has been updated
"""
def create_tmp_repo(tmp_dir, models=None):
"""
Creates a repository in a temporary directory mimicking the structure of Transformers. Uses the list of models
provided (which defaults to just `["bert"]`).
"""
tmp_dir = Path(tmp_dir)
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
tmp_dir.mkdir(exist_ok=True)
repo = Repo.init(tmp_dir)
if models is None:
models = ["bert"]
class_names = [model[0].upper() + model[1:] for model in models]
transformers_dir = tmp_dir / "src" / "transformers"
transformers_dir.mkdir(parents=True, exist_ok=True)
with open(transformers_dir / "__init__.py", "w") as f:
init_lines = ["from .utils import cached_file, is_torch_available"]
init_lines.extend(
[f"from .models.{model} import {cls}Config, {cls}Model" for model, cls in zip(models, class_names)]
)
f.write("\n".join(init_lines) + "\n")
with open(transformers_dir / "configuration_utils.py", "w") as f:
f.write("from .utils import cached_file\n\ncode")
with open(transformers_dir / "modeling_utils.py", "w") as f:
f.write("from .utils import cached_file\n\ncode")
utils_dir = tmp_dir / "src" / "transformers" / "utils"
utils_dir.mkdir(exist_ok=True)
with open(utils_dir / "__init__.py", "w") as f:
f.write("from .hub import cached_file\nfrom .imports import is_torch_available\n")
with open(utils_dir / "hub.py", "w") as f:
f.write("import huggingface_hub\n\ncode")
with open(utils_dir / "imports.py", "w") as f:
f.write("code")
model_dir = tmp_dir / "src" / "transformers" / "models"
model_dir.mkdir(parents=True, exist_ok=True)
with open(model_dir / "__init__.py", "w") as f:
f.write("\n".join([f"import {model}" for model in models]))
for model, cls in zip(models, class_names):
model_dir = tmp_dir / "src" / "transformers" / "models" / model
model_dir.mkdir(parents=True, exist_ok=True)
with open(model_dir / "__init__.py", "w") as f:
f.write(f"from .configuration_{model} import {cls}Config\nfrom .modeling_{model} import {cls}Model\n")
with open(model_dir / f"configuration_{model}.py", "w") as f:
f.write("from ...configuration_utils import PreTrainedConfig\ncode")
with open(model_dir / f"modeling_{model}.py", "w") as f:
modeling_code = BERT_MODEL_FILE.replace("bert", model).replace("Bert", cls)
f.write(modeling_code)
test_dir = tmp_dir / "tests"
test_dir.mkdir(exist_ok=True)
with open(test_dir / "test_modeling_common.py", "w") as f:
f.write("from transformers.modeling_utils import PreTrainedModel\ncode")
for model, cls in zip(models, class_names):
test_model_dir = test_dir / "models" / model
test_model_dir.mkdir(parents=True, exist_ok=True)
(test_model_dir / "__init__.py").touch()
with open(test_model_dir / f"test_modeling_{model}.py", "w") as f:
f.write(
f"from transformers import {cls}Config, {cls}Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode"
)
example_dir = tmp_dir / "examples"
example_dir.mkdir(exist_ok=True)
framework_dir = example_dir / "pytorch"
framework_dir.mkdir(exist_ok=True)
with open(framework_dir / "test_pytorch_examples.py", "w") as f:
f.write("""test_args = "run_glue.py"\n""")
glue_dir = framework_dir / "text-classification"
glue_dir.mkdir(exist_ok=True)
with open(glue_dir / "run_glue.py", "w") as f:
f.write("from transformers import BertModel\n\ncode")
repo.index.add(["examples", "src", "tests"])
repo.index.commit("Initial commit")
repo.create_head("main")
repo.head.reference = repo.refs.main
repo.delete_head("master")
return repo
@contextmanager
def patch_transformer_repo_path(new_folder):
"""
Temporarily patches the variables defined in `tests_fetcher` to use a different location for the repo.
"""
old_repo_path = tests_fetcher.PATH_TO_REPO
tests_fetcher.PATH_TO_REPO = Path(new_folder).resolve()
tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples"
tests_fetcher.PATH_TO_TRANSFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers"
tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests"
try:
yield
finally:
tests_fetcher.PATH_TO_REPO = old_repo_path
tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples"
tests_fetcher.PATH_TO_TRANSFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers"
tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests"
def commit_changes(filenames, contents, repo, commit_message="Commit"):
"""
Commit new `contents` to `filenames` inside a given `repo`.
"""
if not isinstance(filenames, list):
filenames = [filenames]
if not isinstance(contents, list):
contents = [contents]
folder = Path(repo.working_dir)
for filename, content in zip(filenames, contents):
with open(folder / filename, "w") as f:
f.write(content)
repo.index.add(filenames)
commit = repo.index.commit(commit_message)
return commit.hexsha
|
BertModel
|
python
|
joke2k__faker
|
tests/providers/test_internet.py
|
{
"start": 31843,
"end": 32059
}
|
class ____(TestFilPh):
"""Test tl_PH internet provider methods"""
def test_slug(self, faker):
num_of_samples = 100
for _ in range(num_of_samples):
assert faker.slug() != ""
|
TestTlPh
|
python
|
ipython__ipython
|
IPython/core/prefilter.py
|
{
"start": 21656,
"end": 22287
}
|
class ____(PrefilterHandler):
handler_name = Unicode('magic')
esc_strings = List([ESC_MAGIC])
def handle(self, line_info):
"""Execute magic functions."""
ifun = line_info.ifun
the_rest = line_info.the_rest
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_arg_s = ifun + " " + the_rest
t_magic_name, _, t_magic_arg_s = t_arg_s.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
cmd = '%sget_ipython().run_line_magic(%r, %r)' % (line_info.pre_whitespace, t_magic_name, t_magic_arg_s)
return cmd
|
MagicHandler
|
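A runnable sketch of the rewrite that handle() performs, with the LineInfo fields mocked as plain strings (the real object is IPython-internal):
ifun, the_rest, pre_whitespace = "%timeit", "sorted(data)", ""
t_arg_s = ifun + " " + the_rest
t_magic_name, _, t_magic_arg_s = t_arg_s.partition(' ')
t_magic_name = t_magic_name.lstrip('%')                  # ESC_MAGIC is '%'
print('%sget_ipython().run_line_magic(%r, %r)' % (pre_whitespace, t_magic_name, t_magic_arg_s))
# -> get_ipython().run_line_magic('timeit', 'sorted(data)')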
python
|
google__pytype
|
pytype/pyi/entire_file_parser_test.py
|
{
"start": 133,
"end": 396
}
|
class ____(parser_test_base.ParserTestBase):
def test_builtins(self):
_, builtins = builtin_stubs.GetPredefinedFile("builtins", "builtins")
self.check(builtins, expected=parser_test_base.IGNORE)
if __name__ == "__main__":
unittest.main()
|
EntireFileTest
|
python
|
walkccc__LeetCode
|
solutions/375. Guess Number Higher or Lower II/375-2.py
|
{
"start": 0,
"end": 426
}
|
class ____:
def getMoneyAmount(self, n: int) -> int:
# dp[i][j] := the minimum money you need to guarantee a win of picking i..j
dp = [[0] * (n + 2) for _ in range(n + 2)]
for d in range(1, n + 1):
for i in range(1, n - d + 1):
j = i + d
dp[i][j] = math.inf
for k in range(i, j + 1):
dp[i][j] = min(dp[i][j], max(dp[i][k - 1], dp[k + 1][j]) + k)
return dp[1][n]
|
Solution
|
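Usage sketch for the interval DP above (target name Solution). The snippet relies on math.inf, so math must be imported alongside it; n = 10 gives the classic answer of 16:
import math   # required by the snippet above for math.inf

print(Solution().getMoneyAmount(3))    # 2  (guess 2 first)
print(Solution().getMoneyAmount(10))   # 16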
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/_shape.py
|
{
"start": 235,
"end": 48186
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.shape"
_valid_props = {
"editable",
"fillcolor",
"fillrule",
"label",
"layer",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"name",
"opacity",
"path",
"showlegend",
"templateitemname",
"type",
"visible",
"x0",
"x0shift",
"x1",
"x1shift",
"xanchor",
"xref",
"xsizemode",
"y0",
"y0shift",
"y1",
"y1shift",
"yanchor",
"yref",
"ysizemode",
}
@property
def editable(self):
"""
Determines whether the shape could be activated for edit or
not. Has no effect when the older editable shapes mode is
enabled via `config.editable` or `config.edits.shapePosition`.
The 'editable' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["editable"]
@editable.setter
def editable(self, val):
self["editable"] = val
@property
def fillcolor(self):
"""
Sets the color filling the shape's interior. Only applies to
closed shapes.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
@property
def fillrule(self):
"""
Determines which regions of complex paths constitute the
interior. For more info please visit
https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
The 'fillrule' property is an enumeration that may be specified as:
- One of the following enumeration values:
['evenodd', 'nonzero']
Returns
-------
Any
"""
return self["fillrule"]
@fillrule.setter
def fillrule(self, val):
self["fillrule"] = val
@property
def label(self):
"""
The 'label' property is an instance of Label
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.shape.Label`
- A dict of string/value properties that will be passed
to the Label constructor
Returns
-------
plotly.graph_objs.layout.shape.Label
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
@property
def layer(self):
"""
Specifies whether shapes are drawn below gridlines ("below"),
between gridlines and traces ("between") or above traces
("above").
The 'layer' property is an enumeration that may be specified as:
- One of the following enumeration values:
['below', 'above', 'between']
Returns
-------
Any
"""
return self["layer"]
@layer.setter
def layer(self, val):
self["layer"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this shape in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this shape. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.shape.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.layout.shape.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this shape. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
shape.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.shape.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.layout.shape.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the shape.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def path(self):
"""
For `type` "path" - a valid SVG path with the pixel values
replaced by data values in `xsizemode`/`ysizemode` being
"scaled" and taken unmodified as pixels relative to `xanchor`
and `yanchor` in case of "pixel" size mode. There are a few
restrictions / quirks only absolute instructions, not relative.
So the allowed segments are: M, L, H, V, Q, C, T, S, and Z arcs
(A) are not allowed because radius rx and ry are relative. In
the future we could consider supporting relative commands, but
we would have to decide on how to handle date and log axes.
Note that even as is, Q and C Bezier paths that are smooth on
linear axes may not be smooth on log, and vice versa. no
chained "polybezier" commands - specify the segment type for
each one. On category axes, values are numbers scaled to the
serial numbers of categories because using the categories
themselves there would be no way to describe fractional
positions On data axes: because space and T are both normal
components of path strings, we can't use either to separate
date from time parts. Therefore we'll use underscore for this
purpose: 2015-02-21_13:45:56.789
The 'path' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["path"]
@path.setter
def path(self, val):
self["path"] = val
@property
def showlegend(self):
"""
Determines whether or not this shape is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def type(self):
"""
Specifies the shape type to be drawn. If "line", a line is
drawn from (`x0`,`y0`) to (`x1`,`y1`) with respect to the axes'
sizing mode. If "circle", a circle is drawn from
((`x0`+`x1`)/2, (`y0`+`y1`)/2)) with radius (|(`x0`+`x1`)/2 -
`x0`|, |(`y0`+`y1`)/2 -`y0`)|) with respect to the axes' sizing
mode. If "rect", a rectangle is drawn linking (`x0`,`y0`),
(`x1`,`y0`), (`x1`,`y1`), (`x0`,`y1`), (`x0`,`y0`) with respect
to the axes' sizing mode. If "path", draw a custom SVG path
using `path`. with respect to the axes' sizing mode.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['circle', 'rect', 'path', 'line']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def visible(self):
"""
Determines whether or not this shape is visible. If
"legendonly", the shape is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def x0(self):
"""
Sets the shape's starting x position. See `type` and
`xsizemode` for more info.
The 'x0' property accepts values of any type
Returns
-------
Any
"""
return self["x0"]
@x0.setter
def x0(self, val):
self["x0"] = val
@property
def x0shift(self):
"""
Shifts `x0` away from the center of the category when `xref` is
a "category" or "multicategory" axis. -0.5 corresponds to the
start of the category and 0.5 corresponds to the end of the
category.
The 'x0shift' property is a number and may be specified as:
- An int or float in the interval [-1, 1]
Returns
-------
int|float
"""
return self["x0shift"]
@x0shift.setter
def x0shift(self, val):
self["x0shift"] = val
@property
def x1(self):
"""
Sets the shape's end x position. See `type` and `xsizemode` for
more info.
The 'x1' property accepts values of any type
Returns
-------
Any
"""
return self["x1"]
@x1.setter
def x1(self, val):
self["x1"] = val
@property
def x1shift(self):
"""
Shifts `x1` away from the center of the category when `xref` is
a "category" or "multicategory" axis. -0.5 corresponds to the
start of the category and 0.5 corresponds to the end of the
category.
The 'x1shift' property is a number and may be specified as:
- An int or float in the interval [-1, 1]
Returns
-------
int|float
"""
return self["x1shift"]
@x1shift.setter
def x1shift(self, val):
self["x1shift"] = val
@property
def xanchor(self):
"""
Only relevant in conjunction with `xsizemode` set to "pixel".
Specifies the anchor point on the x axis to which `x0`, `x1`
and x coordinates within `path` are relative to. E.g. useful to
attach a pixel sized shape to a certain data value. No effect
when `xsizemode` not set to "pixel".
The 'xanchor' property accepts values of any type
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xref(self):
"""
Sets the shape's x coordinate axis. If set to a x axis id (e.g.
"x" or "x2"), the `x` position refers to a x coordinate. If set
to "paper", the `x` position refers to the distance from the
left of the plotting area in normalized coordinates where 0 (1)
corresponds to the left (right). If set to a x axis ID followed
by "domain" (separated by a space), the position behaves like
for "paper", but refers to the distance in fractions of the
domain length from the left of the domain of that axis: e.g.,
*x2 domain* refers to the domain of the second x axis and a x
position of 0.5 refers to the point between the left and the
right of the domain of the second x axis.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def xsizemode(self):
"""
Sets the shapes's sizing mode along the x axis. If set to
"scaled", `x0`, `x1` and x coordinates within `path` refer to
data values on the x axis or a fraction of the plot area's
width (`xref` set to "paper"). If set to "pixel", `xanchor`
specifies the x position in terms of data or plot fraction but
`x0`, `x1` and x coordinates within `path` are pixels relative
to `xanchor`. This way, the shape can have a fixed width while
maintaining a position relative to data or plot fraction.
The 'xsizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['scaled', 'pixel']
Returns
-------
Any
"""
return self["xsizemode"]
@xsizemode.setter
def xsizemode(self, val):
self["xsizemode"] = val
@property
def y0(self):
"""
Sets the shape's starting y position. See `type` and
`ysizemode` for more info.
The 'y0' property accepts values of any type
Returns
-------
Any
"""
return self["y0"]
@y0.setter
def y0(self, val):
self["y0"] = val
@property
def y0shift(self):
"""
Shifts `y0` away from the center of the category when `yref` is
a "category" or "multicategory" axis. -0.5 corresponds to the
start of the category and 0.5 corresponds to the end of the
category.
The 'y0shift' property is a number and may be specified as:
- An int or float in the interval [-1, 1]
Returns
-------
int|float
"""
return self["y0shift"]
@y0shift.setter
def y0shift(self, val):
self["y0shift"] = val
@property
def y1(self):
"""
Sets the shape's end y position. See `type` and `ysizemode` for
more info.
The 'y1' property accepts values of any type
Returns
-------
Any
"""
return self["y1"]
@y1.setter
def y1(self, val):
self["y1"] = val
@property
def y1shift(self):
"""
Shifts `y1` away from the center of the category when `yref` is
a "category" or "multicategory" axis. -0.5 corresponds to the
start of the category and 0.5 corresponds to the end of the
category.
The 'y1shift' property is a number and may be specified as:
- An int or float in the interval [-1, 1]
Returns
-------
int|float
"""
return self["y1shift"]
@y1shift.setter
def y1shift(self, val):
self["y1shift"] = val
@property
def yanchor(self):
"""
Only relevant in conjunction with `ysizemode` set to "pixel".
Specifies the anchor point on the y axis to which `y0`, `y1`
and y coordinates within `path` are relative to. E.g. useful to
attach a pixel sized shape to a certain data value. No effect
when `ysizemode` not set to "pixel".
The 'yanchor' property accepts values of any type
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def yref(self):
"""
Sets the shape's y coordinate axis. If set to a y axis id (e.g.
"y" or "y2"), the `y` position refers to a y coordinate. If set
to "paper", the `y` position refers to the distance from the
bottom of the plotting area in normalized coordinates where 0
(1) corresponds to the bottom (top). If set to a y axis ID
followed by "domain" (separated by a space), the position
behaves like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the domain of
that axis: e.g., *y2 domain* refers to the domain of the second
y axis and a y position of 0.5 refers to the point between the
bottom and the top of the domain of the second y axis.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^y([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
@property
def ysizemode(self):
"""
Sets the shapes's sizing mode along the y axis. If set to
"scaled", `y0`, `y1` and y coordinates within `path` refer to
data values on the y axis or a fraction of the plot area's
height (`yref` set to "paper"). If set to "pixel", `yanchor`
specifies the y position in terms of data or plot fraction but
`y0`, `y1` and y coordinates within `path` are pixels relative
to `yanchor`. This way, the shape can have a fixed height while
maintaining a position relative to data or plot fraction.
The 'ysizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['scaled', 'pixel']
Returns
-------
Any
"""
return self["ysizemode"]
@ysizemode.setter
def ysizemode(self, val):
self["ysizemode"] = val
@property
def _prop_descriptions(self):
return """\
editable
Determines whether the shape could be activated for
edit or not. Has no effect when the older editable
shapes mode is enabled via `config.editable` or
`config.edits.shapePosition`.
fillcolor
Sets the color filling the shape's interior. Only
applies to closed shapes.
fillrule
Determines which regions of complex paths constitute
the interior. For more info please visit
https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
label
:class:`plotly.graph_objects.layout.shape.Label`
instance or dict with compatible properties
layer
Specifies whether shapes are drawn below gridlines
("below"), between gridlines and traces ("between") or
above traces ("above").
legend
Sets the reference to a legend to show this shape in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this shape. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.layout.shape.Legendgroupti
tle` instance or dict with compatible properties
legendrank
Sets the legend rank for this shape. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this shape.
line
:class:`plotly.graph_objects.layout.shape.Line`
instance or dict with compatible properties
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the shape.
path
For `type` "path" - a valid SVG path with the pixel
values replaced by data values in
`xsizemode`/`ysizemode` being "scaled" and taken
unmodified as pixels relative to `xanchor` and
`yanchor` in case of "pixel" size mode. There are a few
restrictions / quirks only absolute instructions, not
relative. So the allowed segments are: M, L, H, V, Q,
C, T, S, and Z arcs (A) are not allowed because radius
rx and ry are relative. In the future we could consider
supporting relative commands, but we would have to
decide on how to handle date and log axes. Note that
even as is, Q and C Bezier paths that are smooth on
linear axes may not be smooth on log, and vice versa.
no chained "polybezier" commands - specify the segment
type for each one. On category axes, values are numbers
scaled to the serial numbers of categories because
using the categories themselves there would be no way
to describe fractional positions On data axes: because
space and T are both normal components of path strings,
we can't use either to separate date from time parts.
Therefore we'll use underscore for this purpose:
2015-02-21_13:45:56.789
showlegend
Determines whether or not this shape is shown in the
legend.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Specifies the shape type to be drawn. If "line", a line
is drawn from (`x0`,`y0`) to (`x1`,`y1`) with respect
to the axes' sizing mode. If "circle", a circle is
drawn from ((`x0`+`x1`)/2, (`y0`+`y1`)/2)) with radius
(|(`x0`+`x1`)/2 - `x0`|, |(`y0`+`y1`)/2 -`y0`)|) with
respect to the axes' sizing mode. If "rect", a
rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
(`x1`,`y1`), (`x0`,`y1`), (`x0`,`y0`) with respect to
the axes' sizing mode. If "path", draw a custom SVG
path using `path`. with respect to the axes' sizing
mode.
visible
Determines whether or not this shape is visible. If
"legendonly", the shape is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x0
Sets the shape's starting x position. See `type` and
`xsizemode` for more info.
x0shift
Shifts `x0` away from the center of the category when
`xref` is a "category" or "multicategory" axis. -0.5
corresponds to the start of the category and 0.5
corresponds to the end of the category.
x1
Sets the shape's end x position. See `type` and
`xsizemode` for more info.
x1shift
Shifts `x1` away from the center of the category when
`xref` is a "category" or "multicategory" axis. -0.5
corresponds to the start of the category and 0.5
corresponds to the end of the category.
xanchor
Only relevant in conjunction with `xsizemode` set to
"pixel". Specifies the anchor point on the x axis to
which `x0`, `x1` and x coordinates within `path` are
relative to. E.g. useful to attach a pixel sized shape
to a certain data value. No effect when `xsizemode` not
set to "pixel".
xref
Sets the shape's x coordinate axis. If set to a x axis
id (e.g. "x" or "x2"), the `x` position refers to a x
coordinate. If set to "paper", the `x` position refers
to the distance from the left of the plotting area in
normalized coordinates where 0 (1) corresponds to the
left (right). If set to a x axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the left of the
domain of that axis: e.g., *x2 domain* refers to the
domain of the second x axis and a x position of 0.5
refers to the point between the left and the right of
the domain of the second x axis.
xsizemode
Sets the shapes's sizing mode along the x axis. If set
to "scaled", `x0`, `x1` and x coordinates within `path`
refer to data values on the x axis or a fraction of the
plot area's width (`xref` set to "paper"). If set to
"pixel", `xanchor` specifies the x position in terms of
data or plot fraction but `x0`, `x1` and x coordinates
within `path` are pixels relative to `xanchor`. This
way, the shape can have a fixed width while maintaining
a position relative to data or plot fraction.
y0
Sets the shape's starting y position. See `type` and
`ysizemode` for more info.
y0shift
Shifts `y0` away from the center of the category when
`yref` is a "category" or "multicategory" axis. -0.5
corresponds to the start of the category and 0.5
corresponds to the end of the category.
y1
Sets the shape's end y position. See `type` and
`ysizemode` for more info.
y1shift
Shifts `y1` away from the center of the category when
`yref` is a "category" or "multicategory" axis. -0.5
corresponds to the start of the category and 0.5
corresponds to the end of the category.
yanchor
Only relevant in conjunction with `ysizemode` set to
"pixel". Specifies the anchor point on the y axis to
which `y0`, `y1` and y coordinates within `path` are
relative to. E.g. useful to attach a pixel sized shape
to a certain data value. No effect when `ysizemode` not
set to "pixel".
yref
Sets the shape's y coordinate axis. If set to a y axis
id (e.g. "y" or "y2"), the `y` position refers to a y
coordinate. If set to "paper", the `y` position refers
to the distance from the bottom of the plotting area in
normalized coordinates where 0 (1) corresponds to the
bottom (top). If set to a y axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the
domain of that axis: e.g., *y2 domain* refers to the
domain of the second y axis and a y position of 0.5
refers to the point between the bottom and the top of
the domain of the second y axis.
ysizemode
Sets the shapes's sizing mode along the y axis. If set
to "scaled", `y0`, `y1` and y coordinates within `path`
refer to data values on the y axis or a fraction of the
plot area's height (`yref` set to "paper"). If set to
"pixel", `yanchor` specifies the y position in terms of
data or plot fraction but `y0`, `y1` and y coordinates
within `path` are pixels relative to `yanchor`. This
way, the shape can have a fixed height while
maintaining a position relative to data or plot
fraction.
"""
def __init__(
self,
arg=None,
editable=None,
fillcolor=None,
fillrule=None,
label=None,
layer=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
name=None,
opacity=None,
path=None,
showlegend=None,
templateitemname=None,
type=None,
visible=None,
x0=None,
x0shift=None,
x1=None,
x1shift=None,
xanchor=None,
xref=None,
xsizemode=None,
y0=None,
y0shift=None,
y1=None,
y1shift=None,
yanchor=None,
yref=None,
ysizemode=None,
**kwargs,
):
"""
Construct a new Shape object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.Shape`
editable
Determines whether the shape could be activated for
edit or not. Has no effect when the older editable
shapes mode is enabled via `config.editable` or
`config.edits.shapePosition`.
fillcolor
Sets the color filling the shape's interior. Only
applies to closed shapes.
fillrule
Determines which regions of complex paths constitute
the interior. For more info please visit
https://developer.mozilla.org/en-
US/docs/Web/SVG/Attribute/fill-rule
label
:class:`plotly.graph_objects.layout.shape.Label`
instance or dict with compatible properties
layer
Specifies whether shapes are drawn below gridlines
("below"), between gridlines and traces ("between") or
above traces ("above").
legend
Sets the reference to a legend to show this shape in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this shape. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.layout.shape.Legendgroupti
tle` instance or dict with compatible properties
legendrank
Sets the legend rank for this shape. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this shape.
line
:class:`plotly.graph_objects.layout.shape.Line`
instance or dict with compatible properties
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the shape.
path
For `type` "path" - a valid SVG path with the pixel
values replaced by data values in
`xsizemode`/`ysizemode` being "scaled" and taken
unmodified as pixels relative to `xanchor` and
`yanchor` in case of "pixel" size mode. There are a few
restrictions / quirks only absolute instructions, not
relative. So the allowed segments are: M, L, H, V, Q,
C, T, S, and Z arcs (A) are not allowed because radius
rx and ry are relative. In the future we could consider
supporting relative commands, but we would have to
decide on how to handle date and log axes. Note that
even as is, Q and C Bezier paths that are smooth on
linear axes may not be smooth on log, and vice versa.
no chained "polybezier" commands - specify the segment
type for each one. On category axes, values are numbers
scaled to the serial numbers of categories because
using the categories themselves there would be no way
to describe fractional positions On data axes: because
space and T are both normal components of path strings,
we can't use either to separate date from time parts.
Therefore we'll use underscore for this purpose:
2015-02-21_13:45:56.789
showlegend
Determines whether or not this shape is shown in the
legend.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Specifies the shape type to be drawn. If "line", a line
is drawn from (`x0`,`y0`) to (`x1`,`y1`) with respect
to the axes' sizing mode. If "circle", a circle is
drawn from ((`x0`+`x1`)/2, (`y0`+`y1`)/2)) with radius
(|(`x0`+`x1`)/2 - `x0`|, |(`y0`+`y1`)/2 -`y0`)|) with
respect to the axes' sizing mode. If "rect", a
rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
(`x1`,`y1`), (`x0`,`y1`), (`x0`,`y0`) with respect to
the axes' sizing mode. If "path", draw a custom SVG
path using `path`. with respect to the axes' sizing
mode.
visible
Determines whether or not this shape is visible. If
"legendonly", the shape is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x0
Sets the shape's starting x position. See `type` and
`xsizemode` for more info.
x0shift
Shifts `x0` away from the center of the category when
`xref` is a "category" or "multicategory" axis. -0.5
corresponds to the start of the category and 0.5
corresponds to the end of the category.
x1
Sets the shape's end x position. See `type` and
`xsizemode` for more info.
x1shift
Shifts `x1` away from the center of the category when
`xref` is a "category" or "multicategory" axis. -0.5
corresponds to the start of the category and 0.5
corresponds to the end of the category.
xanchor
Only relevant in conjunction with `xsizemode` set to
"pixel". Specifies the anchor point on the x axis to
which `x0`, `x1` and x coordinates within `path` are
relative to. E.g. useful to attach a pixel sized shape
to a certain data value. No effect when `xsizemode` not
set to "pixel".
xref
Sets the shape's x coordinate axis. If set to a x axis
id (e.g. "x" or "x2"), the `x` position refers to a x
coordinate. If set to "paper", the `x` position refers
to the distance from the left of the plotting area in
normalized coordinates where 0 (1) corresponds to the
left (right). If set to a x axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the left of the
domain of that axis: e.g., *x2 domain* refers to the
domain of the second x axis and a x position of 0.5
refers to the point between the left and the right of
the domain of the second x axis.
xsizemode
Sets the shapes's sizing mode along the x axis. If set
to "scaled", `x0`, `x1` and x coordinates within `path`
refer to data values on the x axis or a fraction of the
plot area's width (`xref` set to "paper"). If set to
"pixel", `xanchor` specifies the x position in terms of
data or plot fraction but `x0`, `x1` and x coordinates
within `path` are pixels relative to `xanchor`. This
way, the shape can have a fixed width while maintaining
a position relative to data or plot fraction.
y0
Sets the shape's starting y position. See `type` and
`ysizemode` for more info.
y0shift
Shifts `y0` away from the center of the category when
`yref` is a "category" or "multicategory" axis. -0.5
corresponds to the start of the category and 0.5
corresponds to the end of the category.
y1
Sets the shape's end y position. See `type` and
`ysizemode` for more info.
y1shift
Shifts `y1` away from the center of the category when
`yref` is a "category" or "multicategory" axis. -0.5
corresponds to the start of the category and 0.5
corresponds to the end of the category.
yanchor
Only relevant in conjunction with `ysizemode` set to
"pixel". Specifies the anchor point on the y axis to
which `y0`, `y1` and y coordinates within `path` are
relative to. E.g. useful to attach a pixel sized shape
to a certain data value. No effect when `ysizemode` not
set to "pixel".
yref
Sets the shape's y coordinate axis. If set to a y axis
id (e.g. "y" or "y2"), the `y` position refers to a y
coordinate. If set to "paper", the `y` position refers
to the distance from the bottom of the plotting area in
normalized coordinates where 0 (1) corresponds to the
bottom (top). If set to a y axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the
domain of that axis: e.g., *y2 domain* refers to the
domain of the second y axis and a y position of 0.5
refers to the point between the bottom and the top of
the domain of the second y axis.
ysizemode
Sets the shapes's sizing mode along the y axis. If set
to "scaled", `y0`, `y1` and y coordinates within `path`
refer to data values on the y axis or a fraction of the
plot area's height (`yref` set to "paper"). If set to
"pixel", `yanchor` specifies the y position in terms of
data or plot fraction but `y0`, `y1` and y coordinates
within `path` are pixels relative to `yanchor`. This
way, the shape can have a fixed height while
maintaining a position relative to data or plot
fraction.
Returns
-------
Shape
"""
super().__init__("shapes")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Shape
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Shape`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("editable", arg, editable)
self._set_property("fillcolor", arg, fillcolor)
self._set_property("fillrule", arg, fillrule)
self._set_property("label", arg, label)
self._set_property("layer", arg, layer)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("line", arg, line)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("path", arg, path)
self._set_property("showlegend", arg, showlegend)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("type", arg, type)
self._set_property("visible", arg, visible)
self._set_property("x0", arg, x0)
self._set_property("x0shift", arg, x0shift)
self._set_property("x1", arg, x1)
self._set_property("x1shift", arg, x1shift)
self._set_property("xanchor", arg, xanchor)
self._set_property("xref", arg, xref)
self._set_property("xsizemode", arg, xsizemode)
self._set_property("y0", arg, y0)
self._set_property("y0shift", arg, y0shift)
self._set_property("y1", arg, y1)
self._set_property("y1shift", arg, y1shift)
self._set_property("yanchor", arg, yanchor)
self._set_property("yref", arg, yref)
self._set_property("ysizemode", arg, ysizemode)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Shape
|
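In practice this generated class is rarely constructed directly; shapes are usually added through plotly's public figure API, which builds a layout.Shape under the hood. A hedged sketch using standard plotly.graph_objects calls (coordinates and colors are arbitrary):
import plotly.graph_objects as go

fig = go.Figure()
fig.add_shape(type="rect", x0=0, y0=0, x1=1, y1=1,
              line=dict(color="royalblue"), fillcolor="lightskyblue", opacity=0.5)
fig.add_shape(type="line", xref="paper", yref="paper",
              x0=0, y0=0, x1=1, y1=1, line=dict(dash="dot"))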
python
|
facebook__pyre-check
|
client/commands/server_event.py
|
{
"start": 718,
"end": 1501
}
|
class ____(enum.Enum):
WATCHMAN = "Watchman"
BUCK_INTERNAL = "BuckInternal"
BUCK_USER = "BuckUser"
PYRE = "Pyre"
UNKNOWN = "Unknown"
def __str__(self) -> str:
return self.value
@staticmethod
def from_string(input_string: str) -> "ErrorKind":
for item in ErrorKind:
if input_string == str(item):
return item
return ErrorKind.UNKNOWN
def to_exit_code(kind) -> ExitCode:
if kind == ErrorKind.WATCHMAN:
return ExitCode.WATCHMAN_ERROR
elif kind == ErrorKind.BUCK_INTERNAL:
return ExitCode.BUCK_INTERNAL_ERROR
elif kind == ErrorKind.BUCK_USER:
return ExitCode.BUCK_USER_ERROR
return ExitCode.FAILURE
@dataclasses.dataclass
|
ErrorKind
|
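A short usage sketch for the enum above (target name ErrorKind); ExitCode comes from the same module and is assumed to be in scope:
kind = ErrorKind.from_string("BuckUser")     # unrecognized strings fall back to ErrorKind.UNKNOWN
print(str(kind))                             # "BuckUser"
print(kind.to_exit_code())                   # maps to ExitCode.BUCK_USER_ERROR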
python
|
scipy__scipy
|
scipy/sparse/linalg/_matfuncs.py
|
{
"start": 5607,
"end": 10215
}
|
class ____(LinearOperator):
"""
For now, this is limited to products of multiple square matrices.
"""
def __init__(self, *args, **kwargs):
self._structure = kwargs.get('structure', None)
for A in args:
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError(
'For now, the ProductOperator implementation is '
'limited to the product of multiple square matrices.')
if args:
n = args[0].shape[0]
for A in args:
for d in A.shape:
if d != n:
raise ValueError(
'The square matrices of the ProductOperator '
'must all have the same shape.')
self.shape = (n, n)
self.ndim = len(self.shape)
self.dtype = np.result_type(*[x.dtype for x in args])
self._operator_sequence = args
def _matvec(self, x):
for A in reversed(self._operator_sequence):
x = A.dot(x)
return x
def _rmatvec(self, x):
x = x.ravel()
for A in self._operator_sequence:
x = A.T.dot(x)
return x
def _matmat(self, X):
for A in reversed(self._operator_sequence):
X = _smart_matrix_product(A, X, structure=self._structure)
return X
@property
def T(self):
T_args = [A.T for A in reversed(self._operator_sequence)]
return ProductOperator(*T_args)
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse arrays.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
structure : str, optional
A string describing the structure of all operators.
Only `upper_triangular` is currently supported.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse arrays.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
ProductOperator(*operator_seq, structure=structure))
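# --- Hedged usage sketch (editor addition, not part of the scipy source) ---
# Assuming the class above is the ProductOperator it already references, applying
# the operator to a vector should match multiplying through the factors
# right-to-left. The helper name below is hypothetical.
def _product_operator_example():
    import numpy as np
    rng = np.random.default_rng(0)
    A = rng.standard_normal((3, 3))
    B = rng.standard_normal((3, 3))
    op = ProductOperator(A, B)
    x = rng.standard_normal(3)
    # _matvec applies B first, then A, i.e. (A @ B) @ x
    assert np.allclose(op.matvec(x), A.dot(B.dot(x)))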
|
ProductOperator
|
python
|
PyCQA__isort
|
isort/_vendored/tomli/_parser.py
|
{
"start": 6263,
"end": 7279
}
|
class ____:
def __init__(self) -> None:
# The parsed content of the TOML document
self.dict: Dict[str, Any] = {}
def get_or_create_nest(
self,
key: Key,
*,
access_lists: bool = True,
) -> dict:
cont: Any = self.dict
for k in key:
if k not in cont:
cont[k] = {}
cont = cont[k]
if access_lists and isinstance(cont, list):
cont = cont[-1]
if not isinstance(cont, dict):
raise KeyError("There is no nest behind this key")
return cont
def append_nest_to_list(self, key: Key) -> None:
cont = self.get_or_create_nest(key[:-1])
last_key = key[-1]
if last_key in cont:
list_ = cont[last_key]
if not isinstance(list_, list):
raise KeyError("An object other than list found behind this key")
list_.append({})
else:
cont[last_key] = [{}]
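# --- Hedged usage sketch (editor addition, not part of the vendored tomli code) ---
# Assuming the class above is the NestedDict named below, this shows how nested
# tables are created and how arrays-of-tables are appended. The helper name is
# hypothetical.
def _nested_dict_example():
    nd = NestedDict()
    nd.get_or_create_nest(("tool", "isort"))["profile"] = "black"
    nd.append_nest_to_list(("tool", "isort", "sections"))
    assert nd.dict == {"tool": {"isort": {"profile": "black", "sections": [{}]}}}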
|
NestedDict
|
python
|
langchain-ai__langchain
|
libs/standard-tests/tests/unit_tests/test_decorated_tool.py
|
{
"start": 341,
"end": 784
}
|
class ____(ToolsUnitTests):
@property
def tool_constructor(self) -> BaseTool:
return parrot_multiply_tool
@property
def tool_invoke_params_example(self) -> dict:
"""Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
|
TestParrotMultiplyToolUnit
|
python
|
numpy__numpy
|
numpy/matrixlib/tests/test_defmatrix.py
|
{
"start": 8601,
"end": 10101
}
|
class ____:
def test_instance_methods(self):
a = matrix([1.0], dtype='f8')
methodargs = {
'astype': ('intc',),
'clip': (0.0, 1.0),
'compress': ([1],),
'repeat': (1,),
'reshape': (1,),
'swapaxes': (0, 0),
'dot': np.array([1.0]),
}
excluded_methods = [
'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield',
'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize',
'searchsorted', 'setflags', 'setfield', 'sort',
'partition', 'argpartition', 'to_device',
'take', 'tofile', 'tolist', 'tobytes', 'all', 'any',
'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp',
'prod', 'std', 'ctypes', 'bitwise_count',
]
for attrib in dir(a):
if attrib.startswith('_') or attrib in excluded_methods:
continue
f = getattr(a, attrib)
if isinstance(f, collections.abc.Callable):
# reset contents of a
a.astype('f8')
a.fill(1.0)
args = methodargs.get(attrib, ())
b = f(*args)
assert_(type(b) is matrix, f"{attrib}")
assert_(type(a.real) is matrix)
assert_(type(a.imag) is matrix)
c, d = matrix([0.0]).nonzero()
assert_(type(c) is np.ndarray)
assert_(type(d) is np.ndarray)
|
TestMatrixReturn
|
python
|
pennersr__django-allauth
|
tests/apps/socialaccount/providers/dingtalk/tests.py
|
{
"start": 244,
"end": 942
}
|
class ____(OAuth2TestsMixin, TestCase):
provider_id = DingTalkProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"nick": "aiden",
"unionId": "hTaCSb1nM4RXii6jaQvHZqQiEiE",
"avatarUrl": "https://static-legacy.dingtalk.com/media/lADPDg7mViaksW3NBJPNBJI_1170_1171.jpg",
"openId": "ELdCPlk0V2LodZHx3n0p5AiEiE"
}""",
)
def get_login_response_json(self, with_refresh_token=True):
return """{
"accessToken": "testac",
"expireIn": "3600",
"refreshToken": "testrf"
}"""
def get_expected_to_str(self):
return "aiden"
|
DingTalkTests
|
python
|
pandas-dev__pandas
|
pandas/tests/arrays/categorical/test_indexing.py
|
{
"start": 10237,
"end": 12967
}
|
class ____:
def test_contains(self):
# GH#21508
cat = Categorical(list("aabbca"), categories=list("cab"))
assert "b" in cat
assert "z" not in cat
assert np.nan not in cat
with pytest.raises(TypeError, match="unhashable type: 'list'"):
assert [1] in cat
# assert codes NOT in index
assert 0 not in cat
assert 1 not in cat
cat = Categorical(list("aabbca") + [np.nan], categories=list("cab"))
assert np.nan in cat
@pytest.mark.parametrize(
"item, expected",
[
(Interval(0, 1), True),
(1.5, True),
(Interval(0.5, 1.5), False),
("a", False),
(Timestamp(1), False),
(Timedelta(1), False),
],
ids=str,
)
def test_contains_interval(self, item, expected):
# GH#23705
cat = Categorical(IntervalIndex.from_breaks(range(3)))
result = item in cat
assert result is expected
def test_contains_list(self):
# GH#21729
cat = Categorical([1, 2, 3])
assert "a" not in cat
with pytest.raises(TypeError, match="unhashable type"):
["a"] in cat
with pytest.raises(TypeError, match="unhashable type"):
["a", "b"] in cat
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean(index):
ser = Series(range(3))
idx = Categorical([True, False, True])
if index:
idx = CategoricalIndex(idx)
assert com.is_bool_indexer(idx)
result = ser[idx]
expected = ser[idx.astype("object")]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean_na_treated_as_false(index):
# https://github.com/pandas-dev/pandas/issues/31503
ser = Series(range(3))
idx = Categorical([True, False, None])
if index:
idx = CategoricalIndex(idx)
result = ser[idx]
expected = ser[idx.fillna(False)]
tm.assert_series_equal(result, expected)
@pytest.fixture
def non_coercible_categorical(monkeypatch):
"""
Monkeypatch Categorical.__array__ to ensure no implicit conversion.
Raises
------
ValueError
When Categorical.__array__ is called.
"""
# TODO(Categorical): identify other places where this may be
# useful and move to a conftest.py
def array(self, dtype=None):
raise ValueError("I cannot be converted.")
with monkeypatch.context() as m:
m.setattr(Categorical, "__array__", array)
yield
def test_series_at():
arr = Categorical(["a", "b", "c"])
ser = Series(arr)
result = ser.at[0]
assert result == "a"
|
TestContains
|
python
|
falconry__falcon
|
tests/test_after_hooks.py
|
{
"start": 1362,
"end": 1737
}
|
class ____:
def __call__(self, req, resp, resource):
fluffiness(req, resp, resource)
def cuteness(req, resp, resource, check, postfix=' and cute'):
assert resource
if resp.text == check:
resp.text += postfix
def resource_aware_cuteness(req, resp, resource):
assert resource
cuteness(req, resp, resource, 'fluffy')
|
ResourceAwareFluffiness
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_cloud_run.py
|
{
"start": 13658,
"end": 14735
}
|
class ____:
def test_template_fields(self):
operator = CloudRunListJobsOperator(
task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, limit=2, show_deleted=False
)
_assert_common_template_fields(operator.template_fields)
@mock.patch(CLOUD_RUN_HOOK_PATH)
def test_execute(self, hook_mock):
limit = 2
show_deleted = True
operator = CloudRunListJobsOperator(
task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, limit=limit, show_deleted=show_deleted
)
operator.execute(context=mock.MagicMock())
hook_mock.return_value.list_jobs.assert_called_once_with(
region=REGION, project_id=PROJECT_ID, limit=limit, show_deleted=show_deleted
)
@mock.patch(CLOUD_RUN_HOOK_PATH)
def test_execute_with_invalid_limit(self, hook_mock):
limit = -1
with pytest.raises(expected_exception=AirflowException):
CloudRunListJobsOperator(task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, limit=limit)
|
TestCloudRunListJobsOperator
|
python
|
pypa__pip
|
src/pip/_vendor/urllib3/exceptions.py
|
{
"start": 6817,
"end": 6912
}
|
class ____(HTTPError):
"""The header provided was somehow invalid."""
pass
|
InvalidHeader
|
python
|
doocs__leetcode
|
solution/1900-1999/1948.Delete Duplicate Folders in System/Solution.py
|
{
"start": 134,
"end": 1328
}
|
class ____:
def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:
root = Trie()
for path in paths:
cur = root
for name in path:
if cur.children[name] is None:
cur.children[name] = Trie()
cur = cur.children[name]
g: Dict[str, Trie] = {}
def dfs(node: Trie) -> str:
if not node.children:
return ""
subs: List[str] = []
for name, child in node.children.items():
subs.append(f"{name}({dfs(child)})")
s = "".join(sorted(subs))
if s in g:
node.deleted = g[s].deleted = True
else:
g[s] = node
return s
def dfs2(node: Trie) -> None:
if node.deleted:
return
if path:
ans.append(path[:])
for name, child in node.children.items():
path.append(name)
dfs2(child)
path.pop()
dfs(root)
ans: List[List[str]] = []
path: List[str] = []
dfs2(root)
return ans
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/fixtures/github.py
|
{
"start": 673,
"end": 5473
}
|
class ____:
BASE_URL = "https://api.github.com"
def __init__(self, token: str, repository: str):
self.token = token
self.repository = repository
self.session = requests.Session()
self.session.headers.update(self.get_headers(self.token))
self.branches: Optional[list] = None
@staticmethod
def get_headers(token: str):
return {
"Authorization": f"Bearer {token}",
"Accept": "application/vnd.github.v3+json application/vnd.github.inertia-preview+json",
}
def run(self) -> None:
self.get_all_branches()
self.pull_requests()
self.create_milestone()
labels = ["important", "bug", "critical"]
milestone = 1
assignees = []
self.create_release()
self.star_a_repository()
self.create_projects()
self.add_issues_with_comments(labels, milestone, assignees)
@logger
def get_all_branches(self) -> Iterator:
url = f"{self.BASE_URL}/repos/{self.repository}/branches"
response = self.session.get(url=url)
self.branches = response.json()
yield response
@logger
def pull_requests(self) -> Iterator:
url = f"{self.BASE_URL}/repos/{self.repository}/pulls"
for branch in self.branches:
create_pr_data = {
"title": f"New PR from {branch.get('name')}",
"head": branch.get("name"),
"base": "master",
}
# create PR
response = self.session.post(url=url, data=json.dumps(create_pr_data))
yield response
            if response.status_code in (200, 201):
# create review for PR
create_review_data = {
"body": f"Review commit for branch {branch.get('name')}",
"event": "COMMENT",
}
review_url = f"{self.BASE_URL}/repos/{self.repository}/pulls/{response.json().get('number')}/reviews"
response = self.session.post(url=review_url, data=json.dumps(create_review_data))
yield response
# create comment for commit
create_comment_data = {
"body": f"comment for {branch.get('commit').get('sha')} branch",
}
commit_url = f"https://api.github.com/repos/{self.repository}/commits/{branch.get('commit').get('sha')}/comments"
response = self.session.post(url=commit_url, data=json.dumps(create_comment_data))
yield response
@logger
def add_issues_with_comments(
self,
labels: Optional[list],
milestone: Optional[list],
assignees: Optional[list],
) -> Iterator:
url = f"{self.BASE_URL}/repos/{self.repository}/issues"
for branch in self.branches:
data = {
"title": f"Issue for branch {branch.get('name')}",
"head": branch.get("name"),
"labels": labels,
"milestone": milestone,
"assignees": assignees,
}
# add issue
response = self.session.post(url=url, data=json.dumps(data))
yield response
# add issue comment
comments_url = response.json().get("comments_url")
response = self.add_issue_comment(comments_url)
yield response
def add_issue_comment(self, comments_url: str) -> requests.Response:
return self.session.post(
url=comments_url,
data=json.dumps({"body": f"comment for issues {comments_url}"}),
)
@logger
def create_release(self) -> Iterator:
url = f"{self.BASE_URL}/repos/{self.repository}/releases"
for i in range(10):
data = {"tag_name": f"dev-0.{i}", "name": "{i} global release"}
response = self.session.post(url=url, data=json.dumps(data))
yield response
@logger
def star_a_repository(self) -> Iterator:
url = f"{self.BASE_URL}/user/starred/{self.repository}"
response = self.session.put(url=url)
yield response
@logger
def create_projects(self) -> Iterator:
url = f"{self.BASE_URL}/repos/{self.repository}/projects"
for name in ["project_1", "project_2", "project_3"]:
response = self.session.post(url=url, data=json.dumps({"name": name}))
yield response
@logger
def create_milestone(self) -> Iterator:
url = f"{self.BASE_URL}/repos/{self.repository}/milestones"
for title in ["main", "test", "feature"]:
data = {"title": title}
response = self.session.post(url=url, data=json.dumps(data))
yield response
|
GitHubFiller
|
python
|
getsentry__sentry
|
tests/acceptance/test_project_alert_settings.py
|
{
"start": 153,
"end": 1776
}
|
class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
action_data = [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"name": "Send a notification (for all legacy integrations)",
},
{
"id": "sentry.rules.actions.notify_event_service.NotifyEventServiceAction",
"service": "mail",
"name": "Send a notification via mail",
},
]
condition_data = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"name": "A new issue is created",
},
{
"id": "sentry.rules.conditions.every_event.EveryEventCondition",
"name": "The event occurs",
},
]
Rule.objects.filter(project=self.project).delete()
Rule.objects.create(
project=self.project, data={"conditions": condition_data, "actions": action_data}
)
self.login_as(self.user)
self.path1 = f"/settings/{self.org.slug}/projects/{self.project.slug}/alerts/"
|
ProjectAlertSettingsTest
|
python
|
pandas-dev__pandas
|
pandas/util/version/__init__.py
|
{
"start": 1227,
"end": 2331
}
|
class ____:
def __repr__(self) -> str:
return "-Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> bool:
return isinstance(other, type(self))
def __gt__(self, other: object) -> bool:
return False
def __ge__(self, other: object) -> bool:
return False
def __neg__(self: object) -> InfinityType:
return Infinity
NegativeInfinity = NegativeInfinityType()
LocalType: TypeAlias = tuple[int | str, ...]
CmpPrePostDevType: TypeAlias = InfinityType | NegativeInfinityType | tuple[str, int]
CmpLocalType: TypeAlias = (
NegativeInfinityType
| tuple[tuple[int, str] | tuple[NegativeInfinityType, int | str], ...]
)
CmpKey: TypeAlias = tuple[
int,
tuple[int, ...],
CmpPrePostDevType,
CmpPrePostDevType,
CmpPrePostDevType,
CmpLocalType,
]
VersionComparisonMethod: TypeAlias = Callable[[CmpKey, CmpKey], bool]
|
NegativeInfinityType
|
python
|
kamyu104__LeetCode-Solutions
|
Python/remove-all-ones-with-row-and-column-flips-ii.py
|
{
"start": 68,
"end": 1381
}
|
class ____(object):
def removeOnes(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
rows = [0]*len(grid)
mask, bit = 0, 1
for _ in xrange(len(grid[0])):
mask += bit
bit <<= 1
for i in xrange(len(grid)):
rows[i] = mask
mask <<= len(grid[0])
cols = [0]*len(grid[0])
mask, bit = 0, 1
for _ in xrange(len(grid)):
mask += bit
bit <<= len(grid[0])
for j in xrange(len(grid[0])):
cols[j] = mask
mask <<= 1
full_mask = (1<<(len(grid)*len(grid[0])))-1
masks = [[full_mask for _ in xrange(len(grid[0]))] for _ in xrange(len(grid))]
target, bit = 0, 1
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
target += bit*grid[i][j]
masks[i][j] -= (rows[i]+cols[j]-bit)
bit <<= 1
dp = [float("inf") for _ in xrange(target+1)]
dp[0] = 0
for mask in xrange(1, target+1):
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if grid[i][j]:
dp[mask] = min(dp[mask], dp[mask&masks[i][j]]+1)
return dp[target]
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/grouping/enhancer/__init__.py
|
{
"start": 13107,
"end": 26766
}
|
class ____:
# NOTE: You must add a version to ``VERSIONS`` any time attributes are added
# to this class, s.t. no enhancements lacking these attributes are loaded
# from cache.
# See ``GroupingConfigLoader._get_enhancements`` in src/sentry/grouping/api.py.
def __init__(
self,
rules: list[EnhancementRule],
split_enhancement_configs: (
tuple[EnhancementsConfigData, EnhancementsConfigData] | None
) = None,
version: int | None = None,
bases: list[str] | None = None,
id: str | None = None,
):
self.id = id
self.rules = rules
self.version = version or DEFAULT_ENHANCEMENTS_VERSION
self.bases = bases or []
classifier_config, contributes_config = split_enhancement_configs or _split_rules(rules)
self.classifier_rules = classifier_config.rules
self.contributes_rules = contributes_config.rules
self.classifier_rust_enhancements = _merge_rust_enhancements(
self.bases, classifier_config.rust_enhancements, type="classifier"
)
self.contributes_rust_enhancements = _merge_rust_enhancements(
self.bases, contributes_config.rust_enhancements, type="contributes"
)
# We store the rule strings individually in a set so it's quick to test if a given rule
# mentioned in a hint is custom or built-in
self.custom_rule_strings = set(
classifier_config.rule_strings + contributes_config.rule_strings
)
def apply_category_and_updated_in_app_to_frames(
self,
frames: Sequence[dict[str, Any]],
platform: str,
exception_data: dict[str, Any],
) -> None:
"""
Apply enhancement rules to each frame, adding a category (if any) and updating the `in_app`
value if necessary.
Both the category and `in_app` data will be used during grouping. The `in_app` values will
also be persisted in the saved event, so they can be used in the UI and when determining
things like suspect commits and suggested assignees.
"""
# TODO: Fix this type to list[MatchFrame] once it's fixed in ophio
match_frames: list[Any] = [create_match_frame(frame, platform) for frame in frames]
rust_exception_data = _make_rust_exception_data(exception_data)
with metrics.timer("grouping.enhancements.get_in_app") as metrics_timer_tags:
metrics_timer_tags["split"] = True
category_and_in_app_results = (
self.classifier_rust_enhancements.apply_modifications_to_frames(
match_frames, rust_exception_data
)
)
for frame, (category, in_app) in zip(frames, category_and_in_app_results):
if in_app is not None:
# If the `in_app` value changes as a result of this call, the original value (in
# integer form) will be added to `frame.data` under the key "orig_in_app"
set_in_app(frame, in_app)
if category is not None:
set_path(frame, "data", "category", value=category)
def assemble_stacktrace_component(
self,
variant_name: str,
frame_components: list[FrameGroupingComponent],
frames: list[dict[str, Any]],
platform: str | None,
exception_data: dict[str, Any] | None = None,
) -> StacktraceGroupingComponent:
with metrics.timer("grouping.enhancements.get_contributes_and_hint") as metrics_timer_tags:
metrics_timer_tags.update({"split": True, "variant": variant_name})
rust_exception_data = _make_rust_exception_data(exception_data)
# Create a set of rust frames to which we can ask rust to add in-app hints. (We know all
# hints generated by classifier enhancements are in-app by definition.)
in_app_rust_frames = [EmptyRustFrame() for frame in frames]
# TODO: Fix this type to list[MatchFrame] once it's fixed in ophio
in_app_match_frames: list[Any] = [
create_match_frame(frame, platform) for frame in frames
]
# Only spend the time to get in-app hints if we might use them
if variant_name == "app":
self.classifier_rust_enhancements.assemble_stacktrace_component(
in_app_match_frames, rust_exception_data, in_app_rust_frames
)
# Do the same for contributes hints, this time using the contributes enhancements. These
# rust frames will also collect `contributes` values, along with the `contributes` and
# `hint` values for the stacktrace.
contributes_rust_frames = [
RustFrame(contributes=c.contributes) for c in frame_components
]
contributes_match_frames = [
# We don't want to include `orig_in_app` here because otherwise +/-group hints can
# get clobbered by +/-app hints
{**match_frame, "orig_in_app": None}
for match_frame in in_app_match_frames
]
rust_stacktrace_results = (
self.contributes_rust_enhancements.assemble_stacktrace_component(
contributes_match_frames, rust_exception_data, contributes_rust_frames
)
)
# Update frame components with results from rust
for frame, frame_component, in_app_rust_frame, contributes_rust_frame in zip(
frames, frame_components, in_app_rust_frames, contributes_rust_frames
):
# System frames should never contribute in the app variant, so if that's what we have,
            # force `contributes=False`, regardless of the rust results
if variant_name == "app" and not frame_component.in_app:
contributes = False
else:
contributes = bool( # bool-ing this to please mypy
contributes_rust_frame.contributes
)
frame_component.update(contributes=contributes)
in_app_hint = (
_get_hint_for_frame(
variant_name,
frame,
frame_component,
in_app_rust_frame,
"in-app",
self.custom_rule_strings,
)
if variant_name == "app"
else None # In-app hints don't apply to the system stacktrace
)
contributes_hint = _get_hint_for_frame(
variant_name,
frame,
frame_component,
contributes_rust_frame,
"contributes",
self.custom_rule_strings,
)
hint = _combine_hints(variant_name, frame_component, in_app_hint, contributes_hint)
frame_component.update(hint=hint)
stacktrace_component = StacktraceGroupingComponent(
values=frame_components,
hint=rust_stacktrace_results.hint,
contributes=rust_stacktrace_results.contributes,
)
return stacktrace_component
def _get_base64_bytes_from_rules(self, rules: list[EnhancementRule]) -> bytes:
pickled = msgpack.dumps(
[self.version, self.bases, [rule._to_config_structure(self.version) for rule in rules]]
)
compressed_pickle = zstandard.compress(pickled)
return base64.urlsafe_b64encode(compressed_pickle).strip(b"=")
@cached_property
def base64_string(self) -> str:
"""A base64 string representation of the enhancements object"""
rulesets = [self.rules, self.classifier_rules, self.contributes_rules]
# Create a base64 bytestring for each set of rules, and join them with a character we know
# can never appear in base64. We do it this way rather than combining all three sets of
# rules into a single bytestring because the rust enhancer only knows how to deal with
# bytestrings encoding data of the form `[version, bases, rules]` (not
# `[version, bases, rules, rules, rules]`).
base64_bytes = BASE64_ENHANCEMENTS_DELIMITER.join(
self._get_base64_bytes_from_rules(ruleset) for ruleset in rulesets
)
base64_str = base64_bytes.decode("ascii")
return base64_str
@classmethod
def _get_config_from_base64_bytes(cls, bytes_str: bytes) -> EnhancementsConfigData:
padded_bytes = bytes_str + b"=" * (4 - (len(bytes_str) % 4))
try:
compressed_pickle = base64.urlsafe_b64decode(padded_bytes)
if compressed_pickle.startswith(b"\x28\xb5\x2f\xfd"):
pickled = zstandard.decompress(compressed_pickle)
else:
pickled = zlib.decompress(compressed_pickle)
config_structure = msgpack.loads(pickled, raw=False)
version, bases, rules = config_structure
if version not in VERSIONS:
raise InvalidEnhancerConfig(f"Unknown enhancements version: {version}")
rules = [EnhancementRule._from_config_structure(rule, version) for rule in rules]
rust_enhancements = _get_rust_enhancements("config_structure", pickled)
except (LookupError, AttributeError, TypeError, ValueError) as e:
raise ValueError("invalid stack trace rule config: %s" % e)
return EnhancementsConfigData(
rules, [rule.text for rule in rules], rust_enhancements, version, bases
)
@classmethod
def from_base64_string(
cls, base64_string: str | bytes, referrer: str | None = None
) -> EnhancementsConfig:
"""Convert a base64 string into an `EnhancementsConfig` object"""
with metrics.timer("grouping.enhancements.creation") as metrics_timer_tags:
metrics_timer_tags.update({"source": "base64_string", "referrer": referrer})
raw_bytes_str = (
base64_string.encode("ascii", "ignore")
if isinstance(base64_string, str)
else base64_string
)
# Split the string to get encoded data for each set of rules: unsplit rules (i.e., rules
# the way they're stored in project config), classifier rules, and contributes rules.
# Older base64 strings - such as those stored in events created before rule-splitting
# was introduced - will only have one part and thus will end up unchanged by the split.
# (The delimiter is chosen specifically to be a character which can't appear in base64.)
bytes_strs = raw_bytes_str.split(BASE64_ENHANCEMENTS_DELIMITER)
configs = [cls._get_config_from_base64_bytes(bytes_str) for bytes_str in bytes_strs]
unsplit_config = configs[0]
split_configs = None
if len(configs) == 3:
split_configs = (configs[1], configs[2])
version = unsplit_config.version
bases = unsplit_config.bases
metrics_timer_tags.update({"split": version == 3})
return cls(
rules=unsplit_config.rules,
split_enhancement_configs=split_configs,
version=version,
bases=bases,
)
@classmethod
@sentry_sdk.tracing.trace
def from_rules_text(
cls,
rules_text: str,
bases: list[str] | None = None,
id: str | None = None,
version: int | None = None,
referrer: str | None = None,
) -> EnhancementsConfig:
"""Create an `EnhancementsConfig` object from a text blob containing stacktrace rules"""
with metrics.timer("grouping.enhancements.creation") as metrics_timer_tags:
metrics_timer_tags.update(
{"split": version == 3, "source": "rules_text", "referrer": referrer}
)
return EnhancementsConfig(
rules=parse_enhancements(rules_text),
version=version,
bases=bases,
id=id,
)
def _load_configs() -> dict[str, EnhancementsConfig]:
enhancement_bases = {}
configs_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "enhancement-configs")
for filename in os.listdir(configs_dir):
if filename.endswith(".txt"):
with open(os.path.join(configs_dir, filename), encoding="utf-8") as f:
# Strip the extension
filename = filename.replace(".txt", "")
# We cannot use `:` in filenames on Windows but we already have ids with
# `:` in their names hence this trickery.
filename = filename.replace("@", ":")
enhancements = EnhancementsConfig.from_rules_text(
f.read(), id=filename, referrer="default_rules"
)
enhancement_bases[filename] = enhancements
return enhancement_bases
ENHANCEMENT_BASES = _load_configs()
del _load_configs
# TODO: Shim to cover the time period before events which have the old default enhancements name
# encoded in their base64 grouping config expire. Should be able to be deleted after Nov 2025. (Note
# that the new name is hard-coded, rather than a reference to `DEFAULT_ENHANCEMENTS_BASE`, because
# if we make a new default in the meantime, the old name should still point to
# `all-platforms:2023-01-11`.)
ENHANCEMENT_BASES["newstyle:2023-01-11"] = ENHANCEMENT_BASES["all-platforms:2023-01-11"]
|
EnhancementsConfig
|
python
|
mozilla__bleach
|
bleach/_vendor/html5lib/treewalkers/dom.py
|
{
"start": 115,
"end": 1413
}
|
class ____(base.NonRecursiveTreeWalker):
def getNodeDetails(self, node):
if node.nodeType == Node.DOCUMENT_TYPE_NODE:
return base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
return base.TEXT, node.nodeValue
elif node.nodeType == Node.ELEMENT_NODE:
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
if attr.namespaceURI:
attrs[(attr.namespaceURI, attr.localName)] = attr.value
else:
attrs[(None, attr.name)] = attr.value
return (base.ELEMENT, node.namespaceURI, node.nodeName,
attrs, node.hasChildNodes())
elif node.nodeType == Node.COMMENT_NODE:
return base.COMMENT, node.nodeValue
elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
return (base.DOCUMENT,)
else:
return base.UNKNOWN, node.nodeType
def getFirstChild(self, node):
return node.firstChild
def getNextSibling(self, node):
return node.nextSibling
def getParentNode(self, node):
return node.parentNode
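# --- Hedged usage sketch (editor addition, not part of the html5lib source) ---
# Assuming the class above is the DOM TreeWalker named below, iterating it over a
# parsed xml.dom.minidom tree yields html5lib token dicts (StartTag, Characters,
# EndTag, ...). The helper name is hypothetical.
def _tree_walker_example():
    from xml.dom.minidom import parseString
    doc = parseString("<p class='x'>hi</p>")
    return [(token["type"], token.get("name")) for token in TreeWalker(doc)]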
|
TreeWalker
|
python
|
django__django
|
tests/backends/tests.py
|
{
"start": 25155,
"end": 30915
}
|
class ____(TransactionTestCase):
available_apps = ["backends"]
def setUp(self):
# Create a Reporter.
self.r = Reporter.objects.create(first_name="John", last_name="Smith")
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = Article(
headline="This is a test",
pub_date=datetime.datetime(2005, 7, 27),
reporter_id=30,
)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
a2 = Article(
headline="This is another test",
reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30,
)
with self.assertRaises(IntegrityError):
a2.save()
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a1 = Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = ReporterProxy.objects.get(pk=self.r.pk)
Article.objects.create(
headline="Another article",
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r,
reporter_proxy=r_proxy,
)
# Retrieve the second article from the DB
a2 = Article.objects.get(headline="Another article")
a2.reporter_proxy_id = 30
with self.assertRaises(IntegrityError):
a2.save()
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data
without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be
able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in
the DB.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
try:
connection.check_constraints(table_names=[Article._meta.db_table])
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
transaction.set_rollback(True)
def test_check_constraints_sql_keywords(self):
with transaction.atomic():
obj = SQLKeywordsModel.objects.create(reporter=self.r)
obj.refresh_from_db()
obj.reporter_id = 30
with connection.constraint_checks_disabled():
obj.save()
try:
connection.check_constraints(table_names=["order"])
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
transaction.set_rollback(True)
|
FkConstraintsTests
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 110. 所有路径/Solution.py
|
{
"start": 0,
"end": 398
}
|
class ____:
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
ans = []
def dfs(i, path):
if i == len(graph) - 1:
ans.append(path.copy())
return
for j in graph[i]:
path.append(j)
dfs(j, path)
path.pop(-1)
dfs(0, [0])
return ans
|
Solution
|
python
|
ray-project__ray
|
rllib/examples/_old_api_stack/connectors/self_play_with_policy_checkpoint.py
|
{
"start": 1273,
"end": 4976
}
|
class ____(RLlibCallback):
def __init__(self, checkpoint_dir):
self._checkpoint_dir = checkpoint_dir
super().__init__()
def on_algorithm_init(self, *, algorithm, metrics_logger, **kwargs):
policy = Policy.from_checkpoint(
self._checkpoint_dir, policy_ids=[OPPONENT_POLICY_ID]
)
# Add restored policy to Algorithm.
# Note that this policy doesn't have to be trained with the same algorithm
        # as the training stack. You can even mix up TF policies with a Torch stack.
algorithm.add_policy(
policy_id=OPPONENT_POLICY_ID,
policy=policy,
add_to_eval_env_runners=True,
)
def policy_mapping_fn(agent_id, episode, worker, **kwargs):
# main policy plays against opponent policy.
return MAIN_POLICY_ID if episode.episode_id % 2 == agent_id else OPPONENT_POLICY_ID
def main(checkpoint_dir):
config = (
SACConfig()
.environment("open_spiel_env")
.framework("torch")
.callbacks(partial(AddPolicyCallback, checkpoint_dir))
.env_runners(
num_env_runners=1,
num_envs_per_env_runner=5,
# We will be restoring a TF2 policy.
# So tell the RolloutWorkers to enable TF eager exec as well, even if
# framework is set to torch.
enable_tf1_exec_eagerly=True,
)
.training(model={"fcnet_hiddens": [512, 512]})
.multi_agent(
# Initial policy map: Random and PPO. This will be expanded
# to more policy snapshots taken from "main" against which "main"
# will then play (instead of "random"). This is done in the
# custom callback defined above (`SelfPlayCallback`).
# Note: We will add the "opponent" policy with callback.
policies={MAIN_POLICY_ID}, # Our main policy, we'd like to optimize.
# Assign agent 0 and 1 randomly to the "main" policy or
# to the opponent ("random" at first). Make sure (via episode_id)
# that "main" always plays against "random" (and not against
# another "main").
policy_mapping_fn=policy_mapping_fn,
# Always just train the "main" policy.
policies_to_train=[MAIN_POLICY_ID],
)
)
stop = {TRAINING_ITERATION: args.train_iteration}
# Train the "main" policy to play really well using self-play.
tuner = tune.Tuner(
"SAC",
param_space=config.to_dict(),
run_config=tune.RunConfig(
stop=stop,
checkpoint_config=tune.CheckpointConfig(
checkpoint_at_end=True,
checkpoint_frequency=10,
),
verbose=2,
progress_reporter=CLIReporter(
metric_columns={
TRAINING_ITERATION: "iter",
"time_total_s": "time_total_s",
f"{NUM_ENV_STEPS_SAMPLED_LIFETIME}": "ts",
f"{ENV_RUNNER_RESULTS}/{NUM_EPISODES}": "train_episodes",
(
f"{ENV_RUNNER_RESULTS}/module_episode_returns_mean/main"
): "reward_main",
},
sort_by_metric=True,
),
),
)
tuner.fit()
if __name__ == "__main__":
ray.init()
with tempfile.TemporaryDirectory() as tmpdir:
create_open_spiel_checkpoint(tmpdir)
policy_checkpoint_path = os.path.join(
tmpdir,
"checkpoint_000000",
"policies",
OPPONENT_POLICY_ID,
)
main(policy_checkpoint_path)
ray.shutdown()
|
AddPolicyCallback
|
python
|
django__django
|
tests/admin_utils/admin.py
|
{
"start": 658,
"end": 846
}
|
class ____(admin.ModelAdmin):
inlines = [ArticleInline]
site = admin.AdminSite(name="admin")
site.register(Article)
site.register(ArticleProxy)
site.register(Site, SiteAdmin)
|
SiteAdmin
|
python
|
milvus-io__pymilvus
|
pymilvus/bulk_writer/buffer.py
|
{
"start": 1032,
"end": 1635
}
|
class ____(json.JSONEncoder):
def default(self, obj: object):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, np.generic):
return obj.item()
return json.JSONEncoder.default(self, obj)
def to_raw_type(obj: dict):
keys = obj.keys()
for k in keys:
v = obj[k]
if isinstance(v, dict):
obj[k] = to_raw_type(v)
continue
if isinstance(v, np.ndarray):
obj[k] = v.tolist()
elif isinstance(v, np.generic):
obj[k] = v.item()
return obj
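# --- Hedged usage sketch (editor addition, not part of the pymilvus source) ---
# Assuming the encoder above is the NumpyEncoder named below, both paths produce
# plain JSON-serializable Python values from numpy scalars and arrays. The helper
# name is hypothetical.
def _numpy_encoder_example():
    row = {"id": np.int64(1), "vector": np.array([0.1, 0.2], dtype=np.float64)}
    encoded = json.dumps(row, cls=NumpyEncoder)  # converted lazily by the encoder
    converted = to_raw_type(dict(row))           # or converted eagerly, in place
    assert json.loads(encoded) == converted == {"id": 1, "vector": [0.1, 0.2]}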
|
NumpyEncoder
|
python
|
django-mptt__django-mptt
|
mptt/templatetags/mptt_tags.py
|
{
"start": 9202,
"end": 11060
}
|
class ____(template.Node):
def __init__(self, template_nodes, queryset_var):
self.template_nodes = template_nodes
self.queryset_var = queryset_var
def _render_node(self, context, node):
bits = []
context.push()
for child in node.get_children():
bits.append(self._render_node(context, child))
context["node"] = node
context["children"] = mark_safe("".join(bits))
rendered = self.template_nodes.render(context)
context.pop()
return rendered
def render(self, context):
queryset = self.queryset_var.resolve(context)
roots = cache_tree_children(queryset)
bits = [self._render_node(context, node) for node in roots]
return "".join(bits)
@register.tag
def recursetree(parser, token):
"""
Iterates over the nodes in the tree, and renders the contained block for each node.
This tag will recursively render children into the template variable {{ children }}.
Only one database query is required (children are cached for the whole tree)
Usage:
<ul>
{% recursetree nodes %}
<li>
{{ node.name }}
{% if not node.is_leaf_node %}
<ul>
{{ children }}
</ul>
{% endif %}
</li>
{% endrecursetree %}
</ul>
"""
bits = token.contents.split()
if len(bits) != 2:
raise template.TemplateSyntaxError(_("%s tag requires a queryset") % bits[0])
queryset_var = template.Variable(bits[1])
template_nodes = parser.parse(("endrecursetree",))
parser.delete_first_token()
return RecurseTreeNode(template_nodes, queryset_var)
|
RecurseTreeNode
|
python
|
apache__airflow
|
airflow-core/src/airflow/serialization/dag_dependency.py
|
{
"start": 920,
"end": 4327
}
|
class ____:
"""
Dataclass for representing dependencies between dags.
These are calculated during serialization and attached to serialized dags.
The source and target keys store the information of what component depends on what.
For an asset related dependency, a root node will have the source value equal to its dependency_type and
an end node will have the target value equal to its dependency_type. It's easier to explain by examples.
For the example below,
.. code-block:: python
# we assume the asset is active
DAG(dag_id="dag_1", schedule=[Asset.ref(uri="uri")])
we get dag dependency like
.. code-block:: python
DagDependency(
source="asset",
target="dag_1",
label="name", # asset name, we always use asset name as label
dependency_type="asset",
dependency_id=1, # asset id
)
This will look like `Asset name` -> `Dag dag_1` on the dependency graph. This is a root asset node as it
has the source value as asset, and it points to its target "dag_1"
For more complex dependency like asset alias,
.. code-block:: python
# we assume the asset is active
DAG(
dag_id="dag_2",
schedule=[
AssetAlias(name="alias_1"), # resolved into Asset(uri="uri", name="name")
AssetAlias(name="alias_2"), # resolved to nothing
],
)
we'll need to store more data,
.. code-block:: python
[
DagDependency(
source="asset",
target="asset-alias:alias_1",
label="name",
dependency_type="asset",
dependency_id="1",
),
DagDependency(
source="asset:1",
target="dag_2",
label="alias_1",
dependency_type="asset-alias",
dependency_id="alias_1",
),
DagDependency(
source="asset-alias",
target="dag_2",
label="alias_2",
dependency_type="asset-alias",
dependency_id="alias_2",
),
]
    We want it to look like `Asset name` -> `AssetAlias alias_1` -> `Dag dag_2` on the dependency graph. The
    first node here is a root node pointing to an asset alias; thus, its target is set to the asset alias we're
    pointing to. The second node represents the asset alias that points to this asset, and then this asset
    points to the dag. The third node represents a direct dependency between an asset alias and the dag, as the
    alias is not resolved.
    For asset ref cases, it works similarly to an asset if it's a valid asset ref. If not, it works the same as
    an unresolved asset alias.
"""
source: str
target: str
label: str
dependency_type: Literal["asset", "asset-alias", "asset-name-ref", "asset-uri-ref", "trigger", "sensor"]
dependency_id: str | None = None
@property
def node_id(self):
"""Node ID for graph rendering."""
val = f"{self.dependency_type}"
if self.dependency_type not in ("asset", "asset-alias", "asset-name-ref", "asset-uri-ref"):
val = f"{val}:{self.source}:{self.target}"
if self.dependency_id:
val = f"{val}:{self.dependency_id}"
return val
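# --- Hedged sketch (editor addition, not part of the airflow source) ---
# Assuming the class above is the DagDependency dataclass constructed in the
# docstring examples, node_id omits source/target for asset-like types and
# includes them otherwise. The helper name is hypothetical.
def _node_id_examples():
    asset_dep = DagDependency(
        source="asset", target="dag_1", label="name",
        dependency_type="asset", dependency_id="1",
    )
    sensor_dep = DagDependency(
        source="dag_1", target="dag_2", label="wait",
        dependency_type="sensor", dependency_id="wait_task",
    )
    assert asset_dep.node_id == "asset:1"
    assert sensor_dep.node_id == "sensor:dag_1:dag_2:wait_task"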
|
DagDependency
|
python
|
django-extensions__django-extensions
|
tests/testapp/models.py
|
{
"start": 10050,
"end": 10228
}
|
class ____(ShortUUIDTestModel_pk):
many = models.ManyToManyField(ShortUUIDTestModel_field)
class Meta:
app_label = "django_extensions"
|
ShortUUIDTestManyToManyModel
|
python
|
cookiecutter__cookiecutter
|
cookiecutter/extensions.py
|
{
"start": 3175,
"end": 5441
}
|
class ____(Extension):
"""Jinja2 Extension for dates and times."""
tags = {'now'}
def __init__(self, environment: Environment) -> None:
"""Jinja2 Extension constructor."""
super().__init__(environment)
environment.extend(datetime_format='%Y-%m-%d')
def _datetime(
self,
timezone: str,
operator: str,
offset: str,
datetime_format: str | None,
) -> str:
d = arrow.now(timezone)
# parse shift params from offset and include operator
shift_params = {}
for param in offset.split(','):
interval, value = param.split('=')
shift_params[interval.strip()] = float(operator + value.strip())
d = d.shift(**shift_params)
if datetime_format is None:
datetime_format = self.environment.datetime_format # type: ignore[attr-defined]
return d.strftime(datetime_format)
def _now(self, timezone: str, datetime_format: str | None) -> str:
if datetime_format is None:
datetime_format = self.environment.datetime_format # type: ignore[attr-defined]
return arrow.now(timezone).strftime(datetime_format)
def parse(self, parser: Parser) -> nodes.Output:
"""Parse datetime template and add datetime value."""
lineno = next(parser.stream).lineno
node = parser.parse_expression()
if parser.stream.skip_if('comma'):
datetime_format = parser.parse_expression()
else:
datetime_format = nodes.Const(None)
if isinstance(node, nodes.Add):
call_method = self.call_method(
'_datetime',
[node.left, nodes.Const('+'), node.right, datetime_format],
lineno=lineno,
)
elif isinstance(node, nodes.Sub):
call_method = self.call_method(
'_datetime',
[node.left, nodes.Const('-'), node.right, datetime_format],
lineno=lineno,
)
else:
call_method = self.call_method(
'_now',
[node, datetime_format],
lineno=lineno,
)
return nodes.Output([call_method], lineno=lineno)
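# --- Hedged usage sketch (editor addition, not part of the cookiecutter source) ---
# Assuming the extension above is the TimeExtension named below, registering it
# with a Jinja2 environment enables the `{% now %}` tag. The helper name is
# hypothetical.
def _time_extension_example():
    from jinja2 import Environment
    env = Environment(extensions=[TimeExtension])
    return env.from_string("{% now 'utc', '%Y-%m-%d' %}").render()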
|
TimeExtension
|
python
|
scipy__scipy
|
scipy/stats/_resampling.py
|
{
"start": 94568,
"end": 98151
}
|
class ____(ResamplingMethod):
"""Configuration information for a Monte Carlo hypothesis test.
Instances of this class can be passed into the `method` parameter of some
hypothesis test functions to perform a Monte Carlo version of the
hypothesis tests.
Attributes
----------
n_resamples : int, optional
The number of Monte Carlo samples to draw. Default is 9999.
batch : int, optional
The number of Monte Carlo samples to process in each vectorized call to
the statistic. Batch sizes >>1 tend to be faster when the statistic
is vectorized, but memory usage scales linearly with the batch size.
Default is ``None``, which processes all samples in a single batch.
rvs : callable or tuple of callables, optional
A callable or sequence of callables that generates random variates
under the null hypothesis. Each element of `rvs` must be a callable
that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and
returns an N-d array sample of that shape. If `rvs` is a sequence, the
number of callables in `rvs` must match the number of samples passed
to the hypothesis test in which the `MonteCarloMethod` is used. Default
is ``None``, in which case the hypothesis test function chooses values
to match the standard version of the hypothesis test. For example,
the null hypothesis of `scipy.stats.pearsonr` is typically that the
samples are drawn from the standard normal distribution, so
``rvs = (rng.normal, rng.normal)`` where
``rng = np.random.default_rng()``.
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
"""
rvs: object = None
rng: object = None
def __init__(self, n_resamples=9999, batch=None, rvs=None, rng=None):
if (rvs is not None) and (rng is not None):
message = 'Use of `rvs` and `rng` are mutually exclusive.'
raise ValueError(message)
self.n_resamples = n_resamples
self.batch = batch
self.rvs = rvs
self.rng = rng
def _asdict(self):
# `dataclasses.asdict` deepcopies; we don't want that.
return dict(n_resamples=self.n_resamples, batch=self.batch,
rvs=self.rvs, rng=self.rng)
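# --- Hedged usage sketch (editor addition, not part of the scipy source) ---
# The docstring above names `scipy.stats.pearsonr` as a consumer of this class;
# assuming that API, a Monte Carlo p-value can be requested like this. The helper
# name is hypothetical.
def _monte_carlo_method_example():
    import numpy as np
    from scipy import stats
    rng = np.random.default_rng(0)
    x, y = rng.normal(size=30), rng.normal(size=30)
    method = MonteCarloMethod(n_resamples=999, rvs=(rng.normal, rng.normal))
    res = stats.pearsonr(x, y, method=method)  # null distribution by simulation
    assert 0.0 <= res.pvalue <= 1.0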
_rs_deprecation = ("Use of attribute `random_state` is deprecated and replaced by "
"`rng`. Support for `random_state` will be removed in SciPy 1.19.0. "
"To silence this warning and ensure consistent behavior in SciPy "
"1.19.0, control the RNG using attribute `rng`. Values set using "
"attribute `rng` will be validated by `np.random.default_rng`, so "
"the behavior corresponding with a given value may change compared "
"to use of `random_state`. For example, 1) `None` will result in "
"unpredictable random numbers, 2) an integer will result in a "
"different stream of random numbers, (with the same distribution), "
"and 3) `np.random` or `RandomState` instances will result in an "
"error. See the documentation of `default_rng` for more "
"information.")
@dataclass
|
MonteCarloMethod
|
python
|
pytorch__pytorch
|
test/distributed/test_store.py
|
{
"start": 11939,
"end": 21722
}
|
class ____(TestCase, StoreTestBase):
_use_libuv = False
def _create_store(self):
store = create_tcp_store(use_libuv=self._use_libuv)
store.set_timeout(timedelta(seconds=300))
return store
def _create_store_with_ws(self, addr, world_size):
return create_tcp_store(
addr, world_size, wait_for_workers=False, use_libuv=self._use_libuv
)
def test_address_already_in_use(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
err_msg_reg = f"^The server socket has failed to listen on any local .*{port}"
with self.assertRaisesRegex(dist.DistNetworkError, err_msg_reg):
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = dist.TCPStore(addr, port, 1, True, use_libuv=self._use_libuv) # noqa: F841
store2 = dist.TCPStore(addr, port, 1, True, use_libuv=self._use_libuv) # noqa: F841
self.assertEqual(store1.libuvBackend, self._use_libuv)
self.assertEqual(store2.libuvBackend, self._use_libuv)
@retry_on_connect_failures
def test_multitenancy(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = dist.TCPStore(
addr, port, 1, True, multi_tenant=True, use_libuv=self._use_libuv
) # type: ignore[call-arg] # noqa: F841
store2 = dist.TCPStore(
addr, port, 1, True, multi_tenant=True, use_libuv=self._use_libuv
) # type: ignore[call-arg] # noqa: F841
self.assertEqual(store1.libuvBackend, self._use_libuv)
self.assertEqual(store2.libuvBackend, self._use_libuv)
def test_repr(self) -> None:
# server
store1 = self._create_store()
self.assertRegex(
repr(store1),
r"TCPStore\("
r"client=TCPClient\(SocketImpl\(fd=\d+, addr=\[?localhost\]?:\d+, remote=\[?localhost\]?:\d+\)\), "
r"server=TCPServer\(port=\d+\)\)",
)
# client
store2 = dist.TCPStore(
store1.host,
store1.port,
world_size=2,
is_master=False,
)
self.assertRegex(
repr(store2),
r"TCPStore\("
r"client=TCPClient\(SocketImpl\(fd=\d+, addr=\[?localhost\]?:\d+, remote=\[?localhost\]?:\d+\)\), "
r"server=<nullptr>\)",
)
@skip_if_win32()
@retry_on_connect_failures
def test_init_pg_and_rpc_with_same_socket(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
os.environ["MASTER_ADDR"] = addr
os.environ["MASTER_PORT"] = str(port)
# We internally use a multi-tenant TCP store. Both PG and RPC should successfully
# initialize even when using the same socket address.
os.environ["USE_LIBUV"] = "1" if self._use_libuv else "0"
dist.init_process_group(
backend="gloo",
init_method="env://",
rank=0,
world_size=1,
)
backend_opts = rpc.TensorPipeRpcBackendOptions(
init_method=f"tcp://{addr}:{port}", _transports=tp_transports()
)
rpc.init_rpc(
name="worker0",
rank=0,
world_size=1,
rpc_backend_options=backend_opts,
)
del os.environ["USE_LIBUV"]
assert "USE_LIBUV" not in os.environ
rpc.shutdown()
dist.destroy_process_group()
@skip_if_win32()
def test_take_over_listen_socket(self):
listen_sock: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_sock.bind(("localhost", 0))
addr, port, *_ = listen_sock.getsockname()
listen_fd = listen_sock.detach()
store = dist.TCPStore(
addr,
port,
1,
is_master=True,
master_listen_fd=listen_fd,
use_libuv=self._use_libuv,
)
self.assertEqual(store.libuvBackend, self._use_libuv)
store.set("key", "value")
self.assertEqual(b"value", store.get("key"))
# The TCPStore has 6 keys in test_set_get. It contains the 5 keys added by
    # the user and one additional key used to coordinate all the workers.
@property
def num_keys_total(self):
return 6
def _test_numkeys_delkeys(self, fs):
# We start off with one init key in the store to coordinate workers
self.assertEqual(fs.num_keys(), 1)
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
self.assertEqual(fs.num_keys(), 5)
fs.delete_key("key")
self.assertEqual(fs.num_keys(), 4)
fs.set_timeout(timedelta(seconds=2))
with self.assertRaises(RuntimeError):
fs.get("key")
fs.delete_key("key0")
fs.delete_key("key3")
self.assertEqual(fs.num_keys(), 2)
fs.set("key4", "value2")
self.assertEqual(fs.num_keys(), 3)
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key4"))
def test_numkeys_delkeys(self):
self._test_numkeys_delkeys(self._create_store())
def _create_client(self, index, addr, port, world_size):
client_store = dist.TCPStore(
addr,
port,
world_size=world_size,
timeout=timedelta(seconds=10),
use_libuv=self._use_libuv,
)
self.assertEqual(b"value", client_store.get("key"))
client_store.set(f"new_key{index}", f"new_value{index}")
self.assertEqual(
f"next_value{index}".encode(),
client_store.compare_set(
f"new_key{index}", f"new_value{index}", f"next_value{index}"
),
)
def _multi_worker_helper(self, world_size):
addr = DEFAULT_HOSTNAME
server_store = self._create_store_with_ws(addr, world_size)
self.assertEqual(server_store.libuvBackend, self._use_libuv)
server_store.set("key", "value")
port = server_store.port
num_indices = world_size if world_size else 1
for i in range(num_indices):
self._create_client(i, addr, port, world_size)
def test_multi_worker_with_fixed_world_size(self):
self._multi_worker_helper(5)
def test_multi_worker_with_nonfixed_world_size(self):
self._multi_worker_helper(None)
def test_append(self):
store = self._create_store()
self.assertEqual(store.libuvBackend, self._use_libuv)
store.set("foo", "po")
store.append("foo", "tato")
store.append("bar", "po")
store.append("bar", "tato")
self.assertEqual(b"potato", store.get("foo"))
self.assertEqual(b"potato", store.get("bar"))
def test_multi_set(self):
store = self._create_store()
self.assertEqual(store.libuvBackend, self._use_libuv)
store.multi_set(["foo", "bar"], ["po", "tato"])
self.assertEqual(b"po", store.get("foo"))
self.assertEqual(b"tato", store.get("bar"))
def test_multi_get(self):
store = self._create_store()
self.assertEqual(store.libuvBackend, self._use_libuv)
store.set("foo", "po")
store.set("bar", "tato")
v0, v1 = store.multi_get(["foo", "bar"])
self.assertEqual(b"po", v0)
self.assertEqual(b"tato", v1)
def test_store_timeout_on_missing_clients(self):
with self.assertRaisesRegex(
DistStoreError,
r"Timed out after \d+ seconds waiting for clients. \d+/\d+ clients joined.",
):
# world_size is 2 so it should timeout
dist.TCPStore(
"localhost",
0,
2,
True,
timeout=timedelta(seconds=2),
use_libuv=self._use_libuv,
)
# when wait_for_workers is not set, then there should be no exception raised
dist.TCPStore(
"localhost",
0,
2,
True,
timeout=timedelta(seconds=2),
wait_for_workers=False,
use_libuv=self._use_libuv,
)
@skip_if_win32()
def test_world_size_0_raises(self):
with self.assertRaisesRegex(ValueError, "TCPStore world size cannot be 0"):
dist.TCPStore("localhost", 0, world_size=0, is_master=False)
def test_agent_store(self) -> None:
store = self._create_store()
with self.assertRaisesRegex(
dist.DistNetworkError,
"The server socket has failed to listen on any local network address",
):
dist.TCPStore(
host_name="localhost",
port=store.port,
world_size=1,
is_master=True,
use_libuv=self._use_libuv,
)
USE_AGENT_STORE = "TORCHELASTIC_USE_AGENT_STORE"
MASTER_PORT = "MASTER_PORT"
os.environ[USE_AGENT_STORE] = "1"
os.environ[MASTER_PORT] = str(store.port)
second_server = dist.TCPStore(
host_name="localhost",
port=store.port,
world_size=1,
is_master=True,
use_libuv=self._use_libuv,
)
del os.environ[USE_AGENT_STORE]
del os.environ[MASTER_PORT]
self.assertEqual(second_server.port, store.port)
|
TCPStoreTest
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 261495,
"end": 262652
}
|
class ____(ConditionalValueDefTextExprRef):
"""
ConditionalParameterValueDefTextExprRef schema wrapper.
Parameters
----------
param : str, :class:`ParameterName`
Filter using a parameter name.
value : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
empty : bool
For selection parameters, the predicate of empty selections returns true by default.
Override this behavior by setting this property ``empty: false``.
"""
_schema = {"$ref": "#/definitions/ConditionalParameter<ValueDef<(Text|ExprRef)>>"}
def __init__(
self,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined,
empty: Optional[bool] = Undefined,
**kwds,
):
super().__init__(param=param, value=value, empty=empty, **kwds)
|
ConditionalParameterValueDefTextExprRef
|
python
|
PyCQA__pylint
|
tests/checkers/unittest_base_checker.py
|
{
"start": 648,
"end": 958
}
|
class ____(BaseChecker):
def __init__(self) -> None:
super().__init__(PyLinter())
name = "basic"
msgs = {
"W0001": (
"Basic checker has an example.",
"basic-checker-example",
"Used nowhere and serves no purpose.",
)
}
|
OtherBasicChecker
|
python
|
wandb__wandb
|
wandb/util.py
|
{
"start": 61418,
"end": 63541
}
|
class ____:
"""An installed distribution.
Attributes:
key: The distribution name as it would be imported.
version: The distribution's version string.
"""
key: str
version: str
def working_set() -> Iterable[InstalledDistribution]:
"""Return the working set of installed distributions."""
from importlib.metadata import distributions
for d in distributions():
with contextlib.suppress(KeyError, UnicodeDecodeError, TypeError):
# In some distributions, the "Name" attribute may not be present,
# or the metadata itself may be None or malformed, which can raise
# KeyError, UnicodeDecodeError, or TypeError.
# For additional context, see: https://github.com/python/importlib_metadata/issues/371.
yield InstalledDistribution(key=d.metadata["Name"], version=d.version)
def get_core_path() -> str:
"""Returns the path to the wandb-core binary.
The path can be set explicitly via the _WANDB_CORE_PATH environment
variable. Otherwise, the path to the binary in the current package
is returned.
Returns:
str: The path to the wandb-core package.
Raises:
WandbCoreNotAvailableError: If wandb-core was not built for the current system.
"""
# NOTE: Environment variable _WANDB_CORE_PATH is a temporary development feature
# to assist in running the core service from a live development directory.
path_from_env: str = os.environ.get("_WANDB_CORE_PATH", "")
if path_from_env:
wandb.termwarn(
f"Using wandb-core from path `_WANDB_CORE_PATH={path_from_env}`. "
"This is a development feature and may not work as expected."
)
return path_from_env
bin_path = pathlib.Path(__file__).parent / "bin" / "wandb-core"
if not bin_path.exists():
raise WandbCoreNotAvailableError(
f"File not found: {bin_path}."
" Please contact support at support@wandb.com."
f" Your platform is: {platform.platform()}."
)
return str(bin_path)
|
InstalledDistribution
|
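The `working_set()` helper above is essentially a defensive wrapper around `importlib.metadata.distributions()`. A standard-library-only sketch of the same idea, skipping distributions whose metadata is missing or malformed:

```python
import contextlib
from importlib.metadata import distributions

for d in distributions():
    # Some environments ship broken dist-info directories; skip those
    # entries quietly, as the helper above does.
    with contextlib.suppress(KeyError, TypeError, UnicodeDecodeError):
        print(d.metadata["Name"], d.version)
```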
python
|
pypa__setuptools
|
setuptools/tests/test_build_meta.py
|
{
"start": 31732,
"end": 33320
}
|
class ____(TestBuildMetaBackend):
backend_name = 'setuptools.build_meta:__legacy__'
# build_meta_legacy-specific tests
def test_build_sdist_relative_path_import(self, tmpdir_cwd):
# This must fail in build_meta, but must pass in build_meta_legacy
path.build(self._relative_path_import_files)
build_backend = self.get_build_backend()
build_backend.build_sdist("temp")
def test_sys_argv_passthrough(self, tmpdir_cwd):
path.build(self._sys_argv_0_passthrough)
build_backend = self.get_build_backend()
build_backend.build_sdist("temp")
@pytest.mark.filterwarnings("ignore::setuptools.SetuptoolsDeprecationWarning")
def test_sys_exit_0_in_setuppy(monkeypatch, tmp_path):
"""Setuptools should be resilient to setup.py with ``sys.exit(0)`` (#3973)."""
monkeypatch.chdir(tmp_path)
setuppy = """
import sys, setuptools
setuptools.setup(name='foo', version='0.0.0')
sys.exit(0)
"""
(tmp_path / "setup.py").write_text(DALS(setuppy), encoding="utf-8")
backend = BuildBackend(backend_name="setuptools.build_meta")
assert backend.get_requires_for_build_wheel() == []
def test_system_exit_in_setuppy(monkeypatch, tmp_path):
monkeypatch.chdir(tmp_path)
setuppy = "import sys; sys.exit('some error')"
(tmp_path / "setup.py").write_text(setuppy, encoding="utf-8")
with pytest.raises(SystemExit, match="some error"):
backend = BuildBackend(backend_name="setuptools.build_meta")
backend.get_requires_for_build_wheel()
|
TestBuildMetaLegacyBackend
|
python
|
run-llama__llama_index
|
llama-index-integrations/tools/llama-index-tools-artifact-editor/llama_index/tools/artifact_editor/base.py
|
{
"start": 1026,
"end": 16740
}
|
class ____(BaseToolSpec):
"""
A tool spec that allows you to edit an artifact in-memory.
Using JSON patch operations, an LLM/Agent can be prompted to create, modify, and iterate on an artifact like a report, code, or anything that can be represented as a Pydantic model.
Attributes:
pydantic_cls: The Pydantic model class to edit
current_artifact: The current artifact instance
Methods:
to_tool_list: Returns a list of tools that can be used to edit the artifact
create_artifact: Creates an initial artifact instance
get_current_artifact: Gets the current artifact instance
apply_patch: Applies a JSON patch to the current artifact instance
"""
# The `create_artifact` function is excluded as it is manually injected into the tool spec
spec_functions = [
"apply_patch",
"get_current_artifact",
]
def __init__(
self,
pydantic_cls: Type[T],
current_artifact: Optional[T] = None,
) -> None:
"""
Initialize the artifact editor tool spec.
Args:
pydantic_cls (BaseModel): The Pydantic model class to edit
current_artifact (Optional[BaseModel]): The initial artifact instance to use
"""
self.pydantic_cls = pydantic_cls
self.current_artifact: Optional[T] = current_artifact
def to_tool_list(self) -> List[BaseTool]:
tools = super().to_tool_list()
tools.append(
FunctionTool.from_defaults(
self.create_artifact,
description=self.pydantic_cls.__doc__
or "Create an initial artifact instance.",
fn_schema=self.pydantic_cls,
)
)
return tools
def create_artifact(self, **kwargs: Any) -> dict:
"""Create an initial artifact instance."""
self.current_artifact = self.pydantic_cls.model_validate(kwargs)
return self.current_artifact.model_dump()
def get_current_artifact(self) -> Optional[dict]:
"""Get the current artifact instance."""
return self.current_artifact.model_dump() if self.current_artifact else None
def apply_patch(self, patch: JsonPatch) -> dict:
"""
Apply a JSON patch to the current Pydantic model instance.
Args:
patch: JsonPatch containing operations to apply
Returns:
New instance of the same model type with patches applied.
Also overwrites and saves the new instance as the current artifact.
Raises:
ValueError: If patch operation is invalid
IndexError: If array index is out of range
ValidationError: If patch results in invalid model
"""
# Validate patch object
if isinstance(patch, dict):
patch = JsonPatch.model_validate(patch)
elif isinstance(patch, str):
patch = JsonPatch.model_validate_json(patch)
# Convert to dict for easier manipulation
model_dict = self.current_artifact.model_dump()
model_class = self.pydantic_cls
for operation in patch.operations:
try:
self._apply_single_operation(model_dict, operation)
except Exception as e:
raise ValueError(
f"Failed to apply operation {operation.op} at {operation.path}: {e!s}"
)
# Convert back to original model type and validate
try:
self.current_artifact = model_class.model_validate(model_dict)
return self.current_artifact.model_dump()
except ValidationError as e:
raise ValueError(
f"Patch resulted in invalid {model_class.__name__} structure: {e!s}"
)
def _apply_single_operation(
self, data: Dict[str, Any], operation: PatchOperation
) -> None:
"""Apply a single patch operation to the data dictionary."""
path_parts = self._parse_path(operation.path)
# Validate path before applying operation
if operation.op in ["add", "replace"]:
self._validate_path_against_schema(path_parts, self.pydantic_cls)
if operation.op == "replace":
self._set_value_at_path(data, path_parts, operation.value)
elif operation.op == "add":
self._add_value_at_path(data, path_parts, operation.value)
elif operation.op == "remove":
self._remove_value_at_path(data, path_parts)
elif operation.op == "move":
if not operation.from_path:
raise ValueError("'move' operation requires 'from_path'")
from_parts = self._parse_path(operation.from_path)
to_parts = path_parts
# Validate both paths
self._validate_path_against_schema(to_parts, self.pydantic_cls)
value = self._get_value_at_path(data, from_parts)
self._remove_value_at_path(data, from_parts)
self._set_value_at_path(data, to_parts, value)
elif operation.op == "copy":
if not operation.from_path:
raise ValueError("'copy' operation requires 'from_path'")
from_parts = self._parse_path(operation.from_path)
to_parts = path_parts
# Validate target path
self._validate_path_against_schema(to_parts, self.pydantic_cls)
value = self._get_value_at_path(data, from_parts)
self._set_value_at_path(data, to_parts, value)
else:
raise ValueError(f"Unknown operation: {operation.op}")
def _validate_path_against_schema(
self, path_parts: List[Union[str, int]], model_class: Type[BaseModel]
) -> None:
"""
Validate that a path corresponds to valid fields in the Pydantic model schema.
Args:
path_parts: Parsed path components
model_class: The Pydantic model class to validate against
Raises:
ValueError: If the path contains invalid fields
"""
if not path_parts:
return
current_model = model_class
current_path = ""
for i, part in enumerate(path_parts):
current_path += f"/{part}" if current_path else f"{part}"
# If part is an integer or '-' (array append), we're dealing with an array index
if isinstance(part, int) or part == "-":
continue
# Check if this field exists in the current model
if hasattr(current_model, "model_fields"):
fields = current_model.model_fields
else:
# Fallback for older Pydantic versions
fields = getattr(current_model, "__fields__", {})
if part not in fields:
raise ValueError(
f"Invalid field '{part}' at path '/{current_path}'. Valid fields are: {list(fields.keys())}"
)
# Get the field type for nested validation
field_info = fields[part]
# Handle nested models
if hasattr(field_info, "annotation"):
field_type = field_info.annotation
else:
# Fallback for older Pydantic versions
field_type = getattr(field_info, "type_", None)
if field_type:
# Handle Optional types
if hasattr(field_type, "__origin__") and field_type.__origin__ is Union:
# Extract non-None type from Optional
args = getattr(field_type, "__args__", ())
field_type = next(
(arg for arg in args if arg is not type(None)), field_type
)
# Handle List types
if hasattr(field_type, "__origin__") and field_type.__origin__ in (
list,
List,
):
# For list types, the next part should be an index or '-'
if i + 1 < len(path_parts) and (
isinstance(path_parts[i + 1], int) or path_parts[i + 1] == "-"
):
continue
# If we're at the end of the path and it's a list, that's valid too
elif i + 1 == len(path_parts):
continue
# If it's a BaseModel subclass, use it for next iteration
if isinstance(field_type, type) and issubclass(field_type, BaseModel):
current_model = field_type
else:
# If we have more path parts but current field is not a model or list, check validity
if (
i + 1 < len(path_parts)
and not isinstance(path_parts[i + 1], int)
and path_parts[i + 1] != "-"
):
raise ValueError(
f"Cannot access nested field '{path_parts[i + 1]}' on non-object field '{part}' of type {field_type}"
)
def _parse_path(self, path: str) -> List[Union[str, int]]:
"""Parse a JSON pointer path into components."""
if not path.startswith("/"):
raise ValueError("Path must start with '/'")
if path == "/":
return []
parts = []
for part in path[1:].split("/"):
# Unescape JSON pointer characters
part = part.replace("~1", "/").replace("~0", "~")
# Try to convert to int if it looks like an array index
if part.isdigit():
parts.append(int(part))
else:
parts.append(part)
return parts
def _get_value_at_path(
self, data: Dict[str, Any], path_parts: List[Union[str, int]]
) -> Any:
"""Get value at the specified path."""
current = data
for part in path_parts:
if isinstance(current, dict):
if part not in current:
raise KeyError(f"Key '{part}' not found")
current = current[part]
elif isinstance(current, list):
if not isinstance(part, int):
raise ValueError(f"Array index must be integer, got {part}")
if part >= len(current) or part < -len(current):
raise IndexError(f"Array index {part} out of range")
current = current[part]
else:
raise ValueError(
f"Cannot index into {type(current).__name__} with {part}"
)
return current
def _set_value_at_path(
self, data: Dict[str, Any], path_parts: List[Union[str, int]], value: Any
) -> None:
"""Set value at the specified path."""
if not path_parts:
raise ValueError("Cannot replace root")
current = data
for part in path_parts[:-1]:
if isinstance(current, dict):
if part not in current:
raise KeyError(f"Key '{part}' not found")
current = current[part]
elif isinstance(current, list):
if not isinstance(part, int):
raise ValueError(f"Array index must be integer, got {part}")
if part >= len(current) or part < -len(current):
raise IndexError(f"Array index {part} out of range")
current = current[part]
else:
raise ValueError(
f"Cannot index into {type(current).__name__} with {part}"
)
last_part = path_parts[-1]
if isinstance(current, dict):
current[last_part] = value
elif isinstance(current, list):
if not isinstance(last_part, int):
raise ValueError(f"Array index must be integer, got {last_part}")
if last_part >= len(current) or last_part < -len(current):
raise IndexError(f"Array index {last_part} out of range")
current[last_part] = value
else:
raise ValueError(f"Cannot set value in {type(current).__name__}")
def _add_value_at_path(
self, data: Dict[str, Any], path_parts: List[Union[str, int]], value: Any
) -> None:
"""Add value at the specified path."""
if not path_parts:
raise ValueError("Cannot add to root")
current = data
for part in path_parts[:-1]:
if isinstance(current, dict):
if part not in current:
raise KeyError(f"Key '{part}' not found")
current = current[part]
elif isinstance(current, list):
if not isinstance(part, int):
raise ValueError(f"Array index must be integer, got {part}")
if part >= len(current) or part < -len(current):
raise IndexError(f"Array index {part} out of range")
current = current[part]
else:
raise ValueError(
f"Cannot index into {type(current).__name__} with {part}"
)
last_part = path_parts[-1]
if isinstance(current, dict):
current[last_part] = value
elif isinstance(current, list):
if isinstance(last_part, int):
if last_part > len(current) or last_part < -len(current) - 1:
raise IndexError(
f"Array index {last_part} out of range for insertion"
)
current.insert(last_part, value)
elif last_part == "-": # Special case for appending to array
current.append(value)
else:
raise ValueError(f"Invalid array index for add operation: {last_part}")
else:
raise ValueError(f"Cannot add value to {type(current).__name__}")
def _remove_value_at_path(
self, data: Dict[str, Any], path_parts: List[Union[str, int]]
) -> None:
"""Remove value at the specified path."""
if not path_parts:
raise ValueError("Cannot remove root")
current = data
for part in path_parts[:-1]:
if isinstance(current, dict):
if part not in current:
raise KeyError(f"Key '{part}' not found")
current = current[part]
elif isinstance(current, list):
if not isinstance(part, int):
raise ValueError(f"Array index must be integer, got {part}")
if part >= len(current) or part < -len(current):
raise IndexError(f"Array index {part} out of range")
current = current[part]
else:
raise ValueError(
f"Cannot index into {type(current).__name__} with {part}"
)
last_part = path_parts[-1]
if isinstance(current, dict):
if last_part not in current:
raise KeyError(f"Key '{last_part}' not found")
del current[last_part]
elif isinstance(current, list):
if not isinstance(last_part, int):
raise ValueError(f"Array index must be integer, got {last_part}")
if last_part >= len(current) or last_part < -len(current):
raise IndexError(f"Array index {last_part} out of range")
del current[last_part]
else:
raise ValueError(f"Cannot remove value from {type(current).__name__}")
|
ArtifactEditorToolSpec
|
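The patch machinery above is built around JSON-pointer paths (`/field/0/subfield`, with `~0`/`~1` escapes and `-` for array append). A self-contained illustration of how such a path resolves against a plain dict, mirroring `_parse_path` and `_set_value_at_path`; the `doc` structure is invented for the example:

```python
def parse_path(path: str):
    # "/sections/0/title" -> ["sections", 0, "title"], unescaping ~1 and ~0.
    parts = []
    for part in path[1:].split("/"):
        part = part.replace("~1", "/").replace("~0", "~")
        parts.append(int(part) if part.isdigit() else part)
    return parts

doc = {"sections": [{"title": "Intro", "body": "..."}]}

*parents, last = parse_path("/sections/0/title")
target = doc
for part in parents:
    target = target[part]       # walk dicts by key, lists by index
target[last] = "Overview"       # the core of a "replace" operation

print(doc["sections"][0]["title"])  # -> Overview
```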
python
|
lepture__authlib
|
tests/flask/test_oauth2/models.py
|
{
"start": 1767,
"end": 1978
}
|
class ____(db.Model, OAuth2ClientMixin):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"))
user = db.relationship("User")
|
Client
|
python
|
huggingface__transformers
|
src/transformers/pipelines/zero_shot_object_detection.py
|
{
"start": 632,
"end": 10393
}
|
class ____(ChunkPipeline):
"""
Zero shot object detection pipeline using `OwlViTForObjectDetection`. This pipeline predicts bounding boxes of
objects when you provide an image and a set of `candidate_labels`.
Example:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
>>> detector(
... "http://images.cocodataset.org/val2017/000000039769.jpg",
... candidate_labels=["cat", "couch"],
... )
[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]
>>> detector(
... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
... candidate_labels=["head", "bird"],
... )
[{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"zero-shot-object-detection"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection).
"""
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES)
@overload
def __call__(
self, image: Union[str, "Image.Image"], candidate_labels: str | list[str], **kwargs: Any
) -> list[dict[str, Any]]: ...
@overload
def __call__(self, image: list[dict[str, Any]], **kwargs: Any) -> list[list[dict[str, Any]]]: ...
def __call__(
self,
image: Union[str, "Image.Image", list[dict[str, Any]]],
candidate_labels: str | list[str] | None = None,
**kwargs: Any,
) -> list[dict[str, Any]] | list[list[dict[str, Any]]]:
"""
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
Args:
image (`str`, `PIL.Image` or `list[dict[str, Any]]`):
The pipeline handles three types of images:
- A string containing an http url pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
You can use this parameter to send directly a list of images, or a dataset or a generator like so:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
>>> detector(
... [
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... ]
... )
[[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]
```
candidate_labels (`str` or `list[str]` or `list[list[str]]`):
What the model should recognize in the image.
threshold (`float`, *optional*, defaults to 0.1):
The probability necessary to make a prediction.
top_k (`int`, *optional*, defaults to None):
The number of top predictions that will be returned by the pipeline. If the provided number is `None`
or higher than the number of predictions available, it will default to the number of predictions.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list of lists containing prediction results, one list per input image. Each list contains dictionaries
with the following keys:
- **label** (`str`) -- Text query corresponding to the found object.
- **score** (`float`) -- Score corresponding to the object (between 0 and 1).
- **box** (`dict[str, int]`) -- Bounding box of the detected object in the image's original size. It is a
dictionary with `xmin`, `ymin`, `xmax`, `ymax` keys.
"""
if "text_queries" in kwargs:
candidate_labels = kwargs.pop("text_queries")
if isinstance(image, (str, Image.Image)):
inputs = {"image": image, "candidate_labels": candidate_labels}
elif isinstance(image, (list, tuple)) and valid_images(image):
return list(
super().__call__(
({"image": img, "candidate_labels": labels} for img, labels in zip(image, candidate_labels)),
**kwargs,
)
)
else:
"""
Supports the following format
- {"image": image, "candidate_labels": candidate_labels}
- [{"image": image, "candidate_labels": candidate_labels}]
- Generator and datasets
This is a common pattern in other multimodal pipelines, so we support it here as well.
"""
inputs = image
results = super().__call__(inputs, **kwargs)
return results
def _sanitize_parameters(self, **kwargs):
preprocess_params = {}
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
postprocess_params = {}
if "threshold" in kwargs:
postprocess_params["threshold"] = kwargs["threshold"]
if "top_k" in kwargs:
postprocess_params["top_k"] = kwargs["top_k"]
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, timeout=None):
image = load_image(inputs["image"], timeout=timeout)
candidate_labels = inputs["candidate_labels"]
if isinstance(candidate_labels, str):
candidate_labels = candidate_labels.split(",")
target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
for i, candidate_label in enumerate(candidate_labels):
text_inputs = self.tokenizer(candidate_label, return_tensors="pt")
image_features = self.image_processor(image, return_tensors="pt")
image_features = image_features.to(self.dtype)
yield {
"is_last": i == len(candidate_labels) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _forward(self, model_inputs):
target_size = model_inputs.pop("target_size")
candidate_label = model_inputs.pop("candidate_label")
is_last = model_inputs.pop("is_last")
outputs = self.model(**model_inputs)
model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def postprocess(self, model_outputs, threshold=0.1, top_k=None):
results = []
for model_output in model_outputs:
label = model_output["candidate_label"]
model_output = BaseModelOutput(model_output)
outputs = self.image_processor.post_process_object_detection(
outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
)[0]
for index in outputs["scores"].nonzero():
score = outputs["scores"][index].item()
box = self._get_bounding_box(outputs["boxes"][index][0])
result = {"score": score, "label": label, "box": box}
results.append(result)
results = sorted(results, key=lambda x: x["score"], reverse=True)
if top_k:
results = results[:top_k]
return results
def _get_bounding_box(self, box: "torch.Tensor") -> dict[str, int]:
"""
Turns a list [xmin, ymin, xmax, ymax] into a dict { "xmin": xmin, ... }
Args:
box (`torch.Tensor`): Tensor containing the coordinates in corners format.
Returns:
bbox (`dict[str, int]`): Dict containing the coordinates in corners format.
"""
xmin, ymin, xmax, ymax = box.int().tolist()
bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
|
ZeroShotObjectDetectionPipeline
|
python
|
pytorch__pytorch
|
torch/export/pt2_archive/_package_weights.py
|
{
"start": 2122,
"end": 4780
}
|
class ____(dict):
"""
A dictionary mapping from weight name to a tuple of (tensor, TensorProperties).
tensor represents the actual initial value of the weight.
TensorProperties represents the properties of the weight that are needed to recover the weight.
We use two separate entries because `tensor` could be a clone of the original weight tensor,
so it doesn't have the same property as the original weight (such as underlying storage pointer).
"""
def __init__(self, weight_dict: dict[str, tuple[torch.Tensor, TensorProperties]]):
super().__init__(weight_dict)
def get_weight(self, name: str) -> tuple[torch.Tensor, TensorProperties]:
return self[name]
def get_weight_properties(self, name: str) -> TensorProperties:
return self[name][1]
def get_complete(
group: OrderedSet[tuple[str, str]], models_weights: dict[str, Weights]
) -> tuple[str, str]:
"""
`group` is an OrderedSet of (model_name, weight_name) tuples.
`models_weights` is a dictionary mapping from model name to its Weights.
One of the tensors in `group` must be complete, and all tensors in the
group must share the same underlying storage.
Returns the name of the complete tensor in the `group`. If multiple
tensors are complete, returns an arbitrary one.
"""
def get_tensor_properties(name_tuple: tuple[str, str]) -> TensorProperties:
# returns the tensor properties
(model_name, weight_name) = name_tuple
return models_weights[model_name].get_weight_properties(weight_name)
for name_tuple in group:
tensor_property = get_tensor_properties(name_tuple)
if tensor_property.is_complete():
return name_tuple
warnings.warn(
"No complete tensor found in the group! Returning the first one. "
"This may cause issues when your weights are not on CPU.",
stacklevel=2,
)
assert len(group) > 0
return next(iter(group))
def group_weights(all_weights: dict[str, Weights]) -> list[OrderedSet[tuple[str, str]]]:
"""
Group weights that share the same underlying storage.
Returns a list of sets, each set contains a tuple of (model_name, weight_name).
"""
weights_dict: dict[tuple[int, torch.dtype], OrderedSet[tuple[str, str]]] = (
collections.defaultdict(OrderedSet)
) # (storage_key, dtype) -> set(weight)
for model_name, weights in all_weights.items():
for weight_name, (tensor, properties) in weights.items():
weights_dict[(properties.storage_ptr, tensor.dtype)].add(
(model_name, weight_name)
)
return list(weights_dict.values())
|
Weights
|
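`group_weights()` above buckets weights by `(storage_ptr, dtype)`, so tensors that are views of one another land in the same group. A small sketch of that identity check, assuming a recent PyTorch with `Tensor.untyped_storage()`:

```python
import torch

base = torch.randn(4, 4)
view = base[:2]  # a view: shares the same underlying storage as `base`

key_base = (base.untyped_storage().data_ptr(), base.dtype)
key_view = (view.untyped_storage().data_ptr(), view.dtype)

# Both tensors hash to the same (storage_ptr, dtype) bucket.
assert key_base == key_view

fresh = base.clone()  # a copy gets its own storage -> a different bucket
assert fresh.untyped_storage().data_ptr() != base.untyped_storage().data_ptr()
```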
python
|
numba__numba
|
numba/core/typing/cffi_utils.py
|
{
"start": 6341,
"end": 7172
}
|
class ____(templates.AbstractTemplate):
key = 'ffi.from_buffer'
def generic(self, args, kws):
if kws or len(args) != 1:
return
[ary] = args
if not isinstance(ary, types.Buffer):
raise TypingError("from_buffer() expected a buffer object, got %s"
% (ary,))
if ary.layout not in ('C', 'F'):
raise TypingError("from_buffer() unsupported on non-contiguous buffers (got %s)"
% (ary,))
if ary.layout != 'C' and ary.ndim > 1:
raise TypingError("from_buffer() only supports multidimensional arrays with C layout (got %s)"
% (ary,))
ptr = types.CPointer(ary.dtype)
return templates.signature(ptr, ary)
@registry.register_attr
|
FFI_from_buffer
|
python
|
getsentry__sentry
|
tests/sentry/core/endpoints/test_organization_avatar.py
|
{
"start": 756,
"end": 1898
}
|
class ____(OrganizationAvatarTestBase):
method = "put"
def test_upload(self) -> None:
data = {"avatar_type": "upload", "avatar_photo": b64encode(self.load_fixture("avatar.jpg"))}
self.get_success_response(self.organization.slug, **data)
avatar = OrganizationAvatar.objects.get(organization=self.organization)
assert avatar.get_avatar_type_display() == "upload"
assert avatar.file_id
def test_put_bad(self) -> None:
OrganizationAvatar.objects.create(organization=self.organization)
self.get_error_response(self.organization.slug, avatar_type="upload", status_code=400)
avatar = OrganizationAvatar.objects.get(organization=self.organization)
assert avatar.get_avatar_type_display() == "letter_avatar"
self.get_error_response(self.organization.slug, avatar_type="foo", status_code=400)
assert avatar.get_avatar_type_display() == "letter_avatar"
def test_put_forbidden(self) -> None:
org = self.create_organization()
self.get_error_response(org.slug, avatar_type="letter_avatar", status_code=403)
|
OrganizationAvatarPutTest
|
python
|
ansible__ansible
|
lib/ansible/plugins/lookup/lines.py
|
{
"start": 2241,
"end": 2786
}
|
class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.extend([to_text(l) for l in stdout.splitlines()])
else:
raise AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
return ret
|
LookupModule
|
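Stripped of the Ansible plumbing, the lookup above just runs each term through the shell and returns stdout split into text lines, raising if the command fails. A standalone sketch of that behaviour (the `printf` command assumes a POSIX shell):

```python
import subprocess

def lines(term: str) -> list[str]:
    # Run the term through the shell, capture stdout, and fail loudly on
    # a non-zero exit code, mirroring the lookup above.
    proc = subprocess.Popen(
        term, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE
    )
    stdout, _ = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(f"lines({term!r}) returned {proc.returncode}")
    return [line.decode() for line in stdout.splitlines()]

print(lines("printf 'a\\nb\\nc\\n'"))  # -> ['a', 'b', 'c']
```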
python
|
sympy__sympy
|
sympy/polys/polyoptions.py
|
{
"start": 17250,
"end": 17500
}
|
class ____(BooleanOption, metaclass=OptionType):
"""``symmetric`` option to polynomial manipulation functions. """
option = 'symmetric'
requires = ['modulus']
excludes = ['greedy', 'domain', 'split', 'gaussian', 'extension']
|
Symmetric
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py
|
{
"start": 4607,
"end": 4722
}
|
class ____(NamedTuple): ...
@whitelist_for_serdes(serializer=BackcompatNullSerializer)
|
AutoMaterializeAssetEvaluation
|
python
|
encode__django-rest-framework
|
tests/browsable_api/views.py
|
{
"start": 779,
"end": 1231
}
|
class ____(ModelViewSet):
queryset = BasicModelWithUsers.objects.all()
serializer_class = BasicSerializer
permission_classes = [OrganizationPermissions]
# permission_classes = [IsAuthenticated, OrganizationPermissions]
renderer_classes = (renderers.BrowsableAPIRenderer, renderers.JSONRenderer)
def get_queryset(self):
qs = super().get_queryset().filter(users=self.request.user)
return qs
|
BasicModelWithUsersViewSet
|
python
|
huggingface__transformers
|
src/transformers/models/siglip/modeling_siglip.py
|
{
"start": 14835,
"end": 16023
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: Union[SiglipVisionConfig, SiglipTextConfig]):
super().__init__()
self.embed_dim = config.hidden_size
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.self_attn = SiglipAttention(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = SiglipMLP(config)
@auto_docstring
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
|
SiglipEncoderLayer
|
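The encoder layer above follows the standard pre-norm residual pattern: LayerNorm, attention, residual add, then LayerNorm, MLP, residual add. A generic illustration of that pattern using stock `torch.nn` modules; `PreNormBlock`, its MLP width, and the head count are invented here and are not the actual SiglipAttention/SiglipMLP modules.

```python
import torch
from torch import nn

class PreNormBlock(nn.Module):
    def __init__(self, dim: int, num_heads: int = 4):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pre-norm: normalize, transform, then add back to the residual stream.
        h = self.norm1(x)
        x = x + self.attn(h, h, h, need_weights=False)[0]
        x = x + self.mlp(self.norm2(x))
        return x

x = torch.randn(2, 16, 32)
print(PreNormBlock(32)(x).shape)  # torch.Size([2, 16, 32])
```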
python
|
huggingface__transformers
|
src/transformers/models/bros/modeling_bros.py
|
{
"start": 21718,
"end": 28225
}
|
class ____(BrosPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = BrosTextEmbeddings(config)
self.bbox_embeddings = BrosBboxEmbeddings(config)
self.encoder = BrosEncoder(config)
self.pooler = BrosPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
bbox: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
bbox ('torch.FloatTensor' of shape '(batch_size, num_boxes, 4)'):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosModel
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if bbox is None:
raise ValueError("You have to specify bbox")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
# if bbox has 2 points (4 float tensors) per token, convert it to 4 points (8 float tensors) per token
if bbox.shape[-1] == 4:
bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
scaled_bbox = bbox * self.config.bbox_scale
bbox_position_embeddings = self.bbox_embeddings(scaled_bbox)
encoder_outputs = self.encoder(
embedding_output,
bbox_pos_emb=bbox_position_embeddings,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@auto_docstring
|
BrosModel
|
python
|
pytorch__pytorch
|
torch/_inductor/ir.py
|
{
"start": 258323,
"end": 259147
}
|
class ____(MutatingFirstArgExternKernel):
def __init__(self, variable: IRNode, new_size: int) -> None:
assert isinstance(new_size, int), "TODO: dynamic shapes"
super().__init__(
None,
NoneLayout(device=variable.get_device()),
self.unwrap_storage([variable]),
constant_args=(new_size,),
)
V.graph.mark_buffer_mutated(variable.get_name())
self.name = V.graph.register_buffer(self)
V.graph.register_operation(self)
self.python_kernel_name = "inductor_ops.resize_storage_bytes_"
self.cpp_kernel_name = "torch::inductor::resize_storage_bytes_"
assert isinstance(variable, (BaseView, StorageBox, TensorBox)), type(variable)
V.graph.never_reuse_buffers.add(variable.data.get_name())
|
ResizeStorageBytes
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/call3.py
|
{
"start": 1845,
"end": 2930
}
|
class ____:
def f(self, y: Any, /):
...
# This should generate an error
c2: P2 = C2()
def f8(a: int, b: int = 3, /):
...
kwargs: Dict[str, Any] = {}
# This should generate an error
f8()
# This should generate an error
f8(**kwargs)
f8(0, **kwargs)
def f9(*, c: int):
pass
# This should generate an error because it is missing a keyword
# argument for keyword parameter "c".
f9(*[1, 2, 3])
# This should generate an error because "/" cannot be used after "*args"
def f10(x, *args, /, y):
pass
# This should generate an error because "*" cannot be used after "*args"
def f11(x, *args, *, y):
pass
def f15(x, /, *args):
pass
# This should generate an error because x
# is a position-only parameter.
f15(x=1)
def f16(x, /, *args, **kw):
pass
# This should generate an error because x
# is a position-only parameter.
f16(x=1)
def f12(a: int, b: str, /):
...
def f13(v: Tuple[int, str]):
f12(*v)
def f14(v: Tuple[int]):
# This should generate an error because parameter "b" has
# no corresponding argument.
f12(*v)
|
C2
|
python
|
pypa__warehouse
|
tests/unit/admin/test_forms.py
|
{
"start": 156,
"end": 2706
}
|
class ____:
def test_validate_empty_string(self):
"""Test that empty string sets field data to None."""
form = SetUploadLimitForm(MultiDict({"upload_limit": ""}))
assert form.validate()
assert form.upload_limit.data is None
# Verify the validator was called and returned early
assert form.upload_limit.errors == []
def test_validate_none(self):
"""Test that None value sets field data to None."""
form = SetUploadLimitForm(MultiDict({}))
assert form.validate()
assert form.upload_limit.data is None
# Verify the validator was called and returned early
assert form.upload_limit.errors == []
def test_validate_upload_limit_with_none_data(self):
"""Test validator directly with None data to cover early return."""
form = SetUploadLimitForm(MultiDict({"upload_limit": ""}))
# The filter converts empty string to None
assert form.upload_limit.data is None
# Call validator directly to ensure the early return is covered
form.validate_upload_limit(form.upload_limit)
assert form.upload_limit.data is None
def test_validate_valid_integer(self):
"""Test that valid integer is converted to bytes."""
form = SetUploadLimitForm(MultiDict({"upload_limit": "150"}))
assert form.validate()
assert form.upload_limit.data == 150 * 1024 * 1024 # 150 MiB in bytes
def test_validate_invalid_value(self):
"""Test that non-integer value raises validation error."""
form = SetUploadLimitForm(MultiDict({"upload_limit": "not_a_number"}))
assert not form.validate()
assert (
"Upload limit must be a valid integer or empty" in form.upload_limit.errors
)
def test_validate_below_minimum(self):
"""Test that value below minimum raises validation error."""
form = SetUploadLimitForm(MultiDict({"upload_limit": "50"})) # < 100 MiB
assert not form.validate()
assert any(
"Upload limit can not be less than" in error
for error in form.upload_limit.errors
)
def test_validate_above_maximum(self):
"""Test that value above maximum raises validation error."""
form = SetUploadLimitForm(MultiDict({"upload_limit": "2000"})) # > 1024 MiB
assert not form.validate()
assert any(
"Upload limit can not be greater than" in error
for error in form.upload_limit.errors
)
|
TestSetUploadLimitForm
|
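The conversion these tests assert is MiB-to-bytes arithmetic with a bounds check. A hedged sketch of that validation logic; `parse_upload_limit` and the 100/1024 MiB bounds are inferred from the test cases, not taken from the form's actual constants.

```python
ONE_MIB = 1024 * 1024
MIN_MIB, MAX_MIB = 100, 1024   # bounds implied by the tests above

def parse_upload_limit(raw):
    if not raw:
        return None                # empty input means "no explicit limit"
    value = int(raw)               # raises ValueError for "not_a_number"
    if not MIN_MIB <= value <= MAX_MIB:
        raise ValueError(f"limit must be between {MIN_MIB} and {MAX_MIB} MiB")
    return value * ONE_MIB         # stored in bytes

assert parse_upload_limit("150") == 157_286_400
assert parse_upload_limit("") is None
```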
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 76075,
"end": 76490
}
|
class ____:
xlMicrosoftAccess = 4 # from enum XlMSApplication
xlMicrosoftFoxPro = 5 # from enum XlMSApplication
xlMicrosoftMail = 3 # from enum XlMSApplication
xlMicrosoftPowerPoint = 2 # from enum XlMSApplication
xlMicrosoftProject = 6 # from enum XlMSApplication
xlMicrosoftSchedulePlus = 7 # from enum XlMSApplication
xlMicrosoftWord = 1 # from enum XlMSApplication
|
MSApplication
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 12802,
"end": 12932
}
|
class ____(PydanticValueError):
code = 'date.not_in_the_past'
msg_template = 'date is not in the past'
|
DateNotInThePastError
|
python
|
getsentry__sentry
|
tests/sentry/models/test_groupreaction.py
|
{
"start": 171,
"end": 6220
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
repo = self.create_repo(project=self.project)
self.commit = self.create_commit(
repo=repo,
project=self.project,
key="pretend this is a sha",
)
def test_check_constraint_both_null_fails(self):
with pytest.raises(IntegrityError):
GroupReaction.objects.create(
project=self.project,
commit=None,
group=None,
user_id=self.user.id,
reaction=False,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
def test_unique_constraint_commit_group(self):
GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=self.group,
user_id=self.user.id,
reaction=False,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
updated_reaction, created = GroupReaction.objects.update_or_create(
project=self.project,
commit=self.commit,
group=self.group,
user_id=self.user.id,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
defaults={"reaction": True},
)
assert not created
assert updated_reaction.reaction
assert GroupReaction.objects.count() == 1
with pytest.raises(IntegrityError):
GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=self.group,
user_id=self.user.id,
reaction=True,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
def test_unique_constraint_project_wide_exclusion(self):
GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=None,
user_id=self.user.id,
reaction=False,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
with pytest.raises(IntegrityError):
GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=None,
user_id=self.user.id,
reaction=True,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
def test_unique_constraint_group_exclusion(self):
GroupReaction.objects.create(
project=self.project,
commit=None,
group=self.group,
user_id=self.user.id,
reaction=False,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
with pytest.raises(IntegrityError):
GroupReaction.objects.create(
project=self.project,
commit=None,
group=self.group,
user_id=self.user.id,
reaction=True,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
def test_user_can_react_to_different_commits_same_group(self):
repo = self.create_repo(project=self.project)
commit2 = self.create_commit(
repo=repo,
project=self.project,
key="another commit sha",
)
GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=self.group,
user_id=self.user.id,
reaction=False,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
reaction2 = GroupReaction.objects.create(
project=self.project,
commit=commit2,
group=self.group,
user_id=self.user.id,
reaction=True,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
assert reaction2.reaction is True
assert GroupReaction.objects.count() == 2
def test_user_can_react_to_multiple_groups(self):
group2 = self.create_group(project=self.project)
GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=self.group,
user_id=self.user.id,
reaction=False,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
reaction2 = GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=group2,
user_id=self.user.id,
reaction=True,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
assert reaction2.group == group2
assert GroupReaction.objects.count() == 2
def test_multiple_deleted_users_can_react(self):
user2 = self.create_user()
reaction1 = GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=self.group,
user_id=self.user.id,
reaction=False,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
reaction2 = GroupReaction.objects.create(
project=self.project,
commit=self.commit,
group=self.group,
user_id=user2.id,
reaction=True,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
# delete users
reaction1.user_id = None
reaction1.save()
reaction2.user_id = None
reaction2.save()
deleted_user_reactions = GroupReaction.objects.filter(
project=self.project,
commit=self.commit,
group=self.group,
user_id=None,
source=GroupReactionType.USER_SUSPECT_COMMIT_REACTION.value,
)
assert deleted_user_reactions.count() == 2
assert GroupReaction.objects.count() == 2
|
GroupReactionTest
|
python
|
apache__airflow
|
scripts/in_container/verify_providers.py
|
{
"start": 1740,
"end": 1948
}
|
class ____(Enum):
Operators = "Operators"
Transfers = "Transfers"
Sensors = "Sensors"
Hooks = "Hooks"
Secrets = "Secrets"
Trigger = "Trigger"
Notification = "Notification"
|
EntityType
|
python
|
ray-project__ray
|
rllib/env/multi_agent_env.py
|
{
"start": 730,
"end": 17949
}
|
class ____(gym.Env):
"""An environment that hosts multiple independent agents.
Agents are identified by AgentIDs (string).
"""
# Optional mappings from AgentID to individual agents' spaces.
# Set this to an "exhaustive" dictionary, mapping all possible AgentIDs to
# individual agents' spaces. Alternatively, override
# `get_observation_space(agent_id=...)` and `get_action_space(agent_id=...)`, which
# is the API that RLlib uses to get individual spaces and whose default
# implementation is to simply look up `agent_id` in these dicts.
observation_spaces: Optional[Dict[AgentID, gym.Space]] = None
action_spaces: Optional[Dict[AgentID, gym.Space]] = None
# All agents currently active in the environment. This attribute may change during
# the lifetime of the env or even during an individual episode.
agents: List[AgentID] = []
# All agents that may appear in the environment, ever.
# This attribute should not be changed during the lifetime of this env.
possible_agents: List[AgentID] = []
# @OldAPIStack, use `observation_spaces` and `action_spaces`, instead.
observation_space: Optional[gym.Space] = None
action_space: Optional[gym.Space] = None
def __init__(self):
super().__init__()
# @OldAPIStack
if not hasattr(self, "_agent_ids"):
self._agent_ids = set()
# If these important attributes are not set, try to infer them.
if not self.agents:
self.agents = list(self._agent_ids)
if not self.possible_agents:
self.possible_agents = self.agents.copy()
def reset(
self,
*,
seed: Optional[int] = None,
options: Optional[dict] = None,
) -> Tuple[MultiAgentDict, MultiAgentDict]: # type: ignore
"""Resets the env and returns observations from ready agents.
Args:
seed: An optional seed to use for the new episode.
Returns:
New observations for each ready agent.
.. testcode::
:skipif: True
from ray.rllib.env.multi_agent_env import MultiAgentEnv
class MyMultiAgentEnv(MultiAgentEnv):
# Define your env here.
env = MyMultiAgentEnv()
obs, infos = env.reset(seed=42, options={})
print(obs)
.. testoutput::
{
"car_0": [2.4, 1.6],
"car_1": [3.4, -3.2],
"traffic_light_1": [0, 3, 5, 1],
}
"""
# Call super's `reset()` method to (maybe) set the given `seed`.
super().reset(seed=seed, options=options)
def step(
self, action_dict: MultiAgentDict
) -> Tuple[
MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict
]:
"""Returns observations from ready agents.
The returns are dicts mapping from agent_id strings to values. The
number of agents in the env can vary over time.
Returns:
Tuple containing 1) new observations for
each ready agent, 2) reward values for each ready agent. If
the episode is just started, the value will be None.
3) Terminated values for each ready agent. The special key
"__all__" (required) is used to indicate env termination.
4) Truncated values for each ready agent.
5) Info values for each agent id (may be empty dicts).
.. testcode::
:skipif: True
env = ...
obs, rewards, terminateds, truncateds, infos = env.step(action_dict={
"car_0": 1, "car_1": 0, "traffic_light_1": 2,
})
print(rewards)
print(terminateds)
print(infos)
.. testoutput::
{
"car_0": 3,
"car_1": -1,
"traffic_light_1": 0,
}
{
"car_0": False, # car_0 is still running
"car_1": True, # car_1 is terminated
"__all__": False, # the env is not terminated
}
{
"car_0": {}, # info for car_0
"car_1": {}, # info for car_1
}
"""
raise NotImplementedError
def render(self) -> None:
"""Tries to render the environment."""
# By default, do nothing.
pass
def get_observation_space(self, agent_id: AgentID) -> gym.Space:
if self.observation_spaces is not None:
return self.observation_spaces[agent_id]
# @OldAPIStack behavior.
# `self.observation_space` is a `gym.spaces.Dict` AND contains `agent_id`.
if (
isinstance(self.observation_space, gym.spaces.Dict)
and agent_id in self.observation_space.spaces
):
return self.observation_space[agent_id]
# `self.observation_space` is not a `gym.spaces.Dict` OR doesn't contain
# `agent_id` -> The defined space is most likely meant to be the space
# for all agents.
else:
return self.observation_space
def get_action_space(self, agent_id: AgentID) -> gym.Space:
if self.action_spaces is not None:
return self.action_spaces[agent_id]
# @OldAPIStack behavior.
# `self.action_space` is a `gym.spaces.Dict` AND contains `agent_id`.
if (
isinstance(self.action_space, gym.spaces.Dict)
and agent_id in self.action_space.spaces
):
return self.action_space[agent_id]
# `self.action_space` is not a `gym.spaces.Dict` OR doesn't contain
# `agent_id` -> The defined space is most likely meant to be the space
# for all agents.
else:
return self.action_space
@property
def num_agents(self) -> int:
return len(self.agents)
@property
def max_num_agents(self) -> int:
return len(self.possible_agents)
# fmt: off
# __grouping_doc_begin__
def with_agent_groups(
self,
groups: Dict[str, List[AgentID]],
obs_space: gym.Space = None,
act_space: gym.Space = None,
) -> "MultiAgentEnv":
"""Convenience method for grouping together agents in this env.
An agent group is a list of agent IDs that are mapped to a single
logical agent. All agents of the group must act at the same time in the
environment. The grouped agent exposes Tuple action and observation
spaces that are the concatenated action and obs spaces of the
individual agents.
The rewards of all the agents in a group are summed. The individual
agent rewards are available under the "individual_rewards" key of the
group info return.
Agent grouping is required to leverage algorithms such as Q-Mix.
Args:
groups: Mapping from group id to a list of the agent ids
of group members. If an agent id is not present in any group
value, it will be left ungrouped. The group id becomes a new agent ID
in the final environment.
obs_space: Optional observation space for the grouped
env. Must be a tuple space. If not provided, will infer this to be a
Tuple of n individual agents spaces (n=num agents in a group).
act_space: Optional action space for the grouped env.
Must be a tuple space. If not provided, will infer this to be a Tuple
of n individual agents spaces (n=num agents in a group).
.. testcode::
:skipif: True
from ray.rllib.env.multi_agent_env import MultiAgentEnv
class MyMultiAgentEnv(MultiAgentEnv):
# define your env here
...
env = MyMultiAgentEnv(...)
grouped_env = env.with_agent_groups(env, {
"group1": ["agent1", "agent2", "agent3"],
"group2": ["agent4", "agent5"],
})
"""
from ray.rllib.env.wrappers.group_agents_wrapper import GroupAgentsWrapper
return GroupAgentsWrapper(self, groups, obs_space, act_space)
# __grouping_doc_end__
# fmt: on
@OldAPIStack
@Deprecated(new="MultiAgentEnv.possible_agents", error=False)
def get_agent_ids(self) -> Set[AgentID]:
if not hasattr(self, "_agent_ids"):
self._agent_ids = set()
if not isinstance(self._agent_ids, set):
self._agent_ids = set(self._agent_ids)
# Make this backward compatible as much as possible.
return self._agent_ids if self._agent_ids else set(self.agents)
@OldAPIStack
def to_base_env(
self,
make_env: Optional[Callable[[int], EnvType]] = None,
num_envs: int = 1,
remote_envs: bool = False,
remote_env_batch_wait_ms: int = 0,
restart_failed_sub_environments: bool = False,
) -> "BaseEnv":
"""Converts an RLlib MultiAgentEnv into a BaseEnv object.
The resulting BaseEnv is always vectorized (contains n
sub-environments) to support batched forward passes, where n may
also be 1. BaseEnv also supports async execution via the `poll` and
`send_actions` methods and thus supports external simulators.
Args:
make_env: A callable taking an int as input (which indicates
the number of individual sub-environments within the final
vectorized BaseEnv) and returning one individual
sub-environment.
num_envs: The number of sub-environments to create in the
resulting (vectorized) BaseEnv. The already existing `env`
will be one of the `num_envs`.
remote_envs: Whether each sub-env should be a @ray.remote
actor. You can set this behavior in your config via the
`remote_worker_envs=True` option.
remote_env_batch_wait_ms: The wait time (in ms) to poll remote
sub-environments for, if applicable. Only used if
`remote_envs` is True.
restart_failed_sub_environments: If True and any sub-environment (within
a vectorized env) throws any error during env stepping, we will try to
restart the faulty sub-environment. This is done
without disturbing the other (still intact) sub-environments.
Returns:
The resulting BaseEnv object.
"""
from ray.rllib.env.remote_base_env import RemoteBaseEnv
if remote_envs:
env = RemoteBaseEnv(
make_env,
num_envs,
multiagent=True,
remote_env_batch_wait_ms=remote_env_batch_wait_ms,
restart_failed_sub_environments=restart_failed_sub_environments,
)
# Sub-environments are not ray.remote actors.
else:
env = MultiAgentEnvWrapper(
make_env=make_env,
existing_envs=[self],
num_envs=num_envs,
restart_failed_sub_environments=restart_failed_sub_environments,
)
return env
@DeveloperAPI
def make_multi_agent(
env_name_or_creator: Union[str, EnvCreator],
) -> Type["MultiAgentEnv"]:
"""Convenience wrapper for any single-agent env to be converted into MA.
Allows you to convert a simple (single-agent) `gym.Env` class
into a `MultiAgentEnv` class. This function simply stacks n instances
of the given ```gym.Env``` class into one unified ``MultiAgentEnv`` class
and returns this class, thus pretending the agents act together in the
same environment, whereas - under the hood - they live separately from
each other in n parallel single-agent envs.
Agent IDs in the resulting env are int numbers starting from 0
(first agent).
Args:
env_name_or_creator: String specifier or env_maker function taking
an EnvContext object as only arg and returning a gym.Env.
Returns:
New MultiAgentEnv class to be used as env.
The constructor takes a config dict with `num_agents` key
(default=1). The rest of the config dict will be passed on to the
underlying single-agent env's constructor.
.. testcode::
:skipif: True
from ray.rllib.env.multi_agent_env import make_multi_agent
# By gym string:
ma_cartpole_cls = make_multi_agent("CartPole-v1")
# Create a 2 agent multi-agent cartpole.
ma_cartpole = ma_cartpole_cls({"num_agents": 2})
obs = ma_cartpole.reset()
print(obs)
# By env-maker callable:
from ray.rllib.examples.envs.classes.stateless_cartpole import StatelessCartPole
ma_stateless_cartpole_cls = make_multi_agent(
lambda config: StatelessCartPole(config))
# Create a 3 agent multi-agent stateless cartpole.
ma_stateless_cartpole = ma_stateless_cartpole_cls(
{"num_agents": 3})
obs, infos = ma_stateless_cartpole.reset()
print(obs)
.. testoutput::
{0: [...], 1: [...]}
{0: [...], 1: [...], 2: [...]}
"""
class MultiEnv(MultiAgentEnv):
def __init__(self, config: EnvContext = None):
super().__init__()
# Note: Explicitly check for None here, because config
# can have an empty dict but meaningful data fields (worker_index,
# vector_index) etc.
# TODO (sven): Clean this up, so we are not mixing up dict fields
# with data fields.
if config is None:
config = {}
else:
# Note the deepcopy is needed b/c (a) we need to remove the
# `num_agents` keyword and (b) with `num_envs > 0` in the
# `VectorMultiAgentEnv` all following environment creations
# need the same config again.
config = copy.deepcopy(config)
num = config.pop("num_agents", 1)
if isinstance(env_name_or_creator, str):
self.envs = [gym.make(env_name_or_creator) for _ in range(num)]
else:
self.envs = [env_name_or_creator(config) for _ in range(num)]
self.terminateds = set()
self.truncateds = set()
self.observation_spaces = {
i: self.envs[i].observation_space for i in range(num)
}
self.action_spaces = {i: self.envs[i].action_space for i in range(num)}
self.agents = list(range(num))
self.possible_agents = self.agents.copy()
@override(MultiAgentEnv)
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
self.terminateds = set()
self.truncateds = set()
obs, infos = {}, {}
for i, env in enumerate(self.envs):
obs[i], infos[i] = env.reset(seed=seed, options=options)
if not self.observation_spaces[i].contains(obs[i]):
print("===> MultiEnv does not contain obs.")
return obs, infos
@override(MultiAgentEnv)
def step(self, action_dict):
obs, rew, terminated, truncated, info = {}, {}, {}, {}, {}
# The environment is expecting an action for at least one agent.
if len(action_dict) == 0:
raise ValueError(
"The environment is expecting an action for at least one agent."
)
for i, action in action_dict.items():
obs[i], rew[i], terminated[i], truncated[i], info[i] = self.envs[
i
].step(action)
if terminated[i]:
self.terminateds.add(i)
if truncated[i]:
self.truncateds.add(i)
# TODO: Flaw in our MultiAgentEnv API wrt. new gymnasium: Need to return
# an additional episode_done bool that covers cases where all agents are
# either terminated or truncated, but not all are truncated and not all are
terminated. We can then get rid of the awful `__all__` special keys!
terminated["__all__"] = len(self.terminateds | self.truncateds) == len(
self.envs
)
truncated["__all__"] = len(self.truncateds) == len(self.envs)
return obs, rew, terminated, truncated, info
@override(MultiAgentEnv)
def render(self):
# This render method simply renders all n underlying individual single-agent
# envs and concatenates their images (on top of each other if the returned
# images have dims where [width] > [height], otherwise next to each other).
render_images = [e.render() for e in self.envs]
if render_images[0].shape[1] > render_images[0].shape[0]:
concat_dim = 0
else:
concat_dim = 1
return np.concatenate(render_images, axis=concat_dim)
return MultiEnv
@OldAPIStack
|
MultiAgentEnv
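The `__all__` bookkeeping inside `MultiEnv.step()` above is easy to get wrong, so here is a minimal, dependency-free sketch of the aggregation rule described in the TODO comment. Nothing here is RLlib API; the helper name, agent ids and flags are invented for illustration.

def aggregate_dones(num_agents, terminateds, truncateds):
    """Derive the per-episode '__all__' flags from per-agent done sets.

    `terminateds` / `truncateds` are sets of agent ids that have terminated
    or been truncated so far in the current episode.
    """
    # The episode is over once every agent is either terminated or truncated.
    terminated_all = len(terminateds | truncateds) == num_agents
    # It only counts as "all truncated" if every single agent was truncated.
    truncated_all = len(truncateds) == num_agents
    return terminated_all, truncated_all


# Agent 0 terminated, agent 1 truncated -> episode done, but not all-truncated.
print(aggregate_dones(2, {0}, {1}))        # (True, False)
# Nobody done yet.
print(aggregate_dones(2, set(), set()))    # (False, False)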
|
python
|
openai__openai-python
|
tests/test_transform.py
|
{
"start": 13305,
"end": 13762
}
|
class ____(TypedDict):
foo: Annotated[Union[str, Iterable[Baz8]], PropertyInfo(alias="FOO")]
@parametrize
@pytest.mark.asyncio
async def test_iterable_union_str(use_async: bool) -> None:
assert await transform({"foo": "bar"}, TypedDictIterableUnionStr, use_async) == {"FOO": "bar"}
assert cast(Any, await transform(iter([{"foo_baz": "bar"}]), Union[str, Iterable[Baz8]], use_async)) == [
{"fooBaz": "bar"}
]
|
TypedDictIterableUnionStr
|
python
|
PyCQA__mccabe
|
test_mccabe.py
|
{
"start": 5479,
"end": 7998
}
|
class ____(unittest.TestCase):
def setUp(self):
self.original_complexity = mccabe.McCabeChecker.max_complexity
def tearDown(self):
mccabe.McCabeChecker.max_complexity = self.original_complexity
def test_max_complexity_is_always_an_int(self):
"""Ensure bug #32 does not regress."""
class _options(object):
max_complexity = None
options = _options()
options.max_complexity = '16'
self.assertEqual(0, mccabe.McCabeChecker.max_complexity)
mccabe.McCabeChecker.parse_options(options)
self.assertEqual(16, mccabe.McCabeChecker.max_complexity)
def test_get_module_complexity(self):
self.assertEqual(0, mccabe.get_module_complexity("mccabe.py"))
# This test uses the Hypothesis and Hypothesmith libraries to generate random
# syntactically-valid Python source code and applies mccabe to it.
@settings(
max_examples=1000, # roughly 1k tests/minute, or half that under coverage
derandomize=True, # deterministic mode to avoid CI flakiness
deadline=None, # ignore Hypothesis' health checks; we already know that
suppress_health_check=HealthCheck.all(), # this is slow and filter-heavy.
)
@given(
# Note that while Hypothesmith might generate code unlike that written by
# humans, it's a general test that should pass for any *valid* source code.
# (so e.g. running it against code scraped of the internet might also help)
src_contents=hypothesmith.from_grammar() | hypothesmith.from_node(),
max_complexity=st.integers(min_value=1),
)
@pytest.mark.skipif(not hypothesmith, reason="hypothesmith could not be imported")
def test_idempotent_any_syntatically_valid_python(
src_contents: str, max_complexity: int
) -> None:
"""Property-based tests for mccabe.
This test case is based on a similar test for Black, the code formatter.
Black's test was written by Zac Hatfield-Dodds, the author of Hypothesis
and the Hypothesmith tool for source code generation. You can run this
file with `python`, `pytest`, or (soon) a coverage-guided fuzzer Zac is
working on.
"""
# Before starting, let's confirm that the input string is valid Python:
compile(src_contents, "<string>", "exec") # else bug is in hypothesmith
# Then try to apply get_complexity_number to the code...
get_code_complexity(src_contents, max_complexity)
if __name__ == "__main__":
test_idempotent_any_syntatically_valid_python()
unittest.main()
|
RegressionTests
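As a lighter companion to the Hypothesmith-driven property test above, the following sketch needs only Hypothesis and mccabe itself. The generated sources (straight-line functions made of `pass` statements) and the test name are illustrative, not part of the mccabe test suite.

from hypothesis import given, strategies as st
from mccabe import get_code_complexity


@given(
    n_statements=st.integers(min_value=1, max_value=50),
    threshold=st.integers(min_value=1, max_value=100),
)
def test_straight_line_code_is_never_flagged(n_statements, threshold):
    # A function without branches has complexity 1, so it should never be
    # reported for any threshold >= 1.
    body = "\n".join("    pass" for _ in range(n_statements))
    src = f"def f():\n{body}\n"
    compile(src, "<string>", "exec")  # sanity check: the source is valid Python
    # get_code_complexity returns the number of blocks over the threshold.
    assert get_code_complexity(src, threshold) == 0


if __name__ == "__main__":
    test_straight_line_code_is_never_flagged()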
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/scaffold/scaffold.py
|
{
"start": 2542,
"end": 2678
}
|
class ____:
message: str
ScaffoldFormatOptions: TypeAlias = Literal["yaml", "python"]
@public
@dataclass
|
ScaffolderUnavailableReason
|
python
|
pypa__warehouse
|
tests/unit/accounts/test_views.py
|
{
"start": 28076,
"end": 47380
}
|
class ____:
def test_get_two_factor_data_invalid_after_login(self, pyramid_request):
sign_time = datetime.datetime.now(datetime.UTC) - datetime.timedelta(seconds=30)
last_login_time = datetime.datetime.now(datetime.UTC) - datetime.timedelta(
seconds=1
)
query_params = {"userid": 1}
token_service = pretend.stub(
loads=pretend.call_recorder(
lambda *args, **kwargs: (query_params, sign_time)
)
)
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: 1),
get_user=pretend.call_recorder(
lambda userid: pretend.stub(last_login=last_login_time)
),
update_user=lambda *a, **k: None,
has_totp=lambda uid: True,
has_webauthn=lambda uid: False,
has_recovery_codes=lambda uid: False,
)
pyramid_request.find_service = lambda interface, **kwargs: {
ITokenService: token_service,
IUserService: user_service,
}[interface]
pyramid_request.query_string = pretend.stub()
with pytest.raises(TokenInvalid):
views._get_two_factor_data(pyramid_request)
def test_two_factor_and_totp_validate_redirect_to_account_login(
self,
db_request,
token_service,
user_service,
):
"""
Checks the redirect to the login page if the 2FA login has expired.
Given there's a user in the database with a token signed before the last_login date
When the user calls the accounts.two-factor view
Then the user is redirected to the account/login page
.. warning::
This test has to use the database and load the user from it
to make sure we always compare user.last_login as a timezone-aware datetime.
"""
user = UserFactory.create(
username="jdoe",
name="Joe",
password="any",
is_active=True,
last_login=datetime.datetime.now(datetime.UTC)
+ datetime.timedelta(days=+1),
)
token_data = {"userid": user.id}
# Remove user object from scope, The `token_service` will load the user
# from the `user_service` and handle it from there
db_request.db.expunge(user)
del user
token = token_service.dumps(token_data)
db_request.query_string = token
db_request.find_service = lambda interface, **kwargs: {
ITokenService: token_service,
IUserService: user_service,
}[interface]
db_request.route_path = pretend.call_recorder(lambda name: "/account/login/")
two_factor_and_totp_validate(db_request)
# This view is redirected to only during a TokenException recovery
# which is called in two instances:
# 1. No userid in token
# 2. The token has expired
assert db_request.route_path.calls == [pretend.call("accounts.login")]
@pytest.mark.parametrize("redirect_url", [None, "/foo/bar/", "/wat/"])
def test_get_returns_totp_form(self, pyramid_request, redirect_url):
query_params = {"userid": 1}
if redirect_url:
query_params["redirect_to"] = redirect_url
token_service = pretend.stub(
loads=pretend.call_recorder(
lambda *args, **kwargs: (
query_params,
datetime.datetime.now(datetime.UTC),
)
)
)
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: 1),
get_user=pretend.call_recorder(
lambda userid: pretend.stub(
last_login=(
datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1)
)
)
),
update_user=lambda *a, **k: None,
has_totp=lambda uid: True,
has_webauthn=lambda uid: False,
has_recovery_codes=lambda uid: False,
)
pyramid_request.find_service = lambda interface, **kwargs: {
ITokenService: token_service,
IUserService: user_service,
}[interface]
pyramid_request.registry.settings = {"remember_device.days": 30}
pyramid_request.query_string = pretend.stub()
form_obj = pretend.stub()
form_class = pretend.call_recorder(lambda d, user_service, **kw: form_obj)
result = views.two_factor_and_totp_validate(
pyramid_request, _form_class=form_class
)
assert token_service.loads.calls == [
pretend.call(pyramid_request.query_string, return_timestamp=True)
]
assert result == {"totp_form": form_obj, "remember_device_days": 30}
assert form_class.calls == [
pretend.call(
pyramid_request.POST,
request=pyramid_request,
user_id=1,
user_service=user_service,
check_password_metrics_tags=["method:auth", "auth_method:login_form"],
)
]
@pytest.mark.parametrize("redirect_url", [None, "/foo/bar/", "/wat/"])
def test_get_returns_webauthn(self, pyramid_request, redirect_url):
query_params = {"userid": 1}
if redirect_url:
query_params["redirect_to"] = redirect_url
token_service = pretend.stub(
loads=pretend.call_recorder(
lambda *args, **kwargs: (
query_params,
datetime.datetime.now(datetime.UTC),
)
)
)
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: 1),
get_user=pretend.call_recorder(
lambda userid: pretend.stub(
last_login=(
datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1)
)
)
),
update_user=lambda *a, **k: None,
has_totp=lambda uid: False,
has_webauthn=lambda uid: True,
has_recovery_codes=lambda uid: False,
)
pyramid_request.find_service = lambda interface, **kwargs: {
ITokenService: token_service,
IUserService: user_service,
}[interface]
pyramid_request.registry.settings = {"remember_device.days": 30}
pyramid_request.query_string = pretend.stub()
result = views.two_factor_and_totp_validate(
pyramid_request, _form_class=pretend.stub()
)
assert token_service.loads.calls == [
pretend.call(pyramid_request.query_string, return_timestamp=True)
]
assert result == {"has_webauthn": True, "remember_device_days": 30}
@pytest.mark.parametrize("redirect_url", [None, "/foo/bar/", "/wat/"])
def test_get_returns_recovery_code_status(self, pyramid_request, redirect_url):
query_params = {"userid": 1}
if redirect_url:
query_params["redirect_to"] = redirect_url
token_service = pretend.stub(
loads=pretend.call_recorder(
lambda *args, **kwargs: (
query_params,
datetime.datetime.now(datetime.UTC),
)
)
)
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: 1),
get_user=pretend.call_recorder(
lambda userid: pretend.stub(
last_login=(
datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1)
)
)
),
update_user=lambda *a, **k: None,
has_totp=lambda uid: False,
has_webauthn=lambda uid: False,
has_recovery_codes=lambda uid: True,
)
pyramid_request.find_service = lambda interface, **kwargs: {
ITokenService: token_service,
IUserService: user_service,
}[interface]
pyramid_request.registry.settings = {"remember_device.days": 30}
pyramid_request.query_string = pretend.stub()
result = views.two_factor_and_totp_validate(
pyramid_request, _form_class=pretend.stub()
)
assert token_service.loads.calls == [
pretend.call(pyramid_request.query_string, return_timestamp=True)
]
assert result == {"has_recovery_codes": True, "remember_device_days": 30}
@pytest.mark.parametrize("redirect_url", ["test_redirect_url", None])
@pytest.mark.parametrize("has_recovery_codes", [True, False])
@pytest.mark.parametrize("remember_device", [True, False])
def test_totp_auth(
self,
monkeypatch,
db_request,
redirect_url,
has_recovery_codes,
remember_device,
make_email_renderers,
metrics,
):
make_email_renderers("unrecognized-login")
remember = pretend.call_recorder(lambda request, user_id: [("foo", "bar")])
monkeypatch.setattr(views, "remember", remember)
_remember_device = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "_remember_device", _remember_device)
user = UserFactory.create(
with_verified_primary_email=True,
username="testuser",
name="Test User",
last_login=(
datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1)
),
)
monkeypatch.setattr(
type(user),
"has_recovery_codes",
property(lambda u: has_recovery_codes),
)
user.record_event = pretend.call_recorder(lambda *a, **kw: None)
user_id = user.id
query_params = {"userid": str(user_id)}
if redirect_url:
query_params["redirect_to"] = redirect_url
two_factor_token_service = pretend.stub(
loads=pretend.call_recorder(
lambda *args, **kwargs: (
query_params,
datetime.datetime.now(datetime.UTC),
)
)
)
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda username: user.id),
get_user=pretend.call_recorder(lambda userid: user),
update_user=lambda *a, **k: None,
has_totp=lambda userid: True,
has_webauthn=lambda userid: False,
has_recovery_codes=lambda userid: has_recovery_codes,
check_totp_value=lambda userid, totp_value: True,
get_password_timestamp=lambda userid: 0,
needs_tos_flash=lambda userid, revision: False,
device_is_known=lambda *a: True,
)
new_session = {}
db_request.find_service = lambda interface, **kwargs: {
ITokenService: two_factor_token_service,
IUserService: user_service,
}[interface]
db_request.method = "POST"
db_request.session = pretend.stub(
items=lambda: [("a", "b"), ("foo", "bar")],
update=new_session.update,
invalidate=pretend.call_recorder(lambda: None),
new_csrf_token=pretend.call_recorder(lambda: None),
get_password_timestamp=lambda userid: 0,
)
db_request.session.record_auth_timestamp = pretend.call_recorder(
lambda *args: None
)
db_request.session.record_password_timestamp = lambda timestamp: None
db_request.registry.settings = {"remember_device.days": 30}
form_obj = pretend.stub(
validate=pretend.call_recorder(lambda: True),
totp_value=pretend.stub(data="test-otp-secret"),
remember_device=pretend.stub(data=remember_device),
)
form_class = pretend.call_recorder(lambda d, user_service, **kw: form_obj)
db_request.route_path = pretend.call_recorder(lambda a: "/account/two-factor")
db_request.params = pretend.stub(
get=pretend.call_recorder(lambda k: query_params.get(k))
)
db_request.user = user
send_email = pretend.call_recorder(lambda *a: None)
monkeypatch.setattr(views, "send_recovery_code_reminder_email", send_email)
result = views.two_factor_and_totp_validate(db_request, _form_class=form_class)
token_expected_data = {"userid": str(user.id)}
if redirect_url:
token_expected_data["redirect_to"] = redirect_url
assert isinstance(result, HTTPSeeOther)
assert remember.calls == [pretend.call(db_request, str(user.id))]
assert db_request.session.invalidate.calls == [pretend.call()]
assert db_request.session.new_csrf_token.calls == [pretend.call()]
assert user.record_event.calls == [
pretend.call(
tag=EventTag.Account.LoginSuccess,
request=db_request,
additional={"two_factor_method": "totp", "two_factor_label": "totp"},
)
]
assert db_request.session.record_auth_timestamp.calls == [pretend.call()]
assert send_email.calls == (
[] if has_recovery_codes else [pretend.call(db_request, user)]
)
assert _remember_device.calls == (
[]
if not remember_device
else [pretend.call(db_request, result, str(user.id), "totp")]
)
def test_totp_auth_already_authed(self):
request = pretend.stub(
identity=pretend.stub(),
route_path=pretend.call_recorder(lambda p: "redirect_to"),
)
result = views.two_factor_and_totp_validate(request)
assert request.route_path.calls == [pretend.call("manage.projects")]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "redirect_to"
def test_totp_form_invalid(self):
token_data = {"userid": 1}
token_service = pretend.stub(
loads=pretend.call_recorder(
lambda *args, **kwargs: (
token_data,
datetime.datetime.now(datetime.UTC),
)
)
)
user_service = pretend.stub(
get_user=pretend.call_recorder(
lambda userid: pretend.stub(
last_login=(
datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1)
)
)
),
has_totp=lambda userid: True,
has_webauthn=lambda userid: False,
has_recovery_codes=lambda userid: False,
check_totp_value=lambda userid, totp_value: False,
)
request = pretend.stub(
POST={},
method="POST",
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
identity=None,
route_path=pretend.call_recorder(lambda p: "redirect_to"),
find_service=lambda interface, **kwargs: {
ITokenService: token_service,
IUserService: user_service,
}[interface],
query_string=pretend.stub(),
registry=pretend.stub(settings={"remember_device.days": 30}),
)
form_obj = pretend.stub(
validate=pretend.call_recorder(lambda: False),
totp_value=pretend.stub(data="test-otp-secret"),
)
form_class = pretend.call_recorder(lambda *a, **kw: form_obj)
result = views.two_factor_and_totp_validate(request, _form_class=form_class)
assert token_service.loads.calls == [
pretend.call(request.query_string, return_timestamp=True)
]
assert result == {"totp_form": form_obj, "remember_device_days": 30}
def test_two_factor_token_missing_userid(self, pyramid_request):
token_service = pretend.stub(
loads=pretend.call_recorder(lambda *a, **kw: ({}, None))
)
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request.route_path = pretend.call_recorder(lambda p: "redirect_to")
pyramid_request.find_service = lambda interface, **kwargs: {
ITokenService: token_service
}[interface]
pyramid_request.query_string = pretend.stub()
result = views.two_factor_and_totp_validate(pyramid_request)
assert token_service.loads.calls == [
pretend.call(pyramid_request.query_string, return_timestamp=True)
]
assert pyramid_request.route_path.calls == [pretend.call("accounts.login")]
assert pyramid_request.session.flash.calls == [
pretend.call("Invalid or expired two factor login.", queue="error")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "redirect_to"
def test_two_factor_token_invalid(self, pyramid_request):
token_service = pretend.stub(loads=pretend.raiser(TokenException))
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request.find_service = lambda interface, **kwargs: {
ITokenService: token_service
}[interface]
pyramid_request.route_path = pretend.call_recorder(lambda p: "redirect_to")
result = views.two_factor_and_totp_validate(pyramid_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "redirect_to"
assert pyramid_request.session.flash.calls == [
pretend.call("Invalid or expired two factor login.", queue="error")
]
def test_two_factor_and_totp_validate_device_not_known(
self, db_request, token_service
):
user = UserFactory.create()
token_data = {"userid": str(user.id)}
token_service.loads = pretend.call_recorder(
lambda *args, **kwargs: (
token_data,
datetime.datetime.now(datetime.UTC),
)
)
user_service = pretend.stub(
get_user=lambda userid: user,
has_totp=lambda uid: True,
has_webauthn=lambda uid: False,
has_recovery_codes=lambda uid: False,
device_is_known=lambda *a: False,
check_totp_value=lambda userid, totp_value: True,
)
db_request.find_service = lambda interface, **kwargs: {
ITokenService: token_service,
IUserService: user_service,
}[interface]
db_request.route_path = pretend.call_recorder(
lambda name: "/account/confirm-login/"
)
db_request.query_string = token_service.dumps(token_data)
db_request.registry.settings = {"remember_device.days": 30}
db_request.method = "POST"
db_request.POST = MultiDict({"totp_value": "123456"})
result = two_factor_and_totp_validate(db_request)
assert isinstance(result, HTTPSeeOther)
assert db_request.route_path.calls == [pretend.call("accounts.confirm-login")]
|
TestTwoFactor
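The tests above lean heavily on the `pretend` library; a minimal, self-contained sketch of the two primitives they use (`pretend.stub` and `pretend.call_recorder`) follows. The token-service shape is invented for illustration.

import pretend

# A stub is just a bag of attributes; call_recorder wraps a callable and
# records every invocation on a `.calls` list.
token_service = pretend.stub(
    loads=pretend.call_recorder(lambda token, return_timestamp=False: {"userid": 1})
)

result = token_service.loads("some-token", return_timestamp=True)

assert result == {"userid": 1}
# Recorded calls compare equal to pretend.call(...) objects, which is what
# the assertions in the tests above rely on.
assert token_service.loads.calls == [pretend.call("some-token", return_timestamp=True)]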
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
|
{
"start": 3595,
"end": 4629
}
|
class ____(RkiCovidStream, ABC):
state_checkpoint_interval = None
@property
def cursor_field(self) -> str:
"""
TODO
Override to return the cursor field used by this stream, e.g.: an API entity might always use created_at as the cursor field. This is
usually id or date based. This field's presence tells the framework this is an incremental stream. Required for incremental.
:return str: The name of the cursor field.
"""
return []
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Override to determine the latest state after reading the latest record. This typically compares the cursor_field from the latest record and
the current state and picks the 'most' recent cursor. This is how a stream's state is determined. Required for incremental.
"""
return {}
# source: germany/history/cases/:days | Incremental
|
IncrementalRkiCovidStream
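For the incremental stream above, `get_updated_state` is meant to keep the most recent cursor value seen so far. A framework-free sketch of that comparison is shown below; the helper name and the cursor field name `date` are assumptions for illustration, not the connector's actual cursor.

def updated_state(current_state: dict, latest_record: dict, cursor_field: str = "date") -> dict:
    """Keep whichever cursor value is more recent (ISO dates compare lexicographically)."""
    latest = latest_record.get(cursor_field, "")
    current = current_state.get(cursor_field, "")
    return {cursor_field: max(latest, current)}


print(updated_state({"date": "2022-01-01"}, {"date": "2022-03-15"}))  # {'date': '2022-03-15'}
print(updated_state({"date": "2022-05-01"}, {"date": "2022-03-15"}))  # {'date': '2022-05-01'}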
|
python
|
huggingface__transformers
|
src/transformers/models/vivit/modeling_vivit.py
|
{
"start": 14278,
"end": 14870
}
|
class ____(nn.Module):
def __init__(self, config: VivitConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
@auto_docstring
|
VivitPooler
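The pooler above is just "take the first token, project, tanh". A standalone shape check with random tensors (the sizes are illustrative ViViT-like values; no checkpoint or config involved):

import torch
from torch import nn

batch, seq_len, hidden = 2, 3137, 768            # illustrative sizes only
hidden_states = torch.randn(batch, seq_len, hidden)

dense = nn.Linear(hidden, hidden)
pooled = torch.tanh(dense(hidden_states[:, 0]))  # keep only the first ([CLS]) token
print(pooled.shape)                              # torch.Size([2, 768])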
|
python
|
davidhalter__jedi
|
jedi/inference/names.py
|
{
"start": 13835,
"end": 14737
}
|
class ____(_ParamMixin):
api_type = 'param'
def get_kind(self):
raise NotImplementedError
def to_string(self):
raise NotImplementedError
def get_executed_param_name(self):
"""
For dealing with type inference and working around the graph, we
sometimes want to have the param name of the execution. This feels a
bit strange and we might have to refactor at some point.
For now, however, it exists to avoid inferring params when we don't really
need them (e.g. when we can just use annotations instead).
"""
return None
@property
def star_count(self):
kind = self.get_kind()
if kind == Parameter.VAR_POSITIONAL:
return 1
if kind == Parameter.VAR_KEYWORD:
return 2
return 0
def infer_default(self):
return NO_VALUES
|
ParamNameInterface
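The `star_count` property above maps parameter kinds onto the number of leading stars. The same mapping can be reproduced with the standard-library `inspect` module; this sketch is standalone and does not touch jedi.

import inspect
from inspect import Parameter


def star_count(kind):
    if kind == Parameter.VAR_POSITIONAL:   # *args
        return 1
    if kind == Parameter.VAR_KEYWORD:      # **kwargs
        return 2
    return 0                               # plain, positional-only, keyword-only


def f(a, *args, b=1, **kwargs):
    pass


for name, param in inspect.signature(f).parameters.items():
    print(name, star_count(param.kind))
# prints: a 0 / args 1 / b 0 / kwargs 2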
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/log.py
|
{
"start": 1321,
"end": 1570
}
|
class ____(BaseModel):
"""Log serializer for responses."""
content: list[StructuredLogMessage] | list[str]
"""Either a list of parsed events, or a list of lines on parse error"""
continuation_token: str | None
|
TaskInstancesLogResponse
|
python
|
huggingface__transformers
|
src/transformers/models/swin2sr/modeling_swin2sr.py
|
{
"start": 29586,
"end": 30260
}
|
class ____(PreTrainedModel):
config: Swin2SRConfig
base_model_prefix = "swin2sr"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.trunc_normal_(module.weight, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
@auto_docstring
|
Swin2SRPreTrainedModel
|
python
|
Farama-Foundation__Gymnasium
|
docs/tutorials/gymnasium_basics/implementing_custom_wrappers.py
|
{
"start": 1854,
"end": 2786
}
|
class ____(ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.observation_space = Box(shape=(2,), low=-np.inf, high=np.inf)
def observation(self, obs):
return obs["target"] - obs["agent"]
# %%
# Inheriting from :class:`gymnasium.ActionWrapper`
# ------------------------------------------------
# Action wrappers can be used to apply a transformation to actions before applying them to the environment.
# If you implement an action wrapper, you need to define that transformation by implementing
# :meth:`gymnasium.ActionWrapper.action`. Moreover, you should specify the domain of that transformation
# by updating the action space of the wrapper.
#
# Let’s say you have an environment with action space of type :class:`gymnasium.spaces.Box`, but you would only like
# to use a finite subset of actions. Then, you might want to implement the following wrapper:
|
RelativePosition
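The tutorial text above stops right before the promised wrapper. A hedged sketch of such an `ActionWrapper` is given below: it exposes only a finite set of hand-picked actions from a continuous `Box` space. The environment id, the class name and the concrete actions are illustrative, not the tutorial's exact code.

import gymnasium as gym
import numpy as np
from gymnasium.spaces import Discrete


class DiscreteActions(gym.ActionWrapper):
    def __init__(self, env, disc_to_cont):
        super().__init__(env)
        self.disc_to_cont = disc_to_cont
        # Advertise the restricted action space to anything using the wrapper.
        self.action_space = Discrete(len(disc_to_cont))

    def action(self, act):
        # Map the discrete index back into the wrapped env's Box action space.
        return self.disc_to_cont[act]


if __name__ == "__main__":
    env = gym.make("Pendulum-v1")  # continuous Box(-2.0, 2.0, (1,)) action space
    wrapped = DiscreteActions(env, [np.array([-2.0]), np.array([0.0]), np.array([2.0])])
    print(wrapped.action_space)    # Discrete(3)
    obs, info = wrapped.reset(seed=0)
    obs, reward, terminated, truncated, info = wrapped.step(1)  # applies np.array([0.0])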
|
python
|
django__django
|
tests/queryset_pickle/models.py
|
{
"start": 632,
"end": 907
}
|
class ____(models.Model):
name = models.CharField(_("name"), max_length=100)
objects = models.Manager()
previous_django_version_objects = PreviousDjangoVersionQuerySet.as_manager()
missing_django_version_objects = MissingDjangoVersionQuerySet.as_manager()
|
Group
|
python
|
google__jax
|
tests/scipy_interpolate_test.py
|
{
"start": 851,
"end": 2349
}
|
class ____(jtu.JaxTestCase):
"""Tests for LAX-backed scipy.interpolate implementations"""
@jtu.sample_product(
spaces=(((0., 10., 10),), ((-15., 20., 12), (3., 4., 24))),
method=("linear", "nearest"),
)
def testRegularGridInterpolator(self, spaces, method):
rng = jtu.rand_default(self.rng())
scipy_fun = lambda init_args, call_args: sp_interp.RegularGridInterpolator(
*init_args[:2], method, False, *init_args[2:])(*call_args)
lax_fun = lambda init_args, call_args: jsp_interp.RegularGridInterpolator(
*init_args[:2], method, False, *init_args[2:])(*call_args)
def args_maker():
points = tuple(map(lambda x: np.linspace(*x), spaces))
values = rng(reduce(operator.add, tuple(map(np.shape, points))), float)
fill_value = np.nan
init_args = (points, values, fill_value)
n_validation_points = 50
valid_points = tuple(
map(
lambda x: np.linspace(x[0] - 0.2 * (x[1] - x[0]), x[1] + 0.2 *
(x[1] - x[0]), n_validation_points),
spaces))
valid_points = np.squeeze(np.stack(valid_points, axis=1))
call_args = (valid_points,)
return init_args, call_args
self._CheckAgainstNumpy(
scipy_fun, lax_fun, args_maker, check_dtypes=False, tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, rtol={np.float64: 1e-14})
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
LaxBackedScipyInterpolateTests
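For readers unfamiliar with the API being exercised above, here is a minimal usage sketch of SciPy's `RegularGridInterpolator`; `jax.scipy.interpolate` exposes a class of the same name with a matching constructor, which is exactly what the test compares against. The interpolated function is arbitrary.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

x = np.linspace(0.0, 10.0, 11)
y = np.linspace(-5.0, 5.0, 21)
xx, yy = np.meshgrid(x, y, indexing="ij")
values = xx**2 + yy**2               # sample an arbitrary smooth function on the grid

interp = RegularGridInterpolator((x, y), values, method="linear")
pts = np.array([[2.5, 0.5], [7.0, -3.0]])
print(interp(pts))                   # interpolated values at off-grid points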
|
python
|
tornadoweb__tornado
|
tornado/httpserver.py
|
{
"start": 698,
"end": 1510
}
|
class ____ to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).
.. versionchanged:: 4.0
The ``HTTPRequest`` class that used to live in this module has been moved
to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias.
"""
import socket
import ssl
from tornado.escape import native_str
from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
from tornado import httputil
from tornado import iostream
from tornado import netutil
from tornado.tcpserver import TCPServer
from tornado.util import Configurable
import typing
from typing import Union, Any, Dict, Callable, List, Type, Tuple, Optional, Awaitable
if typing.TYPE_CHECKING:
from typing import Set # noqa: F401
|
except
|
python
|
PyCQA__pyflakes
|
pyflakes/messages.py
|
{
"start": 8639,
"end": 8880
}
|
class ____(Message):
message = "'...'.format(...) has invalid format string: %s"
def __init__(self, filename, loc, error):
Message.__init__(self, filename, loc)
self.message_args = (error,)
|
StringDotFormatInvalidFormat
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/endpoints-frameworks-v2/quickstart/main.py
|
{
"start": 4860,
"end": 5439
}
|
class ____(remote.Service):
@endpoints.method(
message_types.VoidMessage,
Greeting,
path="greet",
http_method="POST",
name="greet",
)
def greet(self, request):
user = endpoints.get_current_user()
user_name = user.email() if user else "Anonymous"
return Greeting(message="Hello, {}".format(user_name))
# [END endpoints_authed_greeting_api]
# [START endpoints_greeting_api_api_server]
api = endpoints.api_server([GreetingApi, AuthedGreetingApi])
# [END endpoints_greeting_api_api_server]
|
AuthedGreetingApi
|
python
|
ipython__ipython
|
docs/autogen_shortcuts.py
|
{
"start": 840,
"end": 982
}
|
class ____(Filter):
"""Protocol reflecting non-public prompt_toolkit's `_AndList` and `_OrList`."""
filters: List[Filter]
|
_NestedFilter
|
python
|
mlflow__mlflow
|
mlflow/entities/model_registry/model_version.py
|
{
"start": 659,
"end": 9070
}
|
class ____(_ModelRegistryEntity):
"""
MLflow entity for Model Version.
"""
def __init__(
self,
name: str,
version: str,
creation_timestamp: int,
last_updated_timestamp: int | None = None,
description: str | None = None,
user_id: str | None = None,
current_stage: str | None = None,
source: str | None = None,
run_id: str | None = None,
status: str = ModelVersionStatus.to_string(ModelVersionStatus.READY),
status_message: str | None = None,
tags: list[ModelVersionTag] | None = None,
run_link: str | None = None,
aliases: list[str] | None = None,
# TODO: Make model_id a required field
# (currently optional to minimize breakages during prototype development)
model_id: str | None = None,
params: list[ModelParam] | None = None,
metrics: list[Metric] | None = None,
deployment_job_state: ModelVersionDeploymentJobState | None = None,
):
super().__init__()
self._name: str = name
self._version: str = version
self._creation_time: int = creation_timestamp
self._last_updated_timestamp: int | None = last_updated_timestamp
self._description: str | None = description
self._user_id: str | None = user_id
self._current_stage: str | None = current_stage
self._source: str | None = source
self._run_id: str | None = run_id
self._run_link: str | None = run_link
self._status: str = status
self._status_message: str | None = status_message
self._tags: dict[str, str] = {tag.key: tag.value for tag in (tags or [])}
self._aliases: list[str] = aliases or []
self._model_id: str | None = model_id
self._params: list[ModelParam] | None = params
self._metrics: list[Metric] | None = metrics
self._deployment_job_state: ModelVersionDeploymentJobState | None = deployment_job_state
@property
def name(self) -> str:
"""String. Unique name within Model Registry."""
return self._name
@name.setter
def name(self, new_name: str):
self._name = new_name
@property
def version(self) -> str:
"""Version"""
return self._version
@property
def creation_timestamp(self) -> int:
"""Integer. Model version creation timestamp (milliseconds since the Unix epoch)."""
return self._creation_time
@property
def last_updated_timestamp(self) -> int | None:
"""Integer. Timestamp of last update for this model version (milliseconds since the Unix
epoch).
"""
return self._last_updated_timestamp
@last_updated_timestamp.setter
def last_updated_timestamp(self, updated_timestamp: int):
self._last_updated_timestamp = updated_timestamp
@property
def description(self) -> str | None:
"""String. Description"""
return self._description
@description.setter
def description(self, description: str):
self._description = description
@property
def user_id(self) -> str | None:
"""String. User ID that created this model version."""
return self._user_id
@property
def current_stage(self) -> str | None:
"""String. Current stage of this model version."""
return self._current_stage
@current_stage.setter
def current_stage(self, stage: str):
self._current_stage = stage
@property
def source(self) -> str | None:
"""String. Source path for the model."""
return self._source
@property
def run_id(self) -> str | None:
"""String. MLflow run ID that generated this model."""
return self._run_id
@property
def run_link(self) -> str | None:
"""String. MLflow run link referring to the exact run that generated this model version."""
return self._run_link
@property
def status(self) -> str:
"""String. Current Model Registry status for this model."""
return self._status
@property
def status_message(self) -> str | None:
"""String. Descriptive message for error status conditions."""
return self._status_message
@property
def tags(self) -> dict[str, str]:
"""Dictionary of tag key (string) -> tag value for the current model version."""
return self._tags
@property
def aliases(self) -> list[str]:
"""List of aliases (string) for the current model version."""
return self._aliases
@aliases.setter
def aliases(self, aliases: list[str]):
self._aliases = aliases
@property
def model_id(self) -> str | None:
"""String. ID of the model associated with this version."""
return self._model_id
@property
def params(self) -> list[ModelParam] | None:
"""List of parameters associated with this model version."""
return self._params
@property
def metrics(self) -> list[Metric] | None:
"""List of metrics associated with this model version."""
return self._metrics
@property
def deployment_job_state(self) -> ModelVersionDeploymentJobState | None:
"""Deployment job state for the current model version."""
return self._deployment_job_state
@classmethod
def _properties(cls) -> list[str]:
# aggregate with base class properties since cls.__dict__ does not do it automatically
return sorted(cls._get_properties_helper())
def _add_tag(self, tag: ModelVersionTag):
self._tags[tag.key] = tag.value
# proto mappers
@classmethod
def from_proto(cls, proto) -> "ModelVersion":
# input: mlflow.protos.model_registry_pb2.ModelVersion
# returns: ModelVersion entity
model_version = cls(
proto.name,
proto.version,
proto.creation_timestamp,
proto.last_updated_timestamp,
proto.description if proto.HasField("description") else None,
proto.user_id,
proto.current_stage,
proto.source,
proto.run_id if proto.HasField("run_id") else None,
ModelVersionStatus.to_string(proto.status),
proto.status_message if proto.HasField("status_message") else None,
run_link=proto.run_link,
aliases=proto.aliases,
deployment_job_state=ModelVersionDeploymentJobState.from_proto(
proto.deployment_job_state
),
)
for tag in proto.tags:
model_version._add_tag(ModelVersionTag.from_proto(tag))
# TODO: Include params, metrics, and model ID in proto
return model_version
def to_proto(self):
# input: ModelVersion entity
# returns mlflow.protos.model_registry_pb2.ModelVersion
model_version = ProtoModelVersion()
model_version.name = self.name
model_version.version = str(self.version)
model_version.creation_timestamp = self.creation_timestamp
if self.last_updated_timestamp is not None:
model_version.last_updated_timestamp = self.last_updated_timestamp
if self.description is not None:
model_version.description = self.description
if self.user_id is not None:
model_version.user_id = self.user_id
if self.current_stage is not None:
model_version.current_stage = self.current_stage
if self.source is not None:
model_version.source = str(self.source)
if self.run_id is not None:
model_version.run_id = str(self.run_id)
if self.run_link is not None:
model_version.run_link = str(self.run_link)
if self.status is not None:
model_version.status = ModelVersionStatus.from_string(self.status)
if self.status_message:
model_version.status_message = self.status_message
model_version.tags.extend(
[ProtoModelVersionTag(key=key, value=value) for key, value in self._tags.items()]
)
model_version.aliases.extend(self.aliases)
if self.deployment_job_state is not None:
model_version.deployment_job_state.CopyFrom(
ModelVersionDeploymentJobState.to_proto(self.deployment_job_state)
)
# TODO: Include params, metrics, and model ID in proto
return model_version
|
ModelVersion
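A minimal construction of the entity above, using only the public constructor shown; all values are arbitrary illustration data.

from mlflow.entities.model_registry import ModelVersion, ModelVersionTag

mv = ModelVersion(
    name="demo-model",
    version="1",
    creation_timestamp=1700000000000,   # milliseconds since the Unix epoch
    description="first registered version",
    tags=[ModelVersionTag(key="stage", value="experimental")],
)
print(mv.name, mv.version, mv.tags)     # demo-model 1 {'stage': 'experimental'}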
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/aggregations/near_object/executor.py
|
{
"start": 571,
"end": 6453
}
|
class ____(Generic[ConnectionType], _BaseExecutor[ConnectionType]):
@overload
def near_object(
self,
near_object: UUID,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
object_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
target_vector: Optional[str] = None,
total_count: bool = True,
return_metrics: Optional[PropertiesMetrics] = None,
) -> executor.Result[AggregateReturn]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
object_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Union[str, GroupByAggregate],
target_vector: Optional[str] = None,
total_count: bool = True,
return_metrics: Optional[PropertiesMetrics] = None,
) -> executor.Result[AggregateGroupByReturn]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
object_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[Union[str, GroupByAggregate]] = None,
target_vector: Optional[str] = None,
total_count: bool = True,
return_metrics: Optional[PropertiesMetrics] = None,
) -> executor.Result[Union[AggregateReturn, AggregateGroupByReturn]]: ...
def near_object(
self,
near_object: UUID,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
object_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[Union[str, GroupByAggregate]] = None,
target_vector: Optional[str] = None,
total_count: bool = True,
return_metrics: Optional[PropertiesMetrics] = None,
) -> executor.Result[Union[AggregateReturn, AggregateGroupByReturn]]:
"""Aggregate metrics over the objects returned by a near object search on this collection.
At least one of `certainty`, `distance`, or `object_limit` must be specified here for the vector search.
This method requires that the objects in the collection have associated vectors.
Args:
near_object: The UUID of the object to search on.
certainty: The minimum certainty of the object search.
distance: The maximum distance of the object search.
object_limit: The maximum number of objects to return from the object search prior to the aggregation.
filters: The filters to apply to the search.
group_by: How to group the aggregation by.
total_count: Whether to include the total number of objects that match the query in the response.
return_metrics: A list of property metrics to aggregate together after the object search.
Returns:
Depending on the presence of the `group_by` argument, either an `AggregateReturn` object or an `AggregateGroupByReturn` object that includes the aggregation objects.
Raises:
weaviate.exceptions.WeaviateQueryError: If an error occurs while performing the query against Weaviate.
weaviate.exceptions.WeaviateInvalidInputError: If any of the input arguments are of the wrong type.
"""
return_metrics = (
return_metrics
if (return_metrics is None or isinstance(return_metrics, list))
else [return_metrics]
)
if isinstance(group_by, str):
group_by = GroupByAggregate(prop=group_by)
if self._connection._weaviate_version.is_lower_than(1, 29, 0):
# use gql, remove once 1.29 is the minimum supported version
def resp(res: dict) -> Union[AggregateReturn, AggregateGroupByReturn]:
return (
self._to_aggregate_result(res, return_metrics)
if group_by is None
else self._to_group_by_result(res, return_metrics)
)
builder = self._base(return_metrics, filters, total_count)
builder = self._add_groupby_to_builder(builder, group_by)
builder = self._add_near_object_to_builder(
builder, near_object, certainty, distance, object_limit, target_vector
)
return executor.execute(
response_callback=resp,
method=self._do,
query=builder,
)
else:
# use grpc
request = self._grpc.near_object(
near_object=near_object,
certainty=certainty,
distance=distance,
target_vector=target_vector,
aggregations=(
[metric.to_grpc() for metric in return_metrics]
if return_metrics is not None
else []
),
filters=_FilterToGRPC.convert(filters) if filters is not None else None,
group_by=group_by._to_grpc() if group_by is not None else None,
limit=group_by.limit if group_by is not None else None,
objects_count=total_count,
object_limit=object_limit,
)
def respGrpc(
res: aggregate_pb2.AggregateReply,
) -> Union[AggregateReturn, AggregateGroupByReturn]:
return self._to_result(group_by is not None, res)
return executor.execute(
response_callback=respGrpc,
method=self._connection.grpc_aggregate,
request=request,
)
|
_NearObjectExecutor
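A hedged usage sketch of the aggregation defined above. Only the keyword arguments mirror the signature shown in the source; the collection name, the UUID and the local-connection helper are placeholders and assume a reachable Weaviate instance.

import weaviate

client = weaviate.connect_to_local()   # assumes a Weaviate instance on localhost
try:
    articles = client.collections.get("Article")            # placeholder collection
    result = articles.aggregate.near_object(
        near_object="36ddd591-2dee-4e7e-a3cc-eb86d30a4303",  # placeholder UUID
        distance=0.6,        # at least one of certainty/distance/object_limit is required
        object_limit=100,
        total_count=True,
    )
    print(result.total_count)
finally:
    client.close()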
|
python
|
ray-project__ray
|
python/ray/autoscaler/node_provider.py
|
{
"start": 344,
"end": 10616
}
|
class ____:
"""Interface for getting and returning nodes from a Cloud.
**Important**: This is an INTERNAL API that is only exposed for the purpose
of implementing custom node providers. It is not allowed to call into
NodeProvider methods from any Ray package outside the autoscaler, only to
define new implementations of NodeProvider for use with the "external" node
provider option.
NodeProviders are namespaced by the `cluster_name` parameter; they only
operate on nodes within that namespace.
Nodes may be in one of three states: {pending, running, terminated}. Nodes
appear immediately once started by `create_node`, and transition
immediately to terminated when `terminate_node` is called.
Threading and concurrency:
- The autoscaler calls the following methods from multiple threads
(NodeLauncher, NodeUpdaterThread, autoscaler main loop, and
NodeProviderAdapter executors).
- These methods MUST be thread-safe:
non_terminated_nodes, is_running, is_terminated, node_tags, internal_ip,
external_ip, get_node_id, create_node/create_node_with_resources_and_labels,
set_node_tags, terminate_node/terminate_nodes.
TODO (rueian): make sure all the existing implementations are thread-safe.
"""
def __init__(self, provider_config: Dict[str, Any], cluster_name: str) -> None:
self.provider_config = provider_config
self.cluster_name = cluster_name
self._internal_ip_cache: Dict[str, str] = {}
self._external_ip_cache: Dict[str, str] = {}
def is_readonly(self) -> bool:
"""Returns whether this provider is readonly.
Readonly node providers do not allow nodes to be created or terminated.
"""
return False
def non_terminated_nodes(self, tag_filters: Dict[str, str]) -> List[str]:
"""Return a list of node ids filtered by the specified tags dict.
This list must not include terminated nodes. For performance reasons,
providers are allowed to cache the result of a call to
non_terminated_nodes() to serve single-node queries
(e.g. is_running(node_id)). This means that non_terminated_nodes() must
be called again to refresh results.
Examples:
>>> from ray.autoscaler.node_provider import NodeProvider
>>> from ray.autoscaler.tags import TAG_RAY_NODE_KIND
>>> provider = NodeProvider(...) # doctest: +SKIP
>>> provider.non_terminated_nodes( # doctest: +SKIP
... {TAG_RAY_NODE_KIND: "worker"})
["node-1", "node-2"]
"""
raise NotImplementedError
def is_running(self, node_id: str) -> bool:
"""Return whether the specified node is running."""
raise NotImplementedError
def is_terminated(self, node_id: str) -> bool:
"""Return whether the specified node is terminated."""
raise NotImplementedError
def node_tags(self, node_id: str) -> Dict[str, str]:
"""Returns the tags of the given node (string dict)."""
raise NotImplementedError
def external_ip(self, node_id: str) -> str:
"""Returns the external ip of the given node."""
raise NotImplementedError
def internal_ip(self, node_id: str) -> str:
"""Returns the internal ip (Ray ip) of the given node."""
raise NotImplementedError
def get_node_id(self, ip_address: str, use_internal_ip: bool = False) -> str:
"""Returns the node_id given an IP address.
Assumes ip-address is unique per node.
Args:
ip_address: Address of node.
use_internal_ip: Whether the ip address is
public or private.
Raises:
ValueError: If not found.
"""
def find_node_id():
if use_internal_ip:
return self._internal_ip_cache.get(ip_address)
else:
return self._external_ip_cache.get(ip_address)
if not find_node_id():
all_nodes = self.non_terminated_nodes({})
ip_func = self.internal_ip if use_internal_ip else self.external_ip
ip_cache = (
self._internal_ip_cache if use_internal_ip else self._external_ip_cache
)
for node_id in all_nodes:
ip_cache[ip_func(node_id)] = node_id
if not find_node_id():
if use_internal_ip:
known_msg = f"Worker internal IPs: {list(self._internal_ip_cache)}"
else:
known_msg = f"Worker external IPs: {list(self._external_ip_cache)}"
raise ValueError(f"ip {ip_address} not found. " + known_msg)
return find_node_id()
def create_node(
self, node_config: Dict[str, Any], tags: Dict[str, str], count: int
) -> Optional[Dict[str, Any]]:
"""Creates a number of nodes within the namespace.
Optionally returns a mapping from created node ids to node metadata.
Optionally may throw a
ray.autoscaler.node_launch_exception.NodeLaunchException which the
autoscaler may use to provide additional functionality such as
observability.
"""
raise NotImplementedError
def create_node_with_resources_and_labels(
self,
node_config: Dict[str, Any],
tags: Dict[str, str],
count: int,
resources: Dict[str, float],
labels: Dict[str, str],
) -> Optional[Dict[str, Any]]:
"""Create nodes with a given resource and label config.
This is the method actually called by the autoscaler. Prefer to
implement this directly when possible; otherwise it delegates to the
create_node() implementation.
Optionally may throw a ray.autoscaler.node_launch_exception.NodeLaunchException.
"""
return self.create_node(node_config, tags, count)
def set_node_tags(self, node_id: str, tags: Dict[str, str]) -> None:
"""Sets the tag values (string dict) for the specified node."""
raise NotImplementedError
def terminate_node(self, node_id: str) -> Optional[Dict[str, Any]]:
"""Terminates the specified node.
Optionally return a mapping from deleted node ids to node
metadata.
"""
raise NotImplementedError
def terminate_nodes(self, node_ids: List[str]) -> Optional[Dict[str, Any]]:
"""Terminates a set of nodes.
May be overridden with a batch method, which optionally may return a
mapping from deleted node ids to node metadata.
"""
for node_id in node_ids:
logger.info("NodeProvider: {}: Terminating node".format(node_id))
self.terminate_node(node_id)
return None
@property
def max_terminate_nodes(self) -> Optional[int]:
"""The maximum number of nodes which can be terminated in a single
API request. By default, this is None, which means that the node
provider's underlying API allows an unlimited number of nodes to be
terminated in one request.
For example, AWS only allows 1000 nodes to be terminated
at once; to terminate more, we must issue multiple separate API
requests. If there is no limit, simply leave this as None.
This may be overridden. The value may be useful when overriding the
"terminate_nodes" method.
"""
return None
@staticmethod
def bootstrap_config(cluster_config: Dict[str, Any]) -> Dict[str, Any]:
"""Bootstraps the cluster config by adding env defaults if needed."""
return cluster_config
def get_command_runner(
self,
log_prefix: str,
node_id: str,
auth_config: Dict[str, Any],
cluster_name: str,
process_runner: ModuleType,
use_internal_ip: bool,
docker_config: Optional[Dict[str, Any]] = None,
) -> CommandRunnerInterface:
"""Returns the CommandRunner class used to perform SSH commands.
Args:
log_prefix: stores "NodeUpdater: {}: ".format(<node_id>). Used
to print progress in the CommandRunner.
node_id: the node ID.
auth_config: the authentication configs from the autoscaler
yaml file.
cluster_name: the name of the cluster.
process_runner: the module to use to run the commands
in the CommandRunner. E.g., subprocess.
use_internal_ip: whether the node_id belongs to an internal ip
or external ip.
docker_config: If set, the docker information of the docker
container that commands should be run on.
"""
common_args = {
"log_prefix": log_prefix,
"node_id": node_id,
"provider": self,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": use_internal_ip,
}
if docker_config and docker_config["container_name"] != "":
return DockerCommandRunner(docker_config, **common_args)
else:
return SSHCommandRunner(**common_args)
def prepare_for_head_node(self, cluster_config: Dict[str, Any]) -> Dict[str, Any]:
"""Returns a new cluster config with custom configs for head node."""
return cluster_config
@staticmethod
def fillout_available_node_types_resources(
cluster_config: Dict[str, Any]
) -> Dict[str, Any]:
"""Fills out missing "resources" field for available_node_types."""
return cluster_config
def safe_to_scale(self) -> bool:
"""Optional condition to determine if it's safe to proceed with an autoscaling
update. Can be used to wait for convergence of state managed by an external
cluster manager.
Called by the autoscaler immediately after non_terminated_nodes().
If False is returned, the autoscaler will abort the update.
"""
return True
def post_process(self) -> None:
"""This optional method is executed at the end of
StandardAutoscaler._update().
"""
pass
|
NodeProvider
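To make the contract above concrete, here is a toy in-memory provider sketch that fills in just the required methods. It keeps node records in a plain dict instead of talking to a cloud API, and it glosses over the thread-safety requirements spelled out in the class docstring.

import uuid
from typing import Any, Dict, List

from ray.autoscaler.node_provider import NodeProvider


class InMemoryNodeProvider(NodeProvider):
    def __init__(self, provider_config: Dict[str, Any], cluster_name: str) -> None:
        super().__init__(provider_config, cluster_name)
        # node_id -> {"tags": dict, "running": bool, "ip": str}
        self._nodes: Dict[str, Dict[str, Any]] = {}

    def non_terminated_nodes(self, tag_filters: Dict[str, str]) -> List[str]:
        return [
            node_id
            for node_id, node in self._nodes.items()
            if node["running"]
            and all(node["tags"].get(k) == v for k, v in tag_filters.items())
        ]

    def is_running(self, node_id: str) -> bool:
        return self._nodes[node_id]["running"]

    def is_terminated(self, node_id: str) -> bool:
        return not self._nodes[node_id]["running"]

    def node_tags(self, node_id: str) -> Dict[str, str]:
        return dict(self._nodes[node_id]["tags"])

    def internal_ip(self, node_id: str) -> str:
        return self._nodes[node_id]["ip"]

    def external_ip(self, node_id: str) -> str:
        return self._nodes[node_id]["ip"]

    def create_node(self, node_config, tags, count):
        for _ in range(count):
            node_id = uuid.uuid4().hex
            self._nodes[node_id] = {
                "tags": dict(tags),
                "running": True,
                "ip": f"10.0.0.{len(self._nodes) + 1}",
            }

    def set_node_tags(self, node_id: str, tags: Dict[str, str]) -> None:
        self._nodes[node_id]["tags"].update(tags)

    def terminate_node(self, node_id: str):
        self._nodes[node_id]["running"] = False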
|
python
|
networkx__networkx
|
networkx/generators/tests/test_geometric.py
|
{
"start": 5868,
"end": 7905
}
|
class ____:
"""Unit tests for :func:`~networkx.geographical_threshold_graph`"""
def test_number_of_nodes(self):
G = nx.geographical_threshold_graph(50, 100, seed=42)
assert len(G) == 50
G = nx.geographical_threshold_graph(range(50), 100, seed=42)
assert len(G) == 50
def test_distances(self):
"""Tests that pairs of vertices are adjacent if and only if their
distances meet the given threshold.
"""
# Use the Euclidean metric and alpha = -2
# the default according to the documentation.
G = nx.geographical_threshold_graph(50, 10)
for u, v in combinations(G, 2):
# Adjacent vertices must exceed the threshold.
if v in G[u]:
assert join(G, u, v, 10, -2, math.dist)
# Nonadjacent vertices must not exceed the threshold.
else:
assert not join(G, u, v, 10, -2, math.dist)
def test_metric(self):
"""Tests for providing an alternate distance metric to the generator."""
# Use the L1 metric.
G = nx.geographical_threshold_graph(50, 10, metric=l1dist)
for u, v in combinations(G, 2):
# Adjacent vertices must exceed the threshold.
if v in G[u]:
assert join(G, u, v, 10, -2, l1dist)
# Nonadjacent vertices must not exceed the threshold.
else:
assert not join(G, u, v, 10, -2, l1dist)
def test_p_dist_zero(self):
"""Tests that p_dist = 0 returns a disconnected graph with 0 edges"""
def p_dist(dist):
return 0
G = nx.geographical_threshold_graph(50, 1, p_dist=p_dist)
assert len(G.edges) == 0
def test_pos_weight_name(self):
gtg = nx.geographical_threshold_graph
G = gtg(50, 100, seed=42, pos_name="coords", weight_name="wt")
assert all(len(d["coords"]) == 2 for n, d in G.nodes.items())
assert all(d["wt"] > 0 for n, d in G.nodes.items())
|
TestGeographicalThresholdGraph
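A quick usage sketch of the generator exercised by the tests above; the arguments mirror the calls in the tests, and by default each node carries `pos` and `weight` attributes.

import networkx as nx

G = nx.geographical_threshold_graph(50, 100, seed=42)
print(G.number_of_nodes(), G.number_of_edges())

n0 = next(iter(G.nodes))
print(G.nodes[n0]["pos"], G.nodes[n0]["weight"])   # 2-d position and positive weight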
|
python
|
tox-dev__tox
|
src/tox/tox_env/info.py
|
{
"start": 440,
"end": 2205
}
|
class ____:
"""Stores metadata about the tox environment."""
def __init__(self, path: Path) -> None:
self._path = path / ".tox-info.json"
try:
value = json.loads(self._path.read_text())
except (ValueError, OSError):
value = {}
self._content = value
def __repr__(self) -> str:
return f"{self.__class__.__name__}(path={self._path})"
@contextmanager
def compare(
self,
value: Any,
section: str,
sub_section: str | None = None,
) -> Iterator[tuple[bool, Any | None]]:
"""
Compare new information with the existing one and update it if it differs.
:param value: the value stored
:param section: the primary key of the information
:param sub_section: the secondary key of the information
:return: a tuple where the first value is whether the stored value is unchanged (equal to the new one) and the second is the old value
"""
old = self._content.get(section)
if sub_section is not None and old is not None:
old = old.get(sub_section)
if old == value:
yield True, old
else:
yield False, old
# if no exception thrown update
if sub_section is None:
self._content[section] = value
elif self._content.get(section) is None:
self._content[section] = {sub_section: value}
else:
self._content[section][sub_section] = value
self._write()
def reset(self) -> None:
self._content = {}
def _write(self) -> None:
self._path.parent.mkdir(parents=True, exist_ok=True)
self._path.write_text(json.dumps(self._content, indent=2))
__all__ = ("Info",)
|
Info
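A short usage sketch of the `compare` context manager above: the new value is only written back when it differs from what is cached and the `with` block exits without raising. The temporary directory and section name are arbitrary.

import tempfile
from pathlib import Path

from tox.tox_env.info import Info

with tempfile.TemporaryDirectory() as tmp:
    info = Info(Path(tmp))

    with info.compare({"python": "3.12"}, section="env") as (unchanged, old):
        print(unchanged, old)   # False None  (nothing cached yet -> value gets written)

    with info.compare({"python": "3.12"}, section="env") as (unchanged, old):
        print(unchanged, old)   # True {'python': '3.12'}  (matches the cache -> no rewrite)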
|
python
|
gevent__gevent
|
src/greentest/3.14/test_httpservers.py
|
{
"start": 1825,
"end": 2776
}
|
class ____(threading.Thread):
def __init__(self, test_object, request_handler, tls=None):
threading.Thread.__init__(self)
self.request_handler = request_handler
self.test_object = test_object
self.tls = tls
def run(self):
if self.tls:
certfile, keyfile, password = self.tls
self.server = create_https_server(
certfile, keyfile, password,
request_handler=self.request_handler,
)
else:
self.server = HTTPServer(('localhost', 0), self.request_handler)
self.test_object.HOST, self.test_object.PORT = self.server.socket.getsockname()
self.test_object.server_started.set()
self.test_object = None
try:
self.server.serve_forever(0.05)
finally:
self.server.server_close()
def stop(self):
self.server.shutdown()
self.join()
|
TestServerThread
|
python
|
streamlit__streamlit
|
lib/streamlit/elements/lib/built_in_chart_utils.py
|
{
"start": 42777,
"end": 43355
}
|
class ____(StreamlitAPIException):
def __init__(self, color_from_user: str | Color | list[Color] | None) -> None:
message = f"""
This does not look like a valid color argument: `{color_from_user}`.
The color argument can be:
* A hex string like "#ffaa00" or "#ffaa0088".
* An RGB or RGBA tuple with the red, green, blue, and alpha
components specified as ints from 0 to 255 or floats from 0.0 to
1.0.
* The name of a column.
* Or a list of colors, matching the number of y columns to draw.
"""
super().__init__(message)
|
StreamlitInvalidColorError
|
python
|
kamyu104__LeetCode-Solutions
|
Python/check-if-the-rectangle-corner-is-reachable.py
|
{
"start": 47,
"end": 1246
}
|
class ____(object):
def canReachCorner(self, X, Y, circles):
"""
:type X: int
:type Y: int
:type circles: List[List[int]]
:rtype: bool
"""
def check(x1, y1, r1, x2, y2, r2):
return (x1-x2)**2+(y1-y2)**2 <= (r1+r2)**2
def iter_dfs():
lookup = [False]*len(circles)
stk = []
dst = [False]*len(circles)
for u in xrange(len(circles)):
x, y, r = circles[u]
if x-r <= 0 or y+r >= Y:
lookup[u] = True
stk.append(u)
if x+r >= X or y-r <= 0:
dst[u] = True
while stk:
u = stk.pop()
if dst[u]:
return True
x1, y1, r1 = circles[u]
for v in xrange(len(circles)):
x2, y2, r2 = circles[v]
if lookup[v] or not check(x1, y1, r1, x2, y2, r2):
continue
lookup[v] = True
stk.append(v)
return False
return not iter_dfs()
# Time: O(n^2)
# Space: O(n)
# bfs
|
Solution
|
python
|
django__django
|
tests/postgres_tests/test_hstore.py
|
{
"start": 9012,
"end": 9945
}
|
class ____(PostgreSQLSimpleTestCase):
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = HStoreField(default={})
model = MyModel()
self.assertEqual(
model.check(),
[
checks.Warning(
msg=(
"HStoreField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint="Use a callable instead, e.g., use `dict` instead of `{}`.",
obj=MyModel._meta.get_field("field"),
id="fields.E010",
)
],
)
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = HStoreField(default=dict)
self.assertEqual(MyModel().check(), [])
|
TestChecks
|
python
|
encode__django-rest-framework
|
rest_framework/generics.py
|
{
"start": 8987,
"end": 9428
}
|
class ____(mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
GenericAPIView):
"""
Concrete view for retrieving or deleting a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
|
RetrieveDestroyAPIView
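A typical way to wire the concrete view above into a project; `Snippet` and `SnippetSerializer` are placeholder names for a model and serializer that the sketch assumes already exist.

from django.urls import path
from rest_framework import generics

from myapp.models import Snippet                 # hypothetical model
from myapp.serializers import SnippetSerializer  # hypothetical serializer


class SnippetRetrieveDestroy(generics.RetrieveDestroyAPIView):
    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer


urlpatterns = [
    # GET retrieves, DELETE destroys the instance identified by <pk>.
    path("snippets/<int:pk>/", SnippetRetrieveDestroy.as_view()),
]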
|
python
|
huggingface__transformers
|
tests/tensor_parallel/test_tensor_parallel.py
|
{
"start": 2908,
"end": 4132
}
|
class ____(TestCasePlus):
def test_packed_unpacked_conversion(self):
WORLD_SIZE = 2
PACKED_BLOCK_SIZE = 800
SHARDING_DIM = 2
NUM_BLOCKS = 2
original_packed_weights = torch.randn(4, 512, 2 * PACKED_BLOCK_SIZE)
original_packed_weights.get_dtype = lambda: "F32" # get_packed_weights expects PySlice object
empty_param = torch.empty(4, 512, 2 * PACKED_BLOCK_SIZE)
class MockDeviceMesh:
def size(self):
return WORLD_SIZE
mock_mesh = (
MockDeviceMesh()
) # get_packed_weights only calls `.size()`, do this to avoid doing actual distributed run
packed_weights_0 = get_packed_weights(original_packed_weights, empty_param, mock_mesh, 0, SHARDING_DIM)
packed_weights_1 = get_packed_weights(original_packed_weights, empty_param, mock_mesh, 1, SHARDING_DIM)
# simulate all gather of sharded weights
packed_weights = torch.cat([packed_weights_0, packed_weights_1], dim=SHARDING_DIM)
unpacked_weights = repack_weights(packed_weights, SHARDING_DIM, WORLD_SIZE, NUM_BLOCKS)
assert torch.allclose(unpacked_weights, original_packed_weights)
|
TestTensorParallelUtils
|
python
|
nedbat__coveragepy
|
tests/test_context.py
|
{
"start": 8826,
"end": 10605
}
|
class ____(CoverageTest):
"""Tests of qualname_from_frame."""
# Pylint gets confused about meth() below.
# pylint: disable=no-value-for-parameter
run_in_temp_dir = False
def test_method(self) -> None:
assert Parent().meth() == "tests.test_context.Parent.meth"
def test_inherited_method(self) -> None:
assert Child().meth() == "tests.test_context.Parent.meth"
def test_mi_inherited_method(self) -> None:
assert MultiChild().meth() == "tests.test_context.Parent.meth"
def test_no_arguments(self) -> None:
assert no_arguments() == "tests.test_context.no_arguments"
def test_plain_old_function(self) -> None:
assert plain_old_function(0, 1) == "tests.test_context.plain_old_function"
def test_fake_out(self) -> None:
assert fake_out(0) == "tests.test_context.fake_out"
def test_property(self) -> None:
assert Parent().a_property == "tests.test_context.Parent.a_property"
def test_changeling(self) -> None:
c = Child()
c.meth = patch_meth # type: ignore[assignment]
assert c.meth(c) == "tests.test_context.patch_meth" # type: ignore[call-arg]
def test_bug_829(self) -> None:
# A class with a name like a function shouldn't confuse qualname_from_frame.
class test_something: # pylint: disable=unused-variable
assert get_qualname() is None
def test_bug_1210(self) -> None:
# Under pyarmor (an obfuscator), a function can have a "self" argument,
# but then not have a "self" local.
co = mock.Mock(co_name="a_co_name", co_argcount=1, co_varnames=["self"])
frame = mock.Mock(f_code=co, f_locals={})
assert qualname_from_frame(frame) == "unittest.mock.a_co_name"
|
QualnameTest
|
python
|
fastapi__sqlmodel
|
sqlmodel/_compat.py
|
{
"start": 1094,
"end": 22886
}
|
class ____:
obj: Any
update: Dict[str, Any]
def __getattribute__(self, __name: str) -> Any:
update = super().__getattribute__("update")
obj = super().__getattribute__("obj")
if __name in update:
return update[__name]
return getattr(obj, __name)
def _is_union_type(t: Any) -> bool:
return t is UnionType or t is Union
finish_init: ContextVar[bool] = ContextVar("finish_init", default=True)
@contextmanager
def partial_init() -> Generator[None, None, None]:
token = finish_init.set(False)
yield
finish_init.reset(token)
if IS_PYDANTIC_V2:
from annotated_types import MaxLen
from pydantic import ConfigDict as BaseConfig
from pydantic._internal._fields import PydanticMetadata
from pydantic._internal._model_construction import ModelMetaclass
from pydantic._internal._repr import Representation as Representation
from pydantic_core import PydanticUndefined as Undefined
from pydantic_core import PydanticUndefinedType as UndefinedType
# Dummy for types, to make it importable
class ModelField:
pass
class SQLModelConfig(BaseConfig, total=False):
table: Optional[bool]
registry: Optional[Any]
def get_config_value(
*, model: InstanceOrType["SQLModel"], parameter: str, default: Any = None
) -> Any:
return model.model_config.get(parameter, default)
def set_config_value(
*,
model: InstanceOrType["SQLModel"],
parameter: str,
value: Any,
) -> None:
model.model_config[parameter] = value # type: ignore[literal-required]
def get_model_fields(model: InstanceOrType[BaseModel]) -> Dict[str, "FieldInfo"]:
# TODO: refactor the usage of this function to always pass the class
# not the instance, and then remove this extra check
# this is for compatibility with Pydantic v3
if isinstance(model, type):
use_model = model
else:
use_model = model.__class__
return use_model.model_fields
def get_fields_set(
object: InstanceOrType["SQLModel"],
) -> Union[Set[str], Callable[[BaseModel], Set[str]]]:
return object.model_fields_set
def init_pydantic_private_attrs(new_object: InstanceOrType["SQLModel"]) -> None:
object.__setattr__(new_object, "__pydantic_fields_set__", set())
object.__setattr__(new_object, "__pydantic_extra__", None)
object.__setattr__(new_object, "__pydantic_private__", None)
def get_annotations(class_dict: Dict[str, Any]) -> Dict[str, Any]:
raw_annotations: Dict[str, Any] = class_dict.get("__annotations__", {})
if sys.version_info >= (3, 14) and "__annotations__" not in class_dict:
# See https://github.com/pydantic/pydantic/pull/11991
from annotationlib import (
Format,
call_annotate_function,
get_annotate_from_class_namespace,
)
if annotate := get_annotate_from_class_namespace(class_dict):
raw_annotations = call_annotate_function(
annotate, format=Format.FORWARDREF
)
return raw_annotations
def is_table_model_class(cls: Type[Any]) -> bool:
config = getattr(cls, "model_config", {})
if config:
return config.get("table", False) or False
return False
def get_relationship_to(
name: str,
rel_info: "RelationshipInfo",
annotation: Any,
) -> Any:
origin = get_origin(annotation)
use_annotation = annotation
# Direct relationships (e.g. 'Team' or Team) have None as an origin
if origin is None:
if isinstance(use_annotation, ForwardRef):
use_annotation = use_annotation.__forward_arg__
else:
return use_annotation
# If Union (e.g. Optional), get the real field
elif _is_union_type(origin):
use_annotation = get_args(annotation)
if len(use_annotation) > 2:
raise ValueError(
"Cannot have a (non-optional) union as a SQLAlchemy field"
)
arg1, arg2 = use_annotation
if arg1 is NoneType and arg2 is not NoneType:
use_annotation = arg2
elif arg2 is NoneType and arg1 is not NoneType:
use_annotation = arg1
else:
raise ValueError(
"Cannot have a Union of None and None as a SQLAlchemy field"
)
# If a list, then also get the real field
elif origin is list:
use_annotation = get_args(annotation)[0]
return get_relationship_to(
name=name, rel_info=rel_info, annotation=use_annotation
)
def is_field_noneable(field: "FieldInfo") -> bool:
if getattr(field, "nullable", Undefined) is not Undefined:
return field.nullable # type: ignore
origin = get_origin(field.annotation)
if origin is not None and _is_union_type(origin):
args = get_args(field.annotation)
if any(arg is NoneType for arg in args):
return True
if not field.is_required():
if field.default is Undefined:
return False
if field.annotation is None or field.annotation is NoneType:
return True
return False
return False
def get_sa_type_from_type_annotation(annotation: Any) -> Any:
# Resolve Optional fields
if annotation is None:
raise ValueError("Missing field type")
origin = get_origin(annotation)
if origin is None:
return annotation
elif origin is Annotated:
return get_sa_type_from_type_annotation(get_args(annotation)[0])
if _is_union_type(origin):
bases = get_args(annotation)
if len(bases) > 2:
raise ValueError(
"Cannot have a (non-optional) union as a SQLAlchemy field"
)
# Non optional unions are not allowed
if bases[0] is not NoneType and bases[1] is not NoneType:
raise ValueError(
"Cannot have a (non-optional) union as a SQLAlchemy field"
)
# Optional unions are allowed
use_type = bases[0] if bases[0] is not NoneType else bases[1]
return get_sa_type_from_type_annotation(use_type)
return origin
def get_sa_type_from_field(field: Any) -> Any:
type_: Any = field.annotation
return get_sa_type_from_type_annotation(type_)
def get_field_metadata(field: Any) -> Any:
for meta in field.metadata:
if isinstance(meta, (PydanticMetadata, MaxLen)):
return meta
return FakeMetadata()
def post_init_field_info(field_info: FieldInfo) -> None:
return None
# Dummy to make it importable
def _calculate_keys(
self: "SQLModel",
include: Optional[Mapping[Union[int, str], Any]],
exclude: Optional[Mapping[Union[int, str], Any]],
exclude_unset: bool,
update: Optional[Dict[str, Any]] = None,
) -> Optional[AbstractSet[str]]: # pragma: no cover
return None
def sqlmodel_table_construct(
*,
self_instance: _TSQLModel,
values: Dict[str, Any],
_fields_set: Union[Set[str], None] = None,
) -> _TSQLModel:
# Copy from Pydantic's BaseModel.construct()
# Ref: https://github.com/pydantic/pydantic/blob/v2.5.2/pydantic/main.py#L198
# Modified to not include everything, only the model fields, and to
# set relationships
# SQLModel override to get class SQLAlchemy __dict__ attributes and
# set them back in after creating the object
# new_obj = cls.__new__(cls)
cls = type(self_instance)
old_dict = self_instance.__dict__.copy()
# End SQLModel override
fields_values: Dict[str, Any] = {}
defaults: Dict[
str, Any
] = {} # keeping this separate from `fields_values` helps us compute `_fields_set`
for name, field in cls.model_fields.items():
if field.alias and field.alias in values:
fields_values[name] = values.pop(field.alias)
elif name in values:
fields_values[name] = values.pop(name)
elif not field.is_required():
defaults[name] = field.get_default(call_default_factory=True)
if _fields_set is None:
_fields_set = set(fields_values.keys())
fields_values.update(defaults)
_extra: Union[Dict[str, Any], None] = None
if cls.model_config.get("extra") == "allow":
_extra = {}
for k, v in values.items():
_extra[k] = v
# SQLModel override, do not include everything, only the model fields
# else:
# fields_values.update(values)
# End SQLModel override
# SQLModel override
# Do not set __dict__, instead use setattr to trigger SQLAlchemy
# object.__setattr__(new_obj, "__dict__", fields_values)
# instrumentation
for key, value in {**old_dict, **fields_values}.items():
setattr(self_instance, key, value)
# End SQLModel override
object.__setattr__(self_instance, "__pydantic_fields_set__", _fields_set)
if not cls.__pydantic_root_model__:
object.__setattr__(self_instance, "__pydantic_extra__", _extra)
if cls.__pydantic_post_init__:
self_instance.model_post_init(None)
elif not cls.__pydantic_root_model__:
# Note: if there are any private attributes, cls.__pydantic_post_init__ would exist
# Since it doesn't, that means that `__pydantic_private__` should be set to None
object.__setattr__(self_instance, "__pydantic_private__", None)
# SQLModel override, set relationships
# Get and set any relationship objects
for key in self_instance.__sqlmodel_relationships__:
value = values.get(key, Undefined)
if value is not Undefined:
setattr(self_instance, key, value)
# End SQLModel override
return self_instance
def sqlmodel_validate(
cls: Type[_TSQLModel],
obj: Any,
*,
strict: Union[bool, None] = None,
from_attributes: Union[bool, None] = None,
context: Union[Dict[str, Any], None] = None,
update: Union[Dict[str, Any], None] = None,
) -> _TSQLModel:
if not is_table_model_class(cls):
new_obj: _TSQLModel = cls.__new__(cls)
else:
# If table, create the new instance normally to make SQLAlchemy create
# the _sa_instance_state attribute
# The wrapper of this function should use with _partial_init()
with partial_init():
new_obj = cls()
# SQLModel Override to get class SQLAlchemy __dict__ attributes and
# set them back in after creating the object
old_dict = new_obj.__dict__.copy()
use_obj = obj
if isinstance(obj, dict) and update:
use_obj = {**obj, **update}
elif update:
use_obj = ObjectWithUpdateWrapper(obj=obj, update=update)
cls.__pydantic_validator__.validate_python(
use_obj,
strict=strict,
from_attributes=from_attributes,
context=context,
self_instance=new_obj,
)
# Capture fields set to restore it later
fields_set = new_obj.__pydantic_fields_set__.copy()
if not is_table_model_class(cls):
# If not table, normal Pydantic code, set __dict__
new_obj.__dict__ = {**old_dict, **new_obj.__dict__}
else:
# Do not set __dict__, instead use setattr to trigger SQLAlchemy
# instrumentation
for key, value in {**old_dict, **new_obj.__dict__}.items():
setattr(new_obj, key, value)
# Restore fields set
object.__setattr__(new_obj, "__pydantic_fields_set__", fields_set)
# Get and set any relationship objects
if is_table_model_class(cls):
for key in new_obj.__sqlmodel_relationships__:
value = getattr(use_obj, key, Undefined)
if value is not Undefined:
setattr(new_obj, key, value)
return new_obj
def sqlmodel_init(*, self: "SQLModel", data: Dict[str, Any]) -> None:
old_dict = self.__dict__.copy()
if not is_table_model_class(self.__class__):
self.__pydantic_validator__.validate_python(
data,
self_instance=self,
)
else:
sqlmodel_table_construct(
self_instance=self,
values=data,
)
object.__setattr__(
self,
"__dict__",
{**old_dict, **self.__dict__},
)
else:
from pydantic import BaseConfig as BaseConfig # type: ignore[assignment]
from pydantic.errors import ConfigError
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
SHAPE_SINGLETON,
ModelField,
)
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
Undefined as Undefined, # noqa
)
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
UndefinedType as UndefinedType,
)
from pydantic.main import ( # type: ignore[no-redef]
ModelMetaclass as ModelMetaclass,
)
from pydantic.main import validate_model
from pydantic.typing import resolve_annotations
from pydantic.utils import ROOT_KEY, ValueItems
from pydantic.utils import ( # type: ignore[no-redef]
Representation as Representation,
)
class SQLModelConfig(BaseConfig): # type: ignore[no-redef]
table: Optional[bool] = None # type: ignore[misc]
registry: Optional[Any] = None # type: ignore[misc]
def get_config_value(
*, model: InstanceOrType["SQLModel"], parameter: str, default: Any = None
) -> Any:
return getattr(model.__config__, parameter, default) # type: ignore[union-attr]
def set_config_value(
*,
model: InstanceOrType["SQLModel"],
parameter: str,
value: Any,
) -> None:
setattr(model.__config__, parameter, value) # type: ignore
def get_model_fields(model: InstanceOrType[BaseModel]) -> Dict[str, "FieldInfo"]:
return model.__fields__ # type: ignore
def get_fields_set(
object: InstanceOrType["SQLModel"],
) -> Union[Set[str], Callable[[BaseModel], Set[str]]]:
return object.__fields_set__
def init_pydantic_private_attrs(new_object: InstanceOrType["SQLModel"]) -> None:
object.__setattr__(new_object, "__fields_set__", set())
def get_annotations(class_dict: Dict[str, Any]) -> Dict[str, Any]:
return resolve_annotations( # type: ignore[no-any-return]
class_dict.get("__annotations__", {}),
class_dict.get("__module__", None),
)
def is_table_model_class(cls: Type[Any]) -> bool:
config = getattr(cls, "__config__", None)
if config:
return getattr(config, "table", False)
return False
def get_relationship_to(
name: str,
rel_info: "RelationshipInfo",
annotation: Any,
) -> Any:
temp_field = ModelField.infer( # type: ignore[attr-defined]
name=name,
value=rel_info,
annotation=annotation,
class_validators=None,
config=SQLModelConfig,
)
relationship_to = temp_field.type_
if isinstance(temp_field.type_, ForwardRef):
relationship_to = temp_field.type_.__forward_arg__
return relationship_to
def is_field_noneable(field: "FieldInfo") -> bool:
if not field.required: # type: ignore[attr-defined]
# Taken from [Pydantic](https://github.com/samuelcolvin/pydantic/blob/v1.8.2/pydantic/fields.py#L946-L947)
return field.allow_none and ( # type: ignore[attr-defined]
field.shape != SHAPE_SINGLETON or not field.sub_fields # type: ignore[attr-defined]
)
return field.allow_none # type: ignore[no-any-return, attr-defined]
def get_sa_type_from_field(field: Any) -> Any:
if isinstance(field.type_, type) and field.shape == SHAPE_SINGLETON:
return field.type_
raise ValueError(f"The field {field.name} has no matching SQLAlchemy type")
def get_field_metadata(field: Any) -> Any:
metadata = FakeMetadata()
metadata.max_length = field.field_info.max_length
metadata.max_digits = getattr(field.type_, "max_digits", None)
metadata.decimal_places = getattr(field.type_, "decimal_places", None)
return metadata
def post_init_field_info(field_info: FieldInfo) -> None:
field_info._validate() # type: ignore[attr-defined]
def _calculate_keys(
self: "SQLModel",
include: Optional[Mapping[Union[int, str], Any]],
exclude: Optional[Mapping[Union[int, str], Any]],
exclude_unset: bool,
update: Optional[Dict[str, Any]] = None,
) -> Optional[AbstractSet[str]]:
if include is None and exclude is None and not exclude_unset:
# Original in Pydantic:
# return None
# Updated to not return SQLAlchemy attributes
# Do not include relationships as that would easily lead to infinite
# recursion, or traversing the whole database
return (
self.__fields__.keys() # noqa
) # | self.__sqlmodel_relationships__.keys()
keys: AbstractSet[str]
if exclude_unset:
keys = self.__fields_set__.copy() # noqa
else:
# Original in Pydantic:
# keys = self.__dict__.keys()
# Updated to not return SQLAlchemy attributes
# Do not include relationships as that would easily lead to infinite
# recursion, or traversing the whole database
keys = (
self.__fields__.keys() # noqa
) # | self.__sqlmodel_relationships__.keys()
if include is not None:
keys &= include.keys()
if update:
keys -= update.keys()
if exclude:
keys -= {str(k) for k, v in exclude.items() if ValueItems.is_true(v)}
return keys
def sqlmodel_validate(
cls: Type[_TSQLModel],
obj: Any,
*,
strict: Union[bool, None] = None,
from_attributes: Union[bool, None] = None,
context: Union[Dict[str, Any], None] = None,
update: Union[Dict[str, Any], None] = None,
) -> _TSQLModel:
# This was SQLModel's original from_orm() for Pydantic v1
# Duplicated from Pydantic
if not cls.__config__.orm_mode: # type: ignore[attr-defined] # noqa
raise ConfigError(
"You must have the config attribute orm_mode=True to use from_orm"
)
if not isinstance(obj, Mapping):
obj = (
{ROOT_KEY: obj}
if cls.__custom_root_type__ # type: ignore[attr-defined] # noqa
else cls._decompose_class(obj) # type: ignore[attr-defined] # noqa
)
# SQLModel, support update dict
if update is not None:
obj = {**obj, **update}
# End SQLModel support dict
if not getattr(cls.__config__, "table", False): # noqa
# If not table, normal Pydantic code
m: _TSQLModel = cls.__new__(cls)
else:
# If table, create the new instance normally to make SQLAlchemy create
# the _sa_instance_state attribute
m = cls()
values, fields_set, validation_error = validate_model(cls, obj)
if validation_error:
raise validation_error
# Updated to trigger SQLAlchemy internal handling
if not getattr(cls.__config__, "table", False): # noqa
object.__setattr__(m, "__dict__", values)
else:
for key, value in values.items():
setattr(m, key, value)
# Continue with standard Pydantic logic
object.__setattr__(m, "__fields_set__", fields_set)
m._init_private_attributes() # type: ignore[attr-defined] # noqa
return m
def sqlmodel_init(*, self: "SQLModel", data: Dict[str, Any]) -> None:
values, fields_set, validation_error = validate_model(self.__class__, data)
# Only raise errors if not a SQLModel model
if (
not is_table_model_class(self.__class__) # noqa
and validation_error
):
raise validation_error
if not is_table_model_class(self.__class__):
object.__setattr__(self, "__dict__", values)
else:
# Do not set values as in Pydantic, pass them through setattr, so
# SQLAlchemy can handle them
for key, value in values.items():
setattr(self, key, value)
object.__setattr__(self, "__fields_set__", fields_set)
non_pydantic_keys = data.keys() - values.keys()
if is_table_model_class(self.__class__):
for key in non_pydantic_keys:
if key in self.__sqlmodel_relationships__:
setattr(self, key, data[key])
|
ObjectWithUpdateWrapper
|
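A minimal sketch of the wrapper's lookup order, mirroring the call ObjectWithUpdateWrapper(obj=obj, update=update) used inside sqlmodel_validate above. It assumes the class accepts those keyword arguments (for example via a dataclass decorator sitting just above the extracted span): attribute reads are served from the update dict first and fall back to the wrapped object.

from types import SimpleNamespace

hero = SimpleNamespace(name="Deadpond", secret_name="Dive Wilson")
wrapped = ObjectWithUpdateWrapper(obj=hero, update={"name": "Spider-Boy"})
print(wrapped.name)         # "Spider-Boy"  -- found in the update dict
print(wrapped.secret_name)  # "Dive Wilson" -- falls back to the wrapped object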
python
|
openai__openai-python
|
src/openai/types/responses/response_function_call_arguments_done_event.py
|
{
"start": 215,
"end": 639
}
|
class ____(BaseModel):
arguments: str
"""The function-call arguments."""
item_id: str
"""The ID of the item."""
name: str
"""The name of the function that was called."""
output_index: int
"""The index of the output item."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.function_call_arguments.done"]
|
ResponseFunctionCallArgumentsDoneEvent
|
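A hedged construction example for the Pydantic model above; every field value is invented for illustration, while the field names and the literal type value come from the definition itself.

event = ResponseFunctionCallArgumentsDoneEvent(
    arguments='{"location": "Paris"}',   # made-up JSON arguments
    item_id="fc_abc123",                 # made-up item id
    name="get_weather",                  # made-up function name
    output_index=0,
    sequence_number=7,
    type="response.function_call_arguments.done",
)
print(event.name, event.arguments)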
python
|
spack__spack
|
lib/spack/spack/util/environment.py
|
{
"start": 12589,
"end": 13151
}
|
class ____(NamePathModifier):
def execute(self, env: MutableMapping[str, str]):
tty.debug(f"RemoveFirstPath: {self.name}-{self.value}", level=3)
environment_value = env.get(self.name, "")
directories = environment_value.split(self.separator)
directories = [path_to_os_path(os.path.normpath(x)).pop() for x in directories]
val = path_to_os_path(os.path.normpath(self.value)).pop()
if val in directories:
directories.remove(val)
env[self.name] = self.separator.join(directories)
|
RemoveFirstPath
|
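A hedged sketch of what execute() does to an environment mapping. The (name, value) constructor and the os.pathsep default separator are assumptions about the NamePathModifier base class, which lies outside the extracted span; only the first matching entry is dropped, as the class name suggests.

import os

# Hypothetical PATH with a duplicate entry; constructor arguments assumed.
env = {"PATH": os.pathsep.join(["/opt/tool/bin", "/usr/bin", "/opt/tool/bin"])}
modifier = RemoveFirstPath("PATH", "/opt/tool/bin")
modifier.execute(env)
print(env["PATH"])  # "/usr/bin:/opt/tool/bin" on POSIX -- only the first match is removed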