| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-shared/dagster_shared/serdes/serdes.py
|
{
"start": 20968,
"end": 21057
}
|
class ____(ABC):
pass
T_Enum = TypeVar("T_Enum", bound=Enum, default=Enum)
|
Serializer
|
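The `default=` argument to `TypeVar` here comes from PEP 696; on current Python releases it is available through `typing_extensions`. A minimal runnable sketch of the same declaration (the `typing_extensions` import is an assumption for illustration, not taken from the sample):

```python
from abc import ABC
from enum import Enum

from typing_extensions import TypeVar  # backport with PEP 696 `default=` support

class Serializer(ABC):
    pass

# Bounded by Enum, and falls back to Enum when no type argument is given.
T_Enum = TypeVar("T_Enum", bound=Enum, default=Enum)
print(T_Enum.has_default())  # True
```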
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 58939,
"end": 59291
}
|
class ____(sgqlc.types.Enum):
"""Various content states of a ProjectCard
Enumeration Choices:
* `CONTENT_ONLY`: The card has content only.
* `NOTE_ONLY`: The card has a note only.
* `REDACTED`: The card is redacted.
"""
__schema__ = github_schema
__choices__ = ("CONTENT_ONLY", "NOTE_ONLY", "REDACTED")
|
ProjectCardState
|
python
|
eriklindernoren__ML-From-Scratch
|
mlfromscratch/unsupervised_learning/apriori.py
|
{
"start": 86,
"end": 313
}
|
class ____():
    def __init__(self, antecedent, consequent, confidence, support):
        self.antecedent = antecedent
        self.consequent = consequent
self.confidence = confidence
self.support = support
|
Rule
|
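`Rule` is a plain record of an association rule's two itemsets plus its metrics. A hypothetical construction (item names and numbers are illustrative only):

```python
class Rule:
    def __init__(self, antecedent, consequent, confidence, support):
        self.antecedent = antecedent
        self.consequent = consequent
        self.confidence = confidence
        self.support = support

# Hypothetical rule mined from market-basket data: {milk, bread} -> {butter}.
rule = Rule(
    antecedent=frozenset({"milk", "bread"}),
    consequent=frozenset({"butter"}),
    confidence=0.8,  # P(consequent | antecedent)
    support=0.3,     # fraction of transactions containing both itemsets
)
print(sorted(rule.antecedent), "->", sorted(rule.consequent))
```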
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 1015412,
"end": 1016103
}
|
class ____(ValueChannelMixin, core.ValueDefnumber):
"""
XError2Value schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : float
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "xError2"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
|
XError2Value
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/tasks.py
|
{
"start": 337173,
"end": 358803
}
|
class ____(Request):
"""
Get all the company's tasks and all public tasks
:param id: List of IDs to filter by
:type id: Sequence[str]
:param name: Get only tasks whose name matches this pattern (python regular
expression syntax)
:type name: str
:param user: List of user IDs used to filter results by the task's creating
user
:type user: Sequence[str]
:param project: List of project IDs
:type project: Sequence[str]
:param page: Page number, returns a specific page out of the resulting list of
tasks
:type page: int
:param page_size: Page size, specifies the number of results returned in each
page (last page may contain fewer results)
:type page_size: int
:param order_by: List of field names to order by. When search_text is used,
'@text_score' can be used as a field representing the text score of returned
documents. Use '-' prefix to specify descending order. Optional, recommended
when using page
:type order_by: Sequence[str]
:param type: List of task types. One or more of: 'training', 'testing',
'import', 'annotation', 'inference', 'data_processing', 'application',
'monitor', 'controller', 'optimizer', 'service', 'qc' or 'custom' (case
insensitive)
:type type: Sequence[str]
:param tags: List of task user-defined tags. Use '-' prefix to exclude tags
:type tags: Sequence[str]
:param system_tags: List of task system tags. Use '-' prefix to exclude system
tags
:type system_tags: Sequence[str]
    :param status: List of task statuses.
:type status: Sequence[TaskStatusEnum]
:param only_fields: List of task field names (nesting is supported using '.',
e.g. execution.model_labels). If provided, this list defines the query's
projection (only these fields will be returned for each result entry)
:type only_fields: Sequence[str]
:param parent: Parent ID
:type parent: str
:param status_changed: List of status changed constraint strings (utcformat,
epoch) with an optional prefix modifier (``>``, ``>=``, ``<``, ``<=``)
:type status_changed: Sequence[str]
:param search_text: Free text search query
:type search_text: str
:param _all_: Multi-field pattern condition (all fields match pattern)
:type _all_: MultiFieldPatternData
:param _any_: Multi-field pattern condition (any field matches pattern)
:type _any_: MultiFieldPatternData
:param input.view.entries.dataset: List of input dataset IDs
:type input.view.entries.dataset: Sequence[str]
:param input.view.entries.version: List of input dataset version IDs
:type input.view.entries.version: Sequence[str]
:param search_hidden: If set to 'true' then hidden tasks are included in the
search results
:type search_hidden: bool
    :param scroll_id: Scroll ID returned from the previous calls to get_all
:type scroll_id: str
:param refresh_scroll: If set then all the data received with this scroll will
be requeried
:type refresh_scroll: bool
:param size: The number of tasks to retrieve
:type size: int
"""
_service = "tasks"
_action = "get_all"
_version = "2.23"
_schema = {
"definitions": {
"multi_field_pattern_data": {
"properties": {
"fields": {
"description": "List of field names",
"items": {"type": "string"},
"type": ["array", "null"],
},
"pattern": {
"description": "Pattern string (regex)",
"type": ["string", "null"],
},
},
"type": "object",
},
"task_status_enum": {
"enum": [
"created",
"queued",
"in_progress",
"stopped",
"published",
"publishing",
"closed",
"failed",
"completed",
"unknown",
],
"type": "string",
},
},
"dependencies": {"page": ["page_size"]},
"properties": {
"_all_": {
"description": "Multi-field pattern condition (all fields match pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"_any_": {
"description": "Multi-field pattern condition (any field matches pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"id": {
"description": "List of IDs to filter by",
"items": {"type": "string"},
"type": ["array", "null"],
},
"input.view.entries.dataset": {
"description": "List of input dataset IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"input.view.entries.version": {
"description": "List of input dataset version IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"name": {
"description": "Get only tasks whose name matches this pattern (python regular expression syntax)",
"type": ["string", "null"],
},
"only_fields": {
"description": (
"List of task field names (nesting is supported using '.', e.g. execution.model_labels). If"
" provided, this list defines the query's projection (only these fields will be returned for each"
" result entry)"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"order_by": {
"description": (
"List of field names to order by. When search_text is used, '@text_score' can be used as a field"
" representing the text score of returned documents. Use '-' prefix to specify descending order."
" Optional, recommended when using page"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"page": {
"description": "Page number, returns a specific page out of the resulting list of tasks",
"minimum": 0,
"type": ["integer", "null"],
},
"page_size": {
"description": (
"Page size, specifies the number of results returned in each page (last page may contain fewer "
"results)"
),
"minimum": 1,
"type": ["integer", "null"],
},
"parent": {"description": "Parent ID", "type": ["string", "null"]},
"project": {
"description": "List of project IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"refresh_scroll": {
"description": "If set then all the data received with this scroll will be requeried",
"type": ["boolean", "null"],
},
"scroll_id": {
"description": "Scroll ID returned from the previos calls to get_all",
"type": ["string", "null"],
},
"search_hidden": {
"default": False,
"description": "If set to 'true' then hidden tasks are included in the search results",
"type": ["boolean", "null"],
},
"search_text": {
"description": "Free text search query",
"type": ["string", "null"],
},
"size": {
"description": "The number of tasks to retrieve",
"minimum": 1,
"type": ["integer", "null"],
},
"status": {
"description": "List of task status.",
"items": {"$ref": "#/definitions/task_status_enum"},
"type": ["array", "null"],
},
"status_changed": {
"description": (
"List of status changed constraint strings (utcformat, epoch) with an optional prefix modifier "
"(``>``, ``>=``, ``<``, ``<=``)"
),
"items": {"pattern": "^(>=|>|<=|<)?.*$", "type": "string"},
"type": ["array", "null"],
},
"system_tags": {
"description": "List of task system tags. Use '-' prefix to exclude system tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "List of task user-defined tags. Use '-' prefix to exclude tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"type": {
"description": (
"List of task types. One or more of: 'training', 'testing', 'import', 'annotation', 'inference',"
" 'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc' or"
" 'custom' (case insensitive)"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "List of user IDs used to filter results by the task's creating user",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
id=None,
name=None,
user=None,
project=None,
page=None,
page_size=None,
order_by=None,
type=None,
tags=None,
system_tags=None,
status=None,
only_fields=None,
parent=None,
status_changed=None,
search_text=None,
_all_=None,
_any_=None,
input__view__entries__dataset=None,
input__view__entries__version=None,
search_hidden=False,
scroll_id=None,
refresh_scroll=None,
size=None,
**kwargs
):
super(GetAllRequest, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.project = project
self.page = page
self.page_size = page_size
self.order_by = order_by
self.type = type
self.tags = tags
self.system_tags = system_tags
self.status = status
self.only_fields = only_fields
self.parent = parent
self.status_changed = status_changed
self.search_text = search_text
self._all_ = _all_
self._any_ = _any_
self.input__view__entries__dataset = input__view__entries__dataset
self.input__view__entries__version = input__view__entries__version
self.search_hidden = search_hidden
self.scroll_id = scroll_id
self.refresh_scroll = refresh_scroll
self.size = size
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", (list, tuple))
self.assert_isinstance(value, "id", six.string_types, is_array=True)
self._property_id = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self):
return self._property_user
@user.setter
def user(self, value):
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", (list, tuple))
self.assert_isinstance(value, "user", six.string_types, is_array=True)
self._property_user = value
@schema_property("project")
def project(self):
return self._property_project
@project.setter
def project(self, value):
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", (list, tuple))
self.assert_isinstance(value, "project", six.string_types, is_array=True)
self._property_project = value
@schema_property("page")
def page(self):
return self._property_page
@page.setter
def page(self, value):
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("page_size")
def page_size(self):
return self._property_page_size
@page_size.setter
def page_size(self, value):
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property("order_by")
def order_by(self):
return self._property_order_by
@order_by.setter
def order_by(self, value):
if value is None:
self._property_order_by = None
return
self.assert_isinstance(value, "order_by", (list, tuple))
self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
self._property_order_by = value
@schema_property("type")
def type(self):
return self._property_type
@type.setter
def type(self, value):
if value is None:
self._property_type = None
return
self.assert_isinstance(value, "type", (list, tuple))
self.assert_isinstance(value, "type", six.string_types, is_array=True)
self._property_type = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("status")
def status(self):
return self._property_status
@status.setter
def status(self, value):
if value is None:
self._property_status = None
return
self.assert_isinstance(value, "status", (list, tuple))
if any(isinstance(v, six.string_types) for v in value):
value = [
TaskStatusEnum(v) if isinstance(v, six.string_types) else v
for v in value
]
else:
self.assert_isinstance(value, "status", TaskStatusEnum, is_array=True)
self._property_status = value
@schema_property("only_fields")
def only_fields(self):
return self._property_only_fields
@only_fields.setter
def only_fields(self, value):
if value is None:
self._property_only_fields = None
return
self.assert_isinstance(value, "only_fields", (list, tuple))
self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
self._property_only_fields = value
@schema_property("parent")
def parent(self):
return self._property_parent
@parent.setter
def parent(self, value):
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("status_changed")
def status_changed(self):
return self._property_status_changed
@status_changed.setter
def status_changed(self, value):
if value is None:
self._property_status_changed = None
return
self.assert_isinstance(value, "status_changed", (list, tuple))
self.assert_isinstance(value, "status_changed", six.string_types, is_array=True)
self._property_status_changed = value
@schema_property("search_text")
def search_text(self):
return self._property_search_text
@search_text.setter
def search_text(self, value):
if value is None:
self._property_search_text = None
return
self.assert_isinstance(value, "search_text", six.string_types)
self._property_search_text = value
@schema_property("_all_")
def _all_(self):
return self._property__all_
@_all_.setter
def _all_(self, value):
if value is None:
self._property__all_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_all_", MultiFieldPatternData)
self._property__all_ = value
@schema_property("_any_")
def _any_(self):
return self._property__any_
@_any_.setter
def _any_(self, value):
if value is None:
self._property__any_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_any_", MultiFieldPatternData)
self._property__any_ = value
@schema_property("input.view.entries.dataset")
def input__view__entries__dataset(self):
return self._property_input__view__entries__dataset
@input__view__entries__dataset.setter
def input__view__entries__dataset(self, value):
if value is None:
self._property_input__view__entries__dataset = None
return
self.assert_isinstance(value, "input__view__entries__dataset", (list, tuple))
self.assert_isinstance(
value, "input__view__entries__dataset", six.string_types, is_array=True
)
self._property_input__view__entries__dataset = value
@schema_property("input.view.entries.version")
def input__view__entries__version(self):
return self._property_input__view__entries__version
@input__view__entries__version.setter
def input__view__entries__version(self, value):
if value is None:
self._property_input__view__entries__version = None
return
self.assert_isinstance(value, "input__view__entries__version", (list, tuple))
self.assert_isinstance(
value, "input__view__entries__version", six.string_types, is_array=True
)
self._property_input__view__entries__version = value
@schema_property("search_hidden")
def search_hidden(self):
return self._property_search_hidden
@search_hidden.setter
def search_hidden(self, value):
if value is None:
self._property_search_hidden = None
return
self.assert_isinstance(value, "search_hidden", (bool,))
self._property_search_hidden = value
@schema_property("scroll_id")
def scroll_id(self):
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value):
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("refresh_scroll")
def refresh_scroll(self):
return self._property_refresh_scroll
@refresh_scroll.setter
def refresh_scroll(self, value):
if value is None:
self._property_refresh_scroll = None
return
self.assert_isinstance(value, "refresh_scroll", (bool,))
self._property_refresh_scroll = value
@schema_property("size")
def size(self):
return self._property_size
@size.setter
def size(self, value):
if value is None:
self._property_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "size", six.integer_types)
self._property_size = value
|
GetAllRequest
|
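The `schema_property` setters validate and coerce on assignment: string statuses become `TaskStatusEnum` members, integral floats become ints, and list-typed fields must be lists or tuples of strings. A hedged construction sketch (assumes the `clearml` package is installed; the field values are illustrative):

```python
from clearml.backend_api.services.v2_23.tasks import GetAllRequest

req = GetAllRequest(
    status=["completed", "published"],  # strings are coerced to TaskStatusEnum by the setter
    order_by=["-last_update"],          # '-' prefix requests descending order
    page=0,                             # the schema requires page_size whenever page is set
    page_size=50.0,                     # an integral float is coerced to int
)
print(req.page_size)  # 50
```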
python
|
pytorch__pytorch
|
test/cpp/aoti_inference/test.py
|
{
"start": 559,
"end": 6579
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = torch.randn(30, 1, device="cuda")
def forward(self, x, y):
z = self.w * x * y
return z[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17]]
data = {}
large_data = {}
cuda_alloc_data = {}
data_with_tensor_constants = {}
# Basic AOTI model test generation.
def generate_basic_tests():
for device in ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]:
for use_runtime_constant_folding in [True, False]:
if device == "cpu" and use_runtime_constant_folding:
# We do not test runtime const folding for cpu mode.
continue
model = Net(device).to(device=device)
x = torch.randn((4, 4), device=device)
with torch.no_grad():
ref_output = model(x)
torch._dynamo.reset()
with torch.no_grad():
dim0_x = Dim("dim0_x", min=1, max=1024)
dynamic_shapes = {"x": {0: dim0_x}}
model_so_path = aot_compile(
model,
(x,),
dynamic_shapes=dynamic_shapes,
options={
"aot_inductor.use_runtime_constant_folding": use_runtime_constant_folding
},
)
# Also store a .pt2 file using the aoti_compile_and_package API
pt2_package_path = torch._inductor.aoti_compile_and_package(
torch.export.export(
model,
(x,),
dynamic_shapes=dynamic_shapes,
),
inductor_configs={
"aot_inductor.use_runtime_constant_folding": use_runtime_constant_folding
},
)
suffix = f"{device}"
if use_runtime_constant_folding:
suffix += "_use_runtime_constant_folding"
data.update(
{
f"model_so_path_{suffix}": model_so_path,
f"pt2_package_path_{suffix}": pt2_package_path,
f"inputs_{suffix}": [x],
f"outputs_{suffix}": [ref_output],
f"w_pre_{suffix}": model.w_pre,
f"w_add_{suffix}": model.w_add,
}
)
def generate_basic_tests_consts_cpp():
backup_consts_asm_cfg: bool = (
torch._inductor.config.aot_inductor.use_consts_asm_build
)
torch._inductor.config.aot_inductor.use_consts_asm_build = False
# Test consts cpp build again.
generate_basic_tests()
torch._inductor.config.aot_inductor.use_consts_asm_build = backup_consts_asm_cfg
def generate_large_tests():
device = "cuda"
model = Net(device, size=4096).to(device=device)
x = torch.randn((4096, 4096), device=device)
with torch.no_grad():
ref_output = model(x)
torch._dynamo.reset()
for use_runtime_constant_folding in [True, False]:
with torch.no_grad():
model_so_path = aot_compile(
model,
(x,),
options={
"aot_inductor.use_runtime_constant_folding": use_runtime_constant_folding
},
)
# Also store a .pt2 file using the aoti_compile_and_package API
pt2_package_path = torch._inductor.aoti_compile_and_package(
torch.export.export(
model,
(x,),
),
inductor_configs={
"aot_inductor.use_runtime_constant_folding": use_runtime_constant_folding
},
)
suffix = "_use_runtime_constant_folding" if use_runtime_constant_folding else ""
large_data.update(
{ # noqa: F541
f"model_so_path{suffix}": model_so_path,
f"pt2_package_path{suffix}": pt2_package_path,
"inputs": [x],
"outputs": [ref_output],
"w_pre": model.w_pre,
"w_add": model.w_add,
}
)
def generate_cuda_alloc_test():
device = "cuda"
model = Net(device, size=4096).to(device=device)
x = torch.randn((4096, 4096), device=device)
with torch.no_grad():
ref_output = model(x)
torch._dynamo.reset()
with torch.no_grad():
model_so_path = aot_compile(
model,
(x,),
options={"aot_inductor.weight_use_caching_allocator": True},
)
cuda_alloc_data.update(
{ # noqa: F541
"model_so_path": model_so_path,
"inputs": [x],
"outputs": [ref_output],
"w_pre": model.w_pre,
"w_add": model.w_add,
}
)
# AOTI model which will create additional tensors during autograd.
def generate_test_with_additional_tensors():
if not torch.cuda.is_available():
return
model = NetWithTensorConstants()
x = torch.randn((30, 1), device="cuda")
y = torch.randn((30, 1), device="cuda")
with torch.no_grad():
ref_output = model(x, y)
torch._dynamo.reset()
with torch.no_grad():
model_so_path = aot_compile(model, (x, y))
# Also store a .pt2 file using the aoti_compile_and_package API
pt2_package_path = torch._inductor.aoti_compile_and_package(
torch.export.export(model, (x, y))
)
data_with_tensor_constants.update(
{
"model_so_path": model_so_path,
"pt2_package_path": pt2_package_path,
"inputs": [x, y],
"outputs": [ref_output],
"w": model.w,
}
)
generate_basic_tests()
generate_basic_tests_consts_cpp()
generate_large_tests()
generate_test_with_additional_tensors()
generate_cuda_alloc_test()
# Use this to communicate tensors to the cpp code
|
NetWithTensorConstants
|
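The generators above pair two export paths: `aot_compile` for a shared library, and `torch._inductor.aoti_compile_and_package` for a `.pt2` package built from an exported program. A minimal sketch of the packaged path (assumes a recent PyTorch with Inductor; `SmallNet` is a stand-in model, not from the sample):

```python
import torch

class SmallNet(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.linear(x)

model = SmallNet()
x = torch.randn(4, 4)

# Export the model, then compile and package it into a .pt2 archive.
ep = torch.export.export(model, (x,))
pt2_path = torch._inductor.aoti_compile_and_package(ep)
print(pt2_path)  # filesystem path of the generated package
```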
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol41.py
|
{
"start": 649,
"end": 744
}
|
class ____(Protocol[_T_contra]):
def write(self, __s: _T_contra) -> object: ...
|
SupportsWrite
|
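Because the protocol is contravariant in `_T_contra`, any object whose `write` accepts the given argument type matches structurally. A minimal sketch (the `_T_contra` definition and the `Sink` class are assumptions for illustration):

```python
from typing import Protocol, TypeVar

_T_contra = TypeVar("_T_contra", contravariant=True)

class SupportsWrite(Protocol[_T_contra]):
    def write(self, __s: _T_contra) -> object: ...

class Sink:
    def write(self, __s: str) -> object:
        # Accepts str, so Sink satisfies SupportsWrite[str] structurally.
        return len(__s)

def emit(out: SupportsWrite[str]) -> None:
    out.write("hello")

emit(Sink())  # OK at runtime and under a type checker; no inheritance needed
```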
python
|
doocs__leetcode
|
solution/0000-0099/0014.Longest Common Prefix/Solution.py
|
{
"start": 0,
"end": 254
}
|
class ____:
def longestCommonPrefix(self, strs: List[str]) -> str:
for i in range(len(strs[0])):
for s in strs[1:]:
if len(s) <= i or s[i] != strs[0][i]:
return s[:i]
return strs[0]
|
Solution
|
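The solution scans column by column: character `i` of the first string is checked against every other string, and the prefix ends at the first mismatch or short string. A quick self-contained usage sketch:

```python
from typing import List

class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        for i in range(len(strs[0])):
            for s in strs[1:]:
                if len(s) <= i or s[i] != strs[0][i]:
                    return s[:i]
        return strs[0]

print(Solution().longestCommonPrefix(["flower", "flow", "flight"]))  # fl
print(Solution().longestCommonPrefix(["dog", "racecar", "car"]))     # "" (no common prefix)
```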
python
|
getsentry__sentry
|
src/sentry/users/models/userpermission.py
|
{
"start": 663,
"end": 2709
}
|
class ____(OverwritableConfigMixin, ControlOutboxProducingModel):
"""
Permissions are applied to administrative users and control explicit scope-like permissions within the API.
Generally speaking, they should only apply to active superuser sessions.
"""
__relocation_scope__ = RelocationScope.Config
__relocation_custom_ordinal__ = ["user", "permission"]
user = FlexibleForeignKey("sentry.User")
# permissions should be in the form of 'service-name.permission-name'
permission = models.CharField(max_length=32)
class Meta:
app_label = "sentry"
db_table = "sentry_userpermission"
unique_together = (("user", "permission"),)
__repr__ = sane_repr("user_id", "permission")
@classmethod
def for_user(cls, user_id: int) -> frozenset[str]:
"""
Return a set of permission for the given user ID.
"""
return frozenset(cls.objects.filter(user=user_id).values_list("permission", flat=True))
def outboxes_for_update(self, shard_identifier: int | None = None) -> list[ControlOutboxBase]:
regions = find_regions_for_user(self.user_id)
return [
outbox
for outbox in OutboxCategory.USER_UPDATE.as_control_outboxes(
region_names=regions,
shard_identifier=self.user_id,
object_identifier=self.user_id,
)
]
def normalize_before_relocation_import(
self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
) -> int | None:
from sentry.users.models.user import User
old_user_id = self.user_id
old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
if old_pk is None:
return None
# If we are merging users, ignore the imported permissions and use the existing user's
# permissions instead.
if pk_map.get_kind(get_model_name(User), old_user_id) == ImportKind.Existing:
return None
return old_pk
|
UserPermission
|
python
|
tiangolo__fastapi
|
scripts/contributors.py
|
{
"start": 1712,
"end": 1778
}
|
class ____(BaseModel):
edges: list[PullRequestEdge]
|
PullRequests
|
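`PullRequests` wraps a list of GraphQL edges. A minimal sketch with hypothetical stand-ins for the edge and node shapes (the real ones live elsewhere in `contributors.py`):

```python
from pydantic import BaseModel

# Hypothetical stand-ins for the GraphQL edge/node shapes the script consumes.
class PullRequestNode(BaseModel):
    number: int
    title: str

class PullRequestEdge(BaseModel):
    cursor: str
    node: PullRequestNode

class PullRequests(BaseModel):
    edges: list[PullRequestEdge]

prs = PullRequests(edges=[{"cursor": "abc", "node": {"number": 1, "title": "Fix typo"}}])
print(prs.edges[0].node.title)  # Fix typo
```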
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 70937,
"end": 71985
}
|
class ____(Elemwise):
"""Column Selection"""
_parameters = ["frame"]
operation = getattr
@functools.cached_property
def _meta(self):
meta = self.frame._meta
# Handle scalar results
if is_series_like(meta) or is_dataframe_like(meta):
return self.frame._meta.index
return meta
@property
def _projection_columns(self):
return []
def _task(self, name: Key, index: int) -> Task:
return Task(
name,
getattr,
TaskRef((self.frame._name, index)),
"index",
)
@functools.cached_property
def unique_partition_mapping_columns_from_shuffle(self):
name = self.frame._meta.index.name
if name in self.frame.unique_partition_mapping_columns_from_shuffle:
return {name}
elif (name,) in self.frame.unique_partition_mapping_columns_from_shuffle:
return {(name,)}
else:
return set()
def _return_input(df, divisions=None):
return df
|
Index
|
python
|
apache__airflow
|
providers/presto/tests/unit/presto/transfers/test_gcs_to_presto.py
|
{
"start": 1237,
"end": 5612
}
|
class ____:
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.PrestoHook")
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.GCSHook")
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.NamedTemporaryFile")
def test_execute_without_schema(self, mock_tempfile, mock_gcs_hook, mock_presto_hook):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
op = GCSToPrestoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
presto_table=PRESTO_TABLE,
presto_conn_id=PRESTO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_presto_hook.assert_called_once_with(presto_conn_id=PRESTO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
mock_download.assert_called_once_with(bucket_name=BUCKET, object_name=PATH, filename=filename)
mock_insert = mock_presto_hook.return_value.insert_rows
mock_insert.assert_called_once()
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.PrestoHook")
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.GCSHook")
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.NamedTemporaryFile")
def test_execute_schema_fields(self, mock_tempfile, mock_gcs_hook, mock_presto_hook):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
op = GCSToPrestoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
presto_table=PRESTO_TABLE,
presto_conn_id=PRESTO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
schema_fields=SCHEMA_FIELDS,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_presto_hook.assert_called_once_with(presto_conn_id=PRESTO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
mock_download.assert_called_once_with(bucket_name=BUCKET, object_name=PATH, filename=filename)
mock_insert = mock_presto_hook.return_value.insert_rows
mock_insert.assert_called_once()
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.json.loads")
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.PrestoHook")
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.GCSHook")
@mock.patch("airflow.providers.presto.transfers.gcs_to_presto.NamedTemporaryFile")
def test_execute_schema_json(self, mock_tempfile, mock_gcs_hook, mock_presto_hook, mock_json_loader):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
mock_json_loader.return_value = SCHEMA_FIELDS
op = GCSToPrestoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
presto_table=PRESTO_TABLE,
presto_conn_id=PRESTO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
schema_object=SCHEMA_JSON,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_presto_hook.assert_called_once_with(presto_conn_id=PRESTO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
assert mock_download.call_count == 2
mock_insert = mock_presto_hook.return_value.insert_rows
mock_insert.assert_called_once()
|
TestGCSToPrestoOperator
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 16551,
"end": 16733
}
|
class ____(PydanticValueError):
code = 'payment_card_number.invalid_length_for_brand'
msg_template = 'Length for a {brand} card must be {required_length}'
|
InvalidLengthForBrand
|
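In pydantic v1, a `PydanticValueError` subclass carries a `code` plus a `msg_template` that is formatted with the keyword arguments passed at construction. A minimal sketch of that mechanism with a simplified stand-in base class (not the actual pydantic implementation):

```python
class PydanticValueError(ValueError):
    # Simplified stand-in for pydantic.v1's base class, for illustration only.
    code = "value_error"
    msg_template = "value error"

    def __init__(self, **ctx) -> None:
        super().__init__(self.msg_template.format(**ctx))

class InvalidLengthForBrand(PydanticValueError):
    code = "payment_card_number.invalid_length_for_brand"
    msg_template = "Length for a {brand} card must be {required_length}"

err = InvalidLengthForBrand(brand="Visa", required_length=16)
print(err)  # Length for a Visa card must be 16
```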
python
|
bottlepy__bottle
|
bottle.py
|
{
"start": 6442,
"end": 6891
}
|
class ____:
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
|
cached_property
|
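Because `__get__` writes the computed value into the instance `__dict__` under the function's name, the plain attribute shadows this non-data descriptor on later lookups, so the function runs once per instance and `del` re-arms it. A short usage sketch (`Report` is illustrative):

```python
from functools import update_wrapper

class cached_property:
    """A property computed once per instance, then stored as a plain attribute."""
    def __init__(self, func):
        update_wrapper(self, func)
        self.func = func
    def __get__(self, obj, cls):
        if obj is None:
            return self
        value = obj.__dict__[self.func.__name__] = self.func(obj)
        return value

class Report:
    @cached_property
    def total(self):
        print("computing...")
        return 42

r = Report()
print(r.total)  # prints "computing..." then 42
print(r.total)  # 42 only: the instance attribute now shadows the descriptor
del r.total     # deleting the attribute resets the property
print(r.total)  # recomputes
```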
python
|
viewflow__viewflow
|
tests/workflow/test_flow_viewset__flow.py
|
{
"start": 3432,
"end": 3578
}
|
class ____(Flow):
process_class = TestFlowViewestProcess
start = flow.StartHandle().Next(this.end)
end = flow.End()
|
TestFlowViewestFlow
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_pure_fp16.py
|
{
"start": 990,
"end": 5520
}
|
class ____(FSDPTest):
@skip_if_lt_x_gpu(2)
def test_pure_fp16_training(self):
"""Tests pure FP16 training, including when the parameter's dtype is
changed after FSDP initialization and before training."""
self.run_subtests(
{
"cpu_offload": [
CPUOffload(offload_params=True),
CPUOffload(offload_params=False),
]
},
self._test_pure_fp16_training,
)
def _test_pure_fp16_training(self, cpu_offload: CPUOffload):
self._test_fsdp_parity(
NestedWrappedModule,
FSDPInitMode.RECURSIVE,
device_init_mode=DEVICEInitMode.DEVICE_BEFORE,
# Run one iteration to avoid NaN without a gradient scaler
num_iters=1,
cpu_offload=cpu_offload,
use_pure_fp16=True,
)
@skip_if_lt_x_gpu(2)
def test_fp16_dtypes(self):
"""
Tests that both user-facing parameter/gradient dtypes and internal
saved dtype attributes are as expected when using an FP16 model
possibly with explicit mixed precision enabled.
"""
self.run_subtests(
{
"to_half_before_fsdp_init": [False, True],
"use_orig_params": [False, True],
"mixed_precision": [
MixedPrecision(),
MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float32,
),
MixedPrecision(
param_dtype=torch.float32,
),
],
},
self._test_fp16_dtypes,
)
def _test_fp16_dtypes(
self,
to_half_before_fsdp_init: bool,
use_orig_params: bool,
mixed_precision: MixedPrecision,
):
model = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.NO_FSDP,
DEVICEInitMode.DEVICE_NEVER,
{
"device_id": device_type,
},
)
fsdp_kwargs = {
"use_orig_params": use_orig_params,
"device_id": device_type,
"mixed_precision": mixed_precision,
}
if to_half_before_fsdp_init:
model = model.half()
fsdp_model = FSDP(model, **fsdp_kwargs)
if not to_half_before_fsdp_init:
fsdp_model = fsdp_model.half()
for param in fsdp_model.parameters():
self.assertEqual(param.dtype, torch.float16)
inp = tuple(
t.half() if torch.is_tensor(t) else t
for t in fsdp_model.module.get_input(self.device_type)
)
out = fsdp_model(*inp)
out.sum().backward()
# Check handle dtype attributes
for handle in traversal_utils._get_fsdp_handles(fsdp_model):
self.assertEqual(handle.flat_param.dtype, torch.float16)
self.assertEqual(handle.flat_param.grad.dtype, torch.float16)
self.assertEqual(handle._orig_param_dtype, torch.float16)
# Specifying `mixed_precision` takes precedence over the model
# dtype for both `param_dtype` and `reduce_dtype`
if mixed_precision.param_dtype is not None:
self.assertEqual(
handle._fwd_bwd_param_dtype, mixed_precision.param_dtype
)
else:
self.assertEqual(handle._fwd_bwd_param_dtype, torch.float16)
if mixed_precision.reduce_dtype is not None:
self.assertEqual(handle._reduce_dtype, mixed_precision.reduce_dtype)
elif (
mixed_precision.reduce_dtype is None
and mixed_precision.param_dtype is not None
):
# Special case: infer reduce dtype from parameter dtype
self.assertEqual(handle._reduce_dtype, mixed_precision.param_dtype)
else:
self.assertEqual(handle._reduce_dtype, torch.float16)
# Check parameter/gradient dtypes
for param in fsdp_model.parameters():
self.assertEqual(param.dtype, torch.float16)
if param.grad is not None:
self.assertEqual(param.grad.dtype, torch.float16)
devices = ("cuda", "hpu", "xpu")
instantiate_device_type_tests(TestPureFP16, globals(), only_for=devices, allow_xpu=True)
if __name__ == "__main__":
run_tests()
|
TestPureFP16
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/schedules/base.py
|
{
"start": 753,
"end": 7439
}
|
class ____(abc.ABC, MayHaveInstanceWeakref[T_DagsterInstance]):
"""Abstract class for managing persistance of scheduler artifacts."""
@abc.abstractmethod
def wipe(self) -> None:
"""Delete all schedules from storage."""
@abc.abstractmethod
def all_instigator_state(
self,
repository_origin_id: Optional[str] = None,
repository_selector_id: Optional[str] = None,
instigator_type: Optional[InstigatorType] = None,
instigator_statuses: Optional[set[InstigatorStatus]] = None,
) -> Sequence[InstigatorState]:
"""Return all InstigationStates present in storage.
Args:
repository_origin_id (Optional[str]): The ExternalRepository target id to scope results to
repository_selector_id (Optional[str]): The repository selector id to scope results to
instigator_type (Optional[InstigatorType]): The InstigatorType to scope results to
instigator_statuses (Optional[Set[InstigatorStatus]]): The InstigatorStatuses to scope results to
"""
@abc.abstractmethod
def get_instigator_state(self, origin_id: str, selector_id: str) -> Optional[InstigatorState]:
"""Return the instigator state for the given id.
Args:
origin_id (str): The unique instigator identifier
selector_id (str): The logical instigator identifier
"""
@abc.abstractmethod
def add_instigator_state(self, state: InstigatorState) -> InstigatorState:
"""Add an instigator state to storage.
Args:
state (InstigatorState): The state to add
"""
@abc.abstractmethod
def update_instigator_state(self, state: InstigatorState) -> InstigatorState:
"""Update an instigator state in storage.
Args:
state (InstigatorState): The state to update
"""
@abc.abstractmethod
def delete_instigator_state(self, origin_id: str, selector_id: str) -> None:
"""Delete a state in storage.
Args:
origin_id (str): The id of the instigator target to delete
selector_id (str): The logical instigator identifier
"""
@property
def supports_batch_queries(self) -> bool:
return False
def get_batch_ticks(
self,
selector_ids: Sequence[str],
limit: Optional[int] = None,
statuses: Optional[Sequence[TickStatus]] = None,
) -> Mapping[str, Sequence[InstigatorTick]]:
raise NotImplementedError()
@abc.abstractmethod
def get_tick(self, tick_id: int) -> InstigatorTick:
"""Get the tick for a given evaluation tick id.
Args:
tick_id (str): The id of the tick to query.
Returns:
InstigatorTick: The tick for the given id.
"""
@abc.abstractmethod
def get_ticks(
self,
origin_id: str,
selector_id: str,
before: Optional[float] = None,
after: Optional[float] = None,
limit: Optional[int] = None,
statuses: Optional[Sequence[TickStatus]] = None,
) -> Sequence[InstigatorTick]:
"""Get the ticks for a given instigator.
Args:
origin_id (str): The id of the instigator target
selector_id (str): The logical instigator identifier
"""
@abc.abstractmethod
def create_tick(self, tick_data: TickData) -> InstigatorTick:
"""Add a tick to storage.
Args:
tick_data (TickData): The tick to add
"""
@abc.abstractmethod
def update_tick(self, tick: InstigatorTick) -> InstigatorTick:
"""Update a tick already in storage.
Args:
tick (InstigatorTick): The tick to update
"""
@abc.abstractmethod
def purge_ticks(
self,
origin_id: str,
selector_id: str,
before: float,
tick_statuses: Optional[Sequence[TickStatus]] = None,
) -> None:
"""Wipe ticks for an instigator for a certain status and timestamp.
Args:
origin_id (str): The id of the instigator target to delete
selector_id (str): The logical instigator identifier
before (datetime): All ticks before this datetime will get purged
tick_statuses (Optional[List[TickStatus]]): The tick statuses to wipe
"""
@property
def supports_auto_materialize_asset_evaluations(self) -> bool:
return True
@abc.abstractmethod
def add_auto_materialize_asset_evaluations(
self,
evaluation_id: int,
asset_evaluations: Sequence[AutomationConditionEvaluationWithRunIds[EntityKey]],
) -> None:
"""Add asset policy evaluations to storage."""
@abc.abstractmethod
def get_auto_materialize_asset_evaluations(
self, key: T_EntityKey, limit: int, cursor: Optional[int] = None
) -> Sequence[AutoMaterializeAssetEvaluationRecord[T_EntityKey]]:
"""Get the policy evaluations for a given asset.
Args:
asset_key (AssetKey): The asset key to query
limit (Optional[int]): The maximum number of evaluations to return
cursor (Optional[int]): The cursor to paginate from
"""
@abc.abstractmethod
def get_auto_materialize_evaluations_for_evaluation_id(
self, evaluation_id: int
) -> Sequence[AutoMaterializeAssetEvaluationRecord]:
"""Get all policy evaluations for a given evaluation ID.
Args:
evaluation_id (int): The evaluation ID to query.
"""
@abc.abstractmethod
def purge_asset_evaluations(self, before: float) -> None:
"""Wipe evaluations before a certain timestamp.
Args:
before (datetime): All evaluations before this datetime will get purged
"""
@abc.abstractmethod
def upgrade(self) -> None:
"""Perform any needed migrations."""
def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:
"""Call this method to run any required data migrations."""
def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:
"""Call this method to run any optional data migrations for optimized reads."""
def optimize_for_webserver(
self, statement_timeout: int, pool_recycle: int, max_overflow: int
) -> None:
"""Allows for optimizing database connection / use in the context of a long lived webserver process."""
def alembic_version(self) -> Optional[AlembicVersion]:
return None
def dispose(self) -> None:
"""Explicit lifecycle management."""
|
ScheduleStorage
|
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 148186,
"end": 148348
}
|
class ____(Token):
def __init__(self) -> None:
super().__init__()
self._may_return_empty = True
self.mayIndexError = False
|
PositionToken
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/sticky_variant_dependent/package.py
|
{
"start": 227,
"end": 604
}
|
class ____(AutotoolsPackage):
"""Package with a sticky variant and a conflict"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("sticky-variant")
conflicts("%gcc", when="^sticky-variant~allow-gcc")
depends_on("c", type="build")
|
StickyVariantDependent
|
python
|
django__django
|
tests/postgres_tests/test_operations.py
|
{
"start": 1140,
"end": 6910
}
|
class ____(OptimizerTestBase, OperationTestBase):
app_label = "test_add_concurrently"
def test_requires_atomic_false(self):
project_state = self.set_up_test_model(self.app_label)
new_state = project_state.clone()
operation = AddIndexConcurrently(
"Pony",
Index(fields=["pink"], name="pony_pink_idx"),
)
msg = (
"The AddIndexConcurrently operation cannot be executed inside "
"a transaction (set atomic = False on the migration)."
)
with self.assertRaisesMessage(NotSupportedError, msg):
with connection.schema_editor(atomic=True) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
def test_add(self):
project_state = self.set_up_test_model(self.app_label, index=False)
table_name = "%s_pony" % self.app_label
index = Index(fields=["pink"], name="pony_pink_idx")
new_state = project_state.clone()
operation = AddIndexConcurrently("Pony", index)
self.assertEqual(
operation.describe(),
"Concurrently create index pony_pink_idx on field(s) pink of model Pony",
)
self.assertEqual(
operation.formatted_description(),
"+ Concurrently create index pony_pink_idx on field(s) pink of model Pony",
)
operation.state_forwards(self.app_label, new_state)
self.assertEqual(
len(new_state.models[self.app_label, "pony"].options["indexes"]), 1
)
self.assertIndexNotExists(table_name, ["pink"])
# Add index.
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
self.assertIndexExists(table_name, ["pink"])
# Reversal.
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(
self.app_label, editor, new_state, project_state
)
self.assertIndexNotExists(table_name, ["pink"])
# Deconstruction.
name, args, kwargs = operation.deconstruct()
self.assertEqual(name, "AddIndexConcurrently")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"model_name": "Pony", "index": index})
def test_add_other_index_type(self):
project_state = self.set_up_test_model(self.app_label, index=False)
table_name = "%s_pony" % self.app_label
new_state = project_state.clone()
operation = AddIndexConcurrently(
"Pony",
BrinIndex(fields=["pink"], name="pony_pink_brin_idx"),
)
self.assertIndexNotExists(table_name, ["pink"])
# Add index.
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
self.assertIndexExists(table_name, ["pink"], index_type="brin")
# Reversal.
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(
self.app_label, editor, new_state, project_state
)
self.assertIndexNotExists(table_name, ["pink"])
def test_add_with_options(self):
project_state = self.set_up_test_model(self.app_label, index=False)
table_name = "%s_pony" % self.app_label
new_state = project_state.clone()
index = BTreeIndex(fields=["pink"], name="pony_pink_btree_idx", fillfactor=70)
operation = AddIndexConcurrently("Pony", index)
self.assertIndexNotExists(table_name, ["pink"])
# Add index.
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
self.assertIndexExists(table_name, ["pink"], index_type="btree")
# Reversal.
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(
self.app_label, editor, new_state, project_state
)
self.assertIndexNotExists(table_name, ["pink"])
def test_reduce_add_remove_concurrently(self):
self.assertOptimizesTo(
[
AddIndexConcurrently(
"Pony",
Index(fields=["pink"], name="pony_pink_idx"),
),
RemoveIndex("Pony", "pony_pink_idx"),
],
[],
)
def test_reduce_add_remove(self):
self.assertOptimizesTo(
[
AddIndexConcurrently(
"Pony",
Index(fields=["pink"], name="pony_pink_idx"),
),
RemoveIndexConcurrently("Pony", "pony_pink_idx"),
],
[],
)
def test_reduce_add_rename(self):
self.assertOptimizesTo(
[
AddIndexConcurrently(
"Pony",
Index(fields=["pink"], name="pony_pink_idx"),
),
RenameIndex(
"Pony",
old_name="pony_pink_idx",
new_name="pony_pink_index",
),
],
[
AddIndexConcurrently(
"Pony",
Index(fields=["pink"], name="pony_pink_index"),
),
],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests.")
@modify_settings(INSTALLED_APPS={"append": "migrations"})
|
AddIndexConcurrentlyTests
|
python
|
astropy__astropy
|
astropy/io/ascii/core.py
|
{
"start": 13513,
"end": 18004
}
|
class ____(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = " "
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first.
"""
if self.delimiter == r"\s":
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip() + "\n"
def process_val(self, val: str) -> str:
"""Remove whitespace at the beginning or end of value."""
return val.strip(" \t")
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Yields
------
line : list of str
Each line's split values.
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = " " if self.delimiter == r"\s" else self.delimiter
csv_reader = csv.reader(
lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace,
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = " " if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
)
if self.process_val:
vals = [self.process_val(x) for x in vals]
return self.csv_writer.writerow(vals).rstrip("\r\n")
def _replace_tab_with_space(line: str, escapechar: str, quotechar: str) -> str:
"""Replace tabs with spaces in given string, preserving quoted substrings.
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = "NONE"
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if callable(line_or_func):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
|
DefaultSplitter
|
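The splitter is a callable over an iterable of lines: each line is stripped, routed through Python's `csv.reader`, and each resulting value stripped again. A runnable sketch following the docstring's own usage pattern (assumes astropy is installed):

```python
from astropy.io import ascii

splitter = ascii.DefaultSplitter()
lines = ['a b "c d"', "1 2 3"]
for col_vals in splitter(lines):
    print(col_vals)
# ['a', 'b', 'c d']   <- the quoted field survives the space delimiter
# ['1', '2', '3']
```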
python
|
coleifer__peewee
|
tests/regressions.py
|
{
"start": 18705,
"end": 18812
}
|
class ____(BaseVersionedModel):
user = ForeignKeyField(VUser, null=True)
content = TextField()
|
VTweet
|
python
|
pennersr__django-allauth
|
allauth/mfa/migrations/0001_initial.py
|
{
"start": 159,
"end": 1486
}
|
class ____(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Authenticator",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"type",
models.CharField(
choices=[
("recovery_codes", "Recovery codes"),
("totp", "TOTP Authenticator"),
],
max_length=20,
),
),
("data", models.JSONField()),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"unique_together": {("user", "type")},
},
),
]
|
Migration
|
python
|
tornadoweb__tornado
|
tornado/test/queues_test.py
|
{
"start": 2525,
"end": 5987
}
|
class ____(AsyncTestCase):
@gen_test
def test_blocking_get(self):
q = queues.Queue() # type: queues.Queue[int]
q.put_nowait(0)
self.assertEqual(0, (yield q.get()))
def test_nonblocking_get(self):
q = queues.Queue() # type: queues.Queue[int]
q.put_nowait(0)
self.assertEqual(0, q.get_nowait())
def test_nonblocking_get_exception(self):
q = queues.Queue() # type: queues.Queue[int]
self.assertRaises(queues.QueueEmpty, q.get_nowait)
@gen_test
def test_get_with_putters(self):
q = queues.Queue(1) # type: queues.Queue[int]
q.put_nowait(0)
put = q.put(1)
self.assertEqual(0, (yield q.get()))
self.assertIsNone((yield put))
@gen_test
def test_blocking_get_wait(self):
q = queues.Queue() # type: queues.Queue[int]
q.put(0)
self.io_loop.call_later(0.01, q.put_nowait, 1)
self.io_loop.call_later(0.02, q.put_nowait, 2)
self.assertEqual(0, (yield q.get(timeout=timedelta(seconds=1))))
self.assertEqual(1, (yield q.get(timeout=timedelta(seconds=1))))
@gen_test
def test_get_timeout(self):
q = queues.Queue() # type: queues.Queue[int]
get_timeout = q.get(timeout=timedelta(seconds=0.01))
get = q.get()
with self.assertRaises(TimeoutError):
yield get_timeout
q.put_nowait(0)
self.assertEqual(0, (yield get))
@gen_test
def test_get_timeout_preempted(self):
q = queues.Queue() # type: queues.Queue[int]
get = q.get(timeout=timedelta(seconds=0.01))
q.put(0)
yield gen.sleep(0.02)
self.assertEqual(0, (yield get))
@gen_test
def test_get_clears_timed_out_putters(self):
q = queues.Queue(1) # type: queues.Queue[int]
# First putter succeeds, remainder block.
putters = [q.put(i, timedelta(seconds=0.01)) for i in range(10)]
put = q.put(10)
self.assertEqual(10, len(q._putters))
yield gen.sleep(0.02)
self.assertEqual(10, len(q._putters))
self.assertFalse(put.done()) # Final waiter is still active.
q.put(11)
self.assertEqual(0, (yield q.get())) # get() clears the waiters.
self.assertEqual(1, len(q._putters))
for putter in putters[1:]:
self.assertRaises(TimeoutError, putter.result)
@gen_test
def test_get_clears_timed_out_getters(self):
q = queues.Queue() # type: queues.Queue[int]
getters = [
asyncio.ensure_future(q.get(timedelta(seconds=0.01))) for _ in range(10)
]
get = asyncio.ensure_future(q.get())
self.assertEqual(11, len(q._getters))
yield gen.sleep(0.02)
self.assertEqual(11, len(q._getters))
self.assertFalse(get.done()) # Final waiter is still active.
q.get() # get() clears the waiters.
self.assertEqual(2, len(q._getters))
for getter in getters:
self.assertRaises(TimeoutError, getter.result)
@gen_test
def test_async_for(self):
q = queues.Queue() # type: queues.Queue[int]
for i in range(5):
q.put(i)
async def f():
results = []
async for i in q:
results.append(i)
if i == 4:
return results
results = yield f()
self.assertEqual(results, list(range(5)))
|
QueueGetTest
|
python
|
cherrypy__cherrypy
|
cherrypy/test/test_conn.py
|
{
"start": 17832,
"end": 27560
}
|
class ____(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_readall_or_close(self):
if cherrypy.server.protocol_version != 'HTTP/1.1':
return self.skip()
self.PROTOCOL = 'HTTP/1.1'
if self.scheme == 'https':
self.HTTP_CONN = HTTPSConnection
else:
self.HTTP_CONN = HTTPConnection
# Test a max of 0 (the default) and then reset to what it was above.
old_max = cherrypy.server.max_request_body_size
for new_max in (0, old_max):
cherrypy.server.max_request_body_size = new_max
self.persistent = True
conn = self.HTTP_CONN
# Get a POST page with an error
conn.putrequest('POST', '/err_before_read', skip_host=True)
conn.putheader('Host', self.HOST)
conn.putheader('Content-Type', 'text/plain')
conn.putheader('Content-Length', '1000')
conn.putheader('Expect', '100-continue')
conn.endheaders()
response = conn.response_class(conn.sock, method='POST')
# ...assert and then skip the 100 response
version, status, reason = response._read_status()
self.assertEqual(status, 100)
while True:
skip = response.fp.readline().strip()
if not skip:
break
# ...send the body
conn.send(ntob('x' * 1000))
# ...get the final response
response.begin()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(500)
# Now try a working page with an Expect header...
conn._output(b'POST /upload HTTP/1.1')
conn._output(ntob('Host: %s' % self.HOST, 'ascii'))
conn._output(b'Content-Type: text/plain')
conn._output(b'Content-Length: 17')
conn._output(b'Expect: 100-continue')
conn._send_output()
response = conn.response_class(conn.sock, method='POST')
# ...assert and then skip the 100 response
version, status, reason = response._read_status()
self.assertEqual(status, 100)
while True:
skip = response.fp.readline().strip()
if not skip:
break
# ...send the body
body = b'I am a small file'
conn.send(body)
# ...get the final response
response.begin()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(200)
self.assertBody("thanks for '%s'" % tonative(body))
conn.close()
def test_No_Message_Body(self):
if cherrypy.server.protocol_version != 'HTTP/1.1':
return self.skip()
self.PROTOCOL = 'HTTP/1.1'
# Set our HTTP_CONN to an instance so it persists between requests.
self.persistent = True
# Make the first request and assert there's no "Connection: close".
self.getPage('/')
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader('Connection')
# Make a 204 request on the same connection.
self.getPage('/custom/204')
self.assertStatus(204)
self.assertNoHeader('Content-Length')
self.assertBody('')
self.assertNoHeader('Connection')
# Make a 304 request on the same connection.
self.getPage('/custom/304')
self.assertStatus(304)
self.assertNoHeader('Content-Length')
self.assertBody('')
self.assertNoHeader('Connection')
def test_Chunked_Encoding(self):
if cherrypy.server.protocol_version != 'HTTP/1.1':
return self.skip()
if (
hasattr(self, 'harness')
and 'modpython' in self.harness.__class__.__name__.lower()
):
# mod_python forbids chunked encoding
return self.skip()
self.PROTOCOL = 'HTTP/1.1'
# Set our HTTP_CONN to an instance so it persists between requests.
self.persistent = True
conn = self.HTTP_CONN
# Try a normal chunked request (with extensions)
body = ntob(
'8;key=value\r\nxx\r\nxxxx\r\n5\r\nyyyyy\r\n0\r\n'
'Content-Type: application/json\r\n'
'\r\n',
)
conn.putrequest('POST', '/upload', skip_host=True)
conn.putheader('Host', self.HOST)
conn.putheader('Transfer-Encoding', 'chunked')
conn.putheader('Trailer', 'Content-Type')
# Note that this is somewhat malformed:
# we shouldn't be sending Content-Length.
# RFC 2616 says the server should ignore it.
conn.putheader('Content-Length', '3')
conn.endheaders()
conn.send(body)
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus('200 OK')
self.assertBody("thanks for '%s'" % 'xx\r\nxxxxyyyyy')
# Try a chunked request that exceeds server.max_request_body_size.
# Note that the delimiters and trailer are included.
body = ntob('3e3\r\n' + ('x' * 995) + '\r\n0\r\n\r\n')
conn.putrequest('POST', '/upload', skip_host=True)
conn.putheader('Host', self.HOST)
conn.putheader('Transfer-Encoding', 'chunked')
conn.putheader('Content-Type', 'text/plain')
# Chunked requests don't need a content-length
# # conn.putheader("Content-Length", len(body))
conn.endheaders()
conn.send(body)
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(413)
conn.close()
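    def _encode_chunked(self, chunks, trailer=''):
        # Editor's sketch (hypothetical helper, not in the original suite):
        # shows how the hand-built chunked bodies above are assembled. Each
        # chunk is "<hex size>\r\n<data>\r\n" and a zero-size chunk ends the
        # body; e.g. self._encode_chunked(['x' * 995]) reproduces the
        # oversized body used above.
        out = ''.join('%x\r\n%s\r\n' % (len(c), c) for c in chunks)
        return ntob(out + '0\r\n' + trailer + '\r\n')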
def test_Content_Length_in(self):
# Try a non-chunked request where Content-Length exceeds
# server.max_request_body_size. Assert error before body send.
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest('POST', '/upload', skip_host=True)
conn.putheader('Host', self.HOST)
conn.putheader('Content-Type', 'text/plain')
conn.putheader('Content-Length', '9999')
conn.endheaders()
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(413)
self.assertBody(
'The entity sent with the request exceeds '
'the maximum allowed bytes.',
)
conn.close()
def test_Content_Length_out_preheaders(self):
# Try a non-chunked response where Content-Length is less than
# the actual bytes in the response body.
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest(
'GET',
'/custom_cl?body=I+have+too+many+bytes&cl=5',
skip_host=True,
)
conn.putheader('Host', self.HOST)
conn.endheaders()
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(500)
self.assertBody(
'The requested resource returned more bytes than the '
'declared Content-Length.',
)
conn.close()
def test_Content_Length_out_postheaders(self):
# Try a non-chunked response where Content-Length is less than
# the actual bytes in the response body.
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest(
'GET',
'/custom_cl?body=I+too&body=+have+too+many&cl=5',
skip_host=True,
)
conn.putheader('Host', self.HOST)
conn.endheaders()
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(200)
self.assertBody('I too')
conn.close()
def test_598(self):
tmpl = '{scheme}://{host}:{port}/one_megabyte_of_a/'
url = tmpl.format(
scheme=self.scheme,
host=self.HOST,
port=self.PORT,
)
remote_data_conn = urllib.request.urlopen(url)
buf = remote_data_conn.read(512)
time.sleep(timeout * 0.6)
remaining = (1024 * 1024) - 512
while remaining:
data = remote_data_conn.read(remaining)
if not data:
break
else:
buf += data
remaining -= len(data)
self.assertEqual(len(buf), 1024 * 1024)
self.assertEqual(buf, ntob('a' * 1024 * 1024))
self.assertEqual(remaining, 0)
remote_data_conn.close()
def setup_upload_server():
class Root:
@cherrypy.expose
def upload(self):
if not cherrypy.request.method == 'POST':
raise AssertionError(
"'POST' != request.method %r" % cherrypy.request.method,
)
return "thanks for '%s'" % tonative(cherrypy.request.body.read())
cherrypy.tree.mount(Root())
cherrypy.config.update(
{
'server.max_request_body_size': 1001,
'server.socket_timeout': 10,
'server.accepted_queue_size': 5,
'server.accepted_queue_timeout': 0.1,
},
)
# Reset error numbers available on this platform.
reset_names = 'ECONNRESET', 'WSAECONNRESET'
socket_reset_errors = [
    getattr(errno, name) for name in reset_names if hasattr(errno, name)
]
socket_reset_errors += [
# Python 3.5 raises an http.client.RemoteDisconnected
# with this message
'Remote end closed connection without response',
]
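# Editor's sketch (hypothetical helper): the list above mixes errno values and
# an exception message, so a matcher would check both representations.
def is_socket_reset(exc):
    return (getattr(exc, 'errno', None) in socket_reset_errors
            or str(exc) in socket_reset_errors)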
|
ConnectionTests
|
python
|
bokeh__bokeh
|
src/bokeh/embed/bundle.py
|
{
"start": 2584,
"end": 7454
}
|
class ____:
js_files: list[URL]
js_raw: list[str]
css_files: list[URL]
css_raw: list[str]
hashes: Hashes
def __init__(self, js_files: list[URL] = [], js_raw: list[str] = [],
css_files: list[URL] = [], css_raw: list[str] = [], hashes: Hashes = {}):
self.js_files = js_files[:]
self.js_raw = js_raw[:]
self.css_files = css_files[:]
self.css_raw = css_raw[:]
self.hashes = {**hashes}
def __iter__(self) -> Iterator[str]:
yield self._render_js()
yield self._render_css()
def _render_js(self) -> str:
return JS_RESOURCES.render(js_files=self.js_files, js_raw=self.js_raw, hashes=self.hashes)
def _render_css(self) -> str:
return CSS_RESOURCES.render(css_files=self.css_files, css_raw=self.css_raw)
def scripts(self, tag: bool = True) -> str:
if tag:
return JS_RESOURCES.render(js_raw=self.js_raw, js_files=[])
else:
return "\n".join(self.js_raw)
@property
def js_urls(self) -> list[URL]:
return self.js_files
@property
def css_urls(self) -> list[URL]:
return self.css_files
def add(self, artifact: Artifact) -> None:
if isinstance(artifact, ScriptRef):
self.js_files.append(artifact.url)
elif isinstance(artifact, Script):
self.js_raw.append(artifact.content)
elif isinstance(artifact, StyleRef):
self.css_files.append(artifact.url)
elif isinstance(artifact, Style):
self.css_raw.append(artifact.content)
def bundle_for_objs_and_resources(objs: Sequence[HasProps | Document] | None, resources: Resources | None) -> Bundle:
''' Generate rendered CSS and JS resources suitable for the given
collection of Bokeh objects.
Args:
    objs (seq[HasProps or Document]) : objects whose resources are needed
    resources (Resources) : a resource configuration to draw files and raw content from
Returns:
    Bundle
'''
if objs is not None:
all_objs = _all_objs(objs)
use_widgets = _use_widgets(all_objs)
use_tables = _use_tables(all_objs)
use_gl = _use_gl(all_objs)
use_mathjax = _use_mathjax(all_objs)
else:
# XXX: force all components on server and in notebook, because we don't know in advance what will be used
all_objs = None
use_widgets = True
use_tables = True
use_gl = True
use_mathjax = True
js_files: list[URL] = []
js_raw: list[str] = []
css_files: list[URL] = []
css_raw: list[str] = []
if resources is not None:
components = list(resources.components)
if not use_widgets: components.remove("bokeh-widgets")
if not use_tables: components.remove("bokeh-tables")
if not use_gl: components.remove("bokeh-gl")
if not use_mathjax: components.remove("bokeh-mathjax")
resources = resources.clone(components=components)
js_files.extend(map(URL, resources.js_files))
js_raw.extend(resources.js_raw)
css_files.extend(map(URL, resources.css_files))
css_raw.extend(resources.css_raw)
extensions = _bundle_extensions(all_objs if objs else None, resources)
# The signature permits resources=None; fall through to local artifact paths.
mode = resources.mode if resources is not None else None
if mode == "inline":
js_raw.extend([ Resources._inline(bundle.artifact_path) for bundle in extensions ])
elif mode == "server":
js_files.extend([ bundle.server_url for bundle in extensions ])
elif mode == "cdn":
for bundle in extensions:
if bundle.cdn_url is not None:
js_files.append(bundle.cdn_url)
else:
js_raw.append(Resources._inline(bundle.artifact_path))
else:
js_files.extend([ URL(str(bundle.artifact_path)) for bundle in extensions ])
models = [ obj.__class__ for obj in all_objs ] if all_objs else None
ext = bundle_models(models)
if ext is not None:
js_raw.append(ext)
return Bundle(js_files, js_raw, css_files, css_raw, resources.hashes if resources else {})
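# Editor's usage sketch (assumed API; CDN is importable from bokeh.resources):
#
#     from bokeh.resources import CDN
#     bundle = bundle_for_objs_and_resources([plot], CDN)
#     js_html, css_html = tuple(bundle)  # Bundle.__iter__ yields JS, then CSS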
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _query_extensions(all_objs: set[HasProps], query: Callable[[type[HasProps]], bool]) -> bool:
names: set[str] = set()
for obj in all_objs:
if hasattr(obj, "__implementation__"):
continue
name = obj.__view_module__.split(".")[0]
if name == "bokeh":
continue
if name in names:
continue
names.add(name)
for model in HasProps.model_class_reverse_map.values():
if model.__module__.startswith(name):
if query(model):
return True
return False
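# Editor's note: the query argument is any predicate over model classes; a
# hypothetical caller might probe custom models for attached JS like so:
#
#     has_js = _query_extensions(all_objs, lambda m: hasattr(m, "__javascript__"))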
@dataclass(frozen=True)
|
Bundle
|
python
|
wandb__wandb
|
wandb/sdk/data_types/audio.py
|
{
"start": 6385,
"end": 6508
}
|
class ____(_dtypes.Type):
name = "audio-file"
types = [Audio]
_dtypes.TypeRegistry.add(_AudioFileType)
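# Editor's note (hedged): with the type registered, wandb's dtype system can
# map Audio instances to the "audio-file" type name when resolving logged values.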
|
_AudioFileType
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/callpath/package.py
|
{
"start": 217,
"end": 838
}
|
class ____(Package):
homepage = "https://github.com/tgamblin/callpath"
url = "http://github.com/tgamblin/callpath-1.0.tar.gz"
version("0.8", md5="0123456789abcdef0123456789abcdef")
version("0.9", md5="0123456789abcdef0123456789abcdef")
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("c", type="build")
depends_on("dyninst")
depends_on("mpi")
def install(self, spec, prefix):
mkdirp(prefix)
touch(join_path(prefix, "dummyfile"))
def setup_run_environment(self, env: EnvironmentModifications) -> None:
env.set("FOOBAR", self.name)
|
Callpath
|
python
|
psf__black
|
src/black/trans.py
|
{
"start": 13455,
"end": 31200
}
|
class ____(StringTransformer, CustomSplitMapMixin):
"""StringTransformer that merges strings together.
Requirements:
(A) The line contains adjacent strings such that ALL of the validation checks
listed in StringMerger._validate_msg(...)'s docstring pass.
OR
(B) The line contains a string which uses line continuation backslashes.
Transformations:
Depending on which of the two requirements above were met, either:
(A) The string group associated with the target string is merged.
OR
(B) All line-continuation backslashes are removed from the target string.
Collaborations:
StringMerger provides custom split information to StringSplitter.
"""
def do_match(self, line: Line) -> TMatchResult:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
string_indices = []
idx = 0
while is_valid_index(idx):
leaf = LL[idx]
if (
leaf.type == token.STRING
and is_valid_index(idx + 1)
and LL[idx + 1].type == token.STRING
):
# Let's check if the string group contains an inline comment
# If we have a comment inline, we don't merge the strings
contains_comment = False
i = idx
while is_valid_index(i):
if LL[i].type != token.STRING:
break
if line.comments_after(LL[i]):
contains_comment = True
break
i += 1
if not contains_comment and not is_part_of_annotation(leaf):
string_indices.append(idx)
# Advance to the next non-STRING leaf.
idx += 2
while is_valid_index(idx) and LL[idx].type == token.STRING:
idx += 1
elif leaf.type == token.STRING and "\\\n" in leaf.value:
string_indices.append(idx)
# Advance to the next non-STRING leaf.
idx += 1
while is_valid_index(idx) and LL[idx].type == token.STRING:
idx += 1
else:
idx += 1
if string_indices:
return Ok(string_indices)
else:
return TErr("This line has no strings that need merging.")
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
new_line = line
rblc_result = self._remove_backslash_line_continuation_chars(
new_line, string_indices
)
if isinstance(rblc_result, Ok):
new_line = rblc_result.ok()
msg_result = self._merge_string_group(new_line, string_indices)
if isinstance(msg_result, Ok):
new_line = msg_result.ok()
if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
msg_cant_transform = msg_result.err()
rblc_cant_transform = rblc_result.err()
cant_transform = CannotTransform(
"StringMerger failed to merge any strings in this line."
)
# Chain the errors together using `__cause__`.
msg_cant_transform.__cause__ = rblc_cant_transform
cant_transform.__cause__ = msg_cant_transform
yield Err(cant_transform)
else:
yield Ok(new_line)
@staticmethod
def _remove_backslash_line_continuation_chars(
line: Line, string_indices: list[int]
) -> TResult[Line]:
"""
Merge strings that were split across multiple lines using
line-continuation backslashes.
Returns:
Ok(new_line), if @line contains backslash line-continuation
characters.
OR
Err(CannotTransform), otherwise.
"""
LL = line.leaves
indices_to_transform = []
for string_idx in string_indices:
string_leaf = LL[string_idx]
if (
string_leaf.type == token.STRING
and "\\\n" in string_leaf.value
and not has_triple_quotes(string_leaf.value)
):
indices_to_transform.append(string_idx)
if not indices_to_transform:
return TErr(
"Found no string leaves that contain backslash line continuation"
" characters."
)
new_line = line.clone()
new_line.comments = line.comments.copy()
append_leaves(new_line, line, LL)
for string_idx in indices_to_transform:
new_string_leaf = new_line.leaves[string_idx]
new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")
return Ok(new_line)
def _merge_string_group(
self, line: Line, string_indices: list[int]
) -> TResult[Line]:
"""
Merges string groups (i.e. sets of adjacent strings).
Each index from `string_indices` designates one string group's first
leaf in `line.leaves`.
Returns:
Ok(new_line), if ALL of the validation checks found in
_validate_msg(...) pass.
OR
Err(CannotTransform), otherwise.
"""
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
# A dict of {string_idx: tuple[num_of_strings, string_leaf]}.
merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {}
for string_idx in string_indices:
vresult = self._validate_msg(line, string_idx)
if isinstance(vresult, Err):
continue
merged_string_idx_dict[string_idx] = self._merge_one_string_group(
LL, string_idx, is_valid_index
)
if not merged_string_idx_dict:
return TErr("No string group is merged")
# Build the final line ('new_line') that this method will later return.
new_line = line.clone()
previous_merged_string_idx = -1
previous_merged_num_of_strings = -1
for i, leaf in enumerate(LL):
if i in merged_string_idx_dict:
previous_merged_string_idx = i
previous_merged_num_of_strings, string_leaf = merged_string_idx_dict[i]
new_line.append(string_leaf)
if (
previous_merged_string_idx
<= i
< previous_merged_string_idx + previous_merged_num_of_strings
):
for comment_leaf in line.comments_after(leaf):
new_line.append(comment_leaf, preformatted=True)
continue
append_leaves(new_line, line, [leaf])
return Ok(new_line)
def _merge_one_string_group(
self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
) -> tuple[int, Leaf]:
"""
Merges one string group where the first string in the group is
`LL[string_idx]`.
Returns:
A tuple of `(num_of_strings, leaf)` where `num_of_strings` is the
number of strings merged and `leaf` is the newly merged string
to be replaced in the new line.
"""
# If the string group is wrapped inside an Atom node, we must make sure
# to later replace that Atom with our new (merged) string leaf.
atom_node = LL[string_idx].parent
# We will place BREAK_MARK in between every two substrings that we
# merge. We will then later go through our final result and use the
# various instances of BREAK_MARK we find to add the right values to
# the custom split map.
BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
QUOTE = LL[string_idx].value[-1]
def make_naked(string: str, string_prefix: str) -> str:
"""Strip @string (i.e. make it a "naked" string)
Pre-conditions:
* assert_is_leaf_string(@string)
Returns:
A string that is identical to @string except that
@string_prefix has been stripped, the surrounding QUOTE
characters have been removed, and any remaining QUOTE
characters have been escaped.
"""
assert_is_leaf_string(string)
if "f" in string_prefix:
f_expressions = [
string[span[0] + 1 : span[1] - 1] # +-1 to get rid of curly braces
for span in iter_fexpr_spans(string)
]
debug_expressions_contain_visible_quotes = any(
re.search(r".*[\'\"].*(?<![!:=])={1}(?!=)(?![^\s:])", expression)
for expression in f_expressions
)
if not debug_expressions_contain_visible_quotes:
# We don't want to toggle visible quotes in debug f-strings, as
# that would modify the AST
string = _toggle_fexpr_quotes(string, QUOTE)
# After quotes toggling, quotes in expressions won't be escaped
# because quotes can't be reused in f-strings. So we can simply
# let the escaping logic below run without knowing f-string
# expressions.
RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
naked_string = string[len(string_prefix) + 1 : -1]
naked_string = re.sub(
"(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
)
return naked_string
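            # Editor's worked example: make_naked('"a \\" b"', '') drops the
            # surrounding quotes and leaves the already-escaped quote alone,
            # because the odd backslash run fails the even-backslash
            # lookbehind; a bare quote, by contrast, would gain a backslash.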
# Holds the CustomSplit objects that will later be added to the custom
# split map.
custom_splits = []
# Temporary storage for the 'has_prefix' part of the CustomSplit objects.
prefix_tracker = []
# Sets the 'prefix' variable. This is the prefix that the final merged
# string will have.
next_str_idx = string_idx
prefix = ""
while (
not prefix
and is_valid_index(next_str_idx)
and LL[next_str_idx].type == token.STRING
):
prefix = get_string_prefix(LL[next_str_idx].value).lower()
next_str_idx += 1
# The next loop merges the string group. The final string will be
# contained in 'S'.
#
# The following convenience variables are used:
#
# S: string
# NS: naked string
# SS: next string
# NSS: naked next string
S = ""
NS = ""
num_of_strings = 0
next_str_idx = string_idx
while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
num_of_strings += 1
SS = LL[next_str_idx].value
next_prefix = get_string_prefix(SS).lower()
# If this is an f-string group but this substring is not prefixed
# with 'f'...
if "f" in prefix and "f" not in next_prefix:
# Then we must escape any braces contained in this substring.
SS = re.sub(r"(\{|\})", r"\1\1", SS)
NSS = make_naked(SS, next_prefix)
has_prefix = bool(next_prefix)
prefix_tracker.append(has_prefix)
S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
NS = make_naked(S, prefix)
next_str_idx += 1
# Take a note on the index of the non-STRING leaf.
non_string_idx = next_str_idx
S_leaf = Leaf(token.STRING, S)
if self.normalize_strings:
S_leaf.value = normalize_string_quotes(S_leaf.value)
# Fill the 'custom_splits' list with the appropriate CustomSplit objects.
temp_string = S_leaf.value[len(prefix) + 1 : -1]
for has_prefix in prefix_tracker:
mark_idx = temp_string.find(BREAK_MARK)
assert (
mark_idx >= 0
), "Logic error while filling the custom string breakpoint cache."
temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
if atom_node is not None:
# If not all children of the atom node are merged (this can happen
# when there is a standalone comment in the middle) ...
if non_string_idx - string_idx < len(atom_node.children):
# We need to replace the old STRING leaves with the new string leaf.
first_child_idx = LL[string_idx].remove()
for idx in range(string_idx + 1, non_string_idx):
LL[idx].remove()
if first_child_idx is not None:
atom_node.insert_child(first_child_idx, string_leaf)
else:
# Else replace the atom node with the new string leaf.
replace_child(atom_node, string_leaf)
self.add_custom_splits(string_leaf.value, custom_splits)
return num_of_strings, string_leaf
@staticmethod
def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
"""Validate (M)erge (S)tring (G)roup
Transform-time string validation logic for _merge_string_group(...).
Returns:
* Ok(None), if ALL validation checks (listed below) pass.
OR
* Err(CannotTransform), if any of the following are true:
- The target string group does not contain ANY stand-alone comments.
- The target string is not in a string group (i.e. it has no
adjacent strings).
- The string group has more than one inline comment.
- The string group has an inline comment that appears to be a pragma.
- The set of all string prefixes in the string group is of
length greater than one and is not equal to {"", "f"}.
- The string group consists of raw strings.
- The string group would merge f-strings with different quote types
and internal quotes.
- The string group is stringified type annotations. We don't want to
process stringified type annotations since pyright doesn't support
them spanning multiple string values. (NOTE: mypy, pytype, pyre do
support them, so we can change if pyright also gains support in the
future. See https://github.com/microsoft/pyright/issues/4359.)
"""
# We first check for "inner" stand-alone comments (i.e. stand-alone
# comments that have a string leaf before them AND after them).
for inc in [1, -1]:
i = string_idx
found_sa_comment = False
is_valid_index = is_valid_index_factory(line.leaves)
while is_valid_index(i) and line.leaves[i].type in [
token.STRING,
STANDALONE_COMMENT,
]:
if line.leaves[i].type == STANDALONE_COMMENT:
found_sa_comment = True
elif found_sa_comment:
return TErr(
"StringMerger does NOT merge string groups which contain "
"stand-alone comments."
)
i += inc
QUOTE = line.leaves[string_idx].value[-1]
num_of_inline_string_comments = 0
set_of_prefixes = set()
num_of_strings = 0
for leaf in line.leaves[string_idx:]:
if leaf.type != token.STRING:
# If the string group is trailed by a comma, we count the
# comments trailing the comma to be one of the string group's
# comments.
if leaf.type == token.COMMA and id(leaf) in line.comments:
num_of_inline_string_comments += 1
break
if has_triple_quotes(leaf.value):
return TErr("StringMerger does NOT merge multiline strings.")
num_of_strings += 1
prefix = get_string_prefix(leaf.value).lower()
if "r" in prefix:
return TErr("StringMerger does NOT merge raw strings.")
set_of_prefixes.add(prefix)
if (
"f" in prefix
and leaf.value[-1] != QUOTE
and (
"'" in leaf.value[len(prefix) + 1 : -1]
or '"' in leaf.value[len(prefix) + 1 : -1]
)
):
return TErr(
"StringMerger does NOT merge f-strings with different quote types"
" and internal quotes."
)
if id(leaf) in line.comments:
num_of_inline_string_comments += 1
if contains_pragma_comment(line.comments[id(leaf)]):
return TErr("Cannot merge strings which have pragma comments.")
if num_of_strings < 2:
return TErr(
f"Not enough strings to merge (num_of_strings={num_of_strings})."
)
if num_of_inline_string_comments > 1:
return TErr(
f"Too many inline string comments ({num_of_inline_string_comments})."
)
if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
return TErr(f"Too many different prefixes ({set_of_prefixes}).")
return Ok(None)
|
StringMerger
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_utils.py
|
{
"start": 119932,
"end": 193296
}
|
class ____(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
# Toggles whether to assert that `torch.get_default_dtype()` returns
# `torch.float` when `setUp` and `tearDown` are called.
_default_dtype_check_enabled: bool = False
# Always use difflib to print diffs on multi-line equality.
# Undocumented feature in unittest
_diffThreshold = sys.maxsize
maxDiff = None
# checker to early terminate test suite if unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
# CUDA device side error will cause subsequence test cases to fail.
# stop entire test suite if catches RuntimeError during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError as rte:
print("TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure", file=sys.stderr)
print(str(rte), file=sys.stderr)
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest', methodName='runTest'):
# methodName is the correct name in unittest; testslide uses keyword arguments.
# So we accept both to 1) not break BC and 2) support testslide.
if methodName != "runTest":
method_name = methodName
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if TEST_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
if PRINT_REPRO_ON_FAILURE:
try:
def _get_rel_test_path(abs_test_path):
# Attempt to get relative path based on the "test" dir.
# In CI, the working dir is not guaranteed to be the base repo dir so
# we can't just compute relative path from that.
parts = Path(abs_test_path).parts
for i, part in enumerate(parts):
if part == "test":
base_dir = os.path.join(*parts[:i]) if i > 0 else ''
return os.path.relpath(abs_test_path, start=base_dir)
# Can't determine containing dir; just return the test filename.
# The path isn't strictly correct but it's arguably better than nothing.
return os.path.split(abs_test_path)[1]
abs_test_path = inspect.getfile(type(self))
test_filename = _get_rel_test_path(abs_test_path)
class_name = type(self).__name__
test_run_cmd = f"python {test_filename} {class_name}.{method_name}"
env_var_prefix = TestEnvironment.repro_env_var_prefix()
repro_parts = [env_var_prefix, test_run_cmd]
self.wrap_with_policy(
method_name,
lambda repro_parts=repro_parts: print_repro_on_failure(repro_parts))
except Exception as e:
# Don't fail entirely if we can't get the test filename
log.info("could not print repro string", extra=str(e)) # type: ignore[arg-type]
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def _remove_ansi_escape(self, input):
# 7-bit C1 ANSI sequences
ansi_escape = re.compile(r'''
\x1B # ESC
(?: # 7-bit C1 Fe (except CSI)
[@-Z\\-_]
| # or [ for CSI, followed by a control sequence
\[
[0-?]* # Parameter bytes
[ -/]* # Intermediate bytes
[@-~] # Final byte
)
''', re.VERBOSE)
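        # Editor's example: the pattern strips CSI sequences, e.g.
        # _remove_ansi_escape('\x1b[31mred\x1b[0m') -> 'red'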
return ansi_escape.sub('', input)
def remove_comment_lines(self, input_string):
lines = input_string.split('\n')
filtered_lines = [line for line in lines if not line.strip().startswith('#')]
return '\n'.join(filtered_lines)
def remove_empty_lines(self, input_string):
lines = input_string.split('\n')
filtered_lines = [line for line in lines if line.strip() != '']
return '\n'.join(filtered_lines)
# When ignore_comments is True, lines that start with '#' (after stripping) are ignored.
def assertExpectedInline(self, actual, expect, skip=0, ignore_comments=False, ignore_empty_lines=False):
actual = actual if isinstance(actual, str) else str(actual)
actual = self._remove_ansi_escape(actual)
expect = self._remove_ansi_escape(expect)
if ignore_comments:
actual = self.remove_comment_lines(actual)
expect = self.remove_comment_lines(expect)
if ignore_empty_lines:
actual = self.remove_empty_lines(actual)
expect = self.remove_empty_lines(expect)
return super().assertExpectedInline(actual if isinstance(actual, str) else str(actual), expect, skip + 1)
# Munges exceptions that internally contain stack traces, using munge_exc
def assertExpectedInlineMunged(
self, exc_type, callable, expect, *, skip=0, suppress_suffix=True, post_munge=None,
):
try:
callable()
except exc_type as e:
munged = munge_exc(e, suppress_suffix=suppress_suffix, skip=skip + 1)
if post_munge:
munged = post_munge(munged)
self.assertExpectedInline(
munged, expect, skip=skip + 1
)
return
self.fail(msg="Did not raise when expected to")
def assertLogs(self, logger=None, level=None):
if logger is None:
logger = logging.getLogger("torch")
return super().assertLogs(logger, level)
def assertNoLogs(self, logger=None, level=None):
if logger is None:
logger = logging.getLogger("torch")
return super().assertNoLogs(logger, level)
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
# NOTE: Python Exceptions (e.g., unittest.Skip) keep objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
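    # Editor's note: a policy is any zero-argument callable returning a fresh
    # context manager, so a hypothetical caller could write, e.g.:
    #
    #     self.wrap_with_policy('test_foo',
    #                           lambda: unittest.mock.patch.dict(os.environ, {}))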
def _dynamo_test_key(self):
return f"{self.__class__.__name__}.{self._testMethodName}"
def compile_fn(self, fn, backend, nopython):
# Allows subclasses to control compilation
return torch._dynamo.optimize(backend, nopython=nopython)(fn)
def _run_custom(self, result=None):
using_unittest = isinstance(result, unittest.TestResult)
super_run = super().run
test_cls = super_run.__self__ # type: ignore[attr-defined]
# Are we compiling?
compiled = TEST_WITH_TORCHDYNAMO or TEST_WITH_AOT_EAGER or TEST_WITH_TORCHINDUCTOR
# Is the class strict and compiling?
strict_default = False
should_reset_dynamo = False
# We disable size_asserts for test_ops since some tests fail
# due to a mismatch of strides returned from eager vs. meta kernels.
# Only some of the ops have this problem, but since tests in
# test_ops.py are parametrized, it's hard to do this specifically
# for the affected ops.
# It's not a big deal since these problems are captured by
# test_torchinductor_opinfo.py as well.
should_disable_size_asserts = False
if compiled:
try:
path = inspect.getfile(type(test_cls))
full_path = os.path.abspath(path)
match = re.match(r".*/test/(.*).py", full_path)
if match is not None:
filename = match.group(1)
if TEST_WITH_TORCHINDUCTOR:
from .dynamo_test_failures import FIXME_inductor_non_strict
strict_default = filename not in FIXME_inductor_non_strict
should_reset_dynamo = True
if filename == "test_ops":
should_disable_size_asserts = True
else:
strict_default = True
# inspect.getfile can fail with these
except (OSError, TypeError):
pass
if "STRICT_DEFAULT" in os.environ:
if os.environ["STRICT_DEFAULT"] == "1":
strict_default = True
strict_mode = False
if compiled:
test_method = getattr(self, self._testMethodName)
if hasattr(test_method, "dynamo_strict"):
strict_mode = test_method.dynamo_strict
elif hasattr(test_cls, "dynamo_strict"):
strict_mode = test_cls.dynamo_strict
else:
strict_mode = strict_default
nopython = getattr(test_cls, "dynamo_strict_nopython", False) and compiled
if strict_mode or should_reset_dynamo:
torch._dynamo.reset()
torch.compiler.set_stance("default")
# TODO: Remove this; this is grandfathered in because we suppressed errors
# on the test suite previously.
# When strict mode is False, suppress_errors is True.
if compiled:
suppress_errors = not strict_mode
else:
suppress_errors = torch._dynamo.config.suppress_errors
maybe_disable_size_asserts = (
torch._inductor.config.patch(size_asserts=False)
if should_disable_size_asserts
else contextlib.nullcontext()
)
with unittest.mock.patch("torch._dynamo.config.suppress_errors", suppress_errors), maybe_disable_size_asserts:
if TEST_WITH_AOT_EAGER:
super_run = self.compile_fn(super_run, "aot_eager_decomp_partition", nopython)
elif TEST_WITH_TORCHDYNAMO or TEST_WITH_TORCHINDUCTOR:
if TEST_WITH_TORCHINDUCTOR:
super_run = self.compile_fn(super_run, "inductor", nopython)
else:
# Assume eager-generated GraphModules will not error out.
# If one does, it is probably a Dynamo bug!
super_run = self.compile_fn(super_run, "eager_noexcept", nopython)
key = self._dynamo_test_key()
def expect_failure(f, file_name):
@wraps(f)
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except BaseException as e: # noqa: B036
self.skipTest(e)
raise RuntimeError(f"Unexpected success, please remove `{file_name}`")
return wrapper
if TEST_WITH_TORCHINDUCTOR:
subdir = "test/inductor_expected_failures"
from .dynamo_test_failures import inductor_expected_failures as expected_failures
else:
subdir = "test/dynamo_expected_failures"
from .dynamo_test_failures import dynamo_expected_failures as expected_failures
if key in expected_failures:
method = getattr(self, self._testMethodName)
file_name = os.path.join(subdir, key)
setattr(self, self._testMethodName, expect_failure(method, file_name))
def ignore_failure(f, file_name):
@wraps(f)
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except BaseException as e: # noqa: B036
self.skipTest(e)
method = getattr(self, self._testMethodName)
if getattr(method, "__unittest_expecting_failure__", False):
self.skipTest("unexpected success")
else:
self.skipTest(f"This test passed, maybe we can remove `{file_name}`")
return wrapper
if TEST_WITH_TORCHINDUCTOR:
subdir = "test/inductor_skips"
from .dynamo_test_failures import inductor_skips as skips
else:
subdir = "test/dynamo_skips"
from .dynamo_test_failures import dynamo_skips as skips
if key in skips:
method = getattr(self, self._testMethodName)
file_name = os.path.join(subdir, key)
setattr(self, self._testMethodName, ignore_failure(method, file_name))
from .dynamo_test_failures import compiled_autograd_skips
if torch._dynamo.config.compiled_autograd and key in compiled_autograd_skips:
# Still run the test, but with compiled autograd disabled
super_run = runWithoutCompiledAutograd()(super_run)
super_run(result=result)
if strict_mode or should_reset_dynamo:
torch._dynamo.reset()
elif torch._dynamo.config.compiled_autograd:
torch._dynamo.compiled_autograd.reset()
# Early terminate test if necessary. If using pytest, use the -x flag instead
if using_unittest and self._should_stop_test_suite():
if result.wasSuccessful():
case = TestCase()
if TEST_SAVE_XML is not None:
# This is a bit hacky: XMLRunner modifies the expected type from TestCase to _TestInfo
# Create dummy TestInfo to record results correctly
from xmlrunner.result import _TestInfo # type: ignore[import]
case = _TestInfo(result, case)
case.output = _TestInfo.ERROR # type: ignore[attr-defined]
case.elapsed_time = 0.0 # type: ignore[attr-defined]
case.test_description = "TestSuiteEarlyFailure" # type: ignore[attr-defined]
# This shouldn't really happen, but if it does, add a fake failure
# For more details see https://github.com/pytorch/pytorch/issues/71973
result.failures.append((case, "TestSuite execution was aborted early"))
assert result.wasSuccessful() is False
result.stop()
def run(self, result=None):
with contextlib.ExitStack() as stack:
if TEST_WITH_CROSSREF:
stack.enter_context(CrossRefMode())
self._run_custom(
result=result,
)
def setUp(self):
check_if_enable(self)
set_rng_seed()
# Save global check sparse tensor invariants state that can be
# restored from tearDown:
self._check_invariants = torch.sparse.check_sparse_tensor_invariants.is_enabled()
# Enable invariant checks for all sparse tensors constructions
# including the unsafe ones. If this is not desired for some
# test case, use check_invariants=False optional argument to
# sparse tensor constructors or
# @torch.sparse.check_sparse_tensor_invariants(False)
# decorator to disable the invariant checks.
torch.sparse.check_sparse_tensor_invariants.enable()
if self._default_dtype_check_enabled:
assert torch.get_default_dtype() == torch.float
# attempt to reset some global state at the end of the test
self._prev_grad_state = torch.is_grad_enabled()
def tearDown(self):
# There exists test cases that override TestCase.setUp
# definition, so we cannot assume that _check_invariants
# attribute is defined in general.
if hasattr(self, '_check_invariants'):
# Restore the global check sparse tensor invariants state
if self._check_invariants:
torch.sparse.check_sparse_tensor_invariants.enable()
else:
torch.sparse.check_sparse_tensor_invariants.disable()
if self._default_dtype_check_enabled:
assert torch.get_default_dtype() == torch.float
# attribute may not be defined, per above
if hasattr(self, '_prev_grad_state'):
torch.set_grad_enabled(self._prev_grad_state)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
where counts[i + 1] is interpreted as the number of specified
elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
should be minimal preference for rows with a given number of
elements. To achieve this, the sampling method is built up
using a sawteeth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
that has equal number of all possible column counts per row.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
that is able to hold a sequence of sawteeth and so-called
final correction, while the external part of the window is
filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols, (nnz, n_rows, n_cols)
def sawteeth(n, m):
# return the total number of counts in the sequence of
# sawteeth where n and m define a window in (n_rows+1,
# n_cols+1) rectangle where the sequence of sawteeth
# perfectly fit.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
k = math.isqrt(2 * r)
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
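    # Editor's sanity note: since crow_indices is the cumulative sum of the
    # per-row counts, the result always satisfies ci[0] == 0, ci[-1] == nnz
    # and len(ci) == n_rows + 1 for any valid (n_rows, n_cols, nnz).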
def genSparseCompressedTensor(self, size, nnz, *, layout, device, dtype, index_dtype, blocksize=(), dense_dims=0):
from operator import mul
from functools import reduce
sparse_dim = 2
assert all(size[d] > 0 for d in range(len(size))) or nnz == 0, 'invalid arguments'
assert len(size) >= sparse_dim
if blocksize:
assert len(blocksize) == 2, (size, blocksize)
assert size[-2 - dense_dims] % blocksize[0] == 0, (size, blocksize)
assert size[-1 - dense_dims] % blocksize[1] == 0, (size, blocksize)
blocksize0, blocksize1 = blocksize
else:
blocksize0 = blocksize1 = 1
size = tuple(size)
dense_size = size[(len(size) - dense_dims):]
def random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz):
compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, dtype=index_dtype)
plain_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_compressed_dims):
count = compressed_indices[i + 1] - compressed_indices[i]
plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort(
torch.randperm(n_plain_dims, dtype=index_dtype, device=device)[:count])
low = -1 if dtype != torch.uint8 else 0
high = 1 if dtype != torch.uint8 else 2
values = make_tensor((nnz,) + blocksize + dense_size, device=device, dtype=dtype, low=low, high=high)
return values, compressed_indices, plain_indices
batch_shape = size[:-2 - dense_dims]
n_batch = reduce(mul, batch_shape, 1)
if layout in {torch.sparse_csr, torch.sparse_bsr}:
n_compressed_dims, n_plain_dims = size[-2 - dense_dims] // blocksize0, size[-1 - dense_dims] // blocksize1
else:
n_compressed_dims, n_plain_dims = size[-1 - dense_dims] // blocksize1, size[-2 - dense_dims] // blocksize0
blocknnz = nnz // (blocksize0 * blocksize1)
sparse_tensors = [random_sparse_compressed(n_compressed_dims, n_plain_dims, blocknnz) for _ in range(n_batch)]
sparse_tensors_it = map(list, zip(*sparse_tensors, strict=True))
values = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, blocknnz, *blocksize, *dense_size)
compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
plain_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
return torch.sparse_compressed_tensor(compressed_indices, plain_indices,
values, size=size, dtype=dtype, layout=layout, device=device)
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0):
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csr, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=dense_dims)
def genSparseCSCTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0):
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csc, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=0)
def genSparseBSRTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0):
assert len(blocksize) == 2
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsr, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims)
def genSparseBSCTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0):
assert len(blocksize) == 2
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsc, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
# Assert we are not given an impossible combination where the sparse dims
# have empty numel but nnz > 0 would still require indices to contain values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
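            # Duplicate roughly half of the generated index columns so the
            # COO tensor contains repeated indices and stays uncoalesced.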
i1 = i[:, :(nnz // 2), ...]
i2 = i[:, :((nnz + 1) // 2), ...]
i = torch.cat([i1, i2], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()._coalesced_(False)
return x, x._indices().clone(), x._values().clone()
def generate_simple_inputs(self, layout,
device=None,
dtype=None,
index_dtype=None,
pin_memory=None,
members_pin_memory=None,
enable_batch=True,
enable_hybrid=True,
enable_zero_sized=True,
enable_non_contiguous_indices=True,
enable_non_contiguous_values=True,
enable_batch_variable_nse=False,
output_tensor=True,
patterns=None):
"""Generator of simple inputs for tensor constructors of the given layout.
The generated tensor inputs have the following properties:
- tensor shapes are minimal but not trivial
- tensor values are sorted sequences for COO and CSR formats, e.g. [1, 2, 3, 4]
- the generated tensors represent the same mathematical tensor for all layouts
- the generated tensors include regular, zero-sized and, optionally, batched and/or hybrid tensors.
- the generated tensors include contiguous or non-contiguous tensors both in indices and values
If output_tensor is True, yield tensors with the given
layout. Otherwise, yield inputs to the corresponding tensor
constructors:
- sparse compressed input is defined as
(compressed_indices, plain_indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype,
pin_memory=pin_memory)
- sparse COO input is defined as
(indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype, pin_memory=pin_memory)
- strided input is defined as
(values,), dict(device=device, dtype=dtype)
"""
if index_dtype is None:
index_dtype = torch.int64
is_compressed_sparse_layout = layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}
if output_tensor:
for args, kwargs in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype,
pin_memory=pin_memory,
enable_batch=enable_batch, enable_hybrid=enable_hybrid,
enable_zero_sized=enable_zero_sized,
enable_non_contiguous_indices=enable_non_contiguous_indices,
enable_non_contiguous_values=enable_non_contiguous_values,
enable_batch_variable_nse=enable_batch_variable_nse,
output_tensor=False):
if members_pin_memory:
args = tuple(a.pin_memory() for a in args)
if layout is torch.strided:
assert len(args) == 1
size = kwargs.pop('size', None) # to ensure that a zero-sized tensor has the desired shape
assert size is not None
if pin_memory:
yield args[0].reshape(size).pin_memory()
else:
yield args[0].reshape(size)
elif layout is torch.sparse_coo:
yield torch.sparse_coo_tensor(*args, **kwargs)
elif is_compressed_sparse_layout:
kwargs.update(layout=layout)
yield torch.sparse_compressed_tensor(*args, **kwargs)
else:
assert 0 # unreachable
return
def get_blockpattern(pattern, blocksize):
basesize = pattern.shape
assert basesize[0] % blocksize[0] == 0, (basesize, blocksize)
assert basesize[1] % blocksize[1] == 0, (basesize, blocksize)
blockpattern = pattern.reshape(-1,
blocksize[0],
basesize[1] // blocksize[1],
blocksize[1]).transpose(-3, -2).any(-1).any(-1)
block_ids = torch.arange(1, blockpattern.numel() + 1).reshape(blockpattern.shape)
return (blockpattern != 0) * block_ids
def get_sparse_data(pattern):
basesize = pattern.shape
assert len(basesize) == 2, basesize # pattern is expected to be a matrix
# We cannot use `torch.sparse_xyz_tensor(pattern)` to
# compute the sparse layout indices and values because
# generate_simple_inputs is used to generate the inputs to
# test `torch.sparse_xyz_tensor` factory functions, so
# we'll compute the indices and values independently of
# the factory functions.
indices = torch.where(pattern != 0)
coo_indices = torch.stack(indices)
crow_indices = torch.zeros(basesize[0] + 1, dtype=torch.int64)
crow_indices[1:] = torch.cumsum(coo_indices[0].bincount(minlength=basesize[0]), 0)
col_indices = coo_indices[1]
strided_values = torch.zeros(basesize, dtype=torch.int64)
# the property of `values == range(1, 1+nnz)` is used in
# get_sparse_data_with_block to relate BSR and BSC values,
# so, don't change the following line:
values = torch.arange(1, 1 + len(indices[0]), dtype=torch.int64)
strided_values[indices] = values
indices_T = torch.where(pattern.transpose(0, 1) != 0)
coo_indices_T = torch.stack(indices_T)
ccol_indices = torch.zeros(basesize[1] + 1, dtype=torch.int64)
ccol_indices[1:] = torch.cumsum(coo_indices_T[0].bincount(minlength=basesize[1]), 0)
row_indices = coo_indices_T[1]
csc_values = strided_values.transpose(0, 1)[indices_T]
return {torch.sparse_coo: (coo_indices, values),
torch.sparse_csr: (crow_indices, col_indices, values),
torch.sparse_csc: (ccol_indices, row_indices, csc_values),
torch.strided: (strided_values,)}
def get_sparse_data_with_block(pattern, blocksize):
nonblock_data = get_sparse_data(pattern)
blockpattern = get_blockpattern(pattern, blocksize)
block_data = get_sparse_data(blockpattern)
strided_values = nonblock_data[torch.strided][0]
block_indices = block_data[torch.sparse_coo][0]
bsr_values = torch.stack([strided_values[bi * blocksize[0]:(bi + 1) * blocksize[0],
bj * blocksize[1]:(bj + 1) * blocksize[1]]
for bi, bj in block_indices.transpose(0, 1)])
# here we use the property `values == range(1, 1+nnz)` and
# `values` relation to `csc_values` (see get_sparse_data)
# to get BSC blocks via reordering the BSR blocks:
bsc_values = bsr_values[block_data[torch.sparse_csc][2] - 1]
return {torch.sparse_bsr: (*block_data[torch.sparse_csr][:2], bsr_values),
torch.sparse_bsc: (*block_data[torch.sparse_csc][:2], bsc_values),
**nonblock_data}
def get_batch_sparse_data(pattern, blocksize):
size = pattern.shape
if len(size) <= 2: # non-batch
return get_sparse_data_with_block(pattern, blocksize)
# batch data is created recursively:
batch_data = {} # type: ignore[var-annotated]
for i, item in enumerate(pattern):
for layout, d in get_batch_sparse_data(item, blocksize).items():
target = batch_data.get(layout)
if layout is torch.sparse_coo:
# a "batch COO" means a COO with the leading
# sparse dimensions interpreted as batch
# dimensions
ext_coo_indices1 = torch.cat((torch.full((1, len(d[1])), i, dtype=torch.int64), d[0]))
if target is None:
target = batch_data[layout] = (ext_coo_indices1, d[1])
else:
target[0].set_(torch.cat((target[0], ext_coo_indices1), 1)) # type: ignore[call-overload]
target[1].set_(torch.cat((target[1], d[1])))
else:
if target is None:
target = batch_data[layout] = tuple(d[j].unsqueeze(0) for j in range(len(d)))
else:
for j in range(len(d)):
target[j].set_(torch.cat((target[j], d[j].unsqueeze(0)))) # type: ignore[call-overload]
return batch_data
def generate_values(base, densesize):
"""Generates a tensor of shape densesize with values equal to
base + i_1 * 10^0 + ... + i_d * 10^{d - 1}
at indices i_1, ..., i_d (with 0 <= i_j < densesize[j] for any 1 <= j <=
len(densesize))
This mapping produces unique values as long as
densesize[i] < 10 for all i in range(len(densesize)).
"""
if not densesize:
return base
if not isinstance(base, int) and base.ndim > 0:
return torch.stack([generate_values(b, densesize) for b in base])
if base == 0:
return torch.zeros(densesize, dtype=torch.int64)
r = torch.arange(densesize[0], dtype=torch.int64)
for i, d in enumerate(densesize[1:]):
y = torch.arange(d, dtype=torch.int64) * (10 ** (i + 1))
r = r[..., None] + y[None, ...]
r.add_(base)
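        # Editor's worked example: generate_values(3, (2, 2)) returns
        # tensor([[ 3, 13], [ 4, 14]]), i.e. base + i_1 * 10**0 + i_2 * 10**1.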
return r
if patterns is None:
# A pattern is a 3-tuple with the following items:
#
# - a list of integers with a depth of two or more. The
# integers define the sparsity patterns of the generated
# inputs: zero values correspond to unspecified
# elements/blocks, and non-zero values to the specified
# elements.
#
# For debugging convenience, the elements with the same
# value typically belong to the same block. However, it
# is not a hard requirement: as long as the shape of a
# pattern is divisible by the block sizes, the pattern will
# be a valid one.
#
# If the depth of the list is larger than two, inputs
# with batch dimensions will be generated.
#
# - a list of 2-tuples of block sizes, used to generate
# BSR/BSC tensors with various block size parameters
#
# - a list of tuples of dense dimensions, used to generate
# hybrid tensors with various dense dimensions
#
patterns = [
# a simple 3 x 2 tensor: non-hybrid, hybrid with 1 and 2 dense dimensions
([[1, 2, 0],
[1, 0, 3]], [(2, 1), (1, 3)], [(), (2,), (4, 5)]),
# 2 x 3 batch of 3 x 2 tensors: non-hybrid and hybrid with 2 dense dimensions
([[[[1, 2, 0],
[1, 0, 3]],
[[1, 2, 3],
[1, 0, 0]],
[[1, 0, 0],
[1, 2, 3]]],
[[[0, 2, 0],
[1, 2, 3]],
[[1, 0, 3],
[1, 2, 0]],
[[1, 2, 3],
[0, 2, 0]]]], [(2, 1), (2, 3)], [(), (2,)]),
# tensor with non-trivial blocksize
([[0, 1, 0, 2, 0, 2],
[0, 1, 0, 0, 2, 0],
[3, 3, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 5, 0, 6, 6, 6],
[5, 0, 5, 6, 6, 6],
[0, 0, 0, 0, 8, 8],
[7, 7, 7, 0, 8, 8]], [(2, 3)], [(), (4, 5)]),
# batch tensor with variable NSE
# Requires https://github.com/pytorch/pytorch/pull/84843 or similar.
([[[1, 2],
[3, 4]],
[[1, 0],
[0, 0]]], [(1, 1)], ([()] if enable_batch_variable_nse else []))]
def non_contiguous_copy(t, dim=-1, offset=0):
# return a copy of t that is non-contiguous along the
# given dimension and with the given storage offset
self.assertTrue(t.is_contiguous())
if dim < 0:
dim = dim + t.ndim
assert dim >= 0 and dim < t.ndim
step = max(2, offset + 1)
tmp = torch.zeros((*t.shape[:dim], t.shape[dim] * step, *t.shape[dim + 1:]), dtype=t.dtype, device=t.device)
dim_slices = (*((slice(None),) * dim), slice(offset, None, step))
r = tmp[dim_slices].copy_(t)
self.assertFalse(r.is_contiguous())
self.assertEqual(t, r)
return r
# the main loop of the method:
for pattern, blocksizes, densesizes in patterns:
if not enable_hybrid:
densesizes = [s for s in densesizes if not s]
if not (densesizes and blocksizes):
continue
pattern = torch.tensor(pattern, dtype=torch.int64)
if not enable_batch and pattern.ndim > 2:
continue
for blocksize in blocksizes:
data = get_batch_sparse_data(pattern, blocksize)[layout]
for densesize in densesizes:
indices = [a.to(device=device, dtype=index_dtype) for a in data[:-1]]
values = generate_values(data[-1], densesize).to(device=device, dtype=dtype)
kwargs = dict(device=device, dtype=dtype, size=pattern.shape + densesize)
if pin_memory is not None:
kwargs.update(pin_memory=pin_memory)
yield (*indices, values), kwargs.copy()
if enable_non_contiguous_indices and pattern.ndim > 2:
# sparse compressed indices can be sliced only along batch dimensions
for (dim, offset) in {(0, 1), (-2, 0)}:
indices_copy = [non_contiguous_copy(a, dim=dim, offset=offset) for a in indices]
yield (*indices_copy, values), kwargs.copy()
if enable_non_contiguous_values:
values_copy = non_contiguous_copy(values, dim=-1, offset=1)
yield (*indices_copy, values_copy), kwargs.copy()
if enable_non_contiguous_values:
values_copy = non_contiguous_copy(values, dim=-1, offset=1)
yield (*indices, values_copy), kwargs.copy()
# zero-sized tensor inputs, non-batch, non-hybrid/hybrid
if enable_zero_sized:
for basesize, blocksizes, densesizes in [
((2, 0), [(1, 2)], [(), (2,), (2, 3)] if enable_hybrid else [()]),
((0, 2), [(1, 2), (2, 1), (3, 2)], [()]),
((0, 0), [(1, 2)], [()]),
]:
for blocksize in blocksizes:
for densesize in densesizes: # type: ignore[attr-defined]
if layout == torch.strided:
indices = () # type: ignore[assignment]
values = torch.empty((basesize + densesize), device=device, dtype=dtype)
elif layout == torch.sparse_coo:
indices = (torch.empty(len(basesize), 0, device=device, dtype=index_dtype),) # type: ignore[assignment]
values = torch.empty((0, *densesize), device=device, dtype=dtype)
elif layout == torch.sparse_csr:
crow_indices = torch.tensor([0] * (basesize[0] + 1), device=device, dtype=index_dtype)
col_indices = torch.empty(0, device=device, dtype=index_dtype)
indices = (crow_indices, col_indices) # type: ignore[assignment]
values = torch.empty((0, *densesize), device=device, dtype=dtype)
elif layout == torch.sparse_csc:
ccol_indices = torch.tensor([0] * (basesize[1] + 1), device=device, dtype=index_dtype)
row_indices = torch.empty(0, device=device, dtype=index_dtype)
indices = (ccol_indices, row_indices) # type: ignore[assignment]
values = torch.empty((0, *densesize), device=device, dtype=dtype)
elif layout == torch.sparse_bsr:
crow_indices = torch.tensor([0] * (basesize[0] // blocksize[0] + 1), device=device, dtype=index_dtype)
col_indices = torch.empty(0, device=device, dtype=index_dtype)
indices = (crow_indices, col_indices) # type: ignore[assignment]
values = torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype)
elif layout == torch.sparse_bsc:
ccol_indices = torch.tensor([0] * (basesize[1] // blocksize[1] + 1), device=device, dtype=index_dtype)
row_indices = torch.empty(0, device=device, dtype=index_dtype)
indices = (ccol_indices, row_indices) # type: ignore[assignment]
values = torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype)
else:
assert 0 # unreachable
kwargs = dict(device=device, dtype=dtype, size=basesize + densesize)
if pin_memory is not None:
kwargs.update(pin_memory=pin_memory)
yield (*indices, values), kwargs
def safeToDense(self, t):
# coalesce is only implemented for COO
if t.layout == torch.sparse_coo:
t = t.coalesce()
return t.to_dense()
# Compares a torch function with a reference function for a given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
numpy_sample = sample_input.numpy()
n_inp, n_args, n_kwargs = numpy_sample.input, numpy_sample.args, numpy_sample.kwargs
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False, **kwargs)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means the test is written wrongly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def assertEqualBroadcasting(self, x, y, *args, **kwargs) -> None:
r"""Tests if tensor x equals to y, if y to be broadcast to x.shape.
"""
if not isinstance(y, Iterable):
# int, float, etc. or different shape tensors
y = torch.ones_like(x) * y
if not isinstance(y, torch.Tensor):
# iterable, but not a tensor
y = torch.ones_like(x) * torch.tensor(y)
return self.assertEqual(x, y, *args, **kwargs)
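# Usage sketch (hypothetical call from a TestCase subclass): passing a scalar,
# e.g. self.assertEqualBroadcasting(torch.zeros(2, 3), 0), broadcasts 0 to a
# (2, 3) tensor of zeros before delegating to assertEqual.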
def assertEqual(
self,
x,
y,
msg: Optional[Union[str, Callable[[str], str]]] = None,
*,
atol: Optional[float] = None,
rtol: Optional[float] = None,
equal_nan=True,
exact_dtype=True,
# TODO: default this to True
exact_device=False,
exact_layout=False,
exact_stride=False,
exact_is_coalesced=False
):
# Hide this function from `pytest`'s traceback
__tracebackhide__ = True
# numpy's dtypes are a superset of what PyTorch supports. In case we encounter an unsupported dtype, we fall
# back to an elementwise comparison. Note that this has to happen here and not for example in
# `TensorOrArrayPair`, since at that stage we can no longer split the array into its elements and perform
# multiple comparisons.
if any(
isinstance(input, np.ndarray) and not has_corresponding_torch_dtype(input.dtype) for input in (x, y)
):
def to_list(input):
return input.tolist() if isinstance(input, (torch.Tensor, np.ndarray)) else list(input)
x = to_list(x)
y = to_list(y)
# When comparing a sequence of numbers to a tensor, we need to convert the sequence to a tensor here.
# Otherwise, the pair origination of `are_equal` will fail, because the sequence is recognized as container
# that should be checked elementwise while the tensor is not.
elif isinstance(x, torch.Tensor) and isinstance(y, Sequence):
y = torch.as_tensor(y, dtype=x.dtype, device=x.device)
elif isinstance(x, Sequence) and isinstance(y, torch.Tensor):
x = torch.as_tensor(x, dtype=y.dtype, device=y.device)
# unbind NSTs to compare them; don't do this for NJTs
if isinstance(x, torch.Tensor) and x.is_nested and x.layout == torch.strided:
x = x.unbind()
if isinstance(y, torch.Tensor) and y.is_nested and y.layout == torch.strided:
y = y.unbind()
error_metas = not_close_error_metas(
x,
y,
pair_types=(
NonePair,
RelaxedBooleanPair,
RelaxedNumberPair,
TensorOrArrayPair,
TypedStoragePair,
StringPair,
SetPair,
TypePair,
ObjectPair,
),
sequence_types=(
Sequence,
Sequential,
ModuleList,
ParameterList,
ScriptList,
torch.utils.data.dataset.Subset,
),
mapping_types=(Mapping, ModuleDict, ParameterDict, ScriptDict),
rtol=rtol,
rtol_override=self.rel_tol,
atol=atol,
atol_override=self.precision,
equal_nan=equal_nan,
check_device=exact_device,
check_dtype=exact_dtype,
check_layout=exact_layout,
check_stride=exact_stride,
check_is_coalesced=exact_is_coalesced,
)
if error_metas:
# See [ErrorMeta Cycles]
error_metas = [error_metas] # type: ignore[list-item]
# TODO: compose all metas into one AssertionError
raise error_metas.pop()[0].to_error( # type: ignore[index]
# This emulates unittest.TestCase's behavior if a custom message passed and
# TestCase.longMessage (https://docs.python.org/3/library/unittest.html#unittest.TestCase.longMessage)
# is True (default)
(lambda generated_msg: f"{generated_msg}\n{msg}") if isinstance(msg, str) and self.longMessage else msg
)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
# This API is used simulate deprecated x.type() is y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr, arg-type]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
# Verifies that an exception with the type expected_exception and message
# matching the regular expression defined by expected_regex is thrown.
# If the test is instantiated for a non-native device type (like XLA)
# then the message is not validated.
# Checks whether the test is instantiated for a device type by testing
# if the test class has defined the device_type attribute and,
# if so, tests whether the instantiated device type is native or not
if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES and self.device_type != "mps": # type: ignore[attr-defined]
# empty string matches any string
expected_regex = ''
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined, arg-type]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# Verifies that no unraisable exceptions are raised by callable. Unlike regular
# exceptions, these do not actually propagate to the caller and are
# suppressed. We must test for them specially.
def assertNoUnraisable(self, callable, *args, **kwargs):
raised = None
def record_unraisable(unraisable):
nonlocal raised
raised = unraisable
# Disable GC when running the callable to prevent spurious flakiness
# from unlucky GCs inside the callable
prev = gc.isenabled()
gc.disable()
try:
with unittest.mock.patch("sys.unraisablehook", record_unraisable):
callable(*args, **kwargs)
finally:
if prev:
gc.enable()
self.assertIsNone(raised)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any(type(w.message) is category for w in ws))
self.assertTrue(
any(re.match(pattern, str(w.message)) for w in ws),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
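# Usage sketch (the warning text and callee are hypothetical):
# with self.assertWarnsOnceRegex(UserWarning, "is deprecated"):
#     some_fn_that_warns_once()  # assumed to emit TORCH_WARN_ONCE
# fails if no UserWarning matching the regex is raised inside the block.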
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__) # type: ignore[type-var]
expected_file = os.path.join(os.path.dirname(test_file), # type: ignore[type-var, arg-type]
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = f" ({subname})"
expected_file += ".expect"
expected = None
def accept_output(update_type):
print(f"Accepting {update_type} for {munged_id}{subname_output}:\n\n{s}")
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
s_tag = re.sub(r'(producer_version): "[0-9.]*"',
r'\1: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except OSError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
f"I got this output for {munged_id}{subname_output}:\n\n{s}\n\n"
"No expect file exists; to accept the current output, run:\n"
f"python {__main__.__file__} {munged_id} --accept") from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
f'producer_version: "{torch.onnx.producer_version}"'
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
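# Worked example (illustrative numbers): assertGreaterAlmostEqual(0.999, 1.0,
# delta=0.01) passes because diff == 0.001 <= delta; with places=2 instead,
# round(0.001, 2) == 0.0, so it passes as well.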
def assertAtenOp(self, onnx_model, operator, overload_name=""):
all_aten_nodes = [p for p in onnx_model.graph.node
if p.op_type == "ATen" and p.domain == "org.pytorch.aten"]
self.assertTrue(all_aten_nodes)
for op in all_aten_nodes:
attrs = {attr.name: attr.s.decode() for attr in op.attribute}
if attrs.get("operator") == operator:
break
self.assertEqual(attrs["operator"], operator) # type: ignore[possibly-undefined]
self.assertEqual(attrs.get("overload_name", ""), overload_name)
def check_nondeterministic_alert(self, fn, caller_name, should_alert=True):
'''Checks that an operation produces a nondeterministic alert when
expected while `torch.use_deterministic_algorithms(True)` is set.
Args:
fn (callable): Function to check for a nondeterministic alert
caller_name (str): Name of the operation that produces the
nondeterministic alert. This name is expected to appear at the
beginning of the error/warning message.
should_alert (bool, optional): If True, then the check will only pass
if calling `fn` produces a nondeterministic error/warning with the
expected message. If False, then the check will only pass if
calling `fn` does not produce an error. Default: `True`.
'''
alert_message = '^' + caller_name + ' does not have a deterministic implementation, but you set'
# Check that errors are thrown correctly
with DeterministicGuard(True):
if should_alert:
with self.assertRaisesRegex(
RuntimeError,
alert_message,
msg='expected a non-deterministic error, but it was not raised'):
fn()
else:
# If a nondeterministic error is not expected, make sure
# that it is not raised
try:
fn()
except RuntimeError as e:
if 'does not have a deterministic implementation' in str(e):
self.fail(
'did not expect non-deterministic error message, '
+ 'but got one anyway: "' + str(e) + '"')
# Reraise exceptions unrelated to nondeterminism
raise
# Check that warnings are thrown correctly
with DeterministicGuard(True, warn_only=True):
if should_alert:
with self.assertWarnsRegex(
UserWarning,
alert_message):
fn()
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fn()
for warning in w:
if isinstance(warning, UserWarning):
self.assertTrue(re.search(alert_message, str(warning)) is None)
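# Usage sketch (op name and lambda are illustrative): a test might call
# self.check_nondeterministic_alert(lambda: some_op(t), 'some_op', should_alert=True)
# where 'some_op' must match the start of the nondeterministic alert message.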
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove CI flag since this is a wrapped test process.
# CI flag should be set in the parent process only.
env.pop("CI", None)
env.pop("TEST_SHOWLOCALS", None)
_stdout, stderr = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
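# Usage sketch (hypothetical snippet): stderr = TestCase.runWithPytorchAPIUsageStderr(
# "import torch; torch.ones(1)") returns whatever API-usage logging the child
# interpreter printed to stderr, decoded as ASCII.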
def _attempt_load_from_subprocess(
self,
file: pathlib.Path,
import_string: str,
expected_failure_message: Optional[str] = None
) -> None:
"""
Attempts weights_only `torch.load` in a subprocess. This is used to test that
weights_only `torch.load` works as expected without global imports.
Args:
file (pathlib.Path): The path to the checkpoint to load.
import_string (str): import string to add to the script
expected_failure_message (str, optional): The expected failure message if the
checkpoint fails to load. If None, the test will pass
"""
script = f"import torch;{import_string}torch.load(r'{file}', weights_only=True)"
cm = (
self.assertRaisesRegex(RuntimeError, re.escape(expected_failure_message))
if expected_failure_message else contextlib.nullcontext()
)
with cm:
try:
subprocess.check_output(
[sys.executable, "-c", script],
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as e:
raise RuntimeError(e.output.decode("utf-8")) from None
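# Usage sketch (checkpoint path and message are hypothetical):
# self._attempt_load_from_subprocess(pathlib.Path("ckpt.pt"), "import numpy;",
# expected_failure_message="some expected error") runs weights_only torch.load
# in a fresh interpreter, so globals imported by the test itself cannot mask
# a failure.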
|
TestCase
|
python
|
qdrant__qdrant-client
|
qdrant_client/grpc/collections_service_pb2_grpc.py
|
{
"start": 12768,
"end": 21674
}
|
class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/Get',
collections__pb2.GetCollectionInfoRequest.SerializeToString,
collections__pb2.GetCollectionInfoResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/List',
collections__pb2.ListCollectionsRequest.SerializeToString,
collections__pb2.ListCollectionsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/Create',
collections__pb2.CreateCollection.SerializeToString,
collections__pb2.CollectionOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/Update',
collections__pb2.UpdateCollection.SerializeToString,
collections__pb2.CollectionOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/Delete',
collections__pb2.DeleteCollection.SerializeToString,
collections__pb2.CollectionOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateAliases(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/UpdateAliases',
collections__pb2.ChangeAliases.SerializeToString,
collections__pb2.CollectionOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListCollectionAliases(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/ListCollectionAliases',
collections__pb2.ListCollectionAliasesRequest.SerializeToString,
collections__pb2.ListAliasesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListAliases(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/ListAliases',
collections__pb2.ListAliasesRequest.SerializeToString,
collections__pb2.ListAliasesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CollectionClusterInfo(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/CollectionClusterInfo',
collections__pb2.CollectionClusterInfoRequest.SerializeToString,
collections__pb2.CollectionClusterInfoResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CollectionExists(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/CollectionExists',
collections__pb2.CollectionExistsRequest.SerializeToString,
collections__pb2.CollectionExistsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateCollectionClusterSetup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/UpdateCollectionClusterSetup',
collections__pb2.UpdateCollectionClusterSetupRequest.SerializeToString,
collections__pb2.UpdateCollectionClusterSetupResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateShardKey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/CreateShardKey',
collections__pb2.CreateShardKeyRequest.SerializeToString,
collections__pb2.CreateShardKeyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteShardKey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Collections/DeleteShardKey',
collections__pb2.DeleteShardKeyRequest.SerializeToString,
collections__pb2.DeleteShardKeyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
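# Usage sketch (hedged; the endpoint address is an assumption, not part of the
# generated stubs):
# resp = Collections.List(collections__pb2.ListCollectionsRequest(),
#                         target="localhost:6334", insecure=True)
# Each static method wraps grpc.experimental.unary_unary with the matching
# request serializer and response deserializer for one RPC.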
|
Collections
|
python
|
apache__airflow
|
providers/fab/src/airflow/providers/fab/auth_manager/api_fastapi/datamodels/roles.py
|
{
"start": 1040,
"end": 1141
}
|
class ____(BaseModel):
"""Outgoing representation of a resource."""
name: str
|
ResourceResponse
|
python
|
huggingface__transformers
|
src/transformers/models/esm/modeling_esm.py
|
{
"start": 17475,
"end": 19666
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = EsmAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = EsmAttention(config, is_cross_attention=True)
self.intermediate = EsmIntermediate(config)
self.output = EsmOutput(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
**kwargs: Unpack[TransformersKwargs],
):
attention_output = self.attention(
hidden_states,
attention_mask=attention_mask,
**kwargs,
)
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise AttributeError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated"
" with cross-attention layers by setting `config.add_cross_attention=True`"
)
attention_output = self.crossattention(
attention_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
**kwargs,
)
layer_output = self.feed_forward_chunk(attention_output)
return layer_output
def feed_forward_chunk(self, attention_output):
attention_output_ln = self.LayerNorm(attention_output)
intermediate_output = self.intermediate(attention_output_ln)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
EsmLayer
|
python
|
django-guardian__django-guardian
|
example_project_custom_group/articles/migrations/0002_initial.py
|
{
"start": 159,
"end": 4302
}
|
class ____(migrations.Migration):
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
("contenttypes", "0002_remove_content_type_name"),
("articles", "0001_initial"),
("core", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name="biguserobjectpermission",
name="user",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name="biggroupobjectpermission",
name="content_type",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="contenttypes.contenttype"),
),
migrations.AddField(
model_name="biggroupobjectpermission",
name="group",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="core.customgroup"),
),
migrations.AddField(
model_name="biggroupobjectpermission",
name="permission",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.permission"),
),
migrations.AddField(
model_name="articleuserobjectpermission",
name="content_object",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="articles.article"),
),
migrations.AddField(
model_name="articleuserobjectpermission",
name="permission",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.permission"),
),
migrations.AddField(
model_name="articleuserobjectpermission",
name="user",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name="articlegroupobjectpermission",
name="content_object",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="articles.article"),
),
migrations.AddField(
model_name="articlegroupobjectpermission",
name="group",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.group"),
),
migrations.AddField(
model_name="articlegroupobjectpermission",
name="permission",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.permission"),
),
migrations.AddIndex(
model_name="biguserobjectpermission",
index=models.Index(fields=["content_type", "object_pk"], name="articles_bi_content_3fff51_idx"),
),
migrations.AddIndex(
model_name="biguserobjectpermission",
index=models.Index(fields=["content_type", "object_pk", "user"], name="articles_bi_content_a2ac4b_idx"),
),
migrations.AlterUniqueTogether(
name="biguserobjectpermission",
unique_together={("user", "permission", "object_pk")},
),
migrations.AddIndex(
model_name="biggroupobjectpermission",
index=models.Index(fields=["content_type", "object_pk"], name="articles_bi_content_824ecd_idx"),
),
migrations.AddIndex(
model_name="biggroupobjectpermission",
index=models.Index(fields=["content_type", "object_pk", "group"], name="articles_bi_content_61c3ef_idx"),
),
migrations.AlterUniqueTogether(
name="biggroupobjectpermission",
unique_together={("group", "permission", "object_pk")},
),
migrations.AlterUniqueTogether(
name="articleuserobjectpermission",
unique_together={("user", "permission", "content_object")},
),
migrations.AlterUniqueTogether(
name="articlegroupobjectpermission",
unique_together={("group", "permission", "content_object")},
),
]
|
Migration
|
python
|
huggingface__transformers
|
src/transformers/models/maskformer/modeling_maskformer_swin.py
|
{
"start": 18071,
"end": 18614
}
|
class ____(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->MaskFormerSwin
|
MaskFormerSwinSelfOutput
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/connect/select/tutorial001_py310.py
|
{
"start": 222,
"end": 2153
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_heroes():
with Session(engine) as session:
statement = select(Hero, Team).where(Hero.team_id == Team.id)
results = session.exec(statement)
for hero, team in results:
print("Hero:", hero, "Team:", team)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
getsentry__sentry
|
tests/sentry/integrations/github/test_client.py
|
{
"start": 51926,
"end": 68440
}
|
class ____(GitHubClientFileBlameBase):
"""
Tests that get_blame_for_files handles the GraphQL response correctly
"""
def setUp(self) -> None:
super().setUp()
self.file1 = SourceLineInfo(
path="src/sentry/integrations/github/client_1.py",
lineno=10,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
self.file2 = SourceLineInfo(
path="src/sentry/integrations/github/client_1.py",
lineno=20,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
self.file3 = SourceLineInfo(
path="src/sentry/integrations/github/client_2.py",
lineno=20,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
self.data = {
"repository0": {
"ref0": {
"target": {
"blame0": {
"ranges": [
{
"commit": {
"oid": "987",
"author": {
"name": "not this one",
"email": "blah@example.com",
},
"message": "bye",
"committedDate": "2022-01-01T00:00:00Z",
},
"startingLine": 1,
"endingLine": 9,
"age": 0,
},
{
"commit": {
"oid": "123",
"author": {"name": "foo1", "email": "foo1@example.com"},
"message": "hello",
"committedDate": "2022-01-01T00:00:00Z",
},
"startingLine": 10,
"endingLine": 15,
"age": 0,
},
{
"commit": {
"oid": "456",
"author": {"name": "foo2", "email": "foo2@example.com"},
"message": "hello",
"committedDate": "2021-01-01T00:00:00Z",
},
"startingLine": 16,
"endingLine": 21,
"age": 0,
},
]
},
"blame1": {
"ranges": [
{
"commit": {
"oid": "789",
"author": {"name": "foo3", "email": "foo3@example.com"},
"message": "hello",
"committedDate": "2020-01-01T00:00:00Z",
},
"startingLine": 20,
"endingLine": 20,
"age": 0,
}
]
},
}
}
}
}
@mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_get_blame_for_files_full_response(self, get_jwt) -> None:
"""
Tests that the correct commits are selected from the blame range when a full response is returned.
"""
responses.add(
method=responses.POST,
url="https://api.github.com/graphql",
json={
"data": self.data,
},
content_type="application/json",
)
response = self.github_client.get_blame_for_files(
[self.file1, self.file2, self.file3], extra={}
)
self.assertEqual(
response,
[
FileBlameInfo(
**asdict(self.file1),
commit=CommitInfo(
commitId="123",
commitAuthorName="foo1",
commitAuthorEmail="foo1@example.com",
commitMessage="hello",
committedDate=datetime(2022, 1, 1, 0, 0, 0, tzinfo=UTC),
),
),
FileBlameInfo(
**asdict(self.file2),
commit=CommitInfo(
commitId="456",
commitAuthorName="foo2",
commitAuthorEmail="foo2@example.com",
commitMessage="hello",
committedDate=datetime(2021, 1, 1, 0, 0, 0, tzinfo=UTC),
),
),
FileBlameInfo(
**asdict(self.file3),
commit=CommitInfo(
commitId="789",
commitAuthorName="foo3",
commitAuthorEmail="foo3@example.com",
commitMessage="hello",
committedDate=datetime(2020, 1, 1, 0, 0, 0, tzinfo=UTC),
),
),
],
)
@mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_get_cached_blame_for_files_full_response(self, get_jwt) -> None:
"""
Tests that the cached commits are returned with full response
"""
responses.add(
method=responses.POST,
url="https://api.github.com/graphql",
json={
"data": self.data,
},
content_type="application/json",
)
query, variables = create_blame_query(
generate_file_path_mapping([self.file1, self.file2, self.file3]), extra={}
)
cache_key = self.github_client.get_cache_key(
"/graphql", orjson.dumps({"query": query, "variables": variables}).decode()
)
assert self.github_client.check_cache(cache_key) is None
response = self.github_client.get_blame_for_files(
[self.file1, self.file2, self.file3], extra={}
)
self.assertEqual(
response,
[
FileBlameInfo(
**asdict(self.file1),
commit=CommitInfo(
commitId="123",
commitAuthorName="foo1",
commitAuthorEmail="foo1@example.com",
commitMessage="hello",
committedDate=datetime(2022, 1, 1, 0, 0, 0, tzinfo=UTC),
),
),
FileBlameInfo(
**asdict(self.file2),
commit=CommitInfo(
commitId="456",
commitAuthorName="foo2",
commitAuthorEmail="foo2@example.com",
commitMessage="hello",
committedDate=datetime(2021, 1, 1, 0, 0, 0, tzinfo=UTC),
),
),
FileBlameInfo(
**asdict(self.file3),
commit=CommitInfo(
commitId="789",
commitAuthorName="foo3",
commitAuthorEmail="foo3@example.com",
commitMessage="hello",
committedDate=datetime(2020, 1, 1, 0, 0, 0, tzinfo=UTC),
),
),
],
)
cached_1 = self.github_client.check_cache(cache_key)
assert isinstance(cached_1, dict)
assert cached_1["data"] == self.data
# Calling a second time should work
response = self.github_client.get_blame_for_files(
[self.file1, self.file2, self.file3], extra={}
)
cached_2 = self.github_client.check_cache(cache_key)
assert isinstance(cached_2, dict)
assert cached_2["data"] == self.data
# Calling again after the cache has been cleared should still work
cache.delete(cache_key)
response = self.github_client.get_blame_for_files(
[self.file1, self.file2, self.file3], extra={}
)
cached_3 = self.github_client.check_cache(cache_key)
assert isinstance(cached_3, dict)
assert cached_3["data"] == self.data
assert (
self.github_client.get_blame_for_files([self.file1, self.file2], extra={}) != response
)
@mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_get_blame_for_files_response_partial_data(self, get_jwt) -> None:
"""
Tests that commits are still returned when some data is missing from the response
"""
file1 = SourceLineInfo(
path="src/sentry/integrations/github/client_1.py",
lineno=10,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file2 = SourceLineInfo(
path="src/sentry/integrations/github/client_2.py",
lineno=15,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file3 = SourceLineInfo(
path="src/sentry/integrations/github/client.py",
lineno=20,
ref="master",
repo=self.repo_2,
code_mapping=None, # type: ignore[arg-type]
)
file4 = SourceLineInfo(
path="src/sentry/integrations/github/client.py",
lineno=25,
ref="master",
repo=self.repo_3,
code_mapping=None, # type: ignore[arg-type]
)
data = {
"repository0": {
"ref0": {
"target": {
"blame0": {
"ranges": [
{
"commit": {
"oid": "123",
"author": None,
"message": None,
"committedDate": "2022-01-01T00:00:00Z",
},
"startingLine": 10,
"endingLine": 15,
"age": 0,
}
]
},
"blame1": {"ranges": []},
}
}
},
"repository1": {"ref0": None},
"repository2": None,
}
responses.add(
method=responses.POST,
url="https://api.github.com/graphql",
json={
"data": data,
},
content_type="application/json",
)
response = self.github_client.get_blame_for_files([file1, file2, file3, file4], extra={})
self.assertEqual(
response,
[
FileBlameInfo(
**asdict(file1),
commit=CommitInfo(
commitId="123",
commitAuthorName=None,
commitAuthorEmail=None,
commitMessage=None,
committedDate=datetime(2022, 1, 1, 0, 0, 0, tzinfo=UTC),
),
),
],
)
@mock.patch("sentry.integrations.github.client.logger.error")
@mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_get_blame_for_files_invalid_commit(self, get_jwt, mock_logger_error) -> None:
"""
Tests commits that have invalid data are skipped and logged
"""
file1 = SourceLineInfo(
path="src/sentry/integrations/github/client_1.py",
lineno=10,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file2 = SourceLineInfo(
path="src/sentry/integrations/github/client_2.py",
lineno=15,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
data = {
"repository0": {
"ref0": {
"target": {
"blame0": {
"ranges": [
{
"commit": {
"oid": None,
"author": None,
"message": None,
"committedDate": "2022-01-01T00:00:00Z",
},
"startingLine": 10,
"endingLine": 15,
"age": 0,
}
]
},
"blame1": {
"ranges": [
{
"commit": {
"oid": "123",
"author": None,
"message": None,
"committedDate": None,
},
"startingLine": 10,
"endingLine": 15,
"age": 0,
}
]
},
}
}
},
}
responses.add(
method=responses.POST,
url="https://api.github.com/graphql",
json={
"data": data,
},
content_type="application/json",
)
response = self.github_client.get_blame_for_files([file1, file2], extra={})
self.assertEqual(response, [])
mock_logger_error.assert_has_calls(
[
mock.call(
"get_blame_for_files.extract_commits_from_blame.invalid_commit_response",
extra={
"provider": "github",
"organization_integration_id": self.github_client.org_integration_id,
"file_lineno": file1.lineno,
"file_path": file1.path,
"branch_name": file1.ref,
"repo_name": file1.repo.name,
"reason": "Missing property oid",
},
),
mock.call(
"get_blame_for_files.extract_commits_from_blame.invalid_commit_response",
extra={
"provider": "github",
"organization_integration_id": self.github_client.org_integration_id,
"file_lineno": file2.lineno,
"file_path": file2.path,
"branch_name": file2.ref,
"repo_name": file2.repo.name,
"commit_id": "123",
"reason": "Missing property committedDate",
},
),
]
)
|
GitHubClientFileBlameResponseTest
|
python
|
eriklindernoren__ML-From-Scratch
|
mlfromscratch/supervised_learning/regression.py
|
{
"start": 9136,
"end": 10499
}
|
class ____(Regression):
""" Regression where a combination of l1 and l2 regularization are used. The
ratio of their contributions are set with the 'l1_ratio' parameter.
Parameters:
-----------
degree: int
The degree of the polynomial that the independent variable X will be transformed to.
reg_factor: float
The factor that will determine the amount of regularization and feature
shrinkage.
l1_ratio: float
Weighs the contribution of l1 and l2 regularization.
n_iterations: int
The number of training iterations the algorithm will tune the weights for.
learning_rate: float
The step length that will be used when updating the weights.
"""
def __init__(self, degree=1, reg_factor=0.05, l1_ratio=0.5, n_iterations=3000,
learning_rate=0.01):
self.degree = degree
self.regularization = l1_l2_regularization(alpha=reg_factor, l1_ratio=l1_ratio)
super(ElasticNet, self).__init__(n_iterations,
learning_rate)
def fit(self, X, y):
X = normalize(polynomial_features(X, degree=self.degree))
super(ElasticNet, self).fit(X, y)
def predict(self, X):
X = normalize(polynomial_features(X, degree=self.degree))
return super(ElasticNet, self).predict(X)
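# Usage sketch (synthetic data; assumes numpy is available as np in this module):
# X = np.random.rand(100, 1)
# y = 2 * X[:, 0] + 0.1 * np.random.randn(100)
# model = ElasticNet(degree=2, reg_factor=0.05, l1_ratio=0.5)
# model.fit(X, y)
# y_pred = model.predict(X)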
|
ElasticNet
|
python
|
doocs__leetcode
|
solution/1000-1099/1031.Maximum Sum of Two Non-Overlapping Subarrays/Solution.py
|
{
"start": 0,
"end": 586
}
|
class ____:
def maxSumTwoNoOverlap(self, nums: List[int], firstLen: int, secondLen: int) -> int:
n = len(nums)
s = list(accumulate(nums, initial=0))
ans = t = 0
i = firstLen
while i + secondLen - 1 < n:
t = max(t, s[i] - s[i - firstLen])
ans = max(ans, t + s[i + secondLen] - s[i])
i += 1
t = 0
i = secondLen
while i + firstLen - 1 < n:
t = max(t, s[i] - s[i - secondLen])
ans = max(ans, t + s[i + firstLen] - s[i])
i += 1
return ans
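# Usage sketch: Solution().maxSumTwoNoOverlap([0, 6, 5, 2, 2, 5, 1, 9, 4], 1, 2)
# returns 20, from the non-overlapping subarrays [9] (length 1) and [6, 5]
# (length 2) found via the prefix sums s.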
|
Solution
|
python
|
explosion__spaCy
|
spacy/lang/id/__init__.py
|
{
"start": 302,
"end": 593
}
|
class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
suffixes = TOKENIZER_SUFFIXES
infixes = TOKENIZER_INFIXES
syntax_iterators = SYNTAX_ITERATORS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
|
IndonesianDefaults
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/layers/pooling.py
|
{
"start": 23904,
"end": 27848
}
|
class ____(Layer):
"""Pooling layer for arbitrary pooling functions, for 3D inputs.
This class only exists for code reuse. It will never be an exposed API.
Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool3d`.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)`
while `channels_first` corresponds to
inputs with shape `(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(Pooling3D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def call(self, inputs):
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
if self.data_format == 'channels_first':
# TF does not support `channels_first` with 3D pooling operations,
# so we must handle this case manually.
# TODO(fchollet): remove this when TF pooling is feature-complete.
inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper())
if self.data_format == 'channels_first':
outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
len_dim1 = input_shape[2]
len_dim2 = input_shape[3]
len_dim3 = input_shape[4]
else:
len_dim1 = input_shape[1]
len_dim2 = input_shape[2]
len_dim3 = input_shape[3]
len_dim1 = conv_utils.conv_output_length(len_dim1, self.pool_size[0],
self.padding, self.strides[0])
len_dim2 = conv_utils.conv_output_length(len_dim2, self.pool_size[1],
self.padding, self.strides[1])
len_dim3 = conv_utils.conv_output_length(len_dim3, self.pool_size[2],
self.padding, self.strides[2])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3])
else:
return tensor_shape.TensorShape(
[input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]])
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(Pooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
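# Worked shape example (illustrative): with channels_last input of shape
# (batch, 8, 8, 8, C), pool_size=strides=(2, 2, 2) and 'valid' padding,
# conv_output_length yields (8 - 2) // 2 + 1 = 4 per spatial dimension, so
# the output shape is (batch, 4, 4, 4, C).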
|
Pooling3D
|
python
|
realpython__materials
|
python-serialize/http-payload/django-rest-api/rest_api/apps.py
|
{
"start": 36,
"end": 147
}
|
class ____(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "rest_api"
|
RestApiConfig
|
python
|
ray-project__ray
|
rllib/examples/offline_rl/classes/image_offline_prelearner.py
|
{
"start": 601,
"end": 3734
}
|
class ____(OfflinePreLearner):
"""This class transforms image data to `MultiAgentBatch`es.
While the `ImageOfflineData` class transforms raw image
bytes to `numpy` arrays, this class wraps those data in
`SingleAgentEpisode` instances, maps them through the learner connector
pipeline, and finally outputs a `MultiAgentBatch` ready for
training in RLlib's `Learner`s.
Note, the basic transformation from images to `SingleAgentEpisode`
instances creates synthetic data that does not rely on any MDP
and therefore no agent can learn from it. However, this example
should show how to transform data into this form through
overriding the `OfflinePreLearner`.
"""
def __init__(
self,
config: "AlgorithmConfig",
learner: Union[Learner, List[ActorHandle]],
spaces: Optional[Tuple[gym.Space, gym.Space]] = None,
module_spec: Optional[MultiRLModuleSpec] = None,
module_state: Optional[Dict[ModuleID, Any]] = None,
**kwargs: Dict[str, Any],
):
# Set up necessary class attributes.
self.config = config
self.action_space = spaces[1]
self.observation_space = spaces[0]
self.input_read_episodes = self.config.input_read_episodes
self.input_read_sample_batches = self.config.input_read_sample_batches
self._policies_to_train = "default_policy"
self._is_multi_agent = False
# Build the `MultiRLModule` needed for the learner connector.
self._module = module_spec.build()
# Build the learner connector pipeline.
self._learner_connector = self.config.build_learner_connector(
input_observation_space=self.observation_space,
input_action_space=self.action_space,
)
@override(OfflinePreLearner)
@staticmethod
def _map_to_episodes(
is_multi_agent: bool,
batch: Dict[str, Union[list, np.ndarray]],
schema: Dict[str, str] = SCHEMA,
to_numpy: bool = False,
input_compress_columns: Optional[List[str]] = None,
observation_space: gym.Space = None,
action_space: gym.Space = None,
**kwargs: Dict[str, Any],
) -> Dict[str, List[EpisodeType]]:
# Define a container for the episodes.
episodes = []
# Batches come in as numpy arrays.
for i, obs in enumerate(batch["array"]):
# Construct your episode.
episode = SingleAgentEpisode(
id_=uuid.uuid4().hex,
observations=[obs, obs],
observation_space=observation_space,
actions=[action_space.sample()],
action_space=action_space,
rewards=[random.random()],
terminated=True,
truncated=False,
len_lookback_buffer=0,
t_started=0,
)
# Numpy'ize, if necessary.
if to_numpy:
episode.to_numpy()
# Store the episode in the container.
episodes.append(episode)
return {"episodes": episodes}
|
ImageOfflinePreLearner
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/parametertree/parameterTypes/basetypes.py
|
{
"start": 9380,
"end": 10482
}
|
class ____(Parameter):
"""
Parameter representing a single value.
This parameter is backed by :class:`~pyqtgraph.parametertree.parameterTypes.basetypes.WidgetParameterItem`
to represent the following parameter names through various subclasses:
- 'int'
- 'float'
- 'bool'
- 'str'
- 'color'
- 'colormap'
"""
@property
def itemClass(self):
from .bool import BoolParameterItem
from .numeric import NumericParameterItem
from .str import StrParameterItem
return {
'bool': BoolParameterItem,
'int': NumericParameterItem,
'float': NumericParameterItem,
'str': StrParameterItem,
}[self.opts['type']]
def _interpretValue(self, v):
typ = self.opts['type']
def _missing_interp(v):
# Assume raw interpretation
return v
# Or:
# raise TypeError(f'No interpreter found for type {typ}')
interpreter = getattr(builtins, typ, _missing_interp)
return interpreter(v)
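# Illustration (hypothetical values): for opts['type'] == 'int',
# _interpretValue('3') resolves builtins.int and returns 3; for a type name
# with no matching builtin (e.g. 'color'), _missing_interp returns the value
# unchanged.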
|
SimpleParameter
|
python
|
scipy__scipy
|
scipy/io/_harwell_boeing/_fortran_format_parser.py
|
{
"start": 2427,
"end": 4632
}
|
class ____:
@classmethod
def from_number(cls, n, min=None):
"""Given a float number, returns a "reasonable" ExpFormat instance to
represent any number between -n and n.
Parameters
----------
n : float
max number one wants to be able to represent
min : int
minimum number of characters to use for the format
Returns
-------
res : ExpFormat
ExpFormat instance with reasonable (see Notes) computed width
Notes
-----
Reasonable should be understood as the minimal string length necessary
to avoid losing precision.
"""
# len of one number in exp format: sign + 1|0 + "." +
# number of digit for fractional part + 'E' + sign of exponent +
# len of exponent
finfo = np.finfo(n.dtype)
# Number of digits for fractional part
n_prec = finfo.precision + 1
# Number of digits for exponential part
n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))
width = 1 + 1 + n_prec + 1 + n_exp + 1
if n < 0:
width += 1
repeat = int(np.floor(80 / width))
return cls(width, n_prec, min, repeat=repeat)
def __init__(self, width, significand, min=None, repeat=None):
"""\
Parameters
----------
width : int
number of characters taken by the string (includes space).
"""
self.width = width
self.significand = significand
self.repeat = repeat
self.min = min
def __repr__(self):
r = "ExpFormat("
if self.repeat:
r += f"{self.repeat}"
r += f"E{self.width}.{self.significand}"
if self.min:
r += f"E{self.min}"
return r + ")"
@property
def fortran_format(self):
r = "("
if self.repeat:
r += f"{self.repeat}"
r += f"E{self.width}.{self.significand}"
if self.min:
r += f"E{self.min}"
return r + ")"
@property
def python_format(self):
return "%" + str(self.width-1) + "." + str(self.significand) + "E"
|
ExpFormat
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/recursiveTypeAlias8.py
|
{
"start": 371,
"end": 806
}
|
class ____(TypedDict):
options: list[CorD]
type: int
CorD = ClassC | ClassD
def foo(a: CorD):
reveal_type(a, expected_text="ClassC | ClassD")
options = a.get("options", [])
reveal_type(options, expected_text="list[ClassC | ClassD] | Any | list[Any]")
for option in options:
reveal_type(option, expected_text="ClassC | ClassD | Any")
reveal_type(option["type"], expected_text="int | Any")
|
ClassD
|
python
|
ray-project__ray
|
rllib/policy/tf_mixins.py
|
{
"start": 2530,
"end": 4735
}
|
class ____:
"""Mixin for TFPolicy that adds entropy coeff decay."""
def __init__(self, entropy_coeff, entropy_coeff_schedule):
self._entropy_coeff_schedule = None
if entropy_coeff_schedule is None:
self.entropy_coeff = get_variable(
entropy_coeff, framework="tf", tf_name="entropy_coeff", trainable=False
)
else:
# Allows for custom schedule similar to lr_schedule format
if isinstance(entropy_coeff_schedule, list):
self._entropy_coeff_schedule = PiecewiseSchedule(
entropy_coeff_schedule,
outside_value=entropy_coeff_schedule[-1][-1],
framework=None,
)
else:
# Implements previous version but enforces outside_value
self._entropy_coeff_schedule = PiecewiseSchedule(
[[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],
outside_value=0.0,
framework=None,
)
self.entropy_coeff = get_variable(
self._entropy_coeff_schedule.value(0),
framework="tf",
tf_name="entropy_coeff",
trainable=False,
)
if self.framework == "tf":
self._entropy_coeff_placeholder = tf1.placeholder(
dtype=tf.float32, name="entropy_coeff"
)
self._entropy_coeff_update = self.entropy_coeff.assign(
self._entropy_coeff_placeholder, read_value=False
)
def on_global_var_update(self, global_vars):
super().on_global_var_update(global_vars)
if self._entropy_coeff_schedule is not None:
new_val = self._entropy_coeff_schedule.value(global_vars["timestep"])
if self.framework == "tf":
self.get_session().run(
self._entropy_coeff_update,
feed_dict={self._entropy_coeff_placeholder: new_val},
)
else:
self.entropy_coeff.assign(new_val, read_value=False)
@OldAPIStack
|
EntropyCoeffSchedule
|
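For context on the schedule format used above, here is a minimal self-contained sketch of the piecewise-linear lookup that RLlib's `PiecewiseSchedule` is assumed to perform (illustrative only, not the actual class):

```python
def piecewise_value(endpoints, t, outside_value):
    # Linearly interpolate between consecutive (timestep, value) pairs;
    # fall back to outside_value past the last endpoint.
    for (l, lv), (r, rv) in zip(endpoints[:-1], endpoints[1:]):
        if l <= t < r:
            alpha = (t - l) / (r - l)
            return lv + alpha * (rv - lv)
    return outside_value

# e.g. entropy_coeff_schedule=[[0, 0.01], [100000, 0.0]]
print(piecewise_value([[0, 0.01], [100000, 0.0]], 50000, 0.0))  # 0.005
```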
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/scene/camera/_up.py
|
{
"start": 235,
"end": 2878
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.camera"
_path_str = "layout.scene.camera.up"
_valid_props = {"x", "y", "z"}
@property
def x(self):
"""
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def z(self):
"""
The 'z' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def _prop_descriptions(self):
return """\
x
y
z
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Up object
Sets the (x,y,z) components of the 'up' camera vector. This
vector determines the up direction of this scene with respect
to the page. The default is *{x: 0, y: 0, z: 1}* which means
that the z axis points up.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.camera.Up`
x
y
z
Returns
-------
Up
"""
super().__init__("up")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.camera.Up
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.camera.Up`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._set_property("z", arg, z)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Up
|
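A hedged usage sketch of this object via plotly's magic-underscore syntax (`scene_camera_up` reaches `layout.scene.camera.up`); the figure data is arbitrary:

```python
import plotly.graph_objects as go

# Make the y axis point up instead of the default z axis.
fig = go.Figure(go.Scatter3d(x=[0, 1], y=[0, 1], z=[0, 1], mode="markers"))
fig.update_layout(scene_camera_up=dict(x=0, y=1, z=0))
fig.show()
```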
python
|
wepe__MachineLearning
|
DeepLearning Tutorials/cnn_LeNet/convolutional_mlp_commentate.py
|
{
"start": 980,
"end": 3363
}
|
class ____(object):
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        #assert condition: if condition is True, execution continues; if False, the program aborts
        #image_shape[1] and filter_shape[1] are both "num input feature maps"; they must be equal.
assert image_shape[1] == filter_shape[1]
self.input = input
        #each hidden-layer neuron (i.e. pixel) has num input feature maps * filter height * filter width connections to the previous layer,
        #which can be computed with numpy.prod(filter_shape[1:])
fan_in = numpy.prod(filter_shape[1:])
        #each neuron in the lower layer receives gradients from: "num output feature maps * filter height * filter width" / pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
        #with fan_in and fan_out computed above, plug them into the formula to randomly initialize W, the linear convolution kernel
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
        #the bias b is a 1D vector, with one bias per output feature map;
        #the number of output feature maps equals the number of filters, so it is initialized with filter_shape[0], i.e. number of filters
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
        #convolve the input image with the filters via conv.conv2d
        #note that b is not added and no sigmoid is applied right after the convolution; this is a simplification
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
        #maxpooling: the max-subsampling step
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
        #add the bias, then map through tanh to obtain the final output of the convolution + subsampling layer
        #since b is a 1D vector, the dimshuffle function reshapes it; e.g. if b is (10,),
        #then b.dimshuffle('x', 0, 'x', 'x') reshapes it to (1,10,1,1)
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        #parameters of the convolution + subsampling layer
self.params = [self.W, self.b]
"""
注释:
这是定义隐藏层的类,首先明确:隐藏层的输入即input,输出即隐藏层的神经元个数。输入层与隐藏层是全连接的。
假设输入是n_in维的向量(也可以说时n_in个神经元),隐藏层有n_out个神经元,则因为是全连接,
一共有n_in*n_out个权重,故W大小时(n_in,n_out),n_in行n_out列,每一列对应隐藏层的每一个神经元的连接权重。
b是偏置,隐藏层有n_out个神经元,故b时n_out维向量。
rng即随机数生成器,numpy.random.RandomState,用于初始化W。
input训练模型所用到的所有输入,并不是MLP的输入层,MLP的输入层的神经元个数时n_in,而这里的参数input大小是(n_example,n_in),每一行一个样本,即每一行作为MLP的输入层。
activation:激活函数,这里定义为函数tanh
"""
|
LeNetConvPoolLayer
|
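The Glorot-style bound computed in the constructor is easy to verify in plain NumPy; a standalone sketch with illustrative shapes (20 filters of 5x5 over 1 input map, 2x2 pooling):

```python
import numpy as np

filter_shape = (20, 1, 5, 5)   # (num filters, input maps, height, width)
poolsize = (2, 2)
fan_in = np.prod(filter_shape[1:])                                          # 25
fan_out = filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize)   # 125.0
W_bound = np.sqrt(6.0 / (fan_in + fan_out))
rng = np.random.RandomState(1234)
W = rng.uniform(low=-W_bound, high=W_bound, size=filter_shape)
print(round(W_bound, 4), W.shape)  # 0.2 (20, 1, 5, 5)
```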
python
|
tiangolo__fastapi
|
docs_src/body/tutorial003.py
|
{
"start": 87,
"end": 362
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
app = FastAPI()
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
return {"item_id": item_id, **item.dict()}
|
Item
|
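A hypothetical client call against the endpoint above, assuming the app is served at localhost:8000 (`httpx` is just one convenient client):

```python
import httpx

# Optional fields may be omitted from the request body.
resp = httpx.put(
    "http://localhost:8000/items/5",
    json={"name": "Hammer", "price": 9.99},
)
print(resp.json())
# {'item_id': 5, 'name': 'Hammer', 'description': None, 'price': 9.99, 'tax': None}
```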
python
|
allegroai__clearml
|
clearml/utilities/proxy_object.py
|
{
"start": 11077,
"end": 14687
}
|
class ____(type):
# This metaclass is heavily inspired by the Object Proxying python recipe
# (http://code.activestate.com/recipes/496741/). It adds special methods
# to the wrapper class so it can proxy the wrapped class. In addition, it
# adds a field __overrides__ in the wrapper class dictionary, containing
# all attributes decorated to be overridden.
_special_names = [
"__abs__",
"__add__",
"__and__",
"__call__",
"__cmp__",
"__coerce__",
"__contains__",
"__delitem__",
"__delslice__",
"__div__",
"__divmod__",
"__eq__",
"__float__",
"__floordiv__",
"__ge__",
"__getitem__",
"__getslice__",
"__gt__",
"__hash__",
"__hex__",
"__iadd__",
"__iand__",
"__idiv__",
"__idivmod__",
"__ifloordiv__",
"__ilshift__",
"__imod__",
"__imul__",
"__int__",
"__invert__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__iter__",
"__itruediv__",
"__ixor__",
"__le__",
"__len__",
"__long__",
"__lshift__",
"__lt__",
"__mod__",
"__mul__",
"__ne__",
"__neg__",
"__oct__",
"__or__",
"__pos__",
"__pow__",
"__radd__",
"__rand__",
"__rdiv__",
"__rdivmod__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rfloorfiv__",
"__rlshift__",
"__rmod__",
"__rmul__",
"__ror__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__setitem__",
"__setslice__",
"__sub__",
"__truediv__",
"__xor__",
"next",
"__str__",
"__repr__",
"__round__",
"__fspath__",
"__bytes__",
"__index__",
]
def __new__(mcs: Any, classname: str, bases: Tuple[Any], attrs: Dict[str, Any]) -> type:
def make_method(name: str) -> Callable:
def method(self, *args: Any, **kwargs: Any) -> Any:
obj = object.__getattribute__(self, "_wrapped")
if obj is None:
cb = object.__getattribute__(self, "_callback")
obj = cb()
object.__setattr__(self, "_wrapped", obj)
# we have to convert the instance to the real type
if (
args
and len(args) == 1
and (isinstance(args[0], LazyEvalWrapper) or hasattr(type(args[0]), "_base_class_"))
):
try:
int(args[0]) # force loading the instance
except: # noqa
pass
args = (object.__getattribute__(args[0], "_wrapped"),)
mtd = getattr(obj, name)
return mtd(*args, **kwargs)
return method
typed_class = attrs.get("_base_class_")
for name in mcs._special_names:
if not typed_class or hasattr(typed_class, name):
attrs[name] = make_method(name)
overrides = attrs.get("__overrides__", [])
# overrides.extend(k for k, v in attrs.items() if isinstance(v, lazy))
attrs["__overrides__"] = overrides
return type.__new__(mcs, classname, bases, attrs)
|
WrapperBase
|
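The reason the metaclass installs dunders on the wrapper class rather than per instance: CPython looks up special methods on the type and skips instance attributes. A minimal demonstration of that lookup rule:

```python
class P:
    pass

p = P()
p.__len__ = lambda: 3          # instance attribute: bypassed by len()
try:
    len(p)
except TypeError as e:
    print(e)                   # object of type 'P' has no len()

P.__len__ = lambda self: 3     # class attribute: found by len()
print(len(p))                  # 3
```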
python
|
huggingface__transformers
|
src/transformers/models/vitdet/modeling_vitdet.py
|
{
"start": 27237,
"end": 29902
}
|
class ____(VitDetPreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.embeddings = VitDetEmbeddings(config)
self.encoder = VitDetEncoder(config)
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
# initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> VitDetEmbeddings:
return self.embeddings.projection
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import VitDetConfig, VitDetBackbone
>>> import torch
>>> config = VitDetConfig()
>>> model = VitDetBackbone(config)
>>> pixel_values = torch.randn(1, 3, 224, 224)
>>> with torch.no_grad():
... outputs = model(pixel_values)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 14, 14]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
embedding_output = self.embeddings(pixel_values)
outputs = self.encoder(
embedding_output,
output_hidden_states=True,
output_attentions=output_attentions,
return_dict=return_dict,
)
hidden_states = outputs.hidden_states if return_dict else outputs[1]
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
feature_maps += (hidden_state,)
if not return_dict:
if output_hidden_states:
output = (feature_maps,) + outputs[1:]
else:
output = (feature_maps,) + outputs[2:]
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=outputs.attentions,
)
__all__ = ["VitDetModel", "VitDetPreTrainedModel", "VitDetBackbone"]
|
VitDetBackbone
|
python
|
pypa__warehouse
|
tests/common/db/subscriptions.py
|
{
"start": 713,
"end": 1006
}
|
class ____(WarehouseFactory):
class Meta:
model = StripeSubscriptionProduct
id = factory.Faker("uuid4", cast_to=None)
product_id = "prod_123"
product_name = factory.Faker("pystr", max_chars=12)
description = factory.Faker("sentence")
|
StripeSubscriptionProductFactory
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/stack_test.py
|
{
"start": 1561,
"end": 2675
}
|
class ____(op_bench.TorchBenchmarkBase):
def init(self, sizes, N, dim, device):
random.seed(42)
inputs = []
gen_sizes = []
if type(sizes) is list and N == -1:
gen_sizes = sizes
else:
for i in range(N):
gen_sizes.append(
[
old_size() if callable(old_size) else old_size
for old_size in sizes
]
)
for s in gen_sizes:
inputs.append(torch.rand(s, device=device))
result = torch.rand(gen_sizes[0], device=device)
self.inputs = {"result": result, "inputs": inputs, "dim": dim}
self.set_module_name("stack")
def forward(self, result: torch.Tensor, inputs: list[torch.Tensor], dim: int):
return torch.stack(inputs, dim=dim, out=result)
op_bench.generate_pt_test(
stack_configs_static_runtime
+ stack_configs_short
+ stack_configs_long
+ stack_configs_multidim,
StackBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
StackBenchmark
|
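A minimal sketch of the exact call being benchmarked: stacking same-shaped tensors along a dimension into a preallocated `out` buffer:

```python
import torch

inputs = [torch.rand(2, 3) for _ in range(4)]
out = torch.empty(4, 2, 3)
torch.stack(inputs, dim=0, out=out)  # writes into `out`, no new allocation
print(out.shape)                     # torch.Size([4, 2, 3])
```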
python
|
google__jax
|
jax/_src/pallas/fuser/block_spec.py
|
{
"start": 1697,
"end": 2261
}
|
class ____:
avals_in: tuple[core.AbstractValue, ...]
avals_out: tuple[core.AbstractValue, ...]
out_usages: tuple[set[Usage], ...]
eval_function: Any = dataclasses.field(default=None, init=False)
scalar_prefetch_fn: Any = dataclasses.field(default=None, init=False)
scalar_prefetch_handler: Any | None
grid: tuple[int | jax.Array, ...] | None
def __post_init__(self):
self._scalar_prefetch = None
def set_eval_function(self, eval_function):
self.eval_function = eval_function
return eval_function
@dataclasses.dataclass
|
PullRuleContext
|
python
|
getsentry__sentry
|
src/sentry/discover/endpoints/discover_saved_query_detail.py
|
{
"start": 6094,
"end": 6808
}
|
class ____(DiscoverSavedQueryBase):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
def has_feature(self, organization, request):
return features.has("organizations:discover-query", organization, actor=request.user)
def post(self, request: Request, organization, query) -> Response:
"""
Update last_visited and increment visits counter
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
query.visits = F("visits") + 1
query.last_visited = timezone.now()
query.save(update_fields=["visits", "last_visited"])
return Response(status=204)
|
DiscoverSavedQueryVisitEndpoint
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_organization_events_spans_performance.py
|
{
"start": 39165,
"end": 61703
}
|
class ____(OrganizationEventsSpansEndpointTestBase):
URL = "sentry-api-0-organization-events-spans"
def test_no_projects(self) -> None:
user = self.create_user()
org = self.create_organization(owner=user)
self.login_as(user=user)
url = reverse(
self.URL,
kwargs={"organization_id_or_slug": org.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 404, response.content
def test_require_span_param(self) -> None:
response = self.client.get(
self.url,
data={"project": self.project.id},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"span": [ErrorDetail("This field is required.", code="required")]}
def test_bad_span_param(self) -> None:
response = self.client.get(
self.url,
data={"project": self.project.id, "span": ["http.server"]},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"span": [
ErrorDetail(
"span must consist of of a span op and a valid 16 character hex delimited by a colon (:)",
code="invalid",
)
]
}
response = self.client.get(
self.url,
data={"project": self.project.id, "span": ["http.server:foo"]},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"span": [
ErrorDetail(
"`spanGroup` must be a valid 16 character hex (containing only digits, or a-f characters)",
code="invalid",
)
]
}
def test_bad_params_reverse_min_max(self) -> None:
response = self.client.get(
self.url,
data={
"project": self.project.id,
"min_exclusive_time": 7.0,
"max_exclusive_time": 1.0,
"span": f"http.server:{'ab' * 8}",
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"non_field_errors": ["min_exclusive_time cannot be greater than max_exclusive_time."]
}
def test_bad_params_invalid_min(self) -> None:
response = self.client.get(
self.url,
data={
"project": self.project.id,
"min_exclusive_time": "foo",
"max_exclusive_time": 1.0,
"span": f"http.server:{'ab' * 8}",
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"min_exclusive_time": ["A valid number is required."]
}, "failing for min_exclusive_time"
def test_bad_params_invalid_max(self) -> None:
response = self.client.get(
self.url,
data={
"project": self.project.id,
"min_exclusive_time": 100,
"max_exclusive_time": "bar",
"span": f"http.server:{'ab' * 8}",
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"max_exclusive_time": ["A valid number is required."]
}, "failing for max_exclusive_time"
def test_span_filters(self) -> None:
test_op = "django.middleware"
test_hash = "cd" * 8
spans = [
# span with test_op but different hash
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": "b" * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": test_op,
"description": "middleware span",
"hash": "ab" * 8,
"exclusive_time": 3.0,
},
# span with test_hash but different op
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": "c" * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": "django.view",
"description": "middleware span",
"hash": test_hash,
"exclusive_time": 1.0,
},
]
self.create_event(spans=spans)
response = self.client.get(
self.url,
data={"project": self.project.id, "span": f"{test_op}:{test_hash}"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == [{"op": test_op, "group": test_hash, "examples": []}]
def test_span_filters_with_min_max(self) -> None:
test_op = "django.middleware"
test_hash = "2b9cbb96dbf59baa"
spans = [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": "b" * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": test_op,
"description": "middleware span",
"hash": "ab" * 8,
"exclusive_time": 3.0,
},
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": "b" * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": test_op,
"description": "middleware span",
"hash": "ab" * 8,
"exclusive_time": 3.0,
},
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": "c" * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": "django.view",
"description": "middleware span",
"hash": test_hash,
"exclusive_time": 1.0,
},
]
self.create_event(spans=spans)
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": f"{test_op}:{test_hash}",
"min_exclusive_time": 1.0,
"max_exclusive_time": 2.0,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == [{"op": test_op, "group": test_hash, "examples": []}]
@patch("sentry.api.endpoints.organization_events_spans_performance.raw_snql_query")
def test_one_span(self, mock_raw_snql_query: MagicMock) -> None:
event = self.create_event()
mock_raw_snql_query.side_effect = [
{
"data": [self.suspect_span_examples_snuba_results("http.server", event)],
},
]
response = self.client.get(
self.url,
data={"project": self.project.id, "span": "http.server:0a7c0d32f132a132"},
format="json",
)
assert response.status_code == 200, response.content
assert mock_raw_snql_query.call_count == 1
self.assert_span_examples(response.data, [self.span_example_results("http.server", event)])
def test_one_span_with_min(self) -> None:
spans = [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 5.0,
}
for x in ["b", "c"]
]
self.create_event(spans=spans)
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": f"django.middleware:{'cd' * 8}",
"min_exclusive_time": 7.0,
},
format="json",
)
assert response.status_code == 200, response.content
expected_result = [
{
"op": "django.middleware",
"group": "cd" * 8,
"examples": [],
}
]
self.assert_span_examples(response.data, expected_result)
def test_one_span_with_max(self) -> None:
spans = [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"hash": "cd" * 8,
"exclusive_time": 5.0,
}
for x in ["b", "c"]
]
self.create_event(spans=spans)
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": f"django.middleware:{'cd' * 8}",
"max_exclusive_time": 2.0,
},
format="json",
)
assert response.status_code == 200, response.content
expected_result = [
{
"op": "django.middleware",
"group": "cd" * 8,
"examples": [],
}
]
self.assert_span_examples(response.data, expected_result)
def test_one_span_with_min_max(self) -> None:
spans = [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 5.0,
}
for x in ["b", "c"]
] + [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=5)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 3.0,
}
for x in ["d", "e", "f"]
]
event = self.create_event(spans=spans)
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": "django.middleware:2b9cbb96dbf59baa",
"min_exclusive_time": 2.0,
"max_exclusive_time": 4.0,
},
format="json",
)
assert response.status_code == 200, response.content
expected_result = [
{
"op": "django.middleware",
"group": "2b9cbb96dbf59baa",
"examples": [
{
"id": event.event_id,
"description": "middleware span",
"startTimestamp": (self.min_ago).timestamp(),
"finishTimestamp": (self.min_ago + timedelta(seconds=8)).timestamp(),
"nonOverlappingExclusiveTime": 1000.0,
"spans": [
{
"id": x * 16,
"exclusiveTime": 3.0,
"startTimestamp": (self.min_ago + timedelta(seconds=4)).timestamp(),
"finishTimestamp": (
self.min_ago + timedelta(seconds=5)
).timestamp(),
"trace": "a" * 32,
}
for x in ["d", "e", "f"]
],
}
],
}
]
self.assert_span_examples(response.data, expected_result)
@patch("sentry.api.endpoints.organization_events_spans_performance.raw_snql_query")
def test_per_page(self, mock_raw_snql_query: MagicMock) -> None:
event = self.create_event()
mock_raw_snql_query.side_effect = [
{
"data": [
self.suspect_span_examples_snuba_results("http.server", event),
self.suspect_span_examples_snuba_results("http.server", event),
],
},
]
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": ["http.server:0a7c0d32f132a132"],
"per_page": 1,
},
format="json",
)
assert response.status_code == 200, response.content
assert mock_raw_snql_query.call_count == 1
self.assert_span_examples(
response.data,
[self.span_example_results("http.server", event)],
)
def test_per_page_with_min(self) -> None:
spans = [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 5.0,
}
for x in ["b", "c"]
] + [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=5)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 3.0,
}
for x in ["d", "e", "f"]
]
event = self.create_event(spans=spans)
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": "django.middleware:2b9cbb96dbf59baa",
"min_exclusive_time": 4.0,
"per_page": 1,
},
format="json",
)
assert response.status_code == 200, response.content
expected_result = [
{
"op": "django.middleware",
"group": "2b9cbb96dbf59baa",
"examples": [
{
"id": event.event_id,
"description": "middleware span",
"startTimestamp": (self.min_ago).timestamp(),
"finishTimestamp": (self.min_ago + timedelta(seconds=8)).timestamp(),
"nonOverlappingExclusiveTime": 3000.0,
"spans": [
{
"id": x * 16,
"exclusiveTime": 5.0,
"startTimestamp": (self.min_ago + timedelta(seconds=1)).timestamp(),
"finishTimestamp": (
self.min_ago + timedelta(seconds=4)
).timestamp(),
"trace": "a" * 32,
}
for x in ["b", "c"]
],
}
],
}
]
self.assert_span_examples(response.data, expected_result)
def test_per_page_with_max(self) -> None:
spans = [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 5.0,
}
for x in ["b", "c"]
] + [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=5)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"exclusive_time": 3.0,
}
for x in ["d", "e", "f"]
]
event = self.create_event(spans=spans)
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": "django.middleware:2b9cbb96dbf59baa",
"max_exclusive_time": 4.0,
"per_page": 1,
},
format="json",
)
assert response.status_code == 200, response.content
expected_result = [
{
"op": "django.middleware",
"group": "2b9cbb96dbf59baa",
"examples": [
{
"id": event.event_id,
"description": "middleware span",
"startTimestamp": (self.min_ago).timestamp(),
"finishTimestamp": (self.min_ago + timedelta(seconds=8)).timestamp(),
"nonOverlappingExclusiveTime": 1000.0,
"spans": [
{
"id": x * 16,
"exclusiveTime": 3.0,
"startTimestamp": (self.min_ago + timedelta(seconds=4)).timestamp(),
"finishTimestamp": (
self.min_ago + timedelta(seconds=5)
).timestamp(),
"trace": "a" * 32,
}
for x in ["d", "e", "f"]
],
}
],
}
]
self.assert_span_examples(response.data, expected_result)
def test_per_page_with_min_max(self) -> None:
spans = [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=1)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"hash": "2b9cbb96dbf59baa",
"exclusive_time": 5.0,
}
for x in ["b", "c"]
] + [
{
"same_process_as_parent": True,
"parent_span_id": "a" * 16,
"span_id": x * 16,
"start_timestamp": (self.min_ago + timedelta(seconds=4)).isoformat(),
"timestamp": (self.min_ago + timedelta(seconds=5)).isoformat(),
"op": "django.middleware",
"description": "middleware span",
"hash": "2b9cbb96dbf59baa",
"exclusive_time": 3.0,
}
for x in ["d", "e", "f"]
]
event = self.create_event(spans=spans)
response = self.client.get(
self.url,
data={
"project": self.project.id,
"span": "django.middleware:2b9cbb96dbf59baa",
"min_exclusive_time": 2.0,
"max_exclusive_time": 4.0,
"per_page": 1,
},
format="json",
)
assert response.status_code == 200, response.content
expected_result = [
{
"op": "django.middleware",
"group": "2b9cbb96dbf59baa",
"examples": [
{
"id": event.event_id,
"description": "middleware span",
"startTimestamp": (self.min_ago).timestamp(),
"finishTimestamp": (self.min_ago + timedelta(seconds=8)).timestamp(),
"nonOverlappingExclusiveTime": 1000.0,
"spans": [
{
"id": x * 16,
"exclusiveTime": 3.0,
"startTimestamp": (self.min_ago + timedelta(seconds=4)).timestamp(),
"finishTimestamp": (
self.min_ago + timedelta(seconds=5)
).timestamp(),
"trace": "a" * 32,
}
for x in ["d", "e", "f"]
],
}
],
}
]
self.assert_span_examples(response.data, expected_result)
|
OrganizationEventsSpansExamplesEndpointTest
|
python
|
pyparsing__pyparsing
|
tests/test_unit.py
|
{
"start": 391152,
"end": 391811
}
|
class ____(Test02_WithoutPackrat):
"""
rerun Test2 tests, now that packrat is enabled
"""
def test000_assert_packrat_status(self):
print("Packrat enabled:", ParserElement._packratEnabled)
print(
"Packrat cache:",
type(ParserElement.packrat_cache).__name__,
getattr(ParserElement.packrat_cache, "size", "- no size attribute -"),
)
self.assertTrue(ParserElement._packratEnabled, "packrat not enabled")
self.assertEqual(
"_FifoCache",
type(ParserElement.packrat_cache).__name__,
msg="incorrect cache type",
)
|
Test04_WithPackrat
|
python
|
pypa__setuptools
|
setuptools/_vendor/packaging/_tokenizer.py
|
{
"start": 2128,
"end": 5273
}
|
class ____:
"""Context-sensitive token parsing.
Provides methods to examine the input stream to check whether the next token
matches.
"""
def __init__(
self,
source: str,
*,
rules: dict[str, str | re.Pattern[str]],
) -> None:
self.source = source
self.rules: dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Token | None = None
self.position = 0
def consume(self, name: str) -> None:
"""Move beyond provided token name, if at current position."""
if self.check(name):
self.read()
def check(self, name: str, *, peek: bool = False) -> bool:
"""Check whether the next token has the provided name.
By default, if the check succeeds, the token *must* be read before
another check. If `peek` is set to `True`, the token is not loaded and
would need to be checked again.
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
expression = self.rules[name]
match = expression.match(self.source, self.position)
if match is None:
return False
if not peek:
self.next_token = Token(name, match[0], self.position)
return True
def expect(self, name: str, *, expected: str) -> Token:
"""Expect a certain token name next, failing with a syntax error otherwise.
The token is *not* read.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
return self.read()
def read(self) -> Token:
"""Consume the next token and return it."""
token = self.next_token
assert token is not None
self.position += len(token.text)
self.next_token = None
return token
def raise_syntax_error(
self,
message: str,
*,
span_start: int | None = None,
span_end: int | None = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
self.position if span_start is None else span_start,
self.position if span_end is None else span_end,
)
raise ParserSyntaxError(
message,
source=self.source,
span=span,
)
@contextlib.contextmanager
def enclosing_tokens(
self, open_token: str, close_token: str, *, around: str
) -> Iterator[None]:
if self.check(open_token):
open_position = self.position
self.read()
else:
open_position = None
yield
if open_position is None:
return
if not self.check(close_token):
self.raise_syntax_error(
f"Expected matching {close_token} for {open_token}, after {around}",
span_start=open_position,
)
self.read()
|
Tokenizer
|
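A hedged usage sketch, assuming the `Tokenizer` and `Token` classes from the snippet are in scope, with a made-up two-rule grammar:

```python
rules = {"WORD": r"[a-z]+", "WS": r"\s+"}
tok = Tokenizer("hello world", rules=rules)

word = tok.expect("WORD", expected="a word")  # checks, then reads
print(word.name, word.text)                   # WORD hello
tok.consume("WS")                             # skip the space if present
if tok.check("WORD"):
    print(tok.read().text)                    # world
```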
python
|
facebook__pyre-check
|
client/language_server/code_navigation_request.py
|
{
"start": 1189,
"end": 1472
}
|
class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
errors: List[Dict[str, Any]]
def to_errors_response(self) -> List[error.Error]:
return [error.Error.from_json(error_response) for error_response in self.errors]
@dataclasses.dataclass(frozen=True)
|
TypeErrorsResponse
|
python
|
walkccc__LeetCode
|
solutions/2104. Sum of Subarray Ranges/2104.py
|
{
"start": 0,
"end": 893
}
|
class ____:
def subArrayRanges(self, nums: list[int]) -> int:
prevGt, nextGt = self._getPrevNext(nums, operator.lt)
prevLt, nextLt = self._getPrevNext(nums, operator.gt)
return sum(num * (i - prevGt[i]) * (nextGt[i] - i) -
num * (i - prevLt[i]) * (nextLt[i] - i)
for i, num in enumerate(nums))
def _getPrevNext(
self,
nums: list[int],
op: callable
) -> tuple[list[int], list[int]]:
"""
    Returns `prev` and `next`, which store the indices of the nearest numbers
    that are smaller or larger than the current number, depending on `op`.
"""
n = len(nums)
prev = [-1] * n
next = [n] * n
stack = []
for i, num in enumerate(nums):
while stack and op(nums[stack[-1]], num):
next[stack.pop()] = i
if stack:
prev[i] = stack[-1]
stack.append(i)
return prev, next
|
Solution
|
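A quick check of the monotonic-stack solution above (assuming the `Solution` class and its `operator` import are in scope); for `[1, 2, 3]` the per-subarray ranges are 0, 0, 0, 1, 1, 2, summing to 4:

```python
print(Solution().subArrayRanges([1, 2, 3]))          # 4
print(Solution().subArrayRanges([4, -2, -3, 4, 1]))  # 59 (LeetCode example)
```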
python
|
executablebooks__jupyter-book
|
py/jupyter_book/nodeenv.py
|
{
"start": 230,
"end": 3219
}
|
class ____(Exception): ...
def is_windows():
return platform.system() == "Windows"
def get_triple_node_version(node_path):
# Check version
_version = subprocess.run(
[node_path, "-v"], capture_output=True, check=True, text=True
).stdout
match = re.match(r"^v(\d+)\.(\d+)\.(\d+).*$", _version)
return [int(x) for x in match.groups()]
def find_installed_node():
# shutil.which can find things with PATHEXT, but 3.12.0 breaks this by preferring NODE over NODE.EXE on Windows
return shutil.which("node.exe") if is_windows() else shutil.which("node")
def find_nodeenv_path(version: str):
# The conda packaging of this package does not need to install node!
import platformdirs
return platformdirs.user_data_path(
appname="jupyter-book", appauthor=False, version=version
)
def ask_to_install_node(path):
if env_value := os.environ.get(INSTALL_NODEENV_KEY, "").lower():
return env_value in {"yes", "true", "1", "y"}
return input(f"❔ Install Node.js in '{path}'? (y/N): ").lower() == "y"
def create_nodeenv(env_path, version):
command = [
sys.executable,
"-m",
"nodeenv",
"-v",
f"--node={version}",
"--prebuilt",
"--clean-src",
env_path,
]
result = subprocess.run(command, capture_output=True, encoding="utf-8")
if result.returncode:
shutil.rmtree(env_path)
raise NodeEnvCreationError(result.stderr)
else:
return env_path
def find_valid_node(binary_path, nodeenv_version, test_version):
# First, try local Node
node_path = find_installed_node()
if node_path is not None:
absolute_node_path = pathlib.Path(node_path).absolute()
version = get_triple_node_version(absolute_node_path)
# Validate the version
try:
test_version(version)
except NodeVersionError as err:
message = err.args[0]
print(message)
else:
return absolute_node_path, binary_path
# Otherwise, fallback on installing from nodeenv
nodeenv_path = find_nodeenv_path(nodeenv_version)
if not nodeenv_path.exists():
print(
"❗ Node.js (node) is required to run Jupyter Book, but could not be found`."
)
if ask_to_install_node(nodeenv_path):
print(f"⚙️ Attempting to install Node.js in {nodeenv_path} ...")
create_nodeenv(nodeenv_path, nodeenv_version)
print(f"ℹ️ Successfully installed Node.js {nodeenv_version}")
else:
raise PermissionDeniedError("Node.js installation was not permitted")
# Find the executable path
new_node_path = (
(nodeenv_path / "Scripts" / "node.exe")
if is_windows()
else (nodeenv_path / "bin" / "node")
)
new_path = os.pathsep.join(
[*binary_path.split(os.pathsep), str(new_node_path.parent)]
)
return new_node_path, new_path
|
NodeVersionError
|
python
|
pypa__warehouse
|
warehouse/ip_addresses/models.py
|
{
"start": 356,
"end": 442
}
|
class ____(enum.Enum):
AUTHENTICATION_ATTEMPTS = "authentication-attempts"
|
BanReason
|
python
|
PrefectHQ__prefect
|
tests/blocks/test_notifications.py
|
{
"start": 11878,
"end": 14852
}
|
class ____:
API_KEY = "api_key"
async def test_notify_async(self):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
block = OpsgenieWebhook(apikey=self.API_KEY)
await block.notify("test")
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
servers=f"opsgenie://{self.API_KEY}/?action=new®ion=us&priority=normal&"
"batch=no&%3Ainfo=note&%3Asuccess=close&%3Awarning=new&%3Afailure="
"new&format=text&overflow=upstream"
)
apprise_instance_mock.async_notify.assert_awaited_once_with(
body="test", title="", notify_type=PREFECT_NOTIFY_TYPE_DEFAULT
)
def _test_notify_sync(self, targets="", params=None, **kwargs):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
if params is None:
params = "action=new®ion=us&priority=normal&batch=no"
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
block = OpsgenieWebhook(apikey=self.API_KEY, **kwargs)
@flow
def test_flow():
block.notify("test")
test_flow()
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
servers=f"opsgenie://{self.API_KEY}/{targets}?{params}"
"&%3Ainfo=note&%3Asuccess=close&%3Awarning=new&%3Afailure=new&format=text&overflow=upstream"
)
apprise_instance_mock.async_notify.assert_awaited_once_with(
body="test", title="", notify_type=PREFECT_NOTIFY_TYPE_DEFAULT
)
def test_notify_sync_simple(self):
self._test_notify_sync()
def test_notify_sync_params(self):
params = "action=new®ion=eu&priority=low&batch=yes"
self._test_notify_sync(params=params, region_name="eu", priority=1, batch=True)
def test_notify_sync_targets(self):
targets = "%23team/%2Aschedule/%40user/%5Eescalation"
self._test_notify_sync(
targets=targets,
target_user=["user"],
target_team=["team"],
target_schedule=["schedule"],
target_escalation=["escalation"],
)
def test_notify_sync_users(self):
targets = "%40user1/%40user2"
self._test_notify_sync(targets=targets, target_user=["user1", "user2"])
def test_notify_sync_details(self):
params = "action=new®ion=us&priority=normal&batch=no&%2Bkey1=value1&%2Bkey2=value2"
self._test_notify_sync(
params=params,
details={
"key1": "value1",
"key2": "value2",
},
)
|
TestOpsgenieWebhook
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_task_instances.py
|
{
"start": 132723,
"end": 151153
}
|
class ____(TestTaskInstanceEndpoint):
def test_should_respond_200(self, test_client, session):
self.create_task_instances(
session=session, task_instances=[{"state": State.SUCCESS}], with_ti_history=True
)
with assert_queries_count(3):
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries"
)
assert response.status_code == 200
assert response.json()["total_entries"] == 2 # The task instance and its history
assert len(response.json()["task_instances"]) == 2
assert response.json() == {
"task_instances": [
{
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 0,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "success",
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 1,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
},
{
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 1,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": None,
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 2,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
},
],
"total_entries": 2,
}
def test_should_respond_200_with_hitl(
self, test_client, create_task_instance: CreateTaskInstance, session
):
ti = create_task_instance(dag_id="test_hitl_dag", task_id="sample_task_hitl")
ti.try_number = 1
session.add(ti)
hitl_detail = HITLDetail(
ti_id=ti.id,
options=["Approve", "Reject"],
subject="This is subject",
body="this is body",
defaults=["Approve"],
multiple=False,
params={"input_1": 1},
assignees=None,
)
session.add(hitl_detail)
session.commit()
# Record the TaskInstanceHistory
TaskInstanceHistory.record_ti(ti, session=session)
session.flush()
with assert_queries_count(3):
response = test_client.get(
f"/dags/{ti.dag_id}/dagRuns/{ti.run_id}/taskInstances/{ti.task_id}/tries",
)
assert response.status_code == 200
assert response.json() == {
"task_instances": [
{
"dag_id": "test_hitl_dag",
"dag_display_name": "test_hitl_dag",
"duration": None,
"end_date": mock.ANY,
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 0,
"operator": "EmptyOperator",
"operator_name": "EmptyOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 1,
"queue": "default",
"queued_when": None,
"scheduled_when": None,
"start_date": None,
"state": None,
"task_id": "sample_task_hitl",
"task_display_name": "sample_task_hitl",
"try_number": 1,
"unixname": getuser(),
"dag_run_id": "test",
"dag_version": mock.ANY,
"hitl_detail": {
"assigned_users": [],
"body": "this is body",
"chosen_options": None,
"created_at": mock.ANY,
"defaults": ["Approve"],
"multiple": False,
"options": ["Approve", "Reject"],
"params": {"input_1": {"value": 1, "description": None, "schema": {}}},
"params_input": {},
"responded_at": None,
"responded_by_user": None,
"response_received": False,
"subject": "This is subject",
},
},
],
"total_entries": 1,
}
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries"
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries"
)
assert response.status_code == 403
def test_ti_in_retry_state_not_returned(self, test_client, session):
self.create_task_instances(
session=session, task_instances=[{"state": State.SUCCESS}], with_ti_history=True
)
ti = session.query(TaskInstance).one()
ti.state = State.UP_FOR_RETRY
session.merge(ti)
session.commit()
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries"
)
assert response.status_code == 200
assert response.json()["total_entries"] == 1
assert len(response.json()["task_instances"]) == 1
assert response.json() == {
"task_instances": [
{
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 0,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "success",
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 1,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
},
],
"total_entries": 1,
}
def test_mapped_task_should_respond_200(self, test_client, session):
tis = self.create_task_instances(session, task_instances=[{"state": State.FAILED}])
old_ti = tis[0]
for idx in (1, 2):
ti = TaskInstance(
task=old_ti.task, run_id=old_ti.run_id, map_index=idx, dag_version_id=old_ti.dag_version_id
)
for attr in ["duration", "end_date", "pid", "start_date", "state", "queue"]:
setattr(ti, attr, getattr(old_ti, attr))
ti.try_number = 1
session.add(ti)
session.commit()
tis = session.query(TaskInstance).all()
# Record the task instance history
from airflow.models.taskinstance import clear_task_instances
clear_task_instances(tis, session)
# Simulate the try_number increasing to new values in TI
for ti in tis:
if ti.map_index > 0:
ti.try_number += 1
ti.queue = "default_queue"
session.merge(ti)
session.commit()
# in each loop, we should get the right mapped TI back
for map_index in (1, 2):
# Get the info from TIHistory: try_number 1, try_number 2 is TI table(latest)
with assert_queries_count(3):
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances"
f"/print_the_context/{map_index}/tries",
)
assert response.status_code == 200
assert (
response.json()["total_entries"] == 2
) # the mapped task was cleared. So both the task instance and its history
assert len(response.json()["task_instances"]) == 2
assert response.json() == {
"task_instances": [
{
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": map_index,
"max_tries": 0,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "failed",
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 1,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
},
{
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": map_index,
"max_tries": 1,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": None,
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 2,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
},
],
"total_entries": 2,
}
def test_raises_404_for_nonexistent_task_instance(self, test_client, session):
self.create_task_instances(session)
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/non_existent_task/tries"
)
assert response.status_code == 404
assert response.json() == {
"detail": "The Task Instance with dag_id: `example_python_operator`, run_id: `TEST_DAG_RUN_ID`, task_id: `non_existent_task` and map_index: `-1` was not found"
}
@pytest.mark.parametrize(
("run_id", "expected_version_number"),
[
("run1", 1),
("run2", 2),
("run3", 3),
],
)
@pytest.mark.usefixtures("make_dag_with_multiple_versions")
@mock.patch("airflow.api_fastapi.core_api.datamodels.dag_versions.hasattr")
def test_should_respond_200_with_versions(
self, mock_hasattr, test_client, run_id, expected_version_number
):
mock_hasattr.return_value = False
response = test_client.get(
f"/dags/dag_with_multiple_versions/dagRuns/{run_id}/taskInstances/task1/tries"
)
assert response.status_code == 200
assert response.json()["task_instances"][0] == {
"task_id": "task1",
"dag_id": "dag_with_multiple_versions",
"dag_display_name": "dag_with_multiple_versions",
"dag_run_id": run_id,
"map_index": -1,
"start_date": None,
"end_date": mock.ANY,
"duration": None,
"state": mock.ANY,
"try_number": 0,
"max_tries": 0,
"task_display_name": "task1",
"hostname": "",
"unixname": getuser(),
"pool": "default_pool",
"pool_slots": 1,
"queue": "default",
"priority_weight": 1,
"operator": "EmptyOperator",
"operator_name": "EmptyOperator",
"queued_when": None,
"scheduled_when": None,
"pid": None,
"executor": None,
"executor_config": "{}",
"dag_version": {
"id": mock.ANY,
"version_number": expected_version_number,
"dag_id": "dag_with_multiple_versions",
"bundle_name": "dag_maker",
"bundle_version": f"some_commit_hash{expected_version_number}",
"bundle_url": f"http://test_host.github.com/tree/some_commit_hash{expected_version_number}/dags",
"created_at": mock.ANY,
"dag_display_name": "dag_with_multiple_versions",
},
"hitl_detail": None,
}
@pytest.mark.parametrize(
("run_id", "expected_version_number"),
[
("run1", 1),
("run2", 2),
("run3", 3),
],
)
@pytest.mark.usefixtures("make_dag_with_multiple_versions")
def test_should_respond_200_with_versions_using_url_template(
self, test_client, run_id, expected_version_number
):
response = test_client.get(
f"/dags/dag_with_multiple_versions/dagRuns/{run_id}/taskInstances/task1/tries"
)
assert response.status_code == 200
assert response.json()["task_instances"][0] == {
"task_id": "task1",
"dag_id": "dag_with_multiple_versions",
"dag_display_name": "dag_with_multiple_versions",
"dag_run_id": run_id,
"map_index": -1,
"start_date": None,
"end_date": mock.ANY,
"duration": None,
"state": mock.ANY,
"try_number": 0,
"max_tries": 0,
"task_display_name": "task1",
"hostname": "",
"unixname": getuser(),
"pool": "default_pool",
"pool_slots": 1,
"queue": "default",
"priority_weight": 1,
"operator": "EmptyOperator",
"operator_name": "EmptyOperator",
"queued_when": None,
"scheduled_when": None,
"pid": None,
"executor": None,
"executor_config": "{}",
"dag_version": {
"id": mock.ANY,
"version_number": expected_version_number,
"dag_id": "dag_with_multiple_versions",
"bundle_name": "dag_maker",
"bundle_version": f"some_commit_hash{expected_version_number}",
"bundle_url": f"http://test_host.github.com/tree/some_commit_hash{expected_version_number}/dags",
"created_at": mock.ANY,
"dag_display_name": "dag_with_multiple_versions",
},
"hitl_detail": None,
}
|
TestGetTaskInstanceTries
|
python
|
coleifer__peewee
|
peewee.py
|
{
"start": 221242,
"end": 222021
}
|
class ____(object):
def __init__(self, models, database, bind_refs, bind_backrefs):
self.models = models
self.database = database
self.bind_refs = bind_refs
self.bind_backrefs = bind_backrefs
def __enter__(self):
self._orig_database = []
for model in self.models:
self._orig_database.append(model._meta.database)
model.bind(self.database, self.bind_refs, self.bind_backrefs,
_exclude=set(self.models))
return self.models
def __exit__(self, exc_type, exc_val, exc_tb):
for model, db in zip(self.models, self._orig_database):
model.bind(db, self.bind_refs, self.bind_backrefs,
_exclude=set(self.models))
|
_BoundModelsContext
|
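This class backs peewee's `Database.bind_ctx`; a hypothetical usage sketch with in-memory SQLite databases:

```python
from peewee import SqliteDatabase, Model, CharField

main_db = SqliteDatabase(":memory:")
test_db = SqliteDatabase(":memory:")

class User(Model):
    name = CharField()
    class Meta:
        database = main_db

# bind_ctx constructs a _BoundModelsContext under the hood.
with test_db.bind_ctx([User]):
    test_db.create_tables([User])
    User.create(name="alice")      # written to test_db
# on exit, User is rebound to main_db
```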
python
|
cython__cython
|
Cython/Compiler/TypeSlots.py
|
{
"start": 15197,
"end": 16351
}
|
class ____(InternalMethodSlot):
# Descriptor for a slot whose value depends on whether
# the type participates in GC.
def __init__(self, slot_name, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
        # We treat external types as needing GC, but don't generate slot code
        # because we don't know the slot implementation and so cannot call it directly.
if not scope.needs_gc() or scope.parent_type.is_external:
return "0"
if not scope.has_cyclic_pyobject_attrs:
# if the type does not have GC relevant object attributes, it can
# delegate GC methods to its parent - iff the parent functions
# are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
|
GCDependentSlot
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1140843,
"end": 1141875
}
|
class ____(ScaleInvalidDataShowAssize):
"""
ScaleInvalidDataShowAsValuesize schema wrapper.
Parameters
----------
value : float
Default size for marks.
* For ``point``/``circle``/``square``, this represents the pixel area of the marks.
Note that this value sets the area of the symbol; the side lengths will increase
with the square root of this value.
* For ``bar``, this represents the band size of the bar, in pixels.
* For ``text``, this represents the font size, in pixels.
**Default value:**
* ``30`` for point, circle, square marks; width/height's ``step``
* ``2`` for bar marks with discrete dimensions;
* ``5`` for bar marks with continuous dimensions;
* ``11`` for text marks.
"""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"size">'}
def __init__(self, value: Optional[float] = Undefined, **kwds):
super().__init__(value=value, **kwds)
|
ScaleInvalidDataShowAsValuesize
|
python
|
astropy__astropy
|
astropy/units/function/logarithmic.py
|
{
"start": 15307,
"end": 15363
}
|
class ____(LogQuantity):
_unit_class = MagUnit
|
Magnitude
|
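A hedged usage sketch of astropy's logarithmic quantities, which `Magnitude` is assumed to represent here: m magnitudes correspond to a factor of 10^(-0.4·m) in the physical unit.

```python
import astropy.units as u

m = 5 * u.mag(u.ct / u.s)   # a Magnitude quantity
print(m.physical)           # 0.01 ct / s, i.e. 10**(-0.4 * 5)
```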
python
|
doocs__leetcode
|
lcp/LCP 05. 发 LeetCoin/Solution.py
|
{
"start": 1766,
"end": 2592
}
|
class ____:
def bonus(
self, n: int, leadership: List[List[int]], operations: List[List[int]]
) -> List[int]:
def dfs(u):
nonlocal idx
begin[u] = idx
for v in g[u]:
dfs(v)
end[u] = idx
idx += 1
g = defaultdict(list)
for a, b in leadership:
g[a].append(b)
begin = [0] * (n + 1)
end = [0] * (n + 1)
idx = 1
dfs(1)
ans = []
tree = SegmentTree(n)
for op in operations:
p, v = op[:2]
if p == 1:
tree.modify(end[v], end[v], op[2])
elif p == 2:
tree.modify(begin[v], end[v], op[2])
else:
ans.append(tree.query(begin[v], end[v]))
return ans
|
Solution
|
python
|
pytorch__pytorch
|
torch/distributed/debug/_frontend.py
|
{
"start": 7250,
"end": 11105
}
|
class ____:
def __init__(self, port: int):
# Setup templates
loader = DictLoader(templates)
self._jinja_env = Environment(loader=loader, enable_async=True)
self._jinja_env.globals.update(
zip=zip,
format_json=format_json,
enumerate=enumerate,
)
# Create routes
self._routes = {
"/": self._handle_index,
"/stacks": self._handle_stacks,
"/fr_trace": self._handle_fr_trace,
"/fr_trace_nccl": self._handle_fr_trace_nccl,
"/profile": self._handle_profiler,
"/wait_counters": self._handle_wait_counters,
}
# Create HTTP server
RequestHandlerClass = type(
"HTTPRequestHandler",
(HTTPRequestHandler,),
{"frontend": self},
)
server_address = ("", port)
self._server = _IPv6HTTPServer(server_address, RequestHandlerClass)
self._thread = threading.Thread(
target=self._serve,
args=(),
daemon=True,
)
self._thread.start()
def _serve(self) -> None:
try:
self._server.serve_forever()
except Exception:
logger.exception("got exception in checkpoint server")
def join(self) -> None:
self._thread.join()
def _handle_request(self, req: HTTPRequestHandler) -> None:
path = req.get_path()
if path not in self._routes:
req.send_error(404, f"Handler not found: {path}")
return
handler = self._routes[path]
try:
resp = handler(req)
except Exception as e:
logger.exception(
"Exception in checkpoint server when handling %s",
path,
)
req.send_error(500, str(e))
return
req.send_response(200)
req.send_header("Content-type", "text/html")
req.end_headers()
req.wfile.write(resp)
def _render_template(self, template: str, **kwargs: object) -> bytes:
return self._jinja_env.get_template(template).render(**kwargs).encode()
def _handle_index(self, req: HTTPRequestHandler) -> bytes:
return self._render_template("index.html")
def _handle_stacks(self, req: HTTPRequestHandler) -> bytes:
addrs, resps = fetch_all("dump_traceback")
return self._render_template(
"raw_resp.html", title="Stacks", addrs=addrs, resps=resps
)
def _handle_fr_trace(self, req: HTTPRequestHandler) -> bytes:
addrs, resps = fetch_all("fr_trace_json")
return self._render_template(
"json_resp.html",
title="FlightRecorder",
addrs=addrs,
resps=resps,
)
def _handle_fr_trace_nccl(self, req: HTTPRequestHandler) -> bytes:
addrs, resps = fetch_all("dump_nccl_trace_json", "onlyactive=true")
return self._render_template(
"json_resp.html",
title="FlightRecorder NCCL",
addrs=addrs,
resps=resps,
)
def _handle_profiler(self, req: HTTPRequestHandler) -> bytes:
duration = req.get_query_arg("duration", default=1.0, type=float)
addrs, resps = fetch_all("torch_profile", f"duration={duration}")
return self._render_template("profile.html", addrs=addrs, resps=resps)
def _handle_wait_counters(self, req: HTTPRequestHandler) -> bytes:
addrs, resps = fetch_all("wait_counter_values")
return self._render_template(
"json_resp.html", title="Wait Counters", addrs=addrs, resps=resps
)
def main(port: int) -> None:
server = FrontendServer(port=port)
logger.info("Frontend server started on port %d", server._server.server_port)
server.join()
|
FrontendServer
|
python
|
milvus-io__pymilvus
|
pymilvus/bulk_writer/local_bulk_writer.py
|
{
"start": 928,
"end": 5154
}
|
class ____(BulkWriter):
def __init__(
self,
schema: CollectionSchema,
local_path: str,
chunk_size: int = 128 * MB,
file_type: BulkFileType = BulkFileType.PARQUET,
config: Optional[dict] = None,
**kwargs,
):
super().__init__(schema, chunk_size, file_type, config, **kwargs)
self._local_path = local_path
self._uuid = str(uuid.uuid4())
self._flush_count = 0
self._working_thread = {}
self._working_thread_lock = Lock()
self._local_files = []
self._make_dir()
@property
def uuid(self):
return self._uuid
def __enter__(self):
return self
def __exit__(self, exc_type: object, exc_val: object, exc_tb: object):
self._exit()
def __del__(self):
self._exit()
def _exit(self):
# wait flush thread
if len(self._working_thread) > 0:
for k, th in self._working_thread.items():
logger.info(f"Wait flush thread '{k}' to finish")
th.join()
self._rm_dir()
def _make_dir(self):
Path(self._local_path).mkdir(exist_ok=True)
logger.info(f"Data path created: {self._local_path}")
uidir = Path(self._local_path).joinpath(self._uuid)
self._local_path = uidir
Path(uidir).mkdir(exist_ok=True)
logger.info(f"Data path created: {uidir}")
def _rm_dir(self):
# remove the uuid folder if it is empty
if Path(self._local_path).exists() and not any(Path(self._local_path).iterdir()):
Path(self._local_path).rmdir()
logger.info(f"Delete local directory '{self._local_path}'")
def append_row(self, row: dict, **kwargs):
super().append_row(row, **kwargs)
        # only one thread can enter this section to persist data;
        # in the _flush() method, the buffer will be swapped to a new one.
        # in async mode, the flush thread runs asynchronously, so other threads
        # can continue to append while the new buffer size is below the target size
with self._working_thread_lock:
if self.buffer_size > self.chunk_size:
self.commit(_async=True)
def commit(self, **kwargs):
        # _async=True, the flush thread runs asynchronously
while len(self._working_thread) > 0:
logger.info(
f"Previous flush action is not finished, {threading.current_thread().name} is waiting..."
)
time.sleep(1.0)
logger.info(
f"Prepare to flush buffer, row_count: {self.buffer_row_count}, size: {self.buffer_size}"
)
_async = kwargs.get("_async", False)
call_back = kwargs.get("call_back")
x = Thread(target=self._flush, args=(call_back,))
logger.info(f"Flush thread begin, name: {x.name}")
self._working_thread[x.name] = x
x.start()
if not _async:
logger.info("Wait flush to finish")
x.join()
super().commit() # reset the buffer size
logger.info(f"Commit done with async={_async}")
def _flush(self, call_back: Optional[Callable] = None):
try:
self._flush_count = self._flush_count + 1
target_path = Path.joinpath(self._local_path, str(self._flush_count))
old_buffer = super()._new_buffer()
if old_buffer.row_count > 0:
file_list = old_buffer.persist(
local_path=str(target_path),
buffer_size=self.buffer_size,
buffer_row_count=self.buffer_row_count,
)
self._local_files.append(file_list)
if call_back:
call_back(file_list)
except Exception as e:
logger.error(f"Failed to fulsh, error: {e}")
raise e from e
finally:
del self._working_thread[threading.current_thread().name]
logger.info(f"Flush thread finished, name: {threading.current_thread().name}")
@property
def data_path(self):
return self._local_path
@property
def batch_files(self):
return self._local_files
|
LocalBulkWriter
|
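A hedged usage sketch for the writer above; the schema, field names, and local path are illustrative, and the import paths follow the documented pymilvus layout but may differ across versions:

```python
from pymilvus import (
    BulkFileType,
    CollectionSchema,
    DataType,
    FieldSchema,
    LocalBulkWriter,
)

schema = CollectionSchema(fields=[
    FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
    FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=8),
])

# The context manager guarantees _exit() runs on the way out, joining any
# pending flush threads and removing the per-UUID directory if it is empty.
with LocalBulkWriter(schema=schema, local_path="/tmp/bulk",
                     file_type=BulkFileType.PARQUET) as writer:
    for i in range(1000):
        writer.append_row({"id": i, "vector": [0.0] * 8})
    writer.commit()            # synchronous final flush
    print(writer.batch_files)  # one file list per flush
```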
python
|
getsentry__sentry
|
src/sentry/integrations/slack/analytics.py
|
{
"start": 1184,
"end": 1414
}
|
class ____(analytics.Event):
organization_id: int
actor_id: int
invitation_type: str
invited_member_id: int
@analytics.eventclass("integrations.slack.reject_member_invitation")
|
SlackIntegrationApproveMemberInvitation
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/streams.py
|
{
"start": 56929,
"end": 60360
}
|
class ____(GithubStream):
"""
API docs: https://docs.github.com/en/rest/projects/columns?apiVersion=2022-11-28#list-project-columns
"""
use_cache = True
cursor_field = "updated_at"
def __init__(self, parent: HttpStream, start_date: str, **kwargs):
super().__init__(**kwargs)
self.parent = parent
self._start_date = start_date
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"projects/{stream_slice['project_id']}/columns"
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"repository": record["repository"], "project_id": record["id"]}
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
starting_point = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice)
for record in super().read_records(
sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
):
if not starting_point or record[self.cursor_field] > starting_point:
yield record
self.state = self._get_updated_state(self.state, record)
def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str:
if stream_state:
repository = stream_slice["repository"]
project_id = str(stream_slice["project_id"])
stream_state_value = stream_state.get(repository, {}).get(project_id, {}).get(self.cursor_field)
if stream_state_value:
if self._start_date:
return max(self._start_date, stream_state_value)
return stream_state_value
return self._start_date
def _get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
repository = latest_record["repository"]
project_id = str(latest_record["project_id"])
updated_state = latest_record[self.cursor_field]
stream_state_value = current_stream_state.get(repository, {}).get(project_id, {}).get(self.cursor_field)
if stream_state_value:
updated_state = max(updated_state, stream_state_value)
current_stream_state.setdefault(repository, {}).setdefault(project_id, {})[self.cursor_field] = updated_state
return current_stream_state
def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]:
record = super().transform(record=record, stream_slice=stream_slice)
record["project_id"] = stream_slice["project_id"]
return record
|
ProjectColumns
|
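A small sketch of the nested state layout that get_starting_point and _get_updated_state above read and write; the repository name, project id, and timestamp are illustrative:

```python
# State is keyed first by repository, then by project id (stringified),
# with the cursor field ("updated_at") at the leaf.
stream_state = {
    "octo-org/octo-repo": {
        "12345": {"updated_at": "2023-01-01T00:00:00Z"},
    },
}
value = stream_state.get("octo-org/octo-repo", {}).get("12345", {}).get("updated_at")
print(value)  # 2023-01-01T00:00:00Z
```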
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/cross_device_ops.py
|
{
"start": 39352,
"end": 40467
}
|
class ____(AllReduceCrossDeviceOps):
"""NCCL all-reduce implementation of CrossDeviceOps.
It uses Nvidia NCCL for all-reduce. For the batch API, tensors will be
repacked or aggregated for more efficient cross-device transportation.
For reduces that are not all-reduce, it falls back to
`tf.distribute.ReductionToOneDevice`.
Here is how you can use `NcclAllReduce` in `tf.distribute.MirroredStrategy`:
```
strategy = tf.distribute.MirroredStrategy(
cross_device_ops=tf.distribute.NcclAllReduce())
```
"""
def __init__(self, num_packs=1):
"""Initializes the object.
Args:
num_packs: a non-negative integer. The number of packs to split values
into. If zero, no packing will be done.
Raises:
ValueError: if `num_packs` is negative.
"""
if num_packs < 0:
raise ValueError(
"NCCL all-reduce requires num_packs >= 0, but {} is specified".format(
num_packs))
super(NcclAllReduce, self).__init__(
all_reduce_alg="nccl", num_packs=num_packs)
@tf_export("distribute.HierarchicalCopyAllReduce")
|
NcclAllReduce
|
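Extending the docstring's example with the num_packs argument validated in __init__ above (a sketch; the pack count is illustrative):

```python
import tensorflow as tf

# Split gradients into two packs before the NCCL all-reduce.
strategy = tf.distribute.MirroredStrategy(
    cross_device_ops=tf.distribute.NcclAllReduce(num_packs=2)
)
```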
python
|
numpy__numpy
|
numpy/_core/tests/test_abc.py
|
{
"start": 116,
"end": 2221
}
|
class ____:
def test_abstract(self):
assert_(issubclass(np.number, numbers.Number))
assert_(issubclass(np.inexact, numbers.Complex))
assert_(issubclass(np.complexfloating, numbers.Complex))
assert_(issubclass(np.floating, numbers.Real))
assert_(issubclass(np.integer, numbers.Integral))
assert_(issubclass(np.signedinteger, numbers.Integral))
assert_(issubclass(np.unsignedinteger, numbers.Integral))
def test_floats(self):
for t in sctypes['float']:
assert_(isinstance(t(), numbers.Real),
f"{t.__name__} is not instance of Real")
assert_(issubclass(t, numbers.Real),
f"{t.__name__} is not subclass of Real")
assert_(not isinstance(t(), numbers.Rational),
f"{t.__name__} is instance of Rational")
assert_(not issubclass(t, numbers.Rational),
f"{t.__name__} is subclass of Rational")
def test_complex(self):
for t in sctypes['complex']:
assert_(isinstance(t(), numbers.Complex),
f"{t.__name__} is not instance of Complex")
assert_(issubclass(t, numbers.Complex),
f"{t.__name__} is not subclass of Complex")
assert_(not isinstance(t(), numbers.Real),
f"{t.__name__} is instance of Real")
assert_(not issubclass(t, numbers.Real),
f"{t.__name__} is subclass of Real")
def test_int(self):
for t in sctypes['int']:
assert_(isinstance(t(), numbers.Integral),
f"{t.__name__} is not instance of Integral")
assert_(issubclass(t, numbers.Integral),
f"{t.__name__} is not subclass of Integral")
def test_uint(self):
for t in sctypes['uint']:
assert_(isinstance(t(), numbers.Integral),
f"{t.__name__} is not instance of Integral")
assert_(issubclass(t, numbers.Integral),
f"{t.__name__} is not subclass of Integral")
|
TestABC
|
python
|
scikit-learn__scikit-learn
|
sklearn/exceptions.py
|
{
"start": 1895,
"end": 2061
}
|
class ____(UserWarning):
"""Custom warning to capture convergence problems
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
|
ConvergenceWarning
|
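A minimal sketch of how an estimator emits this warning; the message text is illustrative:

```python
import warnings

from sklearn.exceptions import ConvergenceWarning

warnings.warn(
    "lbfgs failed to converge after 100 iterations.", ConvergenceWarning
)
```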
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 4771,
"end": 4869
}
|
class ____(HistoryManager):
def low_ids(self):
return self.filter(id__lte=3)
|
PollManager
|
python
|
django__django
|
tests/queries/models.py
|
{
"start": 13985,
"end": 14090
}
|
class ____(models.Model):
text = models.TextField()
page = models.ManyToManyField("Page")
|
Paragraph
|
python
|
sympy__sympy
|
sympy/physics/biomechanics/tests/test_curve.py
|
{
"start": 27149,
"end": 35041
}
|
class ____:
@pytest.fixture(autouse=True)
def _fiber_force_length_passive_arguments_fixture(self):
self.fl_M_pas = Symbol('fl_M_pas')
self.c0 = Symbol('c_0')
self.c1 = Symbol('c_1')
self.constants = (self.c0, self.c1)
@staticmethod
def test_class():
assert issubclass(FiberForceLengthPassiveInverseDeGroote2016, Function)
assert issubclass(FiberForceLengthPassiveInverseDeGroote2016, CharacteristicCurveFunction)
assert FiberForceLengthPassiveInverseDeGroote2016.__name__ == 'FiberForceLengthPassiveInverseDeGroote2016'
def test_instance(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants)
assert isinstance(fl_M_pas_inv, FiberForceLengthPassiveInverseDeGroote2016)
assert str(fl_M_pas_inv) == 'FiberForceLengthPassiveInverseDeGroote2016(fl_M_pas, c_0, c_1)'
def test_doit(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants).doit()
assert fl_M_pas_inv == self.c0*log(self.fl_M_pas*(exp(self.c1) - 1) + 1)/self.c1 + 1
def test_doit_evaluate_false(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants).doit(evaluate=False)
assert fl_M_pas_inv == self.c0*log(UnevaluatedExpr(self.fl_M_pas*(exp(self.c1) - 1)) + 1)/self.c1 + 1
def test_with_defaults(self):
constants = (
Float('0.6'),
Float('4.0'),
)
fl_M_pas_inv_manual = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *constants)
fl_M_pas_inv_constants = FiberForceLengthPassiveInverseDeGroote2016.with_defaults(self.fl_M_pas)
assert fl_M_pas_inv_manual == fl_M_pas_inv_constants
def test_differentiate_wrt_fl_T(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants)
expected = self.c0*(exp(self.c1) - 1)/(self.c1*(self.fl_M_pas*(exp(self.c1) - 1) + 1))
assert fl_M_pas_inv.diff(self.fl_M_pas) == expected
def test_differentiate_wrt_c0(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants)
expected = log(self.fl_M_pas*(exp(self.c1) - 1) + 1)/self.c1
assert fl_M_pas_inv.diff(self.c0) == expected
def test_differentiate_wrt_c1(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants)
expected = (
self.c0*self.fl_M_pas*exp(self.c1)/(self.c1*(self.fl_M_pas*(exp(self.c1) - 1) + 1))
- self.c0*log(self.fl_M_pas*(exp(self.c1) - 1) + 1)/self.c1**2
)
assert fl_M_pas_inv.diff(self.c1) == expected
def test_inverse(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants)
assert fl_M_pas_inv.inverse() is FiberForceLengthPassiveDeGroote2016
def test_function_print_latex(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants)
expected = r'\left( \operatorname{fl}^M_{pas} \right)^{-1} \left( fl_{M pas} \right)'
assert LatexPrinter().doprint(fl_M_pas_inv) == expected
def test_expression_print_latex(self):
fl_T = FiberForceLengthPassiveInverseDeGroote2016(self.fl_M_pas, *self.constants)
expected = r'\frac{c_{0} \log{\left(fl_{M pas} \left(e^{c_{1}} - 1\right) + 1 \right)}}{c_{1}} + 1'
assert LatexPrinter().doprint(fl_T.doit()) == expected
@pytest.mark.parametrize(
'code_printer, expected',
[
(
C89CodePrinter,
'(1 + 0.14999999999999999*log(1 + 53.598150033144236*fl_M_pas))',
),
(
C99CodePrinter,
'(1 + 0.14999999999999999*log(1 + 53.598150033144236*fl_M_pas))',
),
(
C11CodePrinter,
'(1 + 0.14999999999999999*log(1 + 53.598150033144236*fl_M_pas))',
),
(
CXX98CodePrinter,
'(1 + 0.14999999999999999*log(1 + 53.598150033144236*fl_M_pas))',
),
(
CXX11CodePrinter,
'(1 + 0.14999999999999999*std::log(1 + 53.598150033144236*fl_M_pas))',
),
(
CXX17CodePrinter,
'(1 + 0.14999999999999999*std::log(1 + 53.598150033144236*fl_M_pas))',
),
(
FCodePrinter,
' (1 + 0.15d0*log(1.0d0 + 53.5981500331442d0*fl_M_pas))',
),
(
OctaveCodePrinter,
'(1 + 0.15*log(1 + 53.5981500331442*fl_M_pas))',
),
(
PythonCodePrinter,
'(1 + 0.15*math.log(1 + 53.5981500331442*fl_M_pas))',
),
(
NumPyPrinter,
'(1 + 0.15*numpy.log(1 + 53.5981500331442*fl_M_pas))',
),
(
SciPyPrinter,
'(1 + 0.15*numpy.log(1 + 53.5981500331442*fl_M_pas))',
),
(
CuPyPrinter,
'(1 + 0.15*cupy.log(1 + 53.5981500331442*fl_M_pas))',
),
(
JaxPrinter,
'(1 + 0.15*jax.numpy.log(1 + 53.5981500331442*fl_M_pas))',
),
(
MpmathPrinter,
'(1 + mpmath.mpf((0, 5404319552844595, -55, 53))*mpmath.log(1 '
'+ mpmath.mpf((0, 942908627019595, -44, 50))*fl_M_pas))',
),
(
LambdaPrinter,
'(1 + 0.15*math.log(1 + 53.5981500331442*fl_M_pas))',
),
]
)
def test_print_code(self, code_printer, expected):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016.with_defaults(self.fl_M_pas)
assert code_printer().doprint(fl_M_pas_inv) == expected
def test_derivative_print_code(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016.with_defaults(self.fl_M_pas)
dfl_M_pas_inv_dfl_T = fl_M_pas_inv.diff(self.fl_M_pas)
expected = '32.1588900198865/(214.392600132577*fl_M_pas + 4.0)'
assert PythonCodePrinter().doprint(dfl_M_pas_inv_dfl_T) == expected
def test_lambdify(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016.with_defaults(self.fl_M_pas)
fl_M_pas_inv_callable = lambdify(self.fl_M_pas, fl_M_pas_inv)
assert fl_M_pas_inv_callable(0.0) == pytest.approx(1.0)
@pytest.mark.skipif(numpy is None, reason='NumPy not installed')
def test_lambdify_numpy(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016.with_defaults(self.fl_M_pas)
fl_M_pas_inv_callable = lambdify(self.fl_M_pas, fl_M_pas_inv, 'numpy')
fl_M_pas = numpy.array([-0.01, 0.0, 0.01, 0.02, 0.05, 0.1])
expected = numpy.array([
0.8848253714,
1.0,
1.0643754386,
1.1092744701,
1.1954331425,
1.2774998934,
])
numpy.testing.assert_allclose(fl_M_pas_inv_callable(fl_M_pas), expected)
@pytest.mark.skipif(jax is None, reason='JAX not installed')
def test_lambdify_jax(self):
fl_M_pas_inv = FiberForceLengthPassiveInverseDeGroote2016.with_defaults(self.fl_M_pas)
fl_M_pas_inv_callable = jax.jit(lambdify(self.fl_M_pas, fl_M_pas_inv, 'jax'))
fl_M_pas = jax.numpy.array([-0.01, 0.0, 0.01, 0.02, 0.05, 0.1])
expected = jax.numpy.array([
0.8848253714,
1.0,
1.0643754386,
1.1092744701,
1.1954331425,
1.2774998934,
])
numpy.testing.assert_allclose(fl_M_pas_inv_callable(fl_M_pas), expected)
|
TestFiberForceLengthPassiveInverseDeGroote2016
|
python
|
wandb__wandb
|
tests/fixtures/wandb_backend_spy/spy.py
|
{
"start": 14840,
"end": 15417
}
|
class ____:
def __init__(self) -> None:
# See docs on WandbBackendSnapshot methods.
self._was_ever_preempting = False
self._uploaded_files: set[str] = set()
self._file_stream_files: dict[str, dict[int, str]] = {}
self._config_json_string: str | None = None
self._tags: list[str] = []
self._remote: str | None = None
self._commit: str | None = None
self._sweep_name: str | None = None
self._completed = False
self._exit_code: int | None = None
@dataclasses.dataclass(frozen=True)
|
_RunData
|
python
|
pandas-dev__pandas
|
pandas/tests/frame/indexing/test_setitem.py
|
{
"start": 43974,
"end": 53181
}
|
class ____:
def test_setitem_always_copy(self, float_frame):
assert "E" not in float_frame.columns
s = float_frame["A"].copy()
float_frame["E"] = s
float_frame.iloc[5:10, float_frame.columns.get_loc("E")] = np.nan
assert notna(s[5:10]).all()
@pytest.mark.parametrize("consolidate", [True, False])
def test_setitem_partial_column_inplace(self, consolidate):
# This setting should be in-place, regardless of whether frame is
# single-block or multi-block
# GH#304 this used to be incorrectly not-inplace, in which case
# we needed to ensure _item_cache was cleared.
df = DataFrame(
{"x": [1.1, 2.1, 3.1, 4.1], "y": [5.1, 6.1, 7.1, 8.1]}, index=[0, 1, 2, 3]
)
df.insert(2, "z", np.nan)
if consolidate:
df._consolidate_inplace()
assert len(df._mgr.blocks) == 1
else:
assert len(df._mgr.blocks) == 2
df.loc[2:, "z"] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index, name="z")
tm.assert_series_equal(df["z"], expected)
def test_setitem_duplicate_columns_not_inplace(self):
# GH#39510
cols = ["A", "B"] * 2
df = DataFrame(0.0, index=[0], columns=cols)
df_copy = df.copy()
df_view = df[:]
df["B"] = (2, 5)
expected = DataFrame([[0.0, 2, 0.0, 5]], columns=cols)
tm.assert_frame_equal(df_view, df_copy)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"value", [1, np.array([[1], [1]], dtype="int64"), [[1], [1]]]
)
def test_setitem_same_dtype_not_inplace(self, value):
# GH#39510
cols = ["A", "B"]
df = DataFrame(0, index=[0, 1], columns=cols)
df_copy = df.copy()
df_view = df[:]
df[["B"]] = value
expected = DataFrame([[0, 1], [0, 1]], columns=cols)
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df_view, df_copy)
@pytest.mark.parametrize("value", [1.0, np.array([[1.0], [1.0]]), [[1.0], [1.0]]])
def test_setitem_listlike_key_scalar_value_not_inplace(self, value):
# GH#39510
cols = ["A", "B"]
df = DataFrame(0, index=[0, 1], columns=cols)
df_copy = df.copy()
df_view = df[:]
df[["B"]] = value
expected = DataFrame([[0, 1.0], [0, 1.0]], columns=cols)
tm.assert_frame_equal(df_view, df_copy)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"indexer",
[
"a",
["a"],
pytest.param(
[True, False],
marks=pytest.mark.xfail(
reason="Boolean indexer incorrectly setting inplace",
strict=False, # passing on some builds, no obvious pattern
),
),
],
)
@pytest.mark.parametrize(
"value, set_value",
[
(1, 5),
(1.0, 5.0),
(Timestamp("2020-12-31"), Timestamp("2021-12-31")),
("a", "b"),
],
)
def test_setitem_not_operating_inplace(self, value, set_value, indexer):
# GH#43406
df = DataFrame({"a": value}, index=[0, 1])
expected = df.copy()
view = df[:]
df[indexer] = set_value
tm.assert_frame_equal(view, expected)
def test_setitem_column_update_inplace(self):
# https://github.com/pandas-dev/pandas/issues/47172
labels = [f"c{i}" for i in range(10)]
df = DataFrame({col: np.zeros(len(labels)) for col in labels}, index=labels)
values = df._mgr.blocks[0].values
with tm.raises_chained_assignment_error():
for label in df.columns:
df[label][label] = 1
# original dataframe not updated
assert np.all(values[np.arange(10), np.arange(10)] == 0)
def test_setitem_column_frame_as_category(self):
# GH31581
df = DataFrame([1, 2, 3])
df["col1"] = DataFrame([1, 2, 3], dtype="category")
df["col2"] = Series([1, 2, 3], dtype="category")
expected_types = Series(
["int64", "category", "category"], index=[0, "col1", "col2"], dtype=object
)
tm.assert_series_equal(df.dtypes, expected_types)
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_setitem_iloc_with_numpy_array(self, dtype):
# GH-33828
df = DataFrame({"a": np.ones(3)}, dtype=dtype)
df.iloc[np.array([0]), np.array([0])] = np.array([[2]])
expected = DataFrame({"a": [2, 1, 1]}, dtype=dtype)
tm.assert_frame_equal(df, expected)
def test_setitem_frame_dup_cols_dtype(self):
# GH#53143
df = DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=["a", "b", "a", "c"])
rhs = DataFrame([[0, 1.5], [2, 2.5]], columns=["a", "a"])
df["a"] = rhs
expected = DataFrame(
[[0, 2, 1.5, 4], [2, 5, 2.5, 7]], columns=["a", "b", "a", "c"]
)
tm.assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
rhs = DataFrame([[0, 1.5], [2, 2.5]], columns=["a", "a"])
df["a"] = rhs
expected = DataFrame([[0, 1.5, 3], [2, 2.5, 6]], columns=["a", "a", "b"])
tm.assert_frame_equal(df, expected)
def test_frame_setitem_empty_dataframe(self):
# GH#28871
dti = DatetimeIndex(["2000-01-01"], dtype="M8[ns]", name="date")
df = DataFrame({"date": dti}).set_index("date")
df = df[0:0].copy()
df["3010"] = None
df["2010"] = None
expected = DataFrame(
[],
columns=["3010", "2010"],
index=dti[:0],
)
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_view_2dblock(self):
# https://github.com/pandas-dev/pandas/issues/60309
df_parent = DataFrame(
{
"A": [1, 4, 1, 5],
"B": [2, 5, 2, 6],
"C": [3, 6, 1, 7],
"D": [8, 9, 10, 11],
}
)
df_orig = df_parent.copy()
df = df_parent[["B", "C"]]
# Perform the iloc operation
df.iloc[[1, 3], :] = [[2, 2], [2, 2]]
# Check that original DataFrame is unchanged
tm.assert_frame_equal(df_parent, df_orig)
# Check that df is modified correctly
expected = DataFrame({"B": [2, 2, 2, 2], "C": [3, 2, 1, 2]}, index=df.index)
tm.assert_frame_equal(df, expected)
# with setting to subset of columns
df = df_parent[["B", "C", "D"]]
df.iloc[[1, 3], 0:3:2] = [[2, 2], [2, 2]]
tm.assert_frame_equal(df_parent, df_orig)
expected = DataFrame(
{"B": [2, 2, 2, 2], "C": [3, 6, 1, 7], "D": [8, 2, 10, 2]}, index=df.index
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"indexer, value",
[
(([0, 2], slice(None)), [[2, 2, 2, 2], [2, 2, 2, 2]]),
((slice(None), slice(None)), 2),
((0, [1, 3]), [2, 2]),
(([0], 1), [2]),
(([0], np.int64(1)), [2]),
((slice(None), np.int64(1)), [2, 2, 2]),
((slice(None, 2), np.int64(1)), [2, 2]),
(
(np.array([False, True, False]), np.array([False, True, False, True])),
[2, 2],
),
],
)
def test_setitem_2dblock_with_ref(self, indexer, value):
# https://github.com/pandas-dev/pandas/issues/60309
arr = np.arange(12).reshape(3, 4)
df_parent = DataFrame(arr.copy(), columns=list("ABCD"))
# the test is specifically for the case where the df is backed by a single
# block (taking the non-split path)
assert df_parent._mgr.is_single_block
df_orig = df_parent.copy()
df = df_parent[:]
df.iloc[indexer] = value
# Check that original DataFrame is unchanged
tm.assert_frame_equal(df_parent, df_orig)
# Check that df is modified correctly
arr[indexer] = value
expected = DataFrame(arr, columns=list("ABCD"))
tm.assert_frame_equal(df, expected)
def test_full_setter_loc_incompatible_dtype():
# https://github.com/pandas-dev/pandas/issues/55791
df = DataFrame({"a": [1, 2]})
with pytest.raises(TypeError, match="Invalid value"):
df.loc[:, "a"] = True
with pytest.raises(TypeError, match="Invalid value"):
df.loc[:, "a"] = {0: 3.5, 1: 4.5}
df.loc[:, "a"] = {0: 3, 1: 4}
expected = DataFrame({"a": [3, 4]})
tm.assert_frame_equal(df, expected)
def test_setitem_partial_row_multiple_columns():
# https://github.com/pandas-dev/pandas/issues/56503
df = DataFrame({"A": [1, 2, 3], "B": [4.0, 5, 6]})
# should not warn
df.loc[df.index <= 1, ["F", "G"]] = (1, "abc")
expected = DataFrame(
{
"A": [1, 2, 3],
"B": [4.0, 5, 6],
"F": [1.0, 1, float("nan")],
"G": ["abc", "abc", float("nan")],
}
)
tm.assert_frame_equal(df, expected)
|
TestDataFrameSetitemCopyViewSemantics
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
|
{
"start": 11601,
"end": 11955
}
|
class ____(RootModel[list[JsonValue]]):
"""
XCom schema with minimal structure for slice-based access.
"""
root: Annotated[
list[JsonValue],
Field(
description="XCom schema with minimal structure for slice-based access.",
title="XComSequenceSliceResponse",
),
]
|
XComSequenceSliceResponse
|
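A hedged sketch of validating a payload with the RootModel above (pydantic v2 style; the payload is illustrative):

```python
resp = XComSequenceSliceResponse.model_validate([1, "two", {"three": 3}])
print(resp.root[0])            # 1
print(resp.model_dump_json())  # [1,"two",{"three":3}]
```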
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/tasks.py
|
{
"start": 234696,
"end": 240587
}
|
class ____(Request):
"""
Delete tasks
:param ids: IDs of the tasks to delete
:type ids: Sequence[str]
:param move_to_trash: Move task to trash instead of deleting it. For internal
use only, tasks in the trash are not visible from the API and cannot be
restored!
:type move_to_trash: bool
:param force: If not true, call fails if the task status is 'in_progress'
:type force: bool
:param return_file_urls: If set to 'true' then return the urls of the files
that were uploaded by the tasks. Default value is 'false'
:type return_file_urls: bool
:param delete_output_models: If set to 'true' then delete output models of the
tasks that are not referenced by other tasks. Default value is 'true'
:type delete_output_models: bool
:param delete_external_artifacts: If set to 'true' then BE will try to delete
        the external artifacts associated with the tasks from the fileserver (if
configured to do so)
:type delete_external_artifacts: bool
"""
_service = "tasks"
_action = "delete_many"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"delete_external_artifacts": {
"default": True,
"description": (
"If set to 'true' then BE will try to delete the extenal artifacts associated with the tasks from"
" the fileserver (if configured to do so)"
),
"type": "boolean",
},
"delete_output_models": {
"description": (
"If set to 'true' then delete output models of the tasks that are not referenced by other tasks."
" Default value is 'true'"
),
"type": "boolean",
},
"force": {
"default": False,
"description": "If not true, call fails if the task status is 'in_progress'",
"type": "boolean",
},
"ids": {
"description": "IDs of the tasks to delete",
"items": {"type": "string"},
"type": "array",
},
"move_to_trash": {
"default": False,
"description": (
"Move task to trash instead of deleting it. For internal use only, tasks in the trash are not"
" visible from the API and cannot be restored!"
),
"type": "boolean",
},
"return_file_urls": {
"description": (
"If set to 'true' then return the urls of the files that were uploaded by the tasks. Default "
"value is 'false'"
),
"type": "boolean",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(
self,
ids,
move_to_trash=False,
force=False,
return_file_urls=None,
delete_output_models=None,
delete_external_artifacts=True,
**kwargs
):
super(DeleteManyRequest, self).__init__(**kwargs)
self.ids = ids
self.move_to_trash = move_to_trash
self.force = force
self.return_file_urls = return_file_urls
self.delete_output_models = delete_output_models
self.delete_external_artifacts = delete_external_artifacts
@schema_property("ids")
def ids(self):
return self._property_ids
@ids.setter
def ids(self, value):
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("move_to_trash")
def move_to_trash(self):
return self._property_move_to_trash
@move_to_trash.setter
def move_to_trash(self, value):
if value is None:
self._property_move_to_trash = None
return
self.assert_isinstance(value, "move_to_trash", (bool,))
self._property_move_to_trash = value
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("return_file_urls")
def return_file_urls(self):
return self._property_return_file_urls
@return_file_urls.setter
def return_file_urls(self, value):
if value is None:
self._property_return_file_urls = None
return
self.assert_isinstance(value, "return_file_urls", (bool,))
self._property_return_file_urls = value
@schema_property("delete_output_models")
def delete_output_models(self):
return self._property_delete_output_models
@delete_output_models.setter
def delete_output_models(self, value):
if value is None:
self._property_delete_output_models = None
return
self.assert_isinstance(value, "delete_output_models", (bool,))
self._property_delete_output_models = value
@schema_property("delete_external_artifacts")
def delete_external_artifacts(self):
return self._property_delete_external_artifacts
@delete_external_artifacts.setter
def delete_external_artifacts(self, value):
if value is None:
self._property_delete_external_artifacts = None
return
self.assert_isinstance(value, "delete_external_artifacts", (bool,))
self._property_delete_external_artifacts = value
|
DeleteManyRequest
|
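A hedged sketch of constructing the request above, assuming the class can be instantiated directly; it only exercises behavior visible in the code (constructor defaults and type-checking setters):

```python
req = DeleteManyRequest(ids=["task-id-1", "task-id-2"], force=True)
assert req.move_to_trash is False              # default from __init__
assert req.delete_external_artifacts is True   # default from __init__
# Each schema_property setter validates via assert_isinstance, so e.g.
# `req.force = "yes"` would fail rather than silently coerce.
```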
python
|
Textualize__textual
|
src/textual/demo/widgets.py
|
{
"start": 22484,
"end": 23782
}
|
class ____(PageScreen):
"""The Widgets screen"""
CSS = """
WidgetsScreen {
align-horizontal: center;
Markdown { background: transparent; }
& > VerticalScroll {
scrollbar-gutter: stable;
& > * {
&:even { background: $boost; }
padding-bottom: 1;
}
}
}
"""
BINDINGS = [Binding("escape", "blur", "Unfocus any focused widget", show=False)]
def compose(self) -> ComposeResult:
with lazy.Reveal(containers.VerticalScroll(can_focus=True)):
yield Markdown(WIDGETS_MD, classes="column")
yield Buttons()
yield Checkboxes()
yield Datatables()
yield Inputs()
yield ListViews()
yield Logs()
yield Markdowns()
yield Selects()
yield Sparklines()
yield Switches()
yield TabsDemo()
yield TextAreas()
yield Trees()
yield YourWidgets()
yield Footer()
if __name__ == "__main__":
from textual.app import App
class GameApp(App):
def get_default_screen(self) -> Screen:
return WidgetsScreen()
app = GameApp()
app.run()
|
WidgetsScreen
|
python
|
pypa__setuptools
|
pkg_resources/__init__.py
|
{
"start": 65056,
"end": 65991
}
|
class ____(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path) -> bool:
return os.path.exists(path)
def _isdir(self, path) -> bool:
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(
self, manager: object, resource_name: str
) -> io.BufferedReader:
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path) -> bytes:
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls) -> None:
loader_names = (
'SourceFileLoader',
'SourcelessFileLoader',
)
for name in loader_names:
loader_cls = getattr(importlib.machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
|
DefaultProvider
|
python
|
scipy__scipy
|
scipy/optimize/_hessian_update_strategy.py
|
{
"start": 16302,
"end": 18423
}
|
class ____(FullHessianUpdateStrategy):
"""Symmetric-rank-1 Hessian update strategy.
Parameters
----------
min_denominator : float
This number, scaled by a normalization factor,
defines the minimum denominator magnitude allowed
in the update. When the condition is violated we skip
the update. By default uses ``1e-8``.
init_scale : {float, np.array, 'auto'}, optional
This parameter can be used to initialize the Hessian or its
inverse. When a float is given, the relevant array is initialized
to ``np.eye(n) * init_scale``, where ``n`` is the problem dimension.
Alternatively, if a precisely ``(n, n)`` shaped, symmetric array is given,
this array will be used. Otherwise an error is generated.
Set it to 'auto' in order to use an automatic heuristic for choosing
the initial scale. The heuristic is described in [1]_, p.143.
The default is 'auto'.
Notes
-----
The update is based on the description in [1]_, p.144-146.
References
----------
.. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
Second Edition (2006).
"""
def __init__(self, min_denominator=1e-8, init_scale='auto'):
self.min_denominator = min_denominator
super().__init__(init_scale)
def _update_implementation(self, delta_x, delta_grad):
# Auxiliary variables w and z
if self.approx_type == 'hess':
w = delta_x
z = delta_grad
else:
w = delta_grad
z = delta_x
# Do some common operations
Mw = self @ w
z_minus_Mw = z - Mw
denominator = np.dot(w, z_minus_Mw)
# If the denominator is too small
# we just skip the update.
if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw):
return
# Update matrix
if self.approx_type == 'hess':
self.B = self._syr(1/denominator, z_minus_Mw, a=self.B)
else:
self.H = self._syr(1/denominator, z_minus_Mw, a=self.H)
|
SR1
|
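For reference, a worked restatement of what _update_implementation above computes, following the cited reference (Nocedal & Wright, p.144). In the Hessian case, with $s_k$ = delta_x, $y_k$ = delta_grad, and $M = B_k$:

$$
B_{k+1} = B_k + \frac{(y_k - B_k s_k)(y_k - B_k s_k)^{\top}}{(y_k - B_k s_k)^{\top} s_k}
$$

and the rank-1 term is skipped whenever

$$
\left| (y_k - B_k s_k)^{\top} s_k \right| \le r \,\lVert s_k \rVert \,\lVert y_k - B_k s_k \rVert
$$

where $r$ is min_denominator. The inverse-Hessian branch applies the same formula with the roles of $s_k$ and $y_k$ swapped.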
python
|
keras-team__keras
|
keras/src/optimizers/adamax_test.py
|
{
"start": 172,
"end": 3190
}
|
class ____(testing.TestCase):
def test_config(self):
optimizer = Adamax(
learning_rate=0.5,
beta_1=0.8,
beta_2=0.95,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adamax(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adamax(
learning_rate=0.2, beta_1=0.85, beta_2=0.95, epsilon=1e-6
)
x = backend.Variable(np.ones([10], dtype="float32"))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
[0.6827, 0.6873, 0.6888, 0.6896, 0.6901, 0.6904, 0.6906, 0.6908, 0.6909, 0.691],
[0.5333, 0.5407, 0.5431, 0.5444, 0.5451, 0.5456, 0.546, 0.5462, 0.5464, 0.5466],
[0.368, 0.3773, 0.3804, 0.382, 0.3829, 0.3835, 0.384, 0.3843, 0.3846, 0.3848],
[0.1933, 0.204, 0.2076, 0.2094, 0.2105, 0.2112, 0.2117, 0.2121, 0.2124, 0.2126]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adamax(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adamax(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
AdamaxTest
|
python
|
aio-libs__aiohttp
|
aiohttp/web_exceptions.py
|
{
"start": 4868,
"end": 4922
}
|
class ____(HTTPSuccessful):
status_code = 200
|
HTTPOk
|
python
|
django-compressor__django-compressor
|
compressor/tests/test_offline.py
|
{
"start": 3074,
"end": 9847
}
|
class ____:
CHARSET = "utf-8"
template_name = "test_compressor_offline.html"
# Change this for each test class
templates_dir = ""
expected_basename = "output"
expected_hash = ""
# Engines to test
engines = ("django", "jinja2")
additional_test_settings = None
def setUp(self):
# Reset template dirs, because it enables us to force compress to
# consider only a specific directory (helps us make true,
# independent unit tests).
# Specify both Jinja2 and Django template locations. When the wrong
# engine is used to parse a template, the TemplateSyntaxError will
# cause the template to be skipped over.
# We've hardcoded TEMPLATES[0] to be Django templates backend and
# TEMPLATES[1] to be Jinja2 templates backend in test_settings.
TEMPLATES = copy.deepcopy(settings.TEMPLATES)
django_template_dir = os.path.join(TEMPLATES[0]["DIRS"][0], self.templates_dir)
jinja2_template_dir = os.path.join(TEMPLATES[1]["DIRS"][0], self.templates_dir)
TEMPLATES[0]["DIRS"] = [django_template_dir]
TEMPLATES[1]["DIRS"] = [jinja2_template_dir]
override_settings = {
"TEMPLATES": TEMPLATES,
"COMPRESS_ENABLED": True,
"COMPRESS_OFFLINE": True,
}
if "jinja2" in self.engines:
override_settings[
"COMPRESS_JINJA2_GET_ENVIRONMENT"
] = lambda: self._get_jinja2_env()
if self.additional_test_settings is not None:
override_settings.update(self.additional_test_settings)
self.override_settings = self.settings(**override_settings)
self.override_settings.__enter__()
if "django" in self.engines:
self.template_path = os.path.join(django_template_dir, self.template_name)
origin = Origin(
name=self.template_path, # Absolute path
template_name=self.template_name,
) # Loader-relative path
with io.open(self.template_path, encoding=self.CHARSET) as file_:
self.template = Template(file_.read(), origin=origin)
if "jinja2" in self.engines:
self.template_path_jinja2 = os.path.join(
jinja2_template_dir, self.template_name
)
jinja2_env = override_settings["COMPRESS_JINJA2_GET_ENVIRONMENT"]()
with io.open(self.template_path_jinja2, encoding=self.CHARSET) as file_:
self.template_jinja2 = jinja2_env.from_string(file_.read())
def tearDown(self):
self.override_settings.__exit__(None, None, None)
manifest_filename = "manifest.json"
if default_offline_manifest_storage.exists(manifest_filename):
default_offline_manifest_storage.delete(manifest_filename)
def _prepare_contexts(self, engine):
contexts = settings.COMPRESS_OFFLINE_CONTEXT
if not isinstance(contexts, (list, tuple)):
contexts = [contexts]
if engine == "django":
return [Context(c) for c in contexts]
if engine == "jinja2":
return contexts
return None
def _render_template(self, engine):
contexts = self._prepare_contexts(engine)
if engine == "django":
return "".join(self.template.render(c) for c in contexts)
if engine == "jinja2":
return "\n".join(self.template_jinja2.render(c) for c in contexts) + "\n"
return None
def _render_script(self, hash):
        return '<script src="{}CACHE/js/{}.{}.js"></script>'.format(
settings.COMPRESS_URL_PLACEHOLDER, self.expected_basename, hash
)
def _render_link(self, hash):
return (
'<link rel="stylesheet" href="{}CACHE/css/{}.{}.css" '
'type="text/css">'.format(
settings.COMPRESS_URL_PLACEHOLDER, self.expected_basename, hash
)
)
def _render_result(self, result, separator="\n"):
return (separator.join(result) + "\n").replace(
settings.COMPRESS_URL_PLACEHOLDER, str(settings.COMPRESS_URL)
)
def _test_offline(self, engine, verbosity=0):
hashes = self.expected_hash
if not isinstance(hashes, (list, tuple)):
hashes = [hashes]
count, result = CompressCommand().handle_inner(
engines=[engine], verbosity=verbosity
)
self.assertEqual(len(hashes), count)
self.assertEqual([self._render_script(h) for h in hashes], result)
rendered_template = self._render_template(engine)
self.assertEqual(rendered_template, self._render_result(result))
def test_offline_django(self):
if "django" not in self.engines:
raise SkipTest("This test class does not support django engine.")
self._test_offline(engine="django")
def test_offline_jinja2(self):
if "jinja2" not in self.engines:
raise SkipTest("This test class does not support jinja2 engine.")
self._test_offline(engine="jinja2")
def test_offline_django_verbosity_1(self):
if "django" not in self.engines:
raise SkipTest("This test class does not support django engine.")
self._test_offline(engine="django", verbosity=1)
def test_offline_jinja2_verbosity_1(self):
if "jinja2" not in self.engines:
raise SkipTest("This test class does not support jinja2 engine.")
self._test_offline(engine="jinja2", verbosity=1)
def test_offline_django_verbosity_2(self):
if "django" not in self.engines:
raise SkipTest("This test class does not support django engine.")
self._test_offline(engine="django", verbosity=2)
def test_offline_jinja2_verbosity_2(self):
if "jinja2" not in self.engines:
raise SkipTest("This test class does not support jinja2 engine.")
self._test_offline(engine="jinja2", verbosity=2)
def _get_jinja2_env(self):
import jinja2.ext
from compressor.offline.jinja2 import url_for, SpacelessExtension
from compressor.contrib.jinja2ext import CompressorExtension
# Extensions needed for the test cases only.
extensions = [
CompressorExtension,
SpacelessExtension,
jinja2.ext.do,
]
loader = self._get_jinja2_loader()
env = jinja2.Environment(extensions=extensions, loader=loader)
env.globals["url_for"] = url_for
return env
def _get_jinja2_loader(self):
import jinja2
loader = jinja2.FileSystemLoader(
settings.TEMPLATES[1]["DIRS"], encoding=self.CHARSET
)
return loader
|
OfflineTestCaseMixin
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axes_grid1/axes_size.py
|
{
"start": 3597,
"end": 4365
}
|
class ____(_Base):
"""
Scaled size whose relative part corresponds to the data height
of the *axes* multiplied by the *aspect*.
"""
def __init__(self, axes, aspect=1., ref_ax=None):
self._axes = axes
self._aspect = aspect
if aspect == "axes" and ref_ax is None:
raise ValueError("ref_ax must be set when aspect='axes'")
self._ref_ax = ref_ax
def get_size(self, renderer):
l1, l2 = self._axes.get_ylim()
if self._aspect == "axes":
ref_aspect = _get_axes_aspect(self._ref_ax)
aspect = _get_axes_aspect(self._axes)
else:
aspect = self._aspect
rel_size = abs(l2-l1)*aspect
abs_size = 0.
return rel_size, abs_size
|
AxesY
|
python
|
ray-project__ray
|
python/ray/air/_internal/device_manager/nvidia_gpu.py
|
{
"start": 154,
"end": 2993
}
|
class ____(TorchDeviceManager):
"""CUDA device manager"""
    def is_available(self) -> bool:
return torch.cuda.is_available()
def get_devices(self) -> List[torch.device]:
"""Gets the correct torch device list configured for this process.
Returns a list of torch CUDA devices allocated for the current worker.
If no GPUs are assigned, then it returns a list with a single CPU device.
Assumes that `CUDA_VISIBLE_DEVICES` is set and is a
superset of the `ray.get_gpu_ids()`.
"""
# GPU IDs are assigned by Ray after you specify "use_gpu"
# GPU `ray.get_gpu_ids()` may return ints or may return strings.
# We should always convert to strings.
gpu_ids = [str(id) for id in ray.get_gpu_ids()]
device_ids = []
if len(gpu_ids) > 0:
cuda_visible_str = os.environ.get("CUDA_VISIBLE_DEVICES", "")
if cuda_visible_str and cuda_visible_str != "NoDevFiles":
cuda_visible_list = cuda_visible_str.split(",")
else:
cuda_visible_list = []
# By default, there should only be one GPU ID if `use_gpu=True`.
# If there are multiple GPUs, return a list of devices.
# If using fractional GPUs, these IDs are not guaranteed
# to be unique across different processes.
for gpu_id in gpu_ids:
try:
device_ids.append(cuda_visible_list.index(gpu_id))
                except ValueError:
                    # list.index raises ValueError when the id is missing
raise RuntimeError(
"CUDA_VISIBLE_DEVICES set incorrectly. "
f"Got {cuda_visible_str}, expected to include {gpu_id}. "
"Did you override the `CUDA_VISIBLE_DEVICES` environment"
" variable? If not, please help file an issue on Github."
)
else:
# If called on the driver or outside of Ray Train, return the
# 0th device.
device_ids.append(0)
return [torch.device(f"cuda:{device_id}") for device_id in device_ids]
def set_device(self, device: Union[torch.device, int, str, None]):
torch.cuda.set_device(device)
def supports_stream(self) -> bool:
"""Validate if the device type support create a stream"""
return True
def create_stream(self, device: torch.device) -> torch.cuda.Stream:
"""Create a stream on cuda device"""
return torch.cuda.Stream(device)
def get_stream_context(self, stream):
"""Get a stream context for cuda device"""
return torch.cuda.stream(stream)
def get_current_stream(self) -> torch.cuda.Stream:
"""Get current stream for cuda device"""
return torch.cuda.current_stream()
|
CUDATorchDeviceManager
|
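A minimal sketch of the index mapping performed in get_devices above; the environment values are illustrative:

```python
# Ray's GPU ids are positions within CUDA_VISIBLE_DEVICES, so the torch
# device index is the position of each id in that list.
cuda_visible_list = "3,5,7".split(",")  # e.g. CUDA_VISIBLE_DEVICES="3,5,7"
gpu_ids = ["5", "7"]                    # e.g. stringified ray.get_gpu_ids()
device_ids = [cuda_visible_list.index(gpu_id) for gpu_id in gpu_ids]
print(device_ids)  # [1, 2] -> torch.device("cuda:1"), torch.device("cuda:2")
```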
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_overridden_method.py
|
{
"start": 1276,
"end": 1861
}
|
class ____:
@property
@abc.abstractmethod
def prop(self):
return
# https://github.com/pylint-dev/pylint/issues/4368
# Decorator functions with a nested property decorator should still be
# inferred as a property.
def my_property(func):
@property
def _wrapper(self):
pass
return _wrapper
def not_a_property(func):
def _wrapper(self):
pass
return _wrapper
def multiple_returns(func):
def _wrapper(self):
pass
if foobar: # pylint: disable=undefined-variable
return False
return _wrapper
|
AbstractProperty
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 971382,
"end": 972102
}
|
class ____(sgqlc.types.relay.Connection):
"""The connection type for Sponsor."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("SponsorEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Sponsor"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
|
SponsorConnection
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1beta2_device_constraint.py
|
{
"start": 383,
"end": 8480
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'distinct_attribute': 'str',
'match_attribute': 'str',
'requests': 'list[str]'
}
attribute_map = {
'distinct_attribute': 'distinctAttribute',
'match_attribute': 'matchAttribute',
'requests': 'requests'
}
def __init__(self, distinct_attribute=None, match_attribute=None, requests=None, local_vars_configuration=None): # noqa: E501
"""V1beta2DeviceConstraint - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._distinct_attribute = None
self._match_attribute = None
self._requests = None
self.discriminator = None
if distinct_attribute is not None:
self.distinct_attribute = distinct_attribute
if match_attribute is not None:
self.match_attribute = match_attribute
if requests is not None:
self.requests = requests
@property
def distinct_attribute(self):
"""Gets the distinct_attribute of this V1beta2DeviceConstraint. # noqa: E501
DistinctAttribute requires that all devices in question have this attribute and that its type and value are unique across those devices. This acts as the inverse of MatchAttribute. This constraint is used to avoid allocating multiple requests to the same device by ensuring attribute-level differentiation. This is useful for scenarios where resource requests must be fulfilled by separate physical devices. For example, a container requests two network interfaces that must be allocated from two different physical NICs. # noqa: E501
:return: The distinct_attribute of this V1beta2DeviceConstraint. # noqa: E501
:rtype: str
"""
return self._distinct_attribute
@distinct_attribute.setter
def distinct_attribute(self, distinct_attribute):
"""Sets the distinct_attribute of this V1beta2DeviceConstraint.
DistinctAttribute requires that all devices in question have this attribute and that its type and value are unique across those devices. This acts as the inverse of MatchAttribute. This constraint is used to avoid allocating multiple requests to the same device by ensuring attribute-level differentiation. This is useful for scenarios where resource requests must be fulfilled by separate physical devices. For example, a container requests two network interfaces that must be allocated from two different physical NICs. # noqa: E501
:param distinct_attribute: The distinct_attribute of this V1beta2DeviceConstraint. # noqa: E501
:type: str
"""
self._distinct_attribute = distinct_attribute
@property
def match_attribute(self):
"""Gets the match_attribute of this V1beta2DeviceConstraint. # noqa: E501
MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices. For example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen. Must include the domain qualifier. # noqa: E501
:return: The match_attribute of this V1beta2DeviceConstraint. # noqa: E501
:rtype: str
"""
return self._match_attribute
@match_attribute.setter
def match_attribute(self, match_attribute):
"""Sets the match_attribute of this V1beta2DeviceConstraint.
MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices. For example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen. Must include the domain qualifier. # noqa: E501
:param match_attribute: The match_attribute of this V1beta2DeviceConstraint. # noqa: E501
:type: str
"""
self._match_attribute = match_attribute
@property
def requests(self):
"""Gets the requests of this V1beta2DeviceConstraint. # noqa: E501
Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the constraint applies to all subrequests. # noqa: E501
:return: The requests of this V1beta2DeviceConstraint. # noqa: E501
:rtype: list[str]
"""
return self._requests
@requests.setter
def requests(self, requests):
"""Sets the requests of this V1beta2DeviceConstraint.
Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the constraint applies to all subrequests. # noqa: E501
:param requests: The requests of this V1beta2DeviceConstraint. # noqa: E501
:type: list[str]
"""
self._requests = requests
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2DeviceConstraint):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2DeviceConstraint):
return True
return self.to_dict() != other.to_dict()
|
V1beta2DeviceConstraint
|
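A hedged sketch using the generated model above; the attribute values are illustrative:

```python
c = V1beta2DeviceConstraint(
    match_attribute="dra.example.com/numa",
    requests=["req-0"],
)
print(c.to_dict())
# {'distinct_attribute': None, 'match_attribute': 'dra.example.com/numa',
#  'requests': ['req-0']}
```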
python
|
PyCQA__pylint
|
tests/functional/m/method_cache_max_size_none.py
|
{
"start": 401,
"end": 1437
}
|
class ____:
@lru_cache()
def my_func(self, param):
return param + 1
@lru_cache(1)
def my_func(self, param):
return param + 1
@lru_cache(None) # [method-cache-max-size-none]
def my_func(self, param):
return param + 1
@functools.lru_cache(None) # [method-cache-max-size-none]
def my_func(self, param):
return param + 1
@aliased_functools.lru_cache(None) # [method-cache-max-size-none]
def my_func(self, param):
return param + 1
@aliased_cache(None) # [method-cache-max-size-none]
def my_func(self, param):
return param + 1
    # Apply the decorator twice to check the robustness of the checker itself
@aliased_cache(None) # [method-cache-max-size-none]
@aliased_cache(None) # [method-cache-max-size-none]
def my_func(self, param):
return param + 1
maxsizeKwarg = {"maxsize": None}
@lru_cache(**maxsizeKwarg) # [method-cache-max-size-none]
def my_func(self, param):
return param + 1
|
MyClassWithMethods
|
python
|
pytorch__pytorch
|
test/inductor/test_ordered_set.py
|
{
"start": 46331,
"end": 46584
}
|
class ____(TestSubsets, TestCase):
left = OrderedSet([1])
right = OrderedSet([2])
name = "neither empty, neither contains"
cases = "!="
# ==============================================================================
|
TestSubsetNonOverlap
|
python
|
tiangolo__fastapi
|
docs_src/body_nested_models/tutorial004_py39.py
|
{
"start": 157,
"end": 499
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: set[str] = set()
image: Union[Image, None] = None
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
results = {"item_id": item_id, "item": item}
return results
|
Item
|
python
|
getsentry__sentry
|
src/sentry/snuba/models.py
|
{
"start": 6946,
"end": 8677
}
|
class ____(DataSourceTypeHandler[QuerySubscription]):
@override
@staticmethod
def bulk_get_query_object(
data_sources: list[DataSource],
) -> dict[int, QuerySubscription | None]:
query_subscription_ids: list[int] = []
for ds in data_sources:
try:
subscription_id = int(ds.source_id)
query_subscription_ids.append(subscription_id)
except ValueError:
logger.exception(
"Invalid DataSource.source_id fetching subscriptions",
extra={"id": ds.id, "source_id": ds.source_id},
)
qs_lookup = {
str(qs.id): qs for qs in QuerySubscription.objects.filter(id__in=query_subscription_ids)
}
return {ds.id: qs_lookup.get(ds.source_id) for ds in data_sources}
@override
@staticmethod
def related_model(instance) -> list[ModelRelation]:
return [ModelRelation(QuerySubscription, {"id": instance.source_id})]
@override
@staticmethod
def get_instance_limit(org: Organization) -> int | None:
return get_max_metric_alert_subscriptions(org)
@override
@staticmethod
def get_current_instance_count(org: Organization) -> int:
return QuerySubscription.objects.filter(
project__organization_id=org.id,
status__in=(
QuerySubscription.Status.ACTIVE.value,
QuerySubscription.Status.CREATING.value,
QuerySubscription.Status.UPDATING.value,
),
).count()
@override
@staticmethod
def get_relocation_model_name() -> str:
return "sentry.querysubscription"
|
QuerySubscriptionDataSourceHandler
|
python
|
openai__openai-python
|
src/openai/types/responses/response_code_interpreter_tool_call.py
|
{
"start": 807,
"end": 1650
}
|
class ____(BaseModel):
id: str
"""The unique ID of the code interpreter tool call."""
code: Optional[str] = None
"""The code to run, or null if not available."""
container_id: str
"""The ID of the container used to run the code."""
outputs: Optional[List[Output]] = None
"""
The outputs generated by the code interpreter, such as logs or images. Can be
null if no outputs are available.
"""
status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]
"""The status of the code interpreter tool call.
Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and
`failed`.
"""
type: Literal["code_interpreter_call"]
"""The type of the code interpreter tool call. Always `code_interpreter_call`."""
|
ResponseCodeInterpreterToolCall
|