| language (stringclasses 1) | repo (stringclasses 346) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
| python | getsentry__sentry | tests/sentry/data_export/endpoints/test_data_export.py | {"start": 380, "end": 30176} |
class ____(APITestCase):
endpoint = "sentry-api-0-organization-data-export"
method = "post"
def setUp(self) -> None:
self.user = self.create_user("user1@example.com")
self.org = self.create_organization(name="Test")
self.team = self.create_team(organization=self.org, name="Data Export Team")
self.project = self.create_project(
organization=self.org, teams=[self.team], name="Data Export Proj"
)
self.create_member(user=self.user, organization=self.org, teams=[self.team])
self.login_as(user=self.user)
def make_payload(
self, payload_type: str, extras: dict[str, Any] | None = None, overwrite: bool = False
) -> dict[str, Any]:
payload: dict[str, Any] = {}
if payload_type == "issue":
payload = {
"query_type": ExportQueryType.ISSUES_BY_TAG_STR,
"query_info": {"env": "test", "project": [self.project.id]},
}
elif payload_type == "discover":
payload = {
"query_type": ExportQueryType.DISCOVER_STR,
"query_info": {"field": ["id"], "query": "", "project": [self.project.id]},
}
elif payload_type == "explore":
payload = {
"query_type": ExportQueryType.EXPLORE_STR,
"query_info": {
"field": ["span_id"],
"query": "",
"project": [self.project.id],
"dataset": "spans",
},
}
if extras is not None:
if overwrite:
payload["query_info"] = extras
else:
payload["query_info"].update(extras)
return payload
def test_authorization(self) -> None:
payload = self.make_payload("issue")
payload_explore = self.make_payload("explore")
# Without the discover-query feature, the endpoint should 404
with self.feature({"organizations:discover-query": False}):
self.get_error_response(self.org.slug, status_code=404, **payload)
with self.feature({"organizations:discover-query": False}):
self.get_success_response(self.org.slug, status_code=201, **payload_explore)
# With the right permissions, the endpoint should 201
with self.feature("organizations:discover-query"):
self.get_success_response(self.org.slug, status_code=201, **payload)
modified_payload = self.make_payload("issue", {"project": -5}, overwrite=True)
# Without project permissions, the endpoint should 403
with self.feature("organizations:discover-query"):
self.get_error_response(self.org.slug, status_code=403, **modified_payload)
def test_new_export(self) -> None:
"""
Ensures that a request to this endpoint returns a 201 status code
and an appropriate response object
"""
payload = self.make_payload("issue")
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
assert response.data == {
"id": data_export.id,
"user": {
"id": str(self.user.id),
"email": self.user.email,
"username": self.user.username,
},
"dateCreated": data_export.date_added,
"dateFinished": None,
"dateExpired": None,
"query": {
"type": payload["query_type"],
"info": payload["query_info"],
},
"status": ExportStatus.Early,
"checksum": None,
"fileName": None,
}
def test_progress_export(self) -> None:
"""
Checks to make sure that identical requests (same payload, organization, user)
are routed to the same ExportedData object, with a 200 status code
"""
payload = self.make_payload("issue")
with self.feature("organizations:discover-query"):
response1 = self.get_response(self.org.slug, **payload)
data_export = ExportedData.objects.get(id=response1.data["id"])
with self.feature("organizations:discover-query"):
response2 = self.get_success_response(self.org.slug, **payload)
assert response2.data == {
"id": data_export.id,
"user": {
"id": str(self.user.id),
"email": self.user.email,
"username": self.user.username,
},
"dateCreated": data_export.date_added,
"dateFinished": data_export.date_finished,
"dateExpired": data_export.date_expired,
"query": {
"type": ExportQueryType.as_str(data_export.query_type),
"info": data_export.query_info,
},
"status": data_export.status,
"checksum": None,
"fileName": None,
}
def test_fields_are_lists(self) -> None:
"""
Ensures that if a single field is passed, we convert it to a list before making
a snuba query.
"""
payload = self.make_payload("discover", {"field": "id"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
# because we passed a single string as the field, we should convert it into a list
# this happens when the user selects only a single field and it results in a string
# rather than a list of strings
assert data_export.query_info["field"] == ["id"]
def test_export_too_many_fields(self) -> None:
"""
Ensures that if too many fields are requested, returns a 400 status code with the
corresponding error message.
"""
payload = self.make_payload("discover", {"field": ["id"] * (MAX_FIELDS + 1)})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {
"non_field_errors": [
f"You can export up to {MAX_FIELDS} fields at a time. Please delete some and try again."
]
}
def test_export_no_fields(self) -> None:
"""
Ensures that if no fields are requested, returns a 400 status code with
the corresponding error message.
"""
payload = self.make_payload("discover", {"field": []})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["at least one field is required to export"]}
def test_discover_without_query(self) -> None:
"""
Ensures that we handle export requests without a query, and return a 400 status code
"""
payload = self.make_payload("discover", {"field": ["id"]}, overwrite=True)
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {
"non_field_errors": [
"query is a required to export, please pass an empty string if you don't want to set one"
]
}
def test_export_invalid_fields(self) -> None:
"""
Ensures that if a field is requested with the wrong parameters, the corresponding
error message is returned
"""
payload = self.make_payload("discover", {"field": ["min()"]})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {
"non_field_errors": ["min: expected 1 argument(s) but got 0 argument(s)"]
}
@freeze_time("2020-02-27 12:07:37")
def test_export_invalid_date_params(self) -> None:
"""
Ensures that if an invalid date parameter is specified, returns a 400 status code
with the corresponding error message.
"""
payload = self.make_payload("discover", {"statsPeriod": "shrug"})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["Invalid statsPeriod: 'shrug'"]}
payload = self.make_payload(
"discover",
{
"start": "2021-02-27T12:07:37",
"end": "shrug",
},
)
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["shrug is not a valid ISO8601 date query"]}
payload = self.make_payload(
"discover",
{
"start": "shrug",
"end": "2021-02-27T12:07:37",
},
)
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["shrug is not a valid ISO8601 date query"]}
@freeze_time("2020-05-19 14:00:00")
def test_converts_stats_period(self) -> None:
"""
Ensures that statsPeriod is converted to start/end.
"""
payload = self.make_payload("discover", {"statsPeriod": "24h"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert parse_datetime_string(query_info["start"]) == parse_datetime_string(
"2020-05-18T14:00:00"
)
assert parse_datetime_string(query_info["end"]) == parse_datetime_string(
"2020-05-19T14:00:00"
)
assert "statsPeriod" not in query_info
assert "statsPeriodStart" not in query_info
assert "statsPeriodSEnd" not in query_info
@freeze_time("2020-05-19 14:00:00")
def test_converts_stats_period_start_end(self) -> None:
"""
Ensures that statsPeriodStart and statsPeriodEnd are converted to start/end.
"""
payload = self.make_payload("discover", {"statsPeriodStart": "1w", "statsPeriodEnd": "5d"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert parse_datetime_string(query_info["start"]) == parse_datetime_string(
"2020-05-12T14:00:00"
)
assert parse_datetime_string(query_info["end"]) == parse_datetime_string(
"2020-05-14T14:00:00"
)
assert "statsPeriod" not in query_info
assert "statsPeriodStart" not in query_info
assert "statsPeriodSEnd" not in query_info
def test_preserves_start_end(self) -> None:
"""
Ensures that start/end is preserved
"""
payload = self.make_payload(
"discover", {"start": "2020-05-18T14:00:00", "end": "2020-05-19T14:00:00"}
)
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert parse_datetime_string(query_info["start"]) == parse_datetime_string(
"2020-05-18T14:00:00"
)
assert parse_datetime_string(query_info["end"]) == parse_datetime_string(
"2020-05-19T14:00:00"
)
assert "statsPeriod" not in query_info
assert "statsPeriodStart" not in query_info
assert "statsPeriodSEnd" not in query_info
def test_validates_query_info(self) -> None:
"""
Ensures that bad queries are rejected.
"""
payload = self.make_payload("discover", {"query": "foo:"})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["Empty string after 'foo:'"]}
@freeze_time("2020-05-19 14:00:00")
def test_export_resolves_empty_project(self) -> None:
"""
Ensures that a request to this endpoint returns a 201 if projects
is an empty list.
"""
payload = self.make_payload(
"discover",
{"project": [], "start": "2020-05-18T14:00:00", "end": "2020-05-19T14:00:00"},
)
with self.feature("organizations:discover-query"):
self.get_success_response(self.org.slug, status_code=201, **payload)
payload = self.make_payload(
"issue", {"project": None, "start": "2020-05-18T14:00:00", "end": "2020-05-19T14:00:00"}
)
with self.feature("organizations:discover-query"):
self.get_success_response(self.org.slug, status_code=201, **payload)
def test_equations(self) -> None:
"""
Ensures that equations are handled
"""
payload = self.make_payload("discover", {"field": ["equation|count() / 2", "count()"]})
with self.feature(["organizations:discover-query"]):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["field"] == ["count()"]
assert query_info["equations"] == ["count() / 2"]
def test_valid_dataset(self) -> None:
"""
Ensures that the issuePlatform dataset is accepted
"""
payload = self.make_payload(
"discover", {"field": ["title", "count()"], "dataset": "issuePlatform"}
)
with self.feature(["organizations:discover-query"]):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["field"] == ["title", "count()"]
assert query_info["dataset"] == "issuePlatform"
def test_valid_dataset_transactions(self) -> None:
"""
Tests that the transactions dataset is valid
"""
payload = self.make_payload(
"discover", {"field": ["title", "count()"], "dataset": "transactions"}
)
with self.feature(["organizations:discover-query"]):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["field"] == ["title", "count()"]
assert query_info["dataset"] == "transactions"
def test_valid_dataset_errors(self) -> None:
"""
Tests that the errors dataset is valid
"""
payload = self.make_payload(
"discover", {"field": ["title", "count()"], "dataset": "errors"}
)
with self.feature(["organizations:discover-query"]):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["field"] == ["title", "count()"]
assert query_info["dataset"] == "errors"
def test_invalid_dataset(self) -> None:
"""
Ensures that unknown datasets are rejected
"""
payload = self.make_payload(
"discover", {"field": ["title", "count()"], "dataset": "somefakedataset"}
)
with self.feature(["organizations:discover-query"]):
response = self.get_response(self.org.slug, **payload)
assert response.status_code == 400
def test_is_query(self) -> None:
"""
is queries should work with the errors dataset
"""
payload = self.make_payload(
"discover",
{
"field": ["title", "project", "user.display", "timestamp"],
"dataset": "errors",
"query": "is:unresolved",
"per_page": 50,
"sort": "-timestamp",
},
)
with self.feature(["organizations:discover-query"]):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["field"] == ["title", "project", "user.display", "timestamp"]
assert query_info["dataset"] == "errors"
assert query_info["query"] == "is:unresolved"
# Explore Query Type Tests
def test_explore_fields_are_lists(self) -> None:
"""
Ensures that if a single field is passed for explore, we convert it to a list before making
a query.
"""
payload = self.make_payload("explore", {"field": "span_id"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
# because we passed a single string as the field, we should convert it into a list
assert data_export.query_info["field"] == ["span_id"]
def test_explore_export_too_many_fields(self) -> None:
"""
Ensures that if too many fields are requested for explore, returns a 400 status code with the
corresponding error message.
"""
payload = self.make_payload("explore", {"field": ["span_id"] * (MAX_FIELDS + 1)})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {
"non_field_errors": [
f"You can export up to {MAX_FIELDS} fields at a time. Please delete some and try again."
]
}
def test_explore_export_no_fields(self) -> None:
"""
Ensures that if no fields are requested for explore, returns a 400 status code with
the corresponding error message.
"""
payload = self.make_payload("explore", {"field": []})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["at least one field is required to export"]}
def test_explore_without_query(self) -> None:
"""
Ensures that we handle explore export requests without a query, and return a 400 status code
"""
payload = self.make_payload("explore", {"field": ["span_id"]}, overwrite=True)
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {
"non_field_errors": [
"query is a required to export, please pass an empty string if you don't want to set one"
]
}
def test_explore_without_dataset(self) -> None:
"""
Ensures that explore queries require a dataset parameter
"""
payload = self.make_payload("explore", {"dataset": None})
del payload["query_info"]["dataset"]
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert "Please specify dataset" in response.data["non_field_errors"][0]
def test_explore_invalid_dataset(self) -> None:
"""
Ensures that explore queries with invalid datasets are rejected
"""
payload = self.make_payload("explore", {"dataset": "invalid_dataset"})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {
"non_field_errors": ["invalid_dataset is not supported for csv exports"]
}
def test_explore_valid_dataset_spans(self) -> None:
"""
Tests that the spans dataset is valid for explore queries
"""
payload = self.make_payload(
"explore", {"field": ["span_id", "timestamp"], "dataset": "spans"}
)
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["field"] == ["span_id", "timestamp"]
assert query_info["dataset"] == "spans"
def test_explore_valid_dataset_logs(self) -> None:
"""
Tests that the logs dataset is valid for explore queries
"""
payload = self.make_payload(
"explore", {"field": ["message", "timestamp"], "dataset": "logs"}
)
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["field"] == ["message", "timestamp"]
assert query_info["dataset"] == "logs"
@freeze_time("2020-02-27 12:07:37")
def test_explore_export_invalid_date_params(self) -> None:
"""
Ensures that if an invalid date parameter is specified for explore, returns a 400 status code
with the corresponding error message.
"""
payload = self.make_payload("explore", {"statsPeriod": "shrug"})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["Invalid statsPeriod: 'shrug'"]}
payload = self.make_payload(
"explore",
{
"start": "2021-02-27T12:07:37",
"end": "shrug",
},
)
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["shrug is not a valid ISO8601 date query"]}
payload = self.make_payload(
"explore",
{
"start": "shrug",
"end": "2021-02-27T12:07:37",
},
)
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert response.data == {"non_field_errors": ["shrug is not a valid ISO8601 date query"]}
@freeze_time("2020-05-19 14:00:00")
def test_explore_converts_stats_period(self) -> None:
"""
Ensures that statsPeriod is converted to start/end for explore queries.
"""
payload = self.make_payload("explore", {"statsPeriod": "24h"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert parse_datetime_string(query_info["start"]) == parse_datetime_string(
"2020-05-18T14:00:00"
)
assert parse_datetime_string(query_info["end"]) == parse_datetime_string(
"2020-05-19T14:00:00"
)
assert "statsPeriod" not in query_info
assert "statsPeriodStart" not in query_info
assert "statsPeriodSEnd" not in query_info
@freeze_time("2020-05-19 14:00:00")
def test_explore_converts_stats_period_start_end(self) -> None:
"""
Ensures that statsPeriodStart and statsPeriodEnd are converted to start/end for explore queries.
"""
payload = self.make_payload("explore", {"statsPeriodStart": "1w", "statsPeriodEnd": "5d"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert parse_datetime_string(query_info["start"]) == parse_datetime_string(
"2020-05-12T14:00:00"
)
assert parse_datetime_string(query_info["end"]) == parse_datetime_string(
"2020-05-14T14:00:00"
)
assert "statsPeriod" not in query_info
assert "statsPeriodStart" not in query_info
assert "statsPeriodSEnd" not in query_info
def test_explore_preserves_start_end(self) -> None:
"""
Ensures that start/end is preserved for explore queries
"""
payload = self.make_payload(
"explore", {"start": "2020-05-18T14:00:00", "end": "2020-05-19T14:00:00"}
)
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert parse_datetime_string(query_info["start"]) == parse_datetime_string(
"2020-05-18T14:00:00"
)
assert parse_datetime_string(query_info["end"]) == parse_datetime_string(
"2020-05-19T14:00:00"
)
assert "statsPeriod" not in query_info
assert "statsPeriodStart" not in query_info
assert "statsPeriodSEnd" not in query_info
def test_explore_validates_invalid_sampling_mode(self) -> None:
"""
Ensures that invalid sampling modes are rejected for explore.
"""
payload = self.make_payload("explore", {"sampling": "INVALID_MODE"})
with self.feature("organizations:discover-query"):
response = self.get_error_response(self.org.slug, status_code=400, **payload)
assert (
"sampling mode: INVALID_MODE is not supported" in response.data["non_field_errors"][0]
)
@freeze_time("2020-05-19 14:00:00")
def test_explore_resolves_empty_project(self) -> None:
"""
Ensures that a request to this endpoint returns a 201 for explore if projects
is an empty list.
"""
payload = self.make_payload(
"explore",
{"project": [], "start": "2020-05-18T14:00:00", "end": "2020-05-19T14:00:00"},
)
with self.feature("organizations:discover-query"):
self.get_success_response(self.org.slug, status_code=201, **payload)
def test_explore_equations(self) -> None:
"""
Ensures that equations are handled for explore queries
"""
payload = self.make_payload("explore", {"field": ["equation|count() / 2", "count()"]})
with self.feature(["organizations:discover-query"]):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["field"] == ["count()"]
assert query_info["equations"] == ["count() / 2"]
def test_explore_with_sampling(self) -> None:
"""
Tests that explore queries handle sampling modes correctly
"""
payload = self.make_payload("explore", {"sampling": "BEST_EFFORT"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["sampling"] == "BEST_EFFORT"
def test_explore_with_sort(self) -> None:
"""
Tests that explore queries handle sort parameters correctly
"""
payload = self.make_payload("explore", {"sort": ["-timestamp", "span_id"]})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["sort"] == ["-timestamp", "span_id"]
def test_explore_with_single_sort_string(self) -> None:
"""
Tests that explore queries handle single sort string parameters correctly
"""
payload = self.make_payload("explore", {"sort": "-timestamp"})
with self.feature("organizations:discover-query"):
response = self.get_success_response(self.org.slug, status_code=201, **payload)
data_export = ExportedData.objects.get(id=response.data["id"])
query_info = data_export.query_info
assert query_info["sort"] == ["-timestamp"]
| DataExportTest |
| python | gevent__gevent | src/gevent/tests/test__util.py | {"start": 10274, "end": 10705} |
class ____(greentest.TestCase):
def test_clear_stack_frames(self):
import inspect
import threading
completed = []
def do_it():
util.clear_stack_frames(inspect.currentframe())
completed.append(1)
t = threading.Thread(target=do_it)
t.start()
t.join(10)
self.assertEqual(completed, [1])
if __name__ == '__main__':
greentest.main()
| TestFuncs |
| python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {"start": 5707, "end": 6074} |
class ____(graphene.InputObjectType):
partitionSetName = graphene.NonNull(graphene.String)
repositorySelector = graphene.NonNull(GrapheneRepositorySelector)
class Meta:
description = """This type represents the fields necessary to identify a
pipeline or pipeline subset."""
name = "PartitionSetSelector"
| GraphenePartitionSetSelector |
| python | pandas-dev__pandas | pandas/io/json/_json.py | {"start": 7213, "end": 7711} |
class ____(Writer):
_default_orient = "index"
@property
def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
if not self.index and self.orient == "split":
return {"name": self.obj.name, "data": self.obj.values}
else:
return self.obj
def _format_axes(self) -> None:
if not self.obj.index.is_unique and self.orient == "index":
raise ValueError(f"Series index must be unique for orient='{self.orient}'")
| SeriesWriter |
| python | astropy__astropy | astropy/units/tests/test_quantity_array_methods.py | {"start": 14959, "end": 20340} |
class ____:
"""
Test array conversion methods
"""
def test_item(self):
q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
assert q1.item(1) == 2 * q1.unit
q1[1] = 1
assert q1[1] == 1000 * u.m / u.km
q1[1] = 100 * u.cm / u.km
assert q1[1] == 1 * u.m / u.km
with pytest.raises(TypeError):
q1[1] = 1.5 * u.m / u.km
@pytest.mark.skipif(not NUMPY_LT_2_0, reason="itemset method removed in numpy 2.0")
def test_itemset(self):
q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
assert q1.item(1) == 2 * q1.unit
q1.itemset(1, 1)
assert q1.item(1) == 1000 * u.m / u.km
q1.itemset(1, 100 * u.cm / u.km)
assert q1.item(1) == 1 * u.m / u.km
with pytest.raises(TypeError):
q1.itemset(1, 1.5 * u.m / u.km)
with pytest.raises(ValueError):
q1.itemset()
def test_take_put(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
assert q1.take(1) == 2 * u.m / u.km
assert all(q1.take((0, 2)) == np.array([1, 3]) * u.m / u.km)
q1.put((1, 2), (3, 4))
assert np.all(q1.take((1, 2)) == np.array([3000, 4000]) * q1.unit)
q1.put(0, 500 * u.cm / u.km)
assert q1.item(0) == 5 * u.m / u.km
def test_slice(self):
"""Test that setitem changes the unit if needed (or ignores it for
values where that is allowed; viz., #2695)"""
q2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) * u.km / u.m
q1 = q2.copy()
q2[0, 0] = 10000.0
assert q2.unit == q1.unit
assert q2[0, 0].value == 10.0
q2[0] = 9.0 * u.Mm / u.km
assert all(q2.flatten()[:3].value == np.array([9.0, 9.0, 9.0]))
q2[0, :-1] = 8000.0
assert all(q2.flatten()[:3].value == np.array([8.0, 8.0, 9.0]))
with pytest.raises(u.UnitsError):
q2[1, 1] = 10 * u.s
# just to be sure, repeat with a dimensionful unit
q3 = u.Quantity(np.arange(10.0), "m/s")
q3[5] = 100.0 * u.cm / u.s
assert q3[5].value == 1.0
# and check unit is ignored for 0, inf, nan, where that is reasonable
q3[5] = 0.0
assert q3[5] == 0.0
q3[5] = np.inf
assert np.isinf(q3[5])
q3[5] = np.nan
assert np.isnan(q3[5])
def test_fill(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q1.fill(2)
assert np.all(q1 == 2000 * u.m / u.km)
def test_repeat_compress_diagonal(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q2 = q1.repeat(2)
assert q2.unit == q1.unit
assert all(q2.value == q1.value.repeat(2))
q2.sort()
assert q2.unit == q1.unit
q2 = q1.compress(np.array([True, True, False, False]))
assert q2.unit == q1.unit
assert all(q2.value == q1.value.compress(np.array([True, True, False, False])))
q1 = np.array([[1, 2], [3, 4]]) * u.m / u.km
q2 = q1.diagonal()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.diagonal())
def test_view(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.view(np.ndarray)
assert not hasattr(q2, "unit")
q3 = q2.view(u.Quantity)
assert q3._unit is None
# MaskedArray copies and properties assigned in __dict__
q4 = np.ma.MaskedArray(q1)
assert q4._unit is q1._unit
q5 = q4.view(u.Quantity)
assert q5.unit is q1.unit
def test_slice_to_quantity(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2003
"""
a = np.random.uniform(size=(10, 8))
x, y, z = a[:, 1:4].T * u.km / u.s
total = np.sum(a[:, 1] * u.km / u.s - x)
assert isinstance(total, u.Quantity)
assert total == (0.0 * u.km / u.s)
def test_byte_type_view_field_changes(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.byteswap()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.byteswap())
q2 = q1.astype(np.float64)
assert all(q2 == q1)
assert q2.dtype == np.float64
q2a = q1.getfield(np.int32, offset=0)
q2b = q1.byteswap().getfield(np.int32, offset=4)
assert q2a.unit == q1.unit
assert all(q2b.byteswap() == q2a)
def test_sort(self):
q1 = np.array([1.0, 5.0, 2.0, 4.0]) * u.km / u.m
i = q1.argsort()
assert not hasattr(i, "unit")
q1.sort()
i = q1.searchsorted([1500, 2500])
assert not hasattr(i, "unit")
assert all(
i == q1.to(u.dimensionless_unscaled).value.searchsorted([1500, 2500])
)
def test_not_implemented(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
with pytest.raises(NotImplementedError):
q1.choose([0, 0, 1])
with pytest.raises(NotImplementedError):
q1.tolist()
with pytest.raises(NotImplementedError):
q1.tostring()
with pytest.raises(NotImplementedError):
q1.tobytes()
with pytest.raises(NotImplementedError):
q1.tofile(0)
with pytest.raises(NotImplementedError):
q1.dump("a.a")
with pytest.raises(NotImplementedError):
q1.dumps()
| TestArrayConversion |
| python | Textualize__textual | tests/test_markdown.py | {"start": 850, "end": 7049} |
class ____(App[None]):
def __init__(self, markdown: str) -> None:
super().__init__()
self._markdown = markdown
def compose(self) -> ComposeResult:
yield FussyMarkdown(self._markdown)
@pytest.mark.parametrize(
["document", "expected_nodes"],
[
# Basic markup.
("", []),
("# Hello", [MD.MarkdownH1]),
("## Hello", [MD.MarkdownH2]),
("### Hello", [MD.MarkdownH3]),
("#### Hello", [MD.MarkdownH4]),
("##### Hello", [MD.MarkdownH5]),
("###### Hello", [MD.MarkdownH6]),
("---", [MD.MarkdownHorizontalRule]),
("Hello", [MD.MarkdownParagraph]),
("Hello\nWorld", [MD.MarkdownParagraph]),
("> Hello", [MD.MarkdownBlockQuote, MD.MarkdownParagraph]),
("- One\n-Two", [MD.MarkdownBulletList, MD.MarkdownParagraph]),
(
"1. One\n2. Two",
[MD.MarkdownOrderedList, MD.MarkdownParagraph, MD.MarkdownParagraph],
),
(" 1", [MD.MarkdownFence]),
("```\n1\n```", [MD.MarkdownFence]),
("```python\n1\n```", [MD.MarkdownFence]),
("""| One | Two |\n| :- | :- |\n| 1 | 2 |""", [MD.MarkdownTable]),
# Test for https://github.com/Textualize/textual/issues/2676
(
"- One\n```\nTwo\n```\n- Three\n",
[
MD.MarkdownBulletList,
MD.MarkdownParagraph,
MD.MarkdownFence,
MD.MarkdownBulletList,
MD.MarkdownParagraph,
],
),
],
)
async def test_markdown_nodes(
document: str, expected_nodes: list[Widget | list[Widget]]
) -> None:
"""A Markdown document should parse into the expected Textual node list."""
def markdown_nodes(root: Widget) -> Iterator[MarkdownBlock]:
for node in root.children:
if isinstance(node, MarkdownBlock):
yield node
yield from markdown_nodes(node)
async with MarkdownApp(document).run_test() as pilot:
await pilot.pause()
assert [
node.__class__ for node in markdown_nodes(pilot.app.query_one(Markdown))
] == expected_nodes
async def test_softbreak_split_links_rendered_correctly() -> None:
"""Test for https://github.com/Textualize/textual/issues/2805"""
document = """\
My site [has
this
URL](https://example.com)\
"""
async with MarkdownApp(document).run_test() as pilot:
markdown = pilot.app.query_one(Markdown)
paragraph = markdown.children[0]
assert isinstance(paragraph, MD.MarkdownParagraph)
assert paragraph._content.plain == "My site has this URL"
print(paragraph._content.spans)
expected_spans = [
Span(8, 20, Style.from_meta({"@click": "link('https://example.com')"})),
]
print(expected_spans)
assert paragraph._content.spans == expected_spans
async def test_load_non_existing_file() -> None:
"""Loading a file that doesn't exist should result in the obvious error."""
async with MarkdownApp("").run_test() as pilot:
with pytest.raises(FileNotFoundError):
await pilot.app.query_one(Markdown).load(
Path("---this-does-not-exist---.it.is.not.a.md")
)
@pytest.mark.parametrize(
("anchor", "found"),
[
("hello-world", False),
("hello-there", True),
],
)
async def test_goto_anchor(anchor: str, found: bool) -> None:
"""Going to anchors should return a boolean: whether the anchor was found."""
document = "# Hello There\n\nGeneral.\n"
async with MarkdownApp(document).run_test() as pilot:
markdown = pilot.app.query_one(Markdown)
assert markdown.goto_anchor(anchor) is found
async def test_update_of_document_posts_table_of_content_update_message() -> None:
"""Updating the document should post a TableOfContentsUpdated message."""
messages: list[str] = []
class TableOfContentApp(App[None]):
def compose(self) -> ComposeResult:
yield Markdown("# One\n\n#Two\n")
@on(Markdown.TableOfContentsUpdated)
def log_table_of_content_update(
self, event: Markdown.TableOfContentsUpdated
) -> None:
nonlocal messages
messages.append(event.__class__.__name__)
async with TableOfContentApp().run_test() as pilot:
assert messages == ["TableOfContentsUpdated"]
await pilot.app.query_one(Markdown).update("")
await pilot.pause()
assert messages == ["TableOfContentsUpdated", "TableOfContentsUpdated"]
async def test_link_in_markdown_table_posts_message_when_clicked():
"""A link inside a markdown table should post a `Markdown.LinkClicked`
message when clicked.
Regression test for https://github.com/Textualize/textual/issues/4683
"""
markdown_table = """\
| Textual Links |
| ------------------------------------------------ |
| [GitHub](https://github.com/textualize/textual/) |
| [Documentation](https://textual.textualize.io/) |\
"""
class MarkdownTableApp(App):
messages = []
def compose(self) -> ComposeResult:
yield Markdown(markdown_table, open_links=False)
@on(Markdown.LinkClicked)
def log_markdown_link_clicked(
self,
event: Markdown.LinkClicked,
) -> None:
self.messages.append(event.__class__.__name__)
app = MarkdownTableApp()
async with app.run_test() as pilot:
await pilot.click(Markdown, offset=(8, 3))
print(app.messages)
assert app.messages == ["LinkClicked"]
async def test_markdown_quoting():
# https://github.com/Textualize/textual/issues/3350
links = []
class MyApp(App):
def compose(self) -> ComposeResult:
self.md = Markdown(markdown="[tété](tété)", open_links=False)
yield self.md
def on_markdown_link_clicked(self, message: Markdown.LinkClicked):
links.append(message.href)
app = MyApp()
async with app.run_test() as pilot:
await pilot.click(Markdown, offset=(3, 0))
assert links == ["tété"]
| MarkdownApp |
| python | squidfunk__mkdocs-material | material/plugins/blog/structure/__init__.py | {"start": 11564, "end": 13811} |
class ____(Link):
# Initialize reference - this is essentially a crossover of pages and links,
# as it inherits the metadata of the page and allows for anchors
def __init__(self, title: str, url: str):
super().__init__(title, url)
self.meta = {}
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
# Patch configuration
def _patch(config: MkDocsConfig):
config = copy(config)
# Copy the parts of the configuration that need to be patched
config.validation = copy(config.validation)
config.validation.links = copy(config.validation.links)
config.markdown_extensions = copy(config.markdown_extensions)
config.mdx_configs = copy(config.mdx_configs)
# Make sure that the author did not add another instance of the table of
# contents extension to the configuration, as this leads to weird behavior
if "markdown.extensions.toc" in config.markdown_extensions:
config.markdown_extensions.remove("markdown.extensions.toc")
# In order to render excerpts for posts, we need to make sure that the
# table of contents extension is appropriately configured
config.mdx_configs["toc"] = {
**config.mdx_configs.get("toc", {}),
**{
"anchorlink": True, # Render headline as clickable
"baselevel": 2, # Render h1 as h2 and so forth
"permalink": False, # Remove permalinks
"toc_depth": 2 # Remove everything below h2
}
}
# Additionally, we disable link validation when rendering excerpts, because
# invalid links have already been reported when rendering the page
links = config.validation.links
links.not_found = logging.DEBUG
links.absolute_links = logging.DEBUG
links.unrecognized_links = logging.DEBUG
# Return patched configuration
return config
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs.material.blog")
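# Illustrative sketch, not part of the original module: the toc settings above are merged
# over any user-provided ones, so later keys win while unrelated user keys survive.
# The _demo_* names below are placeholders for this sketch only.
_demo_user_toc = {"toc_depth": 3, "separator": "_"}
_demo_merged = {
    **_demo_user_toc,
    **{"anchorlink": True, "baselevel": 2, "permalink": False, "toc_depth": 2},
}
assert _demo_merged["toc_depth"] == 2       # overridden by the excerpt settings
assert _demo_merged["separator"] == "_"     # user setting is preserved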
| Reference |
| python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {"start": 100278, "end": 100911} |
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"repository_vulnerability_alert_id",
"dismiss_reason",
"client_mutation_id",
)
repository_vulnerability_alert_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryVulnerabilityAlertId"
)
dismiss_reason = sgqlc.types.Field(
sgqlc.types.non_null(DismissReason), graphql_name="dismissReason"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| DismissRepositoryVulnerabilityAlertInput |
| python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/grid_finder.py | {"start": 11669, "end": 11976} |
class ____:
def __init__(self, useMathText=True):
self._fmt = mticker.ScalarFormatter(
useMathText=useMathText, useOffset=False)
self._fmt.create_dummy_axis()
def __call__(self, direction, factor, values):
return self._fmt.format_ticks(values)
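# Illustrative sketch, not part of the original module: the class above is a thin wrapper
# around matplotlib's ScalarFormatter, so the equivalent direct calls look like this.
# The _demo_* names are placeholders; the import mirrors the module's existing mticker alias.
import matplotlib.ticker as mticker

_demo_fmt = mticker.ScalarFormatter(useMathText=True, useOffset=False)
_demo_fmt.create_dummy_axis()  # a Formatter normally needs an Axis; a dummy one suffices
_demo_labels = _demo_fmt.format_ticks([0.0, 0.5, 1.0])  # -> list of tick label strings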
| FormatterPrettyPrint |
| python | celery__celery | t/unit/backends/test_mongodb.py | {"start": 27799, "end": 30379} |
class ____:
@pytest.fixture(scope="function", autouse=True)
def fake_mongo_collection_patch(self, monkeypatch):
"""A fake collection with serialization experience close to MongoDB."""
bson = pytest.importorskip("bson")
class FakeMongoCollection:
def __init__(self):
self.data = {}
def replace_one(self, task_id, meta, upsert=True):
self.data[task_id['_id']] = bson.encode(meta)
def find_one(self, task_id):
return bson.decode(self.data[task_id['_id']])
monkeypatch.setattr(MongoBackend, "collection", FakeMongoCollection())
@pytest.mark.parametrize("serializer,result_type,result", [
(s, type(i['result']), i['result']) for i in SUCCESS_RESULT_TEST_DATA
for s in i['serializers']]
)
def test_encode_success_results(self, mongo_backend_factory, serializer,
result_type, result):
backend = mongo_backend_factory(serializer=serializer)
backend.store_result(TASK_ID, result, 'SUCCESS')
recovered = backend.get_result(TASK_ID)
assert isinstance(recovered, result_type)
assert recovered == result
@pytest.mark.parametrize("serializer",
["bson", "pickle", "yaml", "json", "msgpack"])
def test_encode_chain_results(self, mongo_backend_factory, serializer):
backend = mongo_backend_factory(serializer=serializer)
mock_request = MagicMock(spec=['children'])
children = [self.app.AsyncResult(uuid()) for i in range(10)]
mock_request.children = children
backend.store_result(TASK_ID, 0, 'SUCCESS', request=mock_request)
recovered = backend.get_children(TASK_ID)
def tuple_to_list(t): return [list(t[0]), t[1]]
assert recovered == [tuple_to_list(c.as_tuple()) for c in children]
@pytest.mark.parametrize("serializer",
["bson", "pickle", "yaml", "json", "msgpack"])
def test_encode_exception_error_results(self, mongo_backend_factory,
serializer):
backend = mongo_backend_factory(serializer=serializer)
exception = Exception("Basic Exception")
traceback = 'Traceback:\n Exception: Basic Exception\n'
backend.store_result(TASK_ID, exception, 'FAILURE', traceback)
recovered = backend.get_result(TASK_ID)
assert isinstance(recovered, type(exception))
assert recovered.args == exception.args
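# Illustrative sketch, not part of the original tests: the fake collection above relies on
# bson.encode()/bson.decode() round-tripping documents the way MongoDB stores them.
# Assumes pymongo's bson package is installed; _example_doc is a placeholder name.
import bson

_example_doc = {"_id": "task-id", "status": "SUCCESS", "result": 42}
assert bson.decode(bson.encode(_example_doc)) == _example_doc  # lossless round trip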
| test_MongoBackend_store_get_result |
| python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {"start": 140401, "end": 141031} |
class ____(sgqlc.types.Input):
"""Autogenerated input type of AddReaction"""
__schema__ = github_schema
__field_names__ = ("subject_id", "content", "client_mutation_id")
subject_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="subjectId")
"""The Node ID of the subject to modify."""
content = sgqlc.types.Field(sgqlc.types.non_null(ReactionContent), graphql_name="content")
"""The name of the emoji to react with."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| AddReactionInput |
| python | fastapi__sqlmodel | tests/test_enums_models.py | {"start": 121, "end": 178} |
class ____(str, enum.Enum):
C = "C"
D = "D"
| MyEnum2 |
| python | nmslib__hnswlib | tests/python/bindings_test_resize.py | {"start": 54, "end": 2922} |
class ____(unittest.TestCase):
def testRandomSelf(self):
for idx in range(16):
print("\n**** Index resize test ****\n")
np.random.seed(idx)
dim = 16
num_elements = 10000
# Generating sample data
data = np.float32(np.random.random((num_elements, dim)))
# Declaring index
p = hnswlib.Index(space='l2', dim=dim) # possible options are l2, cosine or ip
# Initiating index
# max_elements - the maximum number of elements, should be known beforehand
# (probably will be made optional in the future)
#
# ef_construction - controls index search speed/build speed tradeoff
# M - is tightly connected with internal dimensionality of the data
# strongly affects the memory consumption
p.init_index(max_elements=num_elements//2, ef_construction=100, M=16)
# Controlling the recall by setting ef:
# higher ef leads to better accuracy, but slower search
p.set_ef(20)
p.set_num_threads(idx % 8) # by default using all available cores
# We split the data in two batches:
data1 = data[:num_elements // 2]
data2 = data[num_elements // 2:]
print("Adding first batch of %d elements" % (len(data1)))
p.add_items(data1)
# Query the elements for themselves and measure recall:
labels, distances = p.knn_query(data1, k=1)
items = p.get_items(list(range(len(data1))))
# Check the recall:
self.assertAlmostEqual(np.mean(labels.reshape(-1) == np.arange(len(data1))), 1.0, 3)
# Check that the returned element data is correct:
diff_with_gt_labels = np.max(np.abs(data1-items))
self.assertAlmostEqual(diff_with_gt_labels, 0, delta=1e-4)
print("Resizing the index")
p.resize_index(num_elements)
print("Adding the second batch of %d elements" % (len(data2)))
p.add_items(data2)
# Query the elements for themselves and measure recall:
labels, distances = p.knn_query(data, k=1)
items=p.get_items(list(range(num_elements)))
# Check the recall:
self.assertAlmostEqual(np.mean(labels.reshape(-1) == np.arange(len(data))), 1.0, 3)
# Check that the returned element data is correct:
diff_with_gt_labels = np.max(np.abs(data-items))
self.assertAlmostEqual(diff_with_gt_labels, 0, delta=1e-4)
# Checking that all labels are returned correctly:
sorted_labels = sorted(p.get_ids_list())
self.assertEqual(np.sum(np.asarray(sorted_labels) != np.asarray(range(num_elements))), 0)
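# Illustrative sketch, not part of the original test: the same build/resize/query workflow
# outside unittest, using only calls exercised above. Sizes are kept tiny and the _demo_*
# names are placeholders for this sketch.
_demo_dim = 16
_demo_data = np.float32(np.random.random((200, _demo_dim)))
_demo_index = hnswlib.Index(space='l2', dim=_demo_dim)
_demo_index.init_index(max_elements=100, ef_construction=100, M=16)  # start with half the capacity
_demo_index.set_ef(20)                       # higher ef -> better recall, slower queries
_demo_index.add_items(_demo_data[:100])
_demo_index.resize_index(200)                # grow the index in place
_demo_index.add_items(_demo_data[100:])
_demo_labels, _demo_distances = _demo_index.knn_query(_demo_data, k=1)  # self-query: labels ~ 0..199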
| RandomSelfTestCase |
| python | jamielennox__requests-mock | requests_mock/exceptions.py | {"start": 914, "end": 1013} |
class ____(MockException):
"""This call cannot be made under a mocked environment"""
| InvalidRequest |
| python | astropy__astropy | astropy/io/votable/exceptions.py | {"start": 1399, "end": 6345} |
class ____(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable.exceptions`.
"""
max_warnings = _config.ConfigItem(
10,
"Number of times the same type of warning is displayed before being suppressed",
cfgtype="integer",
)
conf = Conf()
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ("?", "?")
filename = config.get("filename", "?")
return f"{filename}:{pos[0]}:{pos[1]}: {name}: {message}"
def _suppressed_warning(warning, config, stacklevel=2):
warning_class = type(warning)
config.setdefault("_warning_counts", {}).setdefault(warning_class, 0)
config["_warning_counts"][warning_class] += 1
message_count = config["_warning_counts"][warning_class]
if message_count <= conf.max_warnings:
if message_count == conf.max_warnings:
warning.formatted_message += (
" (suppressing further warnings of this type...)"
)
warn(warning, stacklevel=stacklevel + 1)
def warn_or_raise(
warning_class, exception_class=None, args=(), config=None, pos=None, stacklevel=1
):
"""
Warn or raise an exception, depending on the verify setting.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get("verify", "warn")
if config_value == "exception":
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == "warn":
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel + 1)
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=""):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += " " + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
if config.get("verify", "warn") != "ignore":
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel + 1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel + 1)
_warning_pat = re.compile(
r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): "
r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$"
)
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
"""
result = {}
match = _warning_pat.search(line)
if match:
result["warning"] = warning = match.group("warning")
if warning is not None:
result["is_warning"] = warning[0].upper() == "W"
result["is_exception"] = not result["is_warning"]
result["number"] = int(match.group("warning")[1:])
result["doc_url"] = f"io/votable/api_exceptions.html#{warning.lower()}"
else:
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = True
result["number"] = None
result["doc_url"] = None
try:
result["nline"] = int(match.group("nline"))
except ValueError:
result["nline"] = 0
try:
result["nchar"] = int(match.group("nchar"))
except ValueError:
result["nchar"] = 0
result["message"] = match.group("rest")
result["is_something"] = True
else:
result["warning"] = None
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = False
result["is_something"] = False
if not isinstance(line, str):
line = line.decode("utf-8")
result["message"] = line
return result
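# Illustrative sketch, not part of the original module: parse_vowarning() splits a line
# formatted by _format_message() back into its parts. The example string and the
# _demo_result name are placeholders for this sketch.
_demo_result = parse_vowarning("votable.xml:12:4: W48: Unknown attribute 'x' on TABLE")
assert _demo_result["is_warning"] and _demo_result["number"] == 48
assert _demo_result["nline"] == 12 and _demo_result["nchar"] == 4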
| Conf |
| python | huggingface__transformers | src/transformers/models/fuyu/modeling_fuyu.py | {"start": 1661, "end": 11021} |
class ____(FuyuPreTrainedModel):
_checkpoint_conversion_mapping = {"language_model.model": "language_model"}
def __init__(self, config: FuyuConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.text_config.vocab_size
self.language_model = AutoModel.from_config(config.text_config)
self.vision_embed_tokens = nn.Linear(
config.patch_size * config.patch_size * config.num_channels, config.hidden_size
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def gather_continuous_embeddings(
self,
word_embeddings: torch.Tensor,
continuous_embeddings: list[torch.Tensor],
image_patch_input_indices: torch.Tensor,
) -> torch.Tensor:
"""This function places the continuous_embeddings into the word_embeddings at the locations
indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous
embeddings.
Args:
word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Tensor of word embeddings.
continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape
[num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative
indices in image_patch_input_indices for that batch element.
image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Tensor of indices of the image patches in the input_ids tensor.
"""
if not (word_embeddings.shape[0] == len(continuous_embeddings)):
raise ValueError(
f"Batch sizes must match! Got {len(continuous_embeddings)=} and {word_embeddings.shape[0]=}"
)
output_embeddings = word_embeddings.clone()
for batch_idx in range(word_embeddings.shape[0]):
# First, find the positions of all the non-negative values in image_patch_input_indices, those are the
# positions in word_embeddings that we want to replace with content from continuous_embeddings.
dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0]
# Next look up those indices in image_patch_input_indices to find the indices in continuous_embeddings that we
# want to use to replace the values in word_embeddings.
src_indices = image_patch_input_indices[batch_idx][dst_indices]
# Check if we have more indices than embeddings. Note that we could have fewer indices if images got truncated.
if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]:
raise ValueError(
f"Number of continuous embeddings {continuous_embeddings[batch_idx].shape=} does not match "
f"number of continuous token ids {src_indices.shape=} in batch element {batch_idx}."
)
output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices].to(
output_embeddings.device
)
return output_embeddings
def get_image_features(self, pixel_values: torch.FloatTensor, **kwargs):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
"""
patch_embeddings = [
self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)).squeeze(0)
for patch in pixel_values
]
return patch_embeddings
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0] * image_features.shape[1]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
# [batch_size, num_total_patches, patch_size_ x patch_size x num_channels ]
image_patches: Optional[torch.Tensor] = None,
image_patches_indices: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size_ x patch_size x num_channels)`, *optional*):
Image patches to be used as continuous embeddings. The patches are flattened and then projected to the
hidden size of the model.
image_patches_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Tensor of indices of the image patches in the input_ids tensor.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError("You have to specify either input_is or inputs_embeds")
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
position_ids = torch.arange(
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0)
if inputs_embeds is None:
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
if image_patches is not None:
patch_embeddings = self.get_image_features(image_patches)
patch_embeddings = torch.cat(patch_embeddings, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=patch_embeddings
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, patch_embeddings)
outputs = self.language_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
return_dict=return_dict,
**kwargs,
)
return outputs
@auto_docstring(
custom_intro="""
Fuyu Model with a language modeling head on top for causal language model conditioned on image patches and text.
"""
)
| FuyuModel |
| python | great-expectations__great_expectations | tests/integration/test_utils/data_source_config/redshift.py | {"start": 1323, "end": 2127} |
class ____(DataSourceTestConfig):
@property
@override
def label(self) -> str:
return "redshift"
@property
@override
def pytest_mark(self) -> pytest.MarkDecorator:
return pytest.mark.redshift
@override
def create_batch_setup(
self,
request: pytest.FixtureRequest,
data: pd.DataFrame,
extra_data: Mapping[str, pd.DataFrame],
context: AbstractDataContext,
engine_manager: Optional[SessionSQLEngineManager] = None,
) -> BatchTestSetup:
return RedshiftBatchTestSetup(
data=data,
config=self,
extra_data=extra_data,
table_name=self.table_name,
context=context,
engine_manager=engine_manager,
)
|
RedshiftDatasourceTestConfig
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/image_ops_test.py
|
{
"start": 70875,
"end": 77673
}
|
class ____(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.cached_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testCropping2(self):
# Test case for 10315
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y_tf = self.evaluate(image_ops.central_crop(x_np, 0.33))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
      # Test cropping at fraction=0.5, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
      # Test cropping at fraction=0.5, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [[], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
def testCentralFractionTensor(self):
# Test case for GitHub issue 45324.
x_shape = [240, 320, 3]
y_shape = [80, 106, 3]
@def_function.function(autograph=False)
def f(x, central_fraction):
return image_ops.central_crop(x, central_fraction)
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
y_tf = self.evaluate(f(x_np, constant_op.constant(0.33)))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
|
CentralCropTest
|
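The expected shapes in the tests above (for example, [240, 320, 3] cropped at fraction 0.33 becoming [80, 106, 3], or a height of 50 becoming 26 at fraction 0.5) come from keeping only the central fraction of each spatial axis. A rough NumPy sketch of that arithmetic, assuming the integer truncation the test expectations imply, and not the tf.image implementation itself:

import numpy as np

def central_crop_np(image, fraction):
    # Keep the central `fraction` of the height and width of an HWC image.
    h, w = image.shape[0], image.shape[1]
    top = int((h - h * fraction) / 2)
    left = int((w - w * fraction) / 2)
    return image[top:h - top, left:w - left, :]

assert central_crop_np(np.zeros((240, 320, 3)), 0.33).shape == (80, 106, 3)
assert central_crop_np(np.zeros((50, 60, 3)), 0.5).shape == (26, 30, 3)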
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_lambdas.py
|
{
"start": 11869,
"end": 14596
}
|
class ____(fixtures.MappedTest):
__sparse_driver_backend__ = True
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(32)),
Column("age_int", Integer),
)
Table(
"addresses",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", ForeignKey("users.id")),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
@classmethod
def insert_data(cls, connection):
users = cls.tables.users
connection.execute(
users.insert(),
[
dict(id=1, name="john", age_int=25),
dict(id=2, name="jack", age_int=47),
dict(id=3, name="jill", age_int=29),
dict(id=4, name="jane", age_int=37),
],
)
@classmethod
def setup_mappers(cls):
User = cls.classes.User
users = cls.tables.users
Address = cls.classes.Address
addresses = cls.tables.addresses
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"age": users.c.age_int,
"addresses": relationship(Address),
},
)
cls.mapper_registry.map_imperatively(Address, addresses)
def test_update(self):
User, Address = self.classes("User", "Address")
s = Session(testing.db, future=True)
def go(ids, values):
stmt = lambda_stmt(lambda: update(User).where(User.id.in_(ids)))
s.execute(
stmt,
values,
# note this currently just unrolls the lambda on the statement.
# so lambda caching for updates is not actually that useful
# unless synchronize_session is turned off.
# evaluate is similar just doesn't work for IN yet.
execution_options={"synchronize_session": "fetch"},
)
go([1, 2], {"name": "jack2"})
eq_(
s.execute(select(User.id, User.name).order_by(User.id)).all(),
[(1, "jack2"), (2, "jack2"), (3, "jill"), (4, "jane")],
)
go([3], {"name": "jane2"})
eq_(
s.execute(select(User.id, User.name).order_by(User.id)).all(),
[(1, "jack2"), (2, "jack2"), (3, "jane2"), (4, "jane")],
)
|
UpdateDeleteTest
|
python
|
allegroai__clearml
|
clearml/storage/callbacks.py
|
{
"start": 6892,
"end": 7799
}
|
class ____(ProgressReport):
def __init__(
self,
total_size: float,
verbose: bool,
remote_path: str,
log: logging.Logger,
report_chunk_size_mb: Optional[int] = None,
report_start: Optional[bool] = None,
) -> None:
report_chunk_size_mb = (
report_chunk_size_mb
if report_chunk_size_mb is not None
else ProgressReport.report_download_chunk_size_mb
or int(config.get("storage.log.report_download_chunk_size_mb", 5))
)
super(DownloadProgressReport, self).__init__(
verbose,
total_size,
log,
report_chunk_size_mb,
description_prefix="Downloading",
description_suffix="from {}".format(remote_path),
report_start=report_start,
)
self._remote_path = remote_path
|
DownloadProgressReport
|
python
|
pypa__setuptools
|
setuptools/_distutils/tests/test_log.py
|
{
"start": 79,
"end": 311
}
|
class ____:
def test_non_ascii(self, caplog):
caplog.set_level(logging.DEBUG)
log.debug('Dεbug\tMėssãge')
log.fatal('Fαtal\tÈrrōr')
assert caplog.messages == ['Dεbug\tMėssãge', 'Fαtal\tÈrrōr']
|
TestLog
|
python
|
python-openxml__python-docx
|
src/docx/shared.py
|
{
"start": 11858,
"end": 12517
}
|
class ____:
"""A document element within a story part.
Story parts include DocumentPart and Header/FooterPart and can contain block items
(paragraphs and tables). Items from the block-item subtree occasionally require an
ancestor object to provide access to part-level or package-level items like styles
or images or to add or drop a relationship.
Provides `self._parent` attribute to subclasses.
"""
def __init__(self, parent: t.ProvidesStoryPart):
self._parent = parent
@property
def part(self) -> StoryPart:
"""The package part containing this object."""
return self._parent.part
|
StoryChild
|
python
|
django__django
|
django/db/models/fields/__init__.py
|
{
"start": 83024,
"end": 83392
}
|
class ____(PositiveIntegerRelDbTypeMixin, BigIntegerField):
description = _("Positive big integer")
def get_internal_type(self):
return "PositiveBigIntegerField"
def formfield(self, **kwargs):
return super().formfield(
**{
"min_value": 0,
**kwargs,
}
)
|
PositiveBigIntegerField
|
python
|
PyCQA__pylint
|
doc/data/messages/r/return-in-init/good.py
|
{
"start": 0,
"end": 77
}
|
class ____:
def __init__(self, a, b) -> None:
self.result = a + b
|
Sum
|
python
|
tensorflow__tensorflow
|
tensorflow/python/checkpoint/checkpoint_with_v1_optimizers_test.py
|
{
"start": 11484,
"end": 13784
}
|
class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_trackable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
manual = _ManualScope()
return v, v + 1., v2, manual, manual()
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save, manual_scope, manual_scope_v = save_template()
self.assertCountEqual([
id(obj) for obj in
[v1_save, v2_save, manual_scope, manual_scope_v, save_template]
], [id(obj) for obj in trackable_utils.list_objects(save_template)])
self.assertDictEqual({"in_manual_scope": manual_scope_v},
manual_scope._trackable_children())
optimizer = adam.AdamOptimizer(0.0)
save_root = trackable_utils.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value)
self.evaluate([v.initializer for v in save_template.variables])
self.evaluate([v.initializer for v in optimizer.variables()])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.AdamOptimizer(0.0)
load_root = trackable_utils.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2, _, _ = load_template()
load_optimizer.minimize(var.read_value)
self.assertEqual(3, len(load_template._trackable_children()))
self.assertEqual(set(["v", "v2", "ManualScope"]),
load_template._trackable_children().keys())
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
if __name__ == "__main__":
test.main()
|
TemplateTests
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 950429,
"end": 951470
}
|
class ____(sgqlc.types.Type):
"""Choose which status checks must pass before branches can be merged
into a branch that matches this rule. When enabled, commits must
first be pushed to another branch, then merged or pushed directly
to a branch that matches this rule after status checks have
passed.
"""
__schema__ = github_schema
__field_names__ = ("required_status_checks", "strict_required_status_checks_policy")
required_status_checks = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null("StatusCheckConfiguration"))), graphql_name="requiredStatusChecks"
)
"""Status checks that are required."""
strict_required_status_checks_policy = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="strictRequiredStatusChecksPolicy")
"""Whether pull requests targeting a matching branch must be tested
with the latest code. This setting will not take effect unless at
least one status check is enabled.
"""
|
RequiredStatusChecksParameters
|
python
|
joke2k__faker
|
faker/providers/color/pt_BR/__init__.py
|
{
"start": 98,
"end": 9605
}
|
class ____(ColorProvider):
"""Implement color provider for ``pt_BR`` locale."""
all_colors = OrderedDict(
(
("Açafrão", "#F4C430"),
("Água-marinha média", "#66CDAA"),
("Água-marinha", "#7FFFD4"),
("Água", "#00FFFF"),
("Alizarina", "#E32636"),
("Amarelo brasilis", "#ECDB00"),
("Amarelo claro", "#FFFFE0"),
("Amarelo creme", "#ECD690"),
("Amarelo escuro", "#F2B73F"),
("Amarelo esverdeado", "#9ACD32"),
("Amarelo esverdeado", "#ADFF2F"),
("Amarelo ouro claro", "#FAFAD2"),
("Amarelo queimado", "#EEAD2D"),
("Amarelo", "#FFFF00"),
("Âmbar", "#FFBF00"),
("Ameixa", "#DDA0DD"),
("Amêndoa", "#FFEBCD"),
("Ametista", "#9966CC"),
("Aspargo", "#7BA05B"),
("Azul aço claro", "#B0C4DE"),
("Azul aço", "#4682B4"),
("Azul alice", "#F0F8FF"),
("Azul ardósia claro", "#8470FF"),
("Azul ardósia escuro", "#483D8B"),
("Azul ardósia médio", "#7B68EE"),
("Azul ardósia", "#6A5ACD"),
("Azul areado", "#B8CAD4"),
("Azul brasilis brilhante", "#09ACDB"),
("Azul brasilis", "#00BDCE"),
("Azul cadete", "#5F9EA0"),
("Azul camarada", "#054F77"),
("Azul celeste brilhante", "#007FFF"),
("Azul celeste pernambucano", "#00A4CD"),
("Azul celeste", "#F0FFFF"),
("Azul céu claro", "#87CEFA"),
("Azul céu profundo", "#00BFFF"),
("Azul céu", "#87CEEB"),
("Azul claro", "#ADD8E6"),
("Azul cobalto", "#0047AB"),
("Azul escuro", "#00008B"),
("Azul flor de milho", "#6495ED"),
("Azul força aérea", "#5D8AA8"),
("Azul furtivo", "#1E90FF"),
("Azul manteiga", "#A6AA3E"),
("Azul marinho", "#120A8F"),
("Azul médio", "#0000CD"),
("Azul meia-noite", "#191970"),
("Azul petróleo", "#084D6E"),
("Azul pólvora", "#B0E0E6"),
("Azul real", "#0000DD"),
("Azul taparuere", "#248EFF"),
("Azul turquesa brilhante", "#00DDFF"),
("Azul turquesa", "#00CCEE"),
("Azul violeta", "#8A2BE2"),
("Azul", "#0000FF"),
("Bege", "#F5F5DC"),
("Bordô", "#800000"),
("Borgonha", "#900020"),
("Branco antigo", "#FAEBD7"),
("Branco fantasma", "#F8F8FF"),
("Branco floral", "#FFFAF0"),
("Branco fumaça", "#F5F5F5"),
("Branco navajo", "#FFDEAD"),
("Branco", "#FFFFFF"),
("Brasil", "#A7F432"),
("Bronze", "#CD7F32"),
("Caqui escuro", "#BDB76B"),
("Caqui", "#F0E68C"),
("Caramelo", "#8B5742"),
("Cardo", "#D8BFD8"),
("Carmesim", "#DC143C"),
("Carmim carnáceo", "#960018"),
("Carmim clássico", "#992244"),
("Carmim", "#712F26"),
("Castanho avermelhado", "#8B0000"),
("Castanho claro", "#D2B48C"),
("Cenoura", "#ED9121"),
("Cereja Hollywood", "#F400A1"),
("Cereja", "#DE3163"),
("Chocolate", "#D2691E"),
("Ciano claro", "#E0FFFF"),
("Ciano escuro", "#008B8B"),
("Ciano", "#00FFFF"),
("Cinza ardósia claro", "#778899"),
("Cinza ardósia escuro", "#2F4F4F"),
("Cinza ardósia", "#708090"),
("Cinza claro", "#D3D3D3"),
("Cinza escuro", "#A9A9A9"),
("Cinza fosco", "#696969"),
("Cinza médio", "#DCDCDC"),
("Cinza", "#808080"),
("Cobre", "#B87333"),
("Concha", "#FFF5EE"),
("Coral claro", "#F08080"),
("Coral", "#FF7F50"),
("Couro", "#F0DC82"),
("Creme de marisco", "#FFE4C4"),
("Creme de menta", "#F5FFFA"),
("Creme", "#FFFDD0"),
("Dourado escuro", "#B8860B"),
("Dourado pálido", "#EEE8AA"),
("Dourado", "#DAA520"),
("Ébano", "#555D50"),
("Eminência", "#6C3082"),
("Escarlate", "#FF2400"),
("Esmeralda", "#50C878"),
("Eucalipto", "#44D7A8"),
("Fandango", "#B53389"),
("Feldspato", "#FDD5B1"),
("Ferrugem", "#B7410E"),
("Flerte", "#A2006D"),
("Fúcsia", "#FF00FF"),
("Fuligem", "#3D2B1F"),
("Glicínia", "#C9A0DC"),
("Glitter", "#E6E8FA"),
("Grená", "#831D1C"),
("Heliotrópio", "#DF73FF"),
("Herbal", "#2E8B57"),
("Independência", "#4C516D"),
("Índigo", "#4B0082"),
("Iris", "#5A4FCF"),
("Jade", "#00A86B"),
("Jambo", "#FF4500"),
("Jasmine", "#F8DE7E"),
("Kiwi", "#8EE53F"),
("Laranja claro", "#FFB84D"),
("Laranja escuro", "#FF8C00"),
("Laranja", "#FFA500"),
("Lavanda avermelhada", "#FFF0F5"),
("Lavanda", "#E6E6FA"),
("Lilás", "#C8A2C8"),
("Lima", "#FDE910"),
("Limão", "#00FF00"),
("Linho", "#FAF0E6"),
("Madeira", "#DEB887"),
("Magenta escuro", "#8B008B"),
("Magenta", "#FF00FF"),
("Malva", "#E0B0FF"),
("Mamão batido", "#FFEFD5"),
("Maná", "#F0FFF0"),
("Marfim", "#FFFFF0"),
("Marrom amarelado", "#F4A460"),
("Marrom claro", "#A52A2A"),
("Marrom rosado", "#BC8F8F"),
("Marrom sela", "#8B4513"),
("Marrom", "#964B00"),
("Milho Claro", "#FFF8DC"),
("Milho", "#FBEC5D"),
("Mocassim", "#FFE4B5"),
("Mostarda", "#FFDB58"),
("Naval", "#000080"),
("Neve", "#FFFAFA"),
("Nyanza", "#E9FFDB"),
("Ocre", "#CC7722"),
("Oliva escura", "#556B2F"),
("Oliva parda", "#6B8E23"),
("Oliva", "#808000"),
("Orquídea escura", "#9932CC"),
("Orquídea média", "#BA55D3"),
("Orquídea", "#DA70D6"),
("Ouro", "#FFD700"),
("Pardo escuro", "#CC6600"),
("Pardo", "#CD853F"),
("Pêssego", "#FFDAB9"),
("Prata", "#C0C0C0"),
("Preto", "#000000"),
("Púrpura média", "#9370DB"),
("Púrpura", "#800080"),
("Quantum", "#111111"),
("Quartzo", "#51484F"),
("Renda antiga", "#FDF5E6"),
("Rosa amoroso", "#CD69CD"),
("Rosa brilhante", "#FF007F"),
("Rosa Choque", "#FC0FC0"),
("Rosa claro", "#FFB6C1"),
("Rosa danação", "#DA69A1"),
("Rosa embaçado", "#FFE4E1"),
("Rosa forte", "#FF69B4"),
("Rosa profundo", "#FF1493"),
("Rosa", "#FFCBDB"),
("Roxo brasilis", "#8A008A"),
("Roxo", "#993399"),
("Rútilo", "#6D351A"),
("Salmão claro", "#FFA07A"),
("Salmão escuro", "#E9967A"),
("Salmão", "#FA7F72"),
("Sépia", "#705714"),
("Siena", "#FF8247"),
("Tangerina", "#F28500"),
("Terracota", "#E2725B"),
("Tijolo refratário", "#B22222"),
("Tomate", "#FF6347"),
("Triássico", "#FF2401"),
("Trigo", "#F5DEB3"),
("Turquesa escura", "#00CED1"),
("Turquesa média", "#48D1CC"),
("Turquesa pálida", "#AFEEEE"),
("Turquesa", "#40E0D0"),
("Urucum", "#EC2300"),
("Verde amarelado", "#9ACD32"),
("Verde claro", "#90EE90"),
("Verde escuro", "#006400"),
("Verde espectro", "#00FF00"),
("Verde floresta", "#228B22"),
("Verde fluorescente", "#CCFF33"),
("Verde grama", "#7CFC00"),
("Verde lima", "#32CD32"),
("Verde mar claro", "#20B2AA"),
("Verde mar escuro", "#8FBC8F"),
("Verde mar médio", "#3CB371"),
("Verde militar", "#78866B"),
("Verde pálido", "#98FB98"),
("Verde Paris", "#7FFF00"),
("Verde primavera médio", "#00FA9A"),
("Verde primavera", "#00FF7F"),
("Verde-azulado", "#008080"),
("Verde", "#008000"),
("Vermelho enegrecido", "#550000"),
("Vermelho escuro", "#8B0000"),
("Vermelho indiano", "#CD5C5C"),
("Vermelho violeta médio", "#C71585"),
("Vermelho violeta pálido", "#DB7093"),
("Vermelho violeta", "#D02090"),
("Vermelho", "#FF0000"),
("Violeta claro", "#F8CBF8"),
("Violeta escuro", "#9400D3"),
("Violeta", "#EE82EE"),
("Zinco", "#E2DDF0"),
)
)
safe_colors = (
"preto",
"marrom",
"verde",
"azul escuro",
"verde escuro",
"roxo",
"laranja",
"verde claro",
"azul",
"rosa",
"violeta",
"cinza",
"amarelo",
"magenta",
"ciano",
"branco",
)
|
Provider
|
python
|
huggingface__transformers
|
src/transformers/modeling_outputs.py
|
{
"start": 44526,
"end": 47226
}
|
class ____(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Cross attentions weights after the attention softmax, used to compute the weighted average in the
cross-attention heads.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
|
CausalLMOutputWithCrossAttentions
|
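The output class above is a dataclass-style container whose fields are read as attributes (outputs.loss, outputs.logits, and so on). A small sketch of how such an output is typically consumed, using a plain dataclass stand-in rather than the real transformers ModelOutput machinery:

from dataclasses import dataclass
from typing import Optional

import torch

@dataclass
class ToyCausalLMOutput:
    loss: Optional[torch.Tensor] = None
    logits: Optional[torch.Tensor] = None

# Pretend a model returned scores for a batch of 2 sequences of length 5 over a 10-token vocabulary.
outputs = ToyCausalLMOutput(logits=torch.randn(2, 5, 10))
next_token_ids = outputs.logits[:, -1, :].argmax(dim=-1)  # greedy pick at the last position
assert next_token_ids.shape == (2,)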
python
|
pytorch__pytorch
|
torch/_inductor/template_heuristics/triton.py
|
{
"start": 89285,
"end": 89487
}
|
class ____(AddMMConfigMixin, CPUMMTemplateConfigHeuristic):
"""Addmm specific mixin for CPU"""
@register_template_heuristic(mm_template.uid, "cpu", op_name="scaled_mm")
|
CPUAddmmTemplateConfigHeuristic
|
python
|
getsentry__sentry
|
src/sentry/sentry_apps/api/serializers/sentry_app_installation.py
|
{
"start": 628,
"end": 707
}
|
class ____(TypedDict):
uuid: str
slug: str
|
SentryAppInstallationAppResult
|
python
|
doocs__leetcode
|
solution/1800-1899/1894.Find the Student that Will Replace the Chalk/Solution.py
|
{
"start": 0,
"end": 220
}
|
class ____:
def chalkReplacer(self, chalk: List[int], k: int) -> int:
s = sum(chalk)
k %= s
for i, x in enumerate(chalk):
if k < x:
return i
k -= x
|
Solution
|
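The solution above first reduces k modulo the chalk used in one full round, then scans once for the first student whose requirement exceeds the remainder. A standalone sketch of the same idea, written as a plain function so it runs without the LeetCode Solution wrapper:

def chalk_replacer(chalk, k):
    k %= sum(chalk)          # whole rounds change nothing; keep only the remainder
    for i, need in enumerate(chalk):
        if k < need:
            return i         # this student runs out of chalk first
        k -= need

assert chalk_replacer([5, 1, 5], 22) == 0
assert chalk_replacer([3, 4, 1, 2], 25) == 1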
python
|
numba__numba
|
numba/core/untyped_passes.py
|
{
"start": 2329,
"end": 2792
}
|
class ____(FunctionPass):
_name = "extract_bytecode"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
"""
Extract bytecode from function
"""
func_id = state['func_id']
bc = bytecode.ByteCode(func_id)
if config.DUMP_BYTECODE:
print(bc.dump())
state['bc'] = bc
return True
@register_pass(mutates_CFG=True, analysis_only=False)
|
ExtractByteCode
|
python
|
spack__spack
|
lib/spack/spack/environment/environment.py
|
{
"start": 107078,
"end": 119573
}
|
class ____(collections.abc.Mapping):
"""Manages the in-memory representation of a manifest file, and its synchronization
with the actual manifest on disk.
"""
@staticmethod
def from_lockfile(manifest_dir: Union[pathlib.Path, str]) -> "EnvironmentManifestFile":
"""Returns an environment manifest file compatible with the lockfile already present in
the environment directory.
This function also writes a spack.yaml file that is consistent with the spack.lock
already existing in the directory.
Args:
manifest_dir: directory containing the manifest and lockfile
"""
# TBD: Should this be the abspath?
manifest_dir = pathlib.Path(manifest_dir)
lockfile = manifest_dir / lockfile_name
with lockfile.open("r", encoding="utf-8") as f:
data = sjson.load(f)
user_specs = data["roots"]
default_content = manifest_dir / manifest_name
default_content.write_text(default_manifest_yaml())
manifest = EnvironmentManifestFile(manifest_dir)
for item in user_specs:
manifest.add_user_spec(item["spec"])
manifest.flush()
return manifest
def __init__(self, manifest_dir: Union[pathlib.Path, str], name: Optional[str] = None) -> None:
self.manifest_dir = pathlib.Path(manifest_dir)
self.name = name or str(manifest_dir)
self.manifest_file = self.manifest_dir / manifest_name
self.scope_name = f"env:{self.name}"
self.config_stage_dir = os.path.join(env_subdir_path(manifest_dir), "config")
#: Configuration scope associated with this environment. Note that this is not
#: invalidated by a re-read of the manifest file.
self._env_config_scope: Optional[spack.config.ConfigScope] = None
if not self.manifest_file.exists():
msg = f"cannot find '{manifest_name}' in {self.manifest_dir}"
raise SpackEnvironmentError(msg)
with self.manifest_file.open(encoding="utf-8") as f:
self.yaml_content = _read_yaml(f)
self.changed = False
def _all_matches(self, user_spec: str) -> List[str]:
"""Maps the input string to the first equivalent user spec in the manifest,
and returns it.
Args:
user_spec: user spec to be found
Raises:
ValueError: if no equivalent match is found
"""
result = []
for yaml_spec_str in self.configuration["specs"]:
if Spec(yaml_spec_str) == Spec(user_spec):
result.append(yaml_spec_str)
if not result:
raise ValueError(f"cannot find a spec equivalent to {user_spec}")
return result
def add_user_spec(self, user_spec: str) -> None:
"""Appends the user spec passed as input to the list of root specs.
Args:
user_spec: user spec to be appended
"""
self.configuration.setdefault("specs", []).append(user_spec)
self.changed = True
def remove_user_spec(self, user_spec: str) -> None:
"""Removes the user spec passed as input from the list of root specs
Args:
user_spec: user spec to be removed
Raises:
SpackEnvironmentError: when the user spec is not in the list
"""
try:
for key in self._all_matches(user_spec):
self.configuration["specs"].remove(key)
except ValueError as e:
msg = f"cannot remove {user_spec} from {self}, no such spec exists"
raise SpackEnvironmentError(msg) from e
self.changed = True
def clear(self) -> None:
"""Clear all user specs from the list of root specs"""
self.configuration["specs"] = []
self.changed = True
def override_user_spec(self, user_spec: str, idx: int) -> None:
"""Overrides the user spec at index idx with the one passed as input.
Args:
user_spec: new user spec
idx: index of the spec to be overridden
Raises:
SpackEnvironmentError: when the user spec cannot be overridden
"""
try:
self.configuration["specs"][idx] = user_spec
except ValueError as e:
msg = f"cannot override {user_spec} from {self}"
raise SpackEnvironmentError(msg) from e
self.changed = True
def set_include_concrete(self, include_concrete: List[str]) -> None:
"""Sets the included concrete environments in the manifest to the value(s) passed as input.
Args:
include_concrete: list of already existing concrete environments to include
"""
self.configuration[included_concrete_name] = []
for env_path in include_concrete:
self.configuration[included_concrete_name].append(env_path)
self.changed = True
def add_definition(self, user_spec: str, list_name: str) -> None:
"""Appends a user spec to the first active definition matching the name passed as argument.
Args:
user_spec: user spec to be appended
list_name: name of the definition where to append
Raises:
            SpackEnvironmentError: if no valid definition exists already
"""
defs = self.configuration.get("definitions", [])
msg = f"cannot add {user_spec} to the '{list_name}' definition, no valid list exists"
for idx, item in self._iterate_on_definitions(defs, list_name=list_name, err_msg=msg):
item[list_name].append(user_spec)
break
# "definitions" can be remote, so we need to update the global config too
spack.config.CONFIG.set("definitions", defs, scope=self.scope_name)
self.changed = True
def remove_definition(self, user_spec: str, list_name: str) -> None:
"""Removes a user spec from an active definition that matches the name passed as argument.
Args:
user_spec: user spec to be removed
list_name: name of the definition where to remove the spec from
Raises:
SpackEnvironmentError: if the user spec cannot be removed from the list,
or the list does not exist
"""
defs = self.configuration.get("definitions", [])
msg = f"cannot remove {user_spec} from the '{list_name}' definition, no valid list exists"
for idx, item in self._iterate_on_definitions(defs, list_name=list_name, err_msg=msg):
try:
item[list_name].remove(user_spec)
break
except ValueError:
pass
# "definitions" can be remote, so we need to update the global config too
spack.config.CONFIG.set("definitions", defs, scope=self.scope_name)
self.changed = True
def override_definition(self, user_spec: str, *, override: str, list_name: str) -> None:
"""Overrides a user spec from an active definition that matches the name passed
as argument.
Args:
user_spec: user spec to be overridden
override: new spec to be used
list_name: name of the definition where to override the spec
Raises:
SpackEnvironmentError: if the user spec cannot be overridden
"""
defs = self.configuration.get("definitions", [])
msg = f"cannot override {user_spec} with {override} in the '{list_name}' definition"
for idx, item in self._iterate_on_definitions(defs, list_name=list_name, err_msg=msg):
try:
sub_index = item[list_name].index(user_spec)
item[list_name][sub_index] = override
break
except ValueError:
pass
# "definitions" can be remote, so we need to update the global config too
spack.config.CONFIG.set("definitions", defs, scope=self.scope_name)
self.changed = True
def _iterate_on_definitions(self, definitions, *, list_name, err_msg):
"""Iterates on definitions, returning the active ones matching a given name."""
def extract_name(_item):
names = list(x for x in _item if x != "when")
assert len(names) == 1, f"more than one name in {_item}"
return names[0]
for idx, item in enumerate(definitions):
name = extract_name(item)
if name != list_name:
continue
condition_str = item.get("when", "True")
if not spack.spec.eval_conditional(condition_str):
continue
yield idx, item
else:
raise SpackEnvironmentError(err_msg)
def set_default_view(self, view: Union[bool, str, pathlib.Path, Dict[str, str]]) -> None:
"""Sets the default view root in the manifest to the value passed as input.
Args:
view: If the value is a string or a path, it specifies the path to the view. If
True the default view is used for the environment, if False there's no view.
"""
if isinstance(view, dict):
self.configuration["view"][default_view_name].update(view)
self.changed = True
return
if not isinstance(view, bool):
view = str(view)
self.configuration["view"] = view
self.changed = True
def remove_default_view(self) -> None:
"""Removes the default view from the manifest file"""
view_data = self.configuration.get("view")
if isinstance(view_data, collections.abc.Mapping):
self.configuration["view"].pop(default_view_name)
self.changed = True
return
self.set_default_view(view=False)
def flush(self) -> None:
"""Synchronizes the object with the manifest file on disk."""
if not self.changed:
return
with fs.write_tmp_and_move(os.path.realpath(self.manifest_file)) as f:
_write_yaml(self.yaml_content, f)
self.changed = False
@property
def configuration(self):
"""Return the dictionaries in the pristine YAML, without the top level attribute"""
return self.yaml_content[TOP_LEVEL_KEY]
def __len__(self):
return len(self.yaml_content)
def __getitem__(self, key):
return self.yaml_content[key]
def __iter__(self):
return iter(self.yaml_content)
def __str__(self):
return str(self.manifest_file)
@property
def env_config_scope(self) -> spack.config.ConfigScope:
"""The configuration scope for the environment manifest"""
if self._env_config_scope is None:
self._env_config_scope = spack.config.SingleFileScope(
self.scope_name,
str(self.manifest_file),
spack.schema.env.schema,
yaml_path=[TOP_LEVEL_KEY],
)
ensure_no_disallowed_env_config_mods(self._env_config_scope)
return self._env_config_scope
def prepare_config_scope(self) -> None:
"""Add the manifest's scope to the global configuration search path."""
spack.config.CONFIG.push_scope(
self.env_config_scope, priority=ConfigScopePriority.ENVIRONMENT
)
def deactivate_config_scope(self) -> None:
"""Remove the manifest's scope from the global config path."""
spack.config.CONFIG.remove_scope(self.env_config_scope.name)
@contextlib.contextmanager
def use_config(self):
"""Ensure only the manifest's configuration scopes are global."""
with no_active_environment():
self.prepare_config_scope()
yield
self.deactivate_config_scope()
def environment_path_scope(name: str, path: str) -> Optional[spack.config.ConfigScope]:
"""Retrieve the suitably named environment path scope
Arguments:
name: configuration scope name
path: path to configuration file(s)
    Returns: the environment's configuration scope, if any, or None
"""
if exists(path): # managed environment
manifest = EnvironmentManifestFile(root(path))
elif is_env_dir(path): # anonymous environment
manifest = EnvironmentManifestFile(path)
else:
return None
manifest.env_config_scope.name = f"{name}:{manifest.env_config_scope.name}"
manifest.env_config_scope.writable = False
return manifest.env_config_scope
|
EnvironmentManifestFile
|
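_iterate_on_definitions above combines a generator with Python's for/else: the else branch raises only if the loop runs to completion, and the callers break as soon as they have applied their change, so the error fires only when no usable definition was found. A toy sketch of that control flow, with made-up names:

def find_items(items, prefix, err_msg):
    # Yield matches; if the consumer never breaks, the loop completes and the
    # else branch raises, mirroring _iterate_on_definitions above.
    for item in items:
        if item.startswith(prefix):
            yield item
    else:
        raise LookupError(err_msg)

# The consumer breaks on the first hit, so the generator is abandoned and nothing is raised.
for item in find_items(["alpha", "beta"], "al", "no match"):
    break

# No hit: the consumer never breaks, the generator is exhausted and the error propagates.
try:
    for item in find_items(["alpha", "beta"], "zz", "no match"):
        break
except LookupError as exc:
    print(exc)  # no match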
python
|
getsentry__sentry
|
src/sentry/integrations/github/webhook.py
|
{
"start": 8695,
"end": 12387
}
|
class ____(GitHubWebhook):
"""
Unlike other GitHub webhooks, installation webhooks are handled in control silo.
https://developer.github.com/v3/activity/events/types/#installationevent
"""
@property
def event_type(self) -> IntegrationWebhookEventType:
return IntegrationWebhookEventType.INSTALLATION
def __call__(self, event: Mapping[str, Any], **kwargs) -> None:
installation = event["installation"]
if not installation:
return
if event["action"] == "created":
state = {
"installation_id": event["installation"]["id"],
"sender": {
"id": event["sender"]["id"],
"login": event["sender"]["login"],
},
}
data = GitHubIntegrationProvider().build_integration(state)
ensure_integration(IntegrationProviderSlug.GITHUB.value, data)
if event["action"] == "deleted":
external_id = event["installation"]["id"]
if host := kwargs.get("host"):
external_id = "{}:{}".format(host, event["installation"]["id"])
result = integration_service.organization_contexts(
provider=self.provider,
external_id=external_id,
)
integration = result.integration
org_integrations = result.organization_integrations
if integration is not None:
self._handle(integration, event, org_integrations=org_integrations)
else:
# It seems possible for the GH or GHE app to be installed on their
# end, but the integration to not exist. Possibly from deleting in
# Sentry first or from a failed install flow (where the integration
# didn't get created in the first place)
logger.info(
"github.deletion-missing-integration",
extra={
"action": event["action"],
"installation_name": installation["account"]["login"],
"external_id": str(external_id),
},
)
logger.error("Installation is missing.")
def _handle(
self,
integration: RpcIntegration,
event: Mapping[str, Any],
**kwargs,
) -> None:
org_ids = {oi.organization_id for oi in kwargs.get("org_integrations", [])}
logger.info(
"InstallationEventWebhook._handle_delete",
extra={
"external_id": event["installation"]["id"],
"integration_id": integration.id,
"organization_id_list": org_ids,
},
)
integration_service.update_integration(
integration_id=integration.id, status=ObjectStatus.DISABLED
)
for organization_id in org_ids:
repository_service.disable_repositories_for_integration(
organization_id=organization_id,
provider=f"integrations:{self.provider}",
integration_id=integration.id,
)
github_app_id = event["installation"].get("app_id")
SENTRY_GITHUB_APP_ID = options.get("github-app.id")
if (
github_app_id
and SENTRY_GITHUB_APP_ID
and str(github_app_id) == str(SENTRY_GITHUB_APP_ID)
):
codecov_account_unlink.apply_async(
kwargs={
"integration_id": integration.id,
"organization_ids": list(org_ids),
}
)
|
InstallationEventWebhook
|
python
|
kamyu104__LeetCode-Solutions
|
Python/validate-binary-tree-nodes.py
|
{
"start": 29,
"end": 763
}
|
class ____(object):
def validateBinaryTreeNodes(self, n, leftChild, rightChild):
"""
:type n: int
:type leftChild: List[int]
:type rightChild: List[int]
:rtype: bool
"""
roots = set(range(n)) - set(leftChild) - set(rightChild)
if len(roots) != 1:
return False
root, = roots
stk = [root]
lookup = set([root])
while stk:
node = stk.pop()
for c in (leftChild[node], rightChild[node]):
if c < 0:
continue
if c in lookup:
return False
lookup.add(c)
stk.append(c)
return len(lookup) == n
|
Solution
|
python
|
pytorch__pytorch
|
test/quantization/ao_migration/common.py
|
{
"start": 106,
"end": 2178
}
|
class ____(TestCase):
def _test_function_import(
self,
package_name: str,
function_list: list[str],
base: Optional[str] = None,
new_package_name: Optional[str] = None,
):
r"""Tests individual function list import by comparing the functions
and their hashes."""
if base is None:
base = "quantization"
old_base = "torch." + base
new_base = "torch.ao." + base
if new_package_name is None:
new_package_name = package_name
old_location = importlib.import_module(f"{old_base}.{package_name}")
new_location = importlib.import_module(f"{new_base}.{new_package_name}")
for fn_name in function_list:
old_function = getattr(old_location, fn_name)
new_function = getattr(new_location, fn_name)
assert old_function == new_function, f"Functions don't match: {fn_name}"
assert hash(old_function) == hash(new_function), (
f"Hashes don't match: {old_function}({hash(old_function)}) vs. "
f"{new_function}({hash(new_function)})"
)
def _test_dict_import(
self, package_name: str, dict_list: list[str], base: Optional[str] = None
):
r"""Tests individual function list import by comparing the functions
and their hashes."""
if base is None:
base = "quantization"
old_base = "torch." + base
new_base = "torch.ao." + base
old_location = importlib.import_module(f"{old_base}.{package_name}")
new_location = importlib.import_module(f"{new_base}.{package_name}")
for dict_name in dict_list:
old_dict = getattr(old_location, dict_name)
new_dict = getattr(new_location, dict_name)
assert old_dict == new_dict, f"Dicts don't match: {dict_name}"
for key in new_dict:
assert old_dict[key] == new_dict[key], (
f"Dicts don't match: {dict_name} for key {key}"
)
|
AOMigrationTestCase
|
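The helpers above import the same symbol through an old and a new package path and assert that the two objects are equal with matching hashes. A tiny self-contained sketch of that pattern, using stdlib modules as stand-ins for the torch.quantization / torch.ao.quantization pair (collections.abc re-exports the classes defined in the private _collections_abc module, so both paths resolve to the same objects):

import importlib

old_location = importlib.import_module("collections.abc")
new_location = importlib.import_module("_collections_abc")

for name in ["Mapping", "Sequence", "Iterable"]:
    old_obj = getattr(old_location, name)
    new_obj = getattr(new_location, name)
    assert old_obj is new_obj, f"Objects don't match: {name}"
    assert hash(old_obj) == hash(new_obj)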
python
|
getsentry__sentry
|
src/sentry/api/serializers/rest_framework/dashboard.py
|
{
"start": 3558,
"end": 4593
}
|
class ____(serializers.Field):
REQUIRED_KEYS = {
"x",
"y",
"w",
"h",
"min_h",
}
def to_internal_value(self, data):
if data is None:
return None
missing_keys = self.REQUIRED_KEYS - set(data.keys())
if missing_keys:
missing_key_str = ", ".join(sorted(snake_to_camel_case(key) for key in missing_keys))
raise serializers.ValidationError(f"Missing required keys: {missing_key_str}")
layout_to_store = {}
for key in self.REQUIRED_KEYS:
value = data.get(key)
if value is None:
continue
if not isinstance(value, int):
raise serializers.ValidationError(f"Expected number for: {key}")
layout_to_store[key] = value
# Store the layout with camel case dict keys because they'll be
# served as camel case in outgoing responses anyways
return convert_dict_key_case(layout_to_store, snake_to_camel_case)
|
LayoutField
|
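to_internal_value above converts the validated snake_case keys back to camelCase before storing the layout. A rough sketch of that key conversion, where snake_to_camel is a hypothetical stand-in for sentry's snake_to_camel_case helper rather than the real import:

def snake_to_camel(key: str) -> str:
    # Hypothetical stand-in: join underscore-separated parts, capitalizing all but the first.
    head, *rest = key.split("_")
    return head + "".join(part.capitalize() for part in rest)

layout = {"x": 0, "y": 1, "w": 2, "h": 4, "min_h": 2}
stored = {snake_to_camel(k): v for k, v in layout.items()}
assert stored == {"x": 0, "y": 1, "w": 2, "h": 4, "minH": 2}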
python
|
pyca__cryptography
|
tests/hazmat/primitives/decrepit/test_algorithms.py
|
{
"start": 4964,
"end": 5717
}
|
class ____:
@pytest.mark.parametrize(
("key", "keysize"),
[(b"0" * (keysize // 4), keysize) for keysize in range(40, 129, 8)],
)
def test_key_size(self, key, keysize):
cipher = CAST5(binascii.unhexlify(key))
assert cipher.key_size == keysize
def test_invalid_key_size(self):
with pytest.raises(ValueError):
CAST5(binascii.unhexlify(b"0" * 34))
def test_invalid_key_type(self):
with pytest.raises(TypeError, match="key must be bytes"):
CAST5("0" * 10) # type: ignore[arg-type]
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
CAST5(b"\x00" * 16), modes.ECB()
),
skip_message="Does not support CAST5 ECB",
)
|
TestCAST5
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/runtime/memory_media_file_storage_test.py
|
{
"start": 1067,
"end": 7167
}
|
class ____(unittest.TestCase):
def setUp(self):
super().setUp()
self.storage = MemoryMediaFileStorage(media_endpoint="/mock/media")
@mock.patch(
"streamlit.runtime.memory_media_file_storage.open",
mock_open(read_data=b"mock_bytes"),
)
def test_load_with_path(self):
"""Adding a file by path creates a MemoryFile instance."""
file_id = self.storage.load_and_get_id(
"mock/file/path",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
assert MemoryFile(
content=b"mock_bytes",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
) == self.storage.get_file(file_id)
def test_load_with_bytes(self):
"""Adding a file with bytes creates a MemoryFile instance."""
file_id = self.storage.load_and_get_id(
b"mock_bytes",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
assert MemoryFile(
content=b"mock_bytes",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
) == self.storage.get_file(file_id)
def test_identical_files_have_same_id(self):
"""Two files with the same content, mimetype, and filename should share an ID."""
# Create 2 identical files. We'll just get one ID.
file_id1 = self.storage.load_and_get_id(
b"mock_bytes",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
file_id2 = self.storage.load_and_get_id(
b"mock_bytes",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
assert file_id1 == file_id2
# Change file content -> different ID
changed_content = self.storage.load_and_get_id(
b"mock_bytes_2",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
assert file_id1 != changed_content
# Change mimetype -> different ID
changed_mimetype = self.storage.load_and_get_id(
b"mock_bytes",
mimetype="image/png",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
assert file_id1 != changed_mimetype
# Change (or omit) filename -> different ID
changed_filename = self.storage.load_and_get_id(
b"mock_bytes", mimetype="video/mp4", kind=MediaFileKind.MEDIA
)
assert file_id1 != changed_filename
@mock.patch(
"streamlit.runtime.memory_media_file_storage.open",
MagicMock(side_effect=Exception),
)
def test_load_with_bad_path(self):
"""Adding a file by path raises a MediaFileStorageError if the file can't be read."""
with pytest.raises(MediaFileStorageError):
self.storage.load_and_get_id(
"mock/file/path",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
@parameterized.expand(
[
("video/mp4", ".mp4"),
("audio/wav", ".wav"),
("image/png", ".png"),
("image/jpeg", ".jpg"),
]
)
def test_get_url(self, mimetype, extension):
"""URLs should be formatted correctly, and have the expected extension."""
file_id = self.storage.load_and_get_id(
b"mock_bytes", mimetype=mimetype, kind=MediaFileKind.MEDIA
)
url = self.storage.get_url(file_id)
assert f"/mock/media/{file_id}{extension}" == url
def test_get_url_invalid_fileid(self):
"""get_url raises if it gets a bad file_id."""
with pytest.raises(MediaFileStorageError):
self.storage.get_url("not_a_file_id")
def test_delete_file(self):
"""delete_file removes the file with the given ID."""
file_id1 = self.storage.load_and_get_id(
b"mock_bytes_1",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
file_id2 = self.storage.load_and_get_id(
b"mock_bytes_2",
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename="file.mp4",
)
# delete file 1. It should not exist, but file2 should.
self.storage.delete_file(file_id1)
with pytest.raises(MediaFileStorageError):
self.storage.get_file(file_id1)
assert self.storage.get_file(file_id2) is not None
# delete file 2
self.storage.delete_file(file_id2)
with pytest.raises(MediaFileStorageError):
self.storage.get_file(file_id2)
def test_delete_invalid_file_is_a_noop(self):
"""deleting a file that doesn't exist doesn't raise an error."""
self.storage.delete_file("mock_file_id")
def test_cache_stats(self):
"""Test our CacheStatsProvider implementation."""
assert len(self.storage.get_stats()) == 0
# Add several files to storage. We'll unique-ify them by filename.
mock_data = b"some random mock binary data"
num_files = 5
for ii in range(num_files):
self.storage.load_and_get_id(
mock_data,
mimetype="video/mp4",
kind=MediaFileKind.MEDIA,
filename=f"{ii}.mp4",
)
stats = self.storage.get_stats()
assert len(stats) == 1
assert stats[0].category_name == "st_memory_media_file_storage"
assert len(mock_data) * num_files == sum(stat.byte_length for stat in stats)
# Remove files, and ensure our cache doesn't report they still exist
for file_id in list(self.storage._files_by_id.keys()):
self.storage.delete_file(file_id)
assert len(self.storage.get_stats()) == 0
|
MemoryMediaFileStorageTest
|
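Several tests above patch the storage module's open with unittest.mock.mock_open so that reading a path returns canned bytes. A minimal standalone sketch of that patching pattern; read_first_bytes is a made-up helper standing in for the code under test:

from unittest import mock

def read_first_bytes(path: str) -> bytes:
    # Made-up helper standing in for code that reads a file from disk.
    with open(path, "rb") as f:
        return f.read()

with mock.patch("builtins.open", mock.mock_open(read_data=b"mock_bytes")):
    assert read_first_bytes("any/fake/path") == b"mock_bytes"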
python
|
django__django
|
django/contrib/sessions/exceptions.py
|
{
"start": 272,
"end": 359
}
|
class ____(BadRequest):
"""The session was interrupted."""
pass
|
SessionInterrupted
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_device_allocation_configuration.py
|
{
"start": 383,
"end": 6196
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'opaque': 'V1OpaqueDeviceConfiguration',
'requests': 'list[str]',
'source': 'str'
}
attribute_map = {
'opaque': 'opaque',
'requests': 'requests',
'source': 'source'
}
def __init__(self, opaque=None, requests=None, source=None, local_vars_configuration=None): # noqa: E501
"""V1DeviceAllocationConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._opaque = None
self._requests = None
self._source = None
self.discriminator = None
if opaque is not None:
self.opaque = opaque
if requests is not None:
self.requests = requests
self.source = source
@property
def opaque(self):
"""Gets the opaque of this V1DeviceAllocationConfiguration. # noqa: E501
:return: The opaque of this V1DeviceAllocationConfiguration. # noqa: E501
:rtype: V1OpaqueDeviceConfiguration
"""
return self._opaque
@opaque.setter
def opaque(self, opaque):
"""Sets the opaque of this V1DeviceAllocationConfiguration.
:param opaque: The opaque of this V1DeviceAllocationConfiguration. # noqa: E501
:type: V1OpaqueDeviceConfiguration
"""
self._opaque = opaque
@property
def requests(self):
"""Gets the requests of this V1DeviceAllocationConfiguration. # noqa: E501
        Requests lists the names of requests where the configuration applies. If empty, it applies to all requests. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the configuration applies to all subrequests. # noqa: E501
:return: The requests of this V1DeviceAllocationConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._requests
@requests.setter
def requests(self, requests):
"""Sets the requests of this V1DeviceAllocationConfiguration.
        Requests lists the names of requests where the configuration applies. If empty, it applies to all requests. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the configuration applies to all subrequests. # noqa: E501
:param requests: The requests of this V1DeviceAllocationConfiguration. # noqa: E501
:type: list[str]
"""
self._requests = requests
@property
def source(self):
"""Gets the source of this V1DeviceAllocationConfiguration. # noqa: E501
Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim. # noqa: E501
:return: The source of this V1DeviceAllocationConfiguration. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this V1DeviceAllocationConfiguration.
Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim. # noqa: E501
:param source: The source of this V1DeviceAllocationConfiguration. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and source is None: # noqa: E501
raise ValueError("Invalid value for `source`, must not be `None`") # noqa: E501
self._source = source
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeviceAllocationConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeviceAllocationConfiguration):
return True
return self.to_dict() != other.to_dict()
|
V1DeviceAllocationConfiguration
|
python
|
mkdocs__mkdocs
|
mkdocs/tests/cli_tests.py
|
{
"start": 165,
"end": 25528
}
|
class ____(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_default(self, mock_serve):
result = self.runner.invoke(cli.cli, ["serve"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=True,
build_type=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_config_file(self, mock_serve):
result = self.runner.invoke(
cli.cli, ["serve", "--config-file", "mkdocs.yml"], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_serve.call_count, 1)
args, kwargs = mock_serve.call_args
self.assertTrue('config_file' in kwargs)
self.assertIsInstance(kwargs['config_file'], io.BufferedReader)
self.assertEqual(kwargs['config_file'].name, 'mkdocs.yml')
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_dev_addr(self, mock_serve):
result = self.runner.invoke(
cli.cli, ["serve", '--dev-addr', '0.0.0.0:80'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr='0.0.0.0:80',
open_in_browser=False,
livereload=True,
build_type=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_strict(self, mock_serve):
result = self.runner.invoke(cli.cli, ["serve", '--strict'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=True,
build_type=None,
config_file=None,
strict=True,
theme=None,
use_directory_urls=None,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_theme(self, mock_serve):
result = self.runner.invoke(
cli.cli, ["serve", '--theme', 'readthedocs'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=True,
build_type=None,
config_file=None,
strict=None,
theme='readthedocs',
use_directory_urls=None,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_use_directory_urls(self, mock_serve):
result = self.runner.invoke(
cli.cli, ["serve", '--use-directory-urls'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=True,
build_type=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=True,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_no_directory_urls(self, mock_serve):
result = self.runner.invoke(
cli.cli, ["serve", '--no-directory-urls'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=True,
build_type=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=False,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_livereload(self, mock_serve):
result = self.runner.invoke(cli.cli, ["serve", '--livereload'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=True,
build_type=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_no_livereload(self, mock_serve):
result = self.runner.invoke(cli.cli, ["serve", '--no-livereload'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=False,
build_type=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_dirtyreload(self, mock_serve):
result = self.runner.invoke(cli.cli, ["serve", '--dirty'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=True,
build_type='dirty',
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
watch_theme=False,
watch=(),
)
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve_watch_theme(self, mock_serve):
result = self.runner.invoke(cli.cli, ["serve", '--watch-theme'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
mock_serve.assert_called_once_with(
dev_addr=None,
open_in_browser=False,
livereload=True,
build_type=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
watch_theme=True,
watch=(),
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_defaults(self, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['build'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
args, kwargs = mock_build.call_args
self.assertTrue('dirty' in kwargs)
self.assertFalse(kwargs['dirty'])
mock_load_config.assert_called_once_with(
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
site_dir=None,
)
for log_name in 'mkdocs', 'mkdocs.structure.pages', 'mkdocs.plugins.foo':
self.assertEqual(logging.getLogger(log_name).getEffectiveLevel(), logging.INFO)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_clean(self, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['build', '--clean'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
args, kwargs = mock_build.call_args
self.assertTrue('dirty' in kwargs)
self.assertFalse(kwargs['dirty'])
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_dirty(self, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['build', '--dirty'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
args, kwargs = mock_build.call_args
self.assertTrue('dirty' in kwargs)
self.assertTrue(kwargs['dirty'])
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_config_file(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--config-file', 'mkdocs.yml'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
self.assertEqual(mock_load_config.call_count, 1)
args, kwargs = mock_load_config.call_args
self.assertTrue('config_file' in kwargs)
self.assertIsInstance(kwargs['config_file'], io.BufferedReader)
self.assertEqual(kwargs['config_file'].name, 'mkdocs.yml')
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_strict(self, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['build', '--strict'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
config_file=None,
strict=True,
theme=None,
use_directory_urls=None,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_theme(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--theme', 'readthedocs'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
config_file=None,
strict=None,
theme='readthedocs',
use_directory_urls=None,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_use_directory_urls(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--use-directory-urls'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
config_file=None,
strict=None,
theme=None,
use_directory_urls=True,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_no_directory_urls(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--no-directory-urls'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
config_file=None,
strict=None,
theme=None,
use_directory_urls=False,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_site_dir(self, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['build', '--site-dir', 'custom'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
site_dir='custom',
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_verbose(self, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['build', '--verbose'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
for log_name in 'mkdocs', 'mkdocs.structure.pages', 'mkdocs.plugins.foo':
self.assertEqual(logging.getLogger(log_name).getEffectiveLevel(), logging.DEBUG)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_quiet(self, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['build', '--quiet'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
for log_name in 'mkdocs', 'mkdocs.structure.pages', 'mkdocs.plugins.foo':
self.assertEqual(logging.getLogger(log_name).getEffectiveLevel(), logging.ERROR)
@mock.patch('mkdocs.commands.new.new', autospec=True)
def test_new(self, mock_new):
result = self.runner.invoke(cli.cli, ["new", "project"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
mock_new.assert_called_once_with('project')
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_defaults(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['gh-deploy'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
g_args, g_kwargs = mock_gh_deploy.call_args
self.assertTrue('message' in g_kwargs)
self.assertEqual(g_kwargs['message'], None)
self.assertTrue('force' in g_kwargs)
self.assertEqual(g_kwargs['force'], False)
self.assertTrue('ignore_version' in g_kwargs)
self.assertEqual(g_kwargs['ignore_version'], False)
self.assertEqual(mock_build.call_count, 1)
b_args, b_kwargs = mock_build.call_args
self.assertTrue('dirty' in b_kwargs)
self.assertFalse(b_kwargs['dirty'])
mock_load_config.assert_called_once_with(
remote_branch=None,
remote_name=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_clean(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['gh-deploy', '--clean'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
args, kwargs = mock_build.call_args
self.assertTrue('dirty' in kwargs)
self.assertFalse(kwargs['dirty'])
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_dirty(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['gh-deploy', '--dirty'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
args, kwargs = mock_build.call_args
self.assertTrue('dirty' in kwargs)
self.assertTrue(kwargs['dirty'])
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_config_file(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--config-file', 'mkdocs.yml'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
self.assertEqual(mock_load_config.call_count, 1)
args, kwargs = mock_load_config.call_args
self.assertTrue('config_file' in kwargs)
self.assertIsInstance(kwargs['config_file'], io.BufferedReader)
self.assertEqual(kwargs['config_file'].name, 'mkdocs.yml')
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_message(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--message', 'A commit message'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
g_args, g_kwargs = mock_gh_deploy.call_args
self.assertTrue('message' in g_kwargs)
self.assertEqual(g_kwargs['message'], 'A commit message')
self.assertEqual(mock_build.call_count, 1)
self.assertEqual(mock_load_config.call_count, 1)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_remote_branch(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--remote-branch', 'foo'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
remote_branch='foo',
remote_name=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_remote_name(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--remote-name', 'foo'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
remote_branch=None,
remote_name='foo',
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_force(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['gh-deploy', '--force'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
g_args, g_kwargs = mock_gh_deploy.call_args
self.assertTrue('force' in g_kwargs)
self.assertEqual(g_kwargs['force'], True)
self.assertEqual(mock_build.call_count, 1)
self.assertEqual(mock_load_config.call_count, 1)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_ignore_version(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--ignore-version'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
g_args, g_kwargs = mock_gh_deploy.call_args
self.assertTrue('ignore_version' in g_kwargs)
self.assertEqual(g_kwargs['ignore_version'], True)
self.assertEqual(mock_build.call_count, 1)
self.assertEqual(mock_load_config.call_count, 1)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_strict(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(cli.cli, ['gh-deploy', '--strict'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
remote_branch=None,
remote_name=None,
config_file=None,
strict=True,
theme=None,
use_directory_urls=None,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_theme(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--theme', 'readthedocs'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
remote_branch=None,
remote_name=None,
config_file=None,
strict=None,
theme='readthedocs',
use_directory_urls=None,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_use_directory_urls(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--use-directory-urls'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
remote_branch=None,
remote_name=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=True,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_no_directory_urls(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--no-directory-urls'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
remote_branch=None,
remote_name=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=False,
site_dir=None,
)
@mock.patch('mkdocs.config.load_config', autospec=True)
@mock.patch('mkdocs.commands.build.build', autospec=True)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy_site_dir(self, mock_gh_deploy, mock_build, mock_load_config):
result = self.runner.invoke(
cli.cli, ['gh-deploy', '--site-dir', 'custom'], catch_exceptions=False
)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
self.assertEqual(mock_build.call_count, 1)
mock_load_config.assert_called_once_with(
remote_branch=None,
remote_name=None,
config_file=None,
strict=None,
theme=None,
use_directory_urls=None,
site_dir='custom',
)
|
CLITests
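Editorial note: the CLI tests above all follow one pattern — patch the backend function with autospec, invoke the Click command through CliRunner, and assert the exact keyword arguments the option produces. Below is a minimal, self-contained sketch of that pattern; the `serve` command and `serve_backend` function here are hypothetical stand-ins, not MkDocs code.

# Hypothetical Click command wired to a patchable backend -- illustrative only.
from unittest import mock

import click
from click.testing import CliRunner


def serve_backend(dev_addr=None, livereload=True):
    raise RuntimeError("should be patched in tests")


@click.command()
@click.option("--dev-addr", default=None)
@click.option("--livereload/--no-livereload", default=True)
def serve(dev_addr, livereload):
    # Delegate to the backend so tests can assert on the kwargs it receives.
    serve_backend(dev_addr=dev_addr, livereload=livereload)


def test_serve_no_livereload():
    runner = CliRunner()
    with mock.patch(f"{__name__}.serve_backend", autospec=True) as mocked:
        result = runner.invoke(serve, ["--no-livereload"], catch_exceptions=False)
    assert result.exit_code == 0
    mocked.assert_called_once_with(dev_addr=None, livereload=False)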
|
python
|
coleifer__peewee
|
tests/models.py
|
{
"start": 110788,
"end": 112379
}
|
class ____(ModelTestCase):
requires = [User]
def test_tuples(self):
ua, ub, uc = [User.create(username=username) for username in 'abc']
query = User.select().where(
Tuple(User.username, User.id) == ('b', ub.id))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" '
'WHERE (("t1"."username", "t1"."id") = (?, ?))'), ['b', ub.id])
self.assertEqual(query.count(), 1)
obj = query.get()
self.assertEqual(obj, ub)
def test_tuple_subquery(self):
ua, ub, uc = [User.create(username=username) for username in 'abc']
UA = User.alias()
subquery = (UA
.select(UA.username, UA.id)
.where(UA.username != 'b'))
query = (User
.select(User.username)
.where(Tuple(User.username, User.id).in_(subquery))
.order_by(User.username))
self.assertEqual([u.username for u in query], ['a', 'c'])
@requires_models(CPK)
def test_row_value_composite_key(self):
CPK.insert_many([('k1', 1, 1), ('k2', 2, 2), ('k3', 3, 3)]).execute()
cpk = CPK.get(CPK._meta.primary_key == ('k2', 2))
self.assertEqual(cpk._pk, ('k2', 2))
cpk = CPK['k3', 3]
self.assertEqual(cpk._pk, ('k3', 3))
uq = CPK.update(extra=20).where(CPK._meta.primary_key != ('k2', 2))
uq.execute()
self.assertEqual(list(sorted(CPK.select().tuples())), [
('k1', 1, 20), ('k2', 2, 2), ('k3', 3, 20)])
|
TestTupleComparison
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/core/decl.py
|
{
"start": 7270,
"end": 8486
}
|
class ____(YamlBackedComponentDecl):
"""Declaration of a single component loaded from a YAML file."""
@staticmethod
def from_source_tree(
context: ComponentDeclLoadContext,
source_tree: ValueAndSourcePositionTree,
path: ComponentPath,
) -> "YamlDecl":
component_file_model = _parse_and_populate_model_with_annotated_errors(
cls=ComponentFileModel, obj_parse_root=source_tree, obj_key_path_prefix=[]
)
return YamlDecl(
context=context,
source_tree=source_tree,
component_file_model=component_file_model,
path=path,
)
def _load_component(self) -> "Component":
context = self.context_with_component_injected_scope.with_source_position_tree(
check.not_none(self.source_tree).source_position_tree,
)
model_cls = self.component_type.get_model_cls()
attributes = _process_attributes_with_enriched_validation_err(
self.source_tree, self.component_file_model, model_cls
)
return self.component_type.load(
attributes, ComponentLoadContext.from_decl_load_context(context, self)
)
@record
|
YamlDecl
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/decl_api.py
|
{
"start": 10465,
"end": 15576
}
|
class ____(interfaces._MappedAttribute[_T], _declared_attr_common):
"""Mark a class-level method as representing the definition of
a mapped property or Declarative directive.
:class:`_orm.declared_attr` is typically applied as a decorator to a class
level method, turning the attribute into a scalar-like property that can be
invoked from the uninstantiated class. The Declarative mapping process
looks for these :class:`_orm.declared_attr` callables as it scans classes,
and assumes any attribute marked with :class:`_orm.declared_attr` will be a
callable that will produce an object specific to the Declarative mapping or
table configuration.
:class:`_orm.declared_attr` is usually applicable to
:ref:`mixins <orm_mixins_toplevel>`, to define relationships that are to be
applied to different implementors of the class. It may also be used to
define dynamically generated column expressions and other Declarative
attributes.
Example::
class ProvidesUserMixin:
"A mixin that adds a 'user' relationship to classes."
user_id: Mapped[int] = mapped_column(ForeignKey("user_table.id"))
@declared_attr
def user(cls) -> Mapped["User"]:
return relationship("User")
When used with Declarative directives such as ``__tablename__``, the
:meth:`_orm.declared_attr.directive` modifier may be used which indicates
to :pep:`484` typing tools that the given method is not dealing with
:class:`_orm.Mapped` attributes::
class CreateTableName:
@declared_attr.directive
def __tablename__(cls) -> str:
return cls.__name__.lower()
:class:`_orm.declared_attr` can also be applied directly to mapped
classes, to allow for attributes that dynamically configure themselves
on subclasses when using mapped inheritance schemes. Below
illustrates :class:`_orm.declared_attr` to create a dynamic scheme
for generating the :paramref:`_orm.Mapper.polymorphic_identity` parameter
for subclasses::
class Employee(Base):
__tablename__ = "employee"
id: Mapped[int] = mapped_column(primary_key=True)
type: Mapped[str] = mapped_column(String(50))
@declared_attr.directive
def __mapper_args__(cls) -> Dict[str, Any]:
if cls.__name__ == "Employee":
return {
"polymorphic_on": cls.type,
"polymorphic_identity": "Employee",
}
else:
return {"polymorphic_identity": cls.__name__}
class Engineer(Employee):
pass
:class:`_orm.declared_attr` supports decorating functions that are
explicitly decorated with ``@classmethod``. This is never necessary from a
runtime perspective, however may be needed in order to support :pep:`484`
typing tools that don't otherwise recognize the decorated function as
having class-level behaviors for the ``cls`` parameter::
class SomethingMixin:
x: Mapped[int]
y: Mapped[int]
@declared_attr
@classmethod
def x_plus_y(cls) -> Mapped[int]:
return column_property(cls.x + cls.y)
.. versionadded:: 2.0 - :class:`_orm.declared_attr` can accommodate a
function decorated with ``@classmethod`` to help with :pep:`484`
integration where needed.
.. seealso::
:ref:`orm_mixins_toplevel` - Declarative Mixin documentation with
background on use patterns for :class:`_orm.declared_attr`.
""" # noqa: E501
if typing.TYPE_CHECKING:
def __init__(
self,
fn: _DeclaredAttrDecorated[_T],
cascading: bool = False,
): ...
def __set__(self, instance: Any, value: Any) -> None: ...
def __delete__(self, instance: Any) -> None: ...
# this is the Mapped[] API where at class descriptor get time we want
# the type checker to see InstrumentedAttribute[_T]. However the
# callable function prior to mapping in fact calls the given
# declarative function that does not return InstrumentedAttribute
@overload
def __get__(
self, instance: None, owner: Any
) -> InstrumentedAttribute[_T]: ...
@overload
def __get__(self, instance: object, owner: Any) -> _T: ...
def __get__(
self, instance: Optional[object], owner: Any
) -> Union[InstrumentedAttribute[_T], _T]: ...
@hybridmethod
def _stateful(cls, **kw: Any) -> _stateful_declared_attr[_T]:
return _stateful_declared_attr(**kw)
@hybridproperty
def directive(cls) -> _declared_directive[Any]:
# see mapping_api.rst for docstring
return _declared_directive # type: ignore
@hybridproperty
def cascading(cls) -> _stateful_declared_attr[_T]:
# see mapping_api.rst for docstring
return cls._stateful(cascading=True)
|
declared_attr
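Editorial note: the docstring above shows its mixin patterns only as fragments. Here is one possible self-contained sketch (assuming SQLAlchemy 2.0+) that combines the directive form and the mapped-attribute form; the `User`/`Address` models and table names are illustrative, not taken from SQLAlchemy's documentation.

from sqlalchemy import ForeignKey
from sqlalchemy.orm import (
    DeclarativeBase,
    Mapped,
    declared_attr,
    mapped_column,
    relationship,
)


class Base(DeclarativeBase):
    pass


class TablenameMixin:
    # Directive form: produces the table name, not a Mapped[] attribute.
    @declared_attr.directive
    def __tablename__(cls) -> str:
        return cls.__name__.lower()


class User(TablenameMixin, Base):
    id: Mapped[int] = mapped_column(primary_key=True)


class ProvidesUserMixin:
    # Mapped-attribute form: each implementing class gets its own relationship.
    user_id: Mapped[int] = mapped_column(ForeignKey("user.id"))

    @declared_attr
    def user(cls) -> Mapped["User"]:
        return relationship("User")


class Address(TablenameMixin, ProvidesUserMixin, Base):
    id: Mapped[int] = mapped_column(primary_key=True)


print(User.__table__.name, Address.__table__.name)  # user address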
|
python
|
pytorch__pytorch
|
tools/stats/utilization_stats_lib.py
|
{
"start": 2051,
"end": 2635
}
|
class ____:
created_at: int
repo: str
workflow_id: int
run_attempt: int
job_id: int
workflow_name: str
job_name: str
usage_collect_interval: float
data_model_version: str
gpu_count: int
cpu_count: int
gpu_type: str
start_at: int
end_at: int
segments: list[OssCiSegmentV1]
tags: list[str] = field(default_factory=list)
# this data model is for the time series data:
# https://github.com/pytorch/test-infra/blob/main/clickhouse_db_schema/oss_ci_utilization/oss_ci_time_series_schema.sql
@dataclass
|
OssCiUtilizationMetadataV1
|
python
|
pytorch__pytorch
|
torch/mtia/mtia_graph.py
|
{
"start": 1196,
"end": 2384
}
|
class ____:
default_capture_stream: Optional[torch.mtia.Stream] = None
def __init__(
self,
mtia_graph: MTIAGraph,
pool: Optional[_POOL_HANDLE] = None,
stream: Optional[torch.mtia.Stream] = None,
):
if self.__class__.default_capture_stream is None:
self.__class__.default_capture_stream = torch.mtia.current_stream()
self.pool: Union[tuple[()], tuple[_POOL_HANDLE]] = (
() if pool is None else (pool,)
)
self.capture_stream = (
stream if stream is not None else self.__class__.default_capture_stream
)
assert self.capture_stream is not None
self.stream_ctx = torch.mtia.stream(self.capture_stream)
self.mtia_graph = mtia_graph
def __enter__(self) -> None:
torch.mtia.synchronize()
torch.mtia.empty_cache()
self.stream_ctx.__enter__()
pool_arg = self.pool[0] if self.pool else (0, 0)
self.mtia_graph.capture_begin(pool_arg)
def __exit__(self, *args: object) -> None:
self.mtia_graph.capture_end()
self.stream_ctx.__exit__(*args)
__all__ = [
"MTIAGraph",
"graph",
]
|
graph
|
python
|
tornadoweb__tornado
|
tornado/test/simple_httpclient_test.py
|
{
"start": 1478,
"end": 1893
}
|
class ____(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@gen.coroutine
def get(self):
logging.debug("queuing trigger")
event = Event()
self.queue.append(event.set)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
yield event.wait()
|
TriggerHandler
|
python
|
pyparsing__pyparsing
|
pyparsing/common.py
|
{
"start": 285,
"end": 15619
}
|
class ____:
"""Here are some common low-level expressions that may be useful in
jump-starting parser development:
- numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
:class:`scientific notation<sci_real>`)
- common :class:`programming identifiers<identifier>`
- network addresses (:class:`MAC<mac_address>`,
:class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
- ISO8601 :class:`dates<iso8601_date>` and
:class:`datetime<iso8601_datetime>`
- :class:`UUID<uuid>`
- :class:`comma-separated list<comma_separated_list>`
- :class:`url`
Parse actions:
- :class:`convert_to_integer`
- :class:`convert_to_float`
- :class:`convert_to_date`
- :class:`convert_to_datetime`
- :class:`strip_html_tags`
- :class:`upcase_tokens`
- :class:`downcase_tokens`
Examples:
.. testcode::
pyparsing_common.number.run_tests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
.. testoutput::
:options: +NORMALIZE_WHITESPACE
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
.. testcode::
pyparsing_common.fnumber.run_tests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
.. testoutput::
:options: +NORMALIZE_WHITESPACE
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
.. testcode::
pyparsing_common.hex_integer.run_tests('''
# hex numbers
100
FF
''')
.. testoutput::
:options: +NORMALIZE_WHITESPACE
# hex numbers
100
[256]
FF
[255]
.. testcode::
pyparsing_common.fraction.run_tests('''
# fractions
1/2
-3/4
''')
.. testoutput::
:options: +NORMALIZE_WHITESPACE
# fractions
1/2
[0.5]
-3/4
[-0.75]
.. testcode::
pyparsing_common.mixed_integer.run_tests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
.. testoutput::
:options: +NORMALIZE_WHITESPACE
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
.. testcode::
import uuid
pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID))
pyparsing_common.uuid.run_tests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
.. testoutput::
:options: +NORMALIZE_WHITESPACE
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
@staticmethod
def convert_to_integer(_, __, t):
"""
Parse action for converting parsed integers to Python int
"""
return [int(tt) for tt in t]
@staticmethod
def convert_to_float(_, __, t):
"""
Parse action for converting parsed numbers to Python float
"""
return [float(tt) for tt in t]
integer = (
Word(nums)
.set_name("integer")
.set_parse_action(
convert_to_integer
if PY_310
else lambda t: [int(tt) for tt in t] # type: ignore[misc]
)
)
"""expression that parses an unsigned integer, converts to an int"""
hex_integer = (
Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
)
"""expression that parses a hexadecimal integer, converts to an int"""
signed_integer = (
Regex(r"[+-]?\d+")
.set_name("signed integer")
.set_parse_action(
convert_to_integer
if PY_310
else lambda t: [int(tt) for tt in t] # type: ignore[misc]
)
)
"""expression that parses an integer with optional leading sign, converts to an int"""
fraction = (
signed_integer().set_parse_action(
convert_to_float
if PY_310
else lambda t: [float(tt) for tt in t] # type: ignore[misc]
)
+ "/"
+ signed_integer().set_parse_action(
convert_to_float
if PY_310
else lambda t: [float(tt) for tt in t] # type: ignore[misc]
)
).set_name("fraction")
"""fractional expression of an integer divided by an integer, converts to a float"""
fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
mixed_integer = (
fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
).set_name("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, converts to a float"""
mixed_integer.add_parse_action(sum)
real = (
Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
.set_name("real number")
.set_parse_action(
convert_to_float
if PY_310
else lambda t: [float(tt) for tt in t] # type: ignore[misc]
)
)
"""expression that parses a floating point number, converts to a float"""
sci_real = (
Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
.set_name("real number with scientific notation")
.set_parse_action(
convert_to_float
if PY_310
else lambda t: [float(tt) for tt in t] # type: ignore[misc]
)
)
"""expression that parses a floating point number with optional
scientific notation, converts to a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).set_name("number").streamline()
"""any numeric expression, converts to the corresponding Python type"""
fnumber = (
Regex(r"[+-]?\d+\.?\d*(?:[eE][+-]?\d+)?")
.set_name("fnumber")
.set_parse_action(
convert_to_float
if PY_310
else lambda t: [float(tt) for tt in t] # type: ignore[misc]
)
)
"""any int or real number, always converts to a float"""
ieee_float = (
Regex(r"(?i:[+-]?(?:(?:\d+\.?\d*(?:e[+-]?\d+)?)|nan|inf(?:inity)?))")
.set_name("ieee_float")
.set_parse_action(
convert_to_float
if PY_310
else lambda t: [float(tt) for tt in t] # type: ignore[misc]
)
)
"""any floating-point literal (int, real number, infinity, or NaN), converts to a float"""
identifier = Word(identchars, identbodychars).set_name("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(
r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
).set_name("IPv4 address")
"IPv4 address (``0.0.0.0 - 255.255.255.255``)"
_ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
_full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
"full IPv6 address"
)
_short_ipv6_address = (
Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ "::"
+ Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
).set_name("short IPv6 address")
_short_ipv6_address.add_condition(
lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
ipv6_address = Combine(
(_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
"IPv6 address"
)
).set_name("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
).set_name("MAC address")
"MAC address xx:xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convert_to_date(fmt: str = "%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
Example:
.. testcode::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.set_parse_action(pyparsing_common.convert_to_date())
print(date_expr.parse_string("1999-12-31"))
prints:
.. testoutput::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(ss, ll, tt):
try:
return datetime.strptime(tt[0], fmt).date()
except ValueError as ve:
raise ParseException(ss, ll, str(ve))
return cvt_fn
@staticmethod
def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
"""Helper to create a parse action for converting parsed
datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
Example:
.. testcode::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())
print(dt_expr.parse_string("1999-12-31T23:59:59.999"))
prints:
.. testoutput::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(
r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
).set_name("ISO8601 date")
"ISO8601 date (``yyyy-mm-dd``)"
iso8601_datetime = Regex(
r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
).set_name("ISO8601 datetime")
"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
_html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
@staticmethod
def strip_html_tags(s: str, l: int, tokens: ParseResults):
"""Parse action to remove HTML tags from web page HTML source
Example:
.. testcode::
# strip HTML links from normal text
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
td, td_end = make_html_tags("TD")
table_text = td + SkipTo(td_end).set_parse_action(
pyparsing_common.strip_html_tags)("body") + td_end
print(table_text.parse_string(text).body)
Prints:
.. testoutput::
More info at the pyparsing wiki page
"""
return pyparsing_common._html_stripper.transform_string(tokens[0])
_commasepitem = (
Combine(
OneOrMore(
~Literal(",")
+ ~LineEnd()
+ Word(printables, exclude_chars=",")
+ Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
)
)
.streamline()
.set_name("commaItem")
)
comma_separated_list = DelimitedList(
Opt(quoted_string.copy() | _commasepitem, default="")
).set_name("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
@staticmethod
def upcase_tokens(s, l, t):
"""Parse action to convert tokens to upper case."""
return [tt.upper() for tt in t]
@staticmethod
def downcase_tokens(s, l, t):
"""Parse action to convert tokens to lower case."""
return [tt.lower() for tt in t]
# fmt: off
url = Regex(
# https://mathiasbynens.be/demo/url-regex
# https://gist.github.com/dperini/729294
r"(?P<url>" +
# protocol identifier (optional)
# short syntax // still required
r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
# user:pass BasicAuth (optional)
r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
r"(?P<host>" +
# IP address exclusion
# private & local networks
r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
# IP address dotted notation octets
# excludes loopback network 0.0.0.0
# excludes reserved space >= 224.0.0.0
# excludes network & broadcast addresses
# (first & last IP address of each class)
r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
r"|" +
# host & domain names, may end with dot
# can be replaced by a shortest alternative
# (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
r"(?:" +
r"(?:" +
r"[a-z0-9\u00a1-\uffff]" +
r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
r")?" +
r"[a-z0-9\u00a1-\uffff]\." +
r")+" +
# TLD identifier name, may end with dot
r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
r")" +
# port number (optional)
r"(:(?P<port>\d{2,5}))?" +
# resource path (optional)
r"(?P<path>\/[^?# ]*)?" +
# query string (optional)
r"(\?(?P<query>[^#]*))?" +
# fragment (optional)
r"(#(?P<fragment>\S*))?" +
r")"
).set_name("url")
"""
URL (http/https/ftp scheme)
.. versionchanged:: 3.1.0
``url`` named group added
"""
# fmt: on
# pre-PEP8 compatibility names
# fmt: off
convertToInteger = staticmethod(replaced_by_pep8("convertToInteger", convert_to_integer))
convertToFloat = staticmethod(replaced_by_pep8("convertToFloat", convert_to_float))
convertToDate = staticmethod(replaced_by_pep8("convertToDate", convert_to_date))
convertToDatetime = staticmethod(replaced_by_pep8("convertToDatetime", convert_to_datetime))
stripHTMLTags = staticmethod(replaced_by_pep8("stripHTMLTags", strip_html_tags))
upcaseTokens = staticmethod(replaced_by_pep8("upcaseTokens", upcase_tokens))
downcaseTokens = staticmethod(replaced_by_pep8("downcaseTokens", downcase_tokens))
# fmt: on
_builtin_exprs = [
v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
]
|
pyparsing_common
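Editorial note: a few quick usage lines for the helpers defined above (assumes pyparsing is installed; `pyparsing_common` is re-exported at the package top level).

from pyparsing import pyparsing_common as ppc

print(ppc.number.parse_string("6.02e23")[0])         # parsed as a Python float
print(ppc.integer.parse_string("100")[0])            # parsed as a Python int
print(ppc.ipv4_address.parse_string("192.168.0.1"))  # single-token match
print(ppc.iso8601_date.parse_string("1999-12-31"))   # named fields: year/month/day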
|
python
|
tox-dev__tox
|
src/tox/tox_env/python/api.py
|
{
"start": 686,
"end": 1836
}
|
class ____:
implementation: str
version_info: VersionInfo
version: str
is_64: bool
platform: str
extra: dict[str, Any]
free_threaded: bool = False
@property
def version_no_dot(self) -> str:
return f"{self.version_info.major}{self.version_info.minor}"
@property
def impl_lower(self) -> str:
return self.implementation.lower()
@property
def version_dot(self) -> str:
return f"{self.version_info.major}.{self.version_info.minor}"
PY_FACTORS_RE = re.compile(
r"""
^(?!py$) # don't match 'py' as it doesn't provide any info
(?P<impl>py|pypy|cpython|jython|graalpy|rustpython|ironpython) # the interpreter; most users will simply use 'py'
(?:
(?P<version>[2-9]\.?[0-9]?[0-9]?) # the version; one of: MAJORMINOR, MAJOR.MINOR
(?P<threaded>t?) # version followed by t for free-threading
)?$
""",
re.VERBOSE,
)
PY_FACTORS_RE_EXPLICIT_VERSION = re.compile(r"^((?P<impl>cpython|pypy)-)?(?P<version>[2-9]\.[0-9]+)(?P<threaded>t?)$")
|
PythonInfo
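Editorial note: to make the factor grammar above concrete, here is a small standalone demo. The pattern is copied verbatim from the snippet and compiled locally, so the example does not depend on tox's internal import layout.

import re

PY_FACTORS_RE = re.compile(
    r"""
    ^(?!py$)  # don't match 'py' as it doesn't provide any info
    (?P<impl>py|pypy|cpython|jython|graalpy|rustpython|ironpython)
    (?:
        (?P<version>[2-9]\.?[0-9]?[0-9]?)  # MAJORMINOR or MAJOR.MINOR
        (?P<threaded>t?)                   # trailing t marks free-threading
    )?$
    """,
    re.VERBOSE,
)

for factor in ("py311", "py3.12", "pypy39", "py313t", "py"):
    m = PY_FACTORS_RE.match(factor)
    print(factor, m.groupdict() if m else None)  # 'py' alone is rejected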
|
python
|
pypa__warehouse
|
warehouse/admin/views/sponsors.py
|
{
"start": 455,
"end": 7147
}
|
class ____(wtforms.Form):
name = wtforms.fields.StringField(
validators=[
wtforms.validators.Length(max=100),
wtforms.validators.InputRequired(),
],
)
service = wtforms.fields.StringField(
validators=[wtforms.validators.Length(max=256), wtforms.validators.Optional()]
)
link_url = wtforms.fields.StringField(
validators=[
wtforms.validators.InputRequired(),
URIValidator(),
]
)
color_logo = wtforms.fields.FileField()
color_logo_url = wtforms.fields.StringField(
validators=[
URIValidator(),
]
)
white_logo = wtforms.fields.FileField()
white_logo_url = wtforms.fields.StringField(
validators=[
wtforms.validators.Optional(),
URIValidator(),
]
)
activity_markdown = wtforms.fields.TextAreaField(render_kw={"rows": 10, "cols": 60})
is_active = wtforms.fields.BooleanField(default=False)
footer = wtforms.fields.BooleanField()
psf_sponsor = wtforms.fields.BooleanField()
infra_sponsor = wtforms.fields.BooleanField()
one_time = wtforms.fields.BooleanField()
sidebar = wtforms.fields.BooleanField()
def validate(self, *args, **kwargs):
if not super().validate(*args, **kwargs):
return False
require_white_logo = self.footer.data or self.infra_sponsor.data
if require_white_logo and not self.white_logo_url.data:
self.white_logo.errors.append(
"Must upload a white logo if this is a footer or infra sponsor."
)
return False
return True
@view_config(
route_name="admin.sponsor.list",
renderer="warehouse.admin:templates/admin/sponsors/list.html",
permission=Permissions.AdminSponsorsRead,
request_method="GET",
uses_session=True,
)
def sponsor_list(request):
sponsors = (
request.db.query(Sponsor).order_by(Sponsor.is_active.desc(), Sponsor.name).all()
)
for sponsor in sponsors:
visibility = [
"PSF Sponsor" if sponsor.psf_sponsor else None,
"Infra Sponsor" if sponsor.infra_sponsor else None,
"One time" if sponsor.one_time else None,
"Footer" if sponsor.footer else None,
"Sidebar" if sponsor.sidebar else None,
]
sponsor.visibility = " | ".join([v for v in visibility if v])
return {"sponsors": sponsors}
def _upload_image(image_name, request, form):
sponsor_name = slugify(form.name.data)
if request.POST.get(image_name) not in [None, b""]:
with tempfile.NamedTemporaryFile() as fp:
fp.write(request.POST[image_name].file.read())
fp.flush()
content_type = request.POST[image_name].type
storage = request.find_service(ISponsorLogoStorage)
extension = os.path.splitext(request.POST[image_name].filename)[-1]
fingerprint = secrets.token_urlsafe(6)
filename = f"{sponsor_name}-{slugify(image_name)}-{fingerprint}{extension}"
return storage.store(filename, fp.name, content_type)
return ""
@view_config(
route_name="admin.sponsor.edit",
renderer="warehouse.admin:templates/admin/sponsors/edit.html",
permission=Permissions.AdminSponsorsRead,
request_method="GET",
uses_session=True,
require_csrf=True,
require_methods=False,
)
@view_config(
route_name="admin.sponsor.edit",
renderer="warehouse.admin:templates/admin/sponsors/edit.html",
permission=Permissions.AdminSponsorsWrite,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def edit_sponsor(request):
id_ = request.matchdict["sponsor_id"]
try:
sponsor = request.db.query(Sponsor).filter(Sponsor.id == id_).one()
except NoResultFound:
raise HTTPNotFound
form = SponsorForm(request.POST if request.method == "POST" else None, sponsor)
if request.method == "POST":
if _color_logo_url := _upload_image("color_logo", request, form):
form.color_logo_url.data = _color_logo_url
if _white_logo_url := _upload_image("white_logo", request, form):
form.white_logo_url.data = _white_logo_url
if form.validate():
form.populate_obj(sponsor)
request.session.flash("Sponsor updated", queue="success")
return HTTPSeeOther(location=request.current_route_path())
return {"sponsor": sponsor, "form": form}
@view_config(
route_name="admin.sponsor.create",
renderer="warehouse.admin:templates/admin/sponsors/edit.html",
permission=Permissions.AdminSponsorsRead,
request_method="GET",
uses_session=True,
require_csrf=True,
require_methods=False,
)
@view_config(
route_name="admin.sponsor.create",
renderer="warehouse.admin:templates/admin/sponsors/edit.html",
permission=Permissions.AdminSponsorsWrite,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def create_sponsor(request):
form = SponsorForm(request.POST if request.method == "POST" else None)
if request.method == "POST":
form.color_logo_url.data = _upload_image("color_logo", request, form)
form.white_logo_url.data = _upload_image("white_logo", request, form)
if form.validate():
del form.color_logo
del form.white_logo
sponsor = Sponsor(**form.data)
request.db.add(sponsor)
request.session.flash(
f"Added new sponsor '{sponsor.name}'",
queue="success",
)
redirect_url = request.route_url("admin.sponsor.list")
return HTTPSeeOther(location=redirect_url)
return {"form": form}
@view_config(
route_name="admin.sponsor.delete",
require_methods=["POST"],
permission=Permissions.AdminSponsorsWrite,
uses_session=True,
require_csrf=True,
)
def delete_sponsor(request):
id_ = request.matchdict["sponsor_id"]
try:
sponsor = request.db.query(Sponsor).filter(Sponsor.id == id_).one()
except NoResultFound:
raise HTTPNotFound
# Safeguard check on sponsor name
if sponsor.name != request.params.get("sponsor"):
request.session.flash("Wrong confirmation input", queue="error")
return HTTPSeeOther(
request.route_url("admin.sponsor.edit", sponsor_id=sponsor.id)
)
# Delete the sponsor
request.db.delete(sponsor)
request.session.flash(f"Deleted sponsor {sponsor.name}", queue="success")
return HTTPSeeOther(request.route_url("admin.sponsor.list"))
|
SponsorForm
|
python
|
imageio__imageio
|
imageio/plugins/pillow.py
|
{
"start": 2225,
"end": 22409
}
|
class ____(PluginV3):
def __init__(self, request: Request) -> None:
"""Instantiate a new Pillow Plugin Object
Parameters
----------
request : {Request}
A request object representing the resource to be operated on.
"""
super().__init__(request)
# Register HEIF opener for Pillow
try:
from pillow_heif import register_heif_opener
except ImportError:
pass
else:
register_heif_opener()
# Register AVIF opener for Pillow
try:
from pillow_heif import register_avif_opener
except ImportError:
pass
else:
register_avif_opener()
self._image: Image = None
self.images_to_write = []
if request.mode.io_mode == IOMode.read:
try:
# Check if it is generally possible to read the image.
# This will not read any data and merely try to find a
# compatible pillow plugin (ref: the pillow docs).
image = Image.open(request.get_file())
except UnidentifiedImageError:
if request._uri_type == URI_BYTES:
raise InitializationError(
"Pillow can not read the provided bytes."
) from None
else:
raise InitializationError(
f"Pillow can not read {request.raw_uri}."
) from None
self._image = image
else:
self.save_args = {}
extension = self.request.extension or self.request.format_hint
if extension is None:
warnings.warn(
"Can't determine file format to write as. You _must_"
" set `format` during write or the call will fail. Use "
"`extension` to suppress this warning.",
UserWarning,
)
return
tirage = [Image.preinit, Image.init]
for format_loader in tirage:
format_loader()
if extension in Image.registered_extensions().keys():
return
raise InitializationError(
f"Pillow can not write `{extension}` files."
) from None
def close(self) -> None:
self._flush_writer()
if self._image:
self._image.close()
self._request.finish()
def read(
self,
*,
index: int = None,
mode: str = None,
rotate: bool = False,
apply_gamma: bool = False,
writeable_output: bool = True,
pilmode: str = None,
exifrotate: bool = None,
as_gray: bool = None,
) -> np.ndarray:
"""
Parses the given URI and creates a ndarray from it.
Parameters
----------
index : int
If the ImageResource contains multiple ndimages, and index is an
integer, select the index-th ndimage from among them and return it.
If index is an ellipsis (...), read all ndimages in the file and
stack them along a new batch dimension and return them. If index is
None, this plugin reads the first image of the file (index=0) unless
the image is a GIF or APNG, in which case all images are read
(index=...).
mode : str
Convert the image to the given mode before returning it. If None,
the mode will be left unchanged. Possible modes can be found at:
https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
rotate : bool
If True and the image contains an EXIF orientation tag,
apply the orientation before returning the ndimage.
apply_gamma : bool
If True and the image contains metadata about gamma, apply gamma
correction to the image.
writeable_output : bool
If True, ensure that the image is writable before returning it to
the user. This incurs a full copy of the pixel data if the data
served by pillow is read-only. Consequently, setting this flag to
False improves performance for some images.
pilmode : str
Deprecated, use `mode` instead.
exifrotate : bool
Deprecated, use `rotate` instead.
as_gray : bool
Deprecated. Exists to raise a constructive error message.
Returns
-------
ndimage : ndarray
A numpy array containing the loaded image data
Notes
-----
If you read a paletted image (e.g. GIF) then the plugin will apply the
palette by default. Should you wish to read the palette indices of each
pixel, use ``mode="P"``. The corresponding color palette can be found in
the image's metadata using the ``palette`` key when metadata is
extracted using the ``exclude_applied=False`` kwarg. The latter is
needed, as palettes are applied by default and hence excluded by default
to keep metadata and pixel data consistent.
"""
if pilmode is not None:
warnings.warn(
"`pilmode` is deprecated. Use `mode` instead.", DeprecationWarning
)
mode = pilmode
if exifrotate is not None:
warnings.warn(
"`exifrotate` is deprecated. Use `rotate` instead.", DeprecationWarning
)
rotate = exifrotate
if as_gray is not None:
raise TypeError(
"The keyword `as_gray` is no longer supported. "
"Use `mode='F'` for a backward-compatible result, or "
"`mode='L'` for an integer-valued result."
)
if self._image.format == "GIF":
# Converting GIF P frames to RGB
# https://github.com/python-pillow/Pillow/pull/6150
GifImagePlugin.LOADING_STRATEGY = (
GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
)
if index is None:
if self._image.format == "GIF":
index = Ellipsis
elif self._image.custom_mimetype == "image/apng":
index = Ellipsis
else:
index = 0
if isinstance(index, int):
# will raise IO error if index >= number of frames in image
self._image.seek(index)
image = self._apply_transforms(
self._image, mode, rotate, apply_gamma, writeable_output
)
else:
iterator = self.iter(
mode=mode,
rotate=rotate,
apply_gamma=apply_gamma,
writeable_output=writeable_output,
)
image = np.stack([im for im in iterator], axis=0)
return image
def iter(
self,
*,
mode: str = None,
rotate: bool = False,
apply_gamma: bool = False,
writeable_output: bool = True,
) -> Iterator[np.ndarray]:
"""
Iterate over all ndimages/frames in the URI
Parameters
----------
mode : {str, None}
Convert the image to the given mode before returning it. If None,
the mode will be left unchanged. Possible modes can be found at:
https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
rotate : {bool}
If set to ``True`` and the image contains an EXIF orientation tag,
apply the orientation before returning the ndimage.
apply_gamma : {bool}
If ``True`` and the image contains metadata about gamma, apply gamma
correction to the image.
writeable_output : bool
If True, ensure that the image is writable before returning it to
the user. This incurs a full copy of the pixel data if the data
served by pillow is read-only. Consequently, setting this flag to
False improves performance for some images.
"""
for im in ImageSequence.Iterator(self._image):
yield self._apply_transforms(
im, mode, rotate, apply_gamma, writeable_output
)
def _apply_transforms(
self, image, mode, rotate, apply_gamma, writeable_output
) -> np.ndarray:
if mode is not None:
image = image.convert(mode)
elif image.mode == "P":
# adjust for pillow9 changes
# see: https://github.com/python-pillow/Pillow/issues/5929
image = image.convert(image.palette.mode)
elif image.format == "PNG" and image.mode == "I":
major, minor, patch = pillow_version()
if sys.byteorder == "little":
desired_mode = "I;16"
else: # pragma: no cover
# can't test big-endian in GH-Actions
desired_mode = "I;16B"
if major < 10: # pragma: no cover
warnings.warn(
"Loading 16-bit (uint16) PNG as int32 due to limitations "
"in pillow's PNG decoder. This will be fixed in a future "
version of pillow which will make this warning disappear.",
UserWarning,
)
elif minor < 1: # pragma: no cover
# pillow<10.1.0 can directly decode into 16-bit grayscale
image.mode = desired_mode
else:
# pillow >= 10.1.0
image = image.convert(desired_mode)
image = np.asarray(image)
meta = self.metadata(index=self._image.tell(), exclude_applied=False)
if rotate and "Orientation" in meta:
transformation = _exif_orientation_transform(
meta["Orientation"], self._image.mode
)
image = transformation(image)
if apply_gamma and "gamma" in meta:
gamma = float(meta["gamma"])
scale = float(65536 if image.dtype == np.uint16 else 255)
gain = 1.0
image = ((image / scale) ** gamma) * scale * gain + 0.4999
image = np.round(image).astype(np.uint8)
if writeable_output and not image.flags["WRITEABLE"]:
image = np.array(image)
return image
def write(
self,
ndimage: Union[ArrayLike, List[ArrayLike]],
*,
mode: str = None,
format: str = None,
is_batch: bool = None,
**kwargs,
) -> Optional[bytes]:
"""
Write an ndimage to the URI specified in path.
If the URI points to a file on the current host and the file does not
yet exist it will be created. If the file exists already, it will be
appended if possible; otherwise, it will be replaced.
If necessary, the image is broken down along the leading dimension to
fit into individual frames of the chosen format. If the format doesn't
support multiple frames, an IOError is raised.
Parameters
----------
image : ndarray or list
The ndimage to write. If a list is given each element is expected to
be an ndimage.
mode : str
Specify the image's color format. If None (default), the mode is
inferred from the array's shape and dtype. Possible modes can be
found at:
https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
format : str
Optional format override. If omitted, the format to use is
determined from the filename extension. If a file object was used
instead of a filename, this parameter must always be used.
is_batch : bool
Explicitly tell the writer that ``image`` is a batch of images
(True) or not (False). If None, the writer will guess this from the
provided ``mode`` or ``image.shape``. While the latter often works,
it may cause problems for small images due to aliasing of spatial
and color-channel axes.
kwargs : ...
Extra arguments to pass to pillow. If a writer doesn't recognise an
option, it is silently ignored. The available options are described
in pillow's `image format documentation
<https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html>`_
for each writer.
Notes
-----
When writing batches of very narrow (2-4 pixels wide) gray images, set
the ``mode`` explicitly to avoid the batch being identified as a colored
image.
"""
if "fps" in kwargs:
warnings.warn(
"The keyword `fps` is no longer supported. Use `duration` "
"(in ms) instead, e.g. `fps=50` == `duration=20` (1000 * 1/50).",
DeprecationWarning,
)
kwargs["duration"] = 1000 * 1 / kwargs.get("fps")
if isinstance(ndimage, list):
ndimage = np.stack(ndimage, axis=0)
is_batch = True
else:
ndimage = np.asarray(ndimage)
# check if ndimage is a batch of frames/pages (e.g. for writing GIF)
# if mode is given, use it; otherwise fall back to image.ndim only
if is_batch is not None:
pass
elif mode is not None:
is_batch = (
ndimage.ndim > 3 if Image.getmodebands(mode) > 1 else ndimage.ndim > 2
)
elif ndimage.ndim == 2:
is_batch = False
elif ndimage.ndim == 3 and ndimage.shape[-1] == 1:
raise ValueError("Can't write images with one color channel.")
elif ndimage.ndim == 3 and ndimage.shape[-1] in [2, 3, 4]:
# Note: this makes a channel-last assumption
is_batch = False
else:
is_batch = True
if not is_batch:
ndimage = ndimage[None, ...]
for frame in ndimage:
pil_frame = Image.fromarray(frame, mode=mode)
if "bits" in kwargs:
pil_frame = pil_frame.quantize(colors=2 ** kwargs["bits"])
self.images_to_write.append(pil_frame)
if (
format is not None
and "format" in self.save_args
and self.save_args["format"] != format
):
old_format = self.save_args["format"]
warnings.warn(
"Changing the output format during incremental"
" writes is strongly discouraged."
f" Was `{old_format}`, is now `{format}`.",
UserWarning,
)
extension = self.request.extension or self.request.format_hint
self.save_args["format"] = format or Image.registered_extensions()[extension]
self.save_args.update(kwargs)
# when writing to `bytes` we flush instantly
result = None
if self._request._uri_type == URI_BYTES:
self._flush_writer()
file = cast(BytesIO, self._request.get_file())
result = file.getvalue()
return result
def _flush_writer(self):
if len(self.images_to_write) == 0:
return
primary_image = self.images_to_write.pop(0)
if len(self.images_to_write) > 0:
self.save_args["save_all"] = True
self.save_args["append_images"] = self.images_to_write
primary_image.save(self._request.get_file(), **self.save_args)
self.images_to_write.clear()
self.save_args.clear()
def get_meta(self, *, index=0) -> Dict[str, Any]:
return self.metadata(index=index, exclude_applied=False)
def metadata(
self, index: int = None, exclude_applied: bool = True
) -> Dict[str, Any]:
"""Read ndimage metadata.
Parameters
----------
index : {integer, None}
If the ImageResource contains multiple ndimages, and index is an
integer, select the index-th ndimage from among them and return its
metadata. If index is an ellipsis (...), read and return global
metadata. If index is None, this plugin reads metadata from the
first image of the file (index=0) unless the image is a GIF or APNG,
in which case global metadata is read (index=...).
exclude_applied : bool
If True, exclude metadata fields that are applied to the image while
reading. For example, if the binary data contains a rotation flag,
the image is rotated by default and the rotation flag is excluded
from the metadata to avoid confusion.
Returns
-------
metadata : dict
A dictionary of format-specific metadata.
"""
if index is None:
if self._image.format == "GIF":
index = Ellipsis
elif self._image.custom_mimetype == "image/apng":
index = Ellipsis
else:
index = 0
if isinstance(index, int) and self._image.tell() != index:
self._image.seek(index)
metadata = self._image.info.copy()
metadata["mode"] = self._image.mode
metadata["shape"] = self._image.size
if self._image.mode == "P" and not exclude_applied:
metadata["palette"] = np.asarray(tuple(self._image.palette.colors.keys()))
if self._image.getexif():
exif_data = {
ExifTags.TAGS.get(key, "unknown"): value
for key, value in dict(self._image.getexif()).items()
}
exif_data.pop("unknown", None)
metadata.update(exif_data)
if exclude_applied:
metadata.pop("Orientation", None)
return metadata
def properties(self, index: int = None) -> ImageProperties:
"""Standardized ndimage metadata
Parameters
----------
index : int
If the ImageResource contains multiple ndimages, and index is an
integer, select the index-th ndimage from among them and return its
properties. If index is an ellipsis (...), read and return the
properties of all ndimages in the file stacked along a new batch
dimension. If index is None, this plugin reads and returns the
properties of the first image (index=0) unless the image is a GIF or
            APNG, in which case it reads and returns the properties of all images
(index=...).
Returns
-------
properties : ImageProperties
A dataclass filled with standardized image metadata.
Notes
-----
This does not decode pixel data and is fast for large images.
"""
if index is None:
if self._image.format == "GIF":
index = Ellipsis
elif self._image.custom_mimetype == "image/apng":
index = Ellipsis
else:
index = 0
if index is Ellipsis:
self._image.seek(0)
else:
self._image.seek(index)
if self._image.mode == "P":
# mode of palette images is determined by their palette
mode = self._image.palette.mode
else:
mode = self._image.mode
width: int = self._image.width
height: int = self._image.height
shape: Tuple[int, ...] = (height, width)
n_frames: Optional[int] = None
if index is ...:
n_frames = getattr(self._image, "n_frames", 1)
shape = (n_frames, *shape)
dummy = np.asarray(Image.new(mode, (1, 1)))
pil_shape: Tuple[int, ...] = dummy.shape
if len(pil_shape) > 2:
shape = (*shape, *pil_shape[2:])
return ImageProperties(
shape=shape,
dtype=dummy.dtype,
n_images=n_frames,
is_batch=index is Ellipsis,
)
|
PillowPlugin
|
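A minimal, self-contained sketch of the EXIF-name mapping pattern used in the metadata() method above; only Pillow is assumed, and "photo.jpg" is a placeholder path, not a file referenced by the row.
from PIL import ExifTags, Image

with Image.open("photo.jpg") as im:  # placeholder path
    # Map raw numeric EXIF tag ids to readable names, as metadata() does above.
    exif = {
        ExifTags.TAGS.get(tag_id, "unknown"): value
        for tag_id, value in im.getexif().items()
    }
    exif.pop("unknown", None)  # drop tags whose id has no known name
    print(exif.get("Orientation"))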
python
|
doocs__leetcode
|
solution/2500-2599/2573.Find the String with LCP/Solution.py
|
{
"start": 0,
"end": 829
}
|
class ____:
def findTheString(self, lcp: List[List[int]]) -> str:
n = len(lcp)
s = [""] * n
i = 0
for c in ascii_lowercase:
while i < n and s[i]:
i += 1
if i == n:
break
for j in range(i, n):
if lcp[i][j]:
s[j] = c
if "" in s:
return ""
for i in range(n - 1, -1, -1):
for j in range(n - 1, -1, -1):
if s[i] == s[j]:
if i == n - 1 or j == n - 1:
if lcp[i][j] != 1:
return ""
elif lcp[i][j] != lcp[i + 1][j + 1] + 1:
return ""
elif lcp[i][j]:
return ""
return "".join(s)
|
Solution
|
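A self-contained cross-check for the LCP reconstruction above: brute-force the LCP matrix of a candidate string and compare it with the input. The matrix below is assumed to correspond to "abab".
def lcp_matrix(s):
    # lcp[i][j] = length of the longest common prefix of s[i:] and s[j:]
    n = len(s)
    lcp = [[0] * n for _ in range(n)]
    for i in range(n - 1, -1, -1):
        for j in range(n - 1, -1, -1):
            if s[i] == s[j]:
                nxt = lcp[i + 1][j + 1] if i + 1 < n and j + 1 < n else 0
                lcp[i][j] = nxt + 1
    return lcp

assert lcp_matrix("abab") == [[4, 0, 2, 0], [0, 3, 0, 1], [2, 0, 2, 0], [0, 1, 0, 1]]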
python
|
scrapy__scrapy
|
tests/spiders.py
|
{
"start": 7060,
"end": 8072
}
|
class ____(SimpleSpider):
name = "asyncdef_asyncio_gen_complex"
initial_reqs = 4
following_reqs = 3
depth = 2
def _get_req(self, index, cb=None):
return Request(
self.mockserver.url(f"/status?n=200&request={index}"),
meta={"index": index},
dont_filter=True,
callback=cb,
)
async def start(self):
for i in range(1, self.initial_reqs + 1):
yield self._get_req(i)
async def parse(self, response):
index = response.meta["index"]
yield {"index": index}
if index < 10**self.depth:
for new_index in range(10 * index, 10 * index + self.following_reqs):
yield self._get_req(new_index)
yield self._get_req(index, cb=self.parse2)
await asyncio.sleep(0.1)
yield {"index": index + 5}
async def parse2(self, response):
await asyncio.sleep(0.1)
yield {"index2": response.meta["index"]}
|
AsyncDefAsyncioGenComplexSpider
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/_backend_gtk.py
|
{
"start": 9095,
"end": 10553
}
|
class ____(NavigationToolbar2):
# Must be implemented in GTK3/GTK4 backends:
# * __init__
# * save_figure
def set_message(self, s):
escaped = GLib.markup_escape_text(s)
self.message.set_markup(f'<small>{escaped}</small>')
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas._draw_rubberband(rect)
def remove_rubberband(self):
self.canvas._draw_rubberband(None)
def _update_buttons_checked(self):
for name, active in [("Pan", "PAN"), ("Zoom", "ZOOM")]:
button = self._gtk_ids.get(name)
if button:
with button.handler_block(button._signal_handler):
button.set_active(self.mode.name == active)
def pan(self, *args):
super().pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super().zoom(*args)
self._update_buttons_checked()
def set_history_buttons(self):
can_backward = self._nav_stack._pos > 0
can_forward = self._nav_stack._pos < len(self._nav_stack) - 1
if 'Back' in self._gtk_ids:
self._gtk_ids['Back'].set_sensitive(can_backward)
if 'Forward' in self._gtk_ids:
self._gtk_ids['Forward'].set_sensitive(can_forward)
|
_NavigationToolbar2GTK
|
python
|
python__mypy
|
mypyc/irbuild/builder.py
|
{
"start": 3612,
"end": 3690
}
|
class ____(ExpressionVisitor[Value], StatementVisitor[None]):
pass
|
IRVisitor
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/_ast.py
|
{
"start": 92325,
"end": 94112
}
|
class ____(ASTDeclarator):
def __init__(self, declId: ASTNestedName, size: ASTExpression) -> None:
self.declId = declId
self.size = size
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTDeclaratorNameBitField):
return NotImplemented
return self.declId == other.declId and self.size == other.size
def __hash__(self) -> int:
return hash((self.declId, self.size))
@property
def name(self) -> ASTNestedName:
return self.declId
@name.setter
def name(self, name: ASTNestedName) -> None:
self.declId = name
def get_param_id(self, version: int) -> str: # only the parameters (if any)
return ''
def get_ptr_suffix_id(self, version: int) -> str: # only the array specifiers
return ''
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def is_function_type(self) -> bool:
return False
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
res.extend((' : ', transform(self.size)))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation(':', ':')
signode += addnodes.desc_sig_space()
self.size.describe_signature(signode, mode, env, symbol)
|
ASTDeclaratorNameBitField
|
python
|
pypa__pipenv
|
pipenv/vendor/tomlkit/exceptions.py
|
{
"start": 4411,
"end": 4658
}
|
class ____(TOMLKitError):
"""
An already present key was used.
"""
def __init__(self, key):
key = getattr(key, "key", key)
message = f'Key "{key}" already exists.'
super().__init__(message)
|
KeyAlreadyPresent
|
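A hedged usage sketch of when this exception is raised, using the upstream tomlkit package rather than the vendored copy above; the document text and key name are illustrative, and appending an existing key is expected to trigger the error.
import tomlkit
from tomlkit.exceptions import KeyAlreadyPresent

doc = tomlkit.parse('title = "example"\n')
try:
    doc.append("title", "duplicate")  # appending a key that already exists
except KeyAlreadyPresent as exc:
    print(exc)  # Key "title" already exists.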
python
|
numba__numba
|
numba/cuda/tests/doc_examples/test_ufunc.py
|
{
"start": 201,
"end": 1418
}
|
class ____(CUDATestCase):
"""
Test calling a UFunc
"""
def setUp(self):
# Prevent output from this test showing
# up when running the test suite
self._captured_stdout = captured_stdout()
self._captured_stdout.__enter__()
super().setUp()
def tearDown(self):
# No exception type, value, or traceback
self._captured_stdout.__exit__(None, None, None)
super().tearDown()
def test_ex_cuda_ufunc_call(self):
# ex_cuda_ufunc.begin
import numpy as np
from numba import cuda
# A kernel calling a ufunc (sin, in this case)
@cuda.jit
def f(r, x):
# Compute sin(x) with result written to r
np.sin(x, r)
# Declare input and output arrays
x = np.arange(10, dtype=np.float32) - 5
r = np.zeros_like(x)
# Launch kernel that calls the ufunc
f[1, 1](r, x)
# A quick sanity check demonstrating equality of the sine computed by
# the sin ufunc inside the kernel, and NumPy's sin ufunc
np.testing.assert_allclose(r, np.sin(x))
# ex_cuda_ufunc.end
if __name__ == "__main__":
unittest.main()
|
TestUFunc
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_vendor/distlib/scripts.py
|
{
"start": 2928,
"end": 18777
}
|
class ____(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self,
source_dir,
target_dir,
add_launchers=True,
dry_run=False,
fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix') or (os.name == 'java'
and os._name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
self._is_nt = os.name == 'nt' or (os.name == 'java'
and os._name == 'nt')
self.version_info = sys.version_info
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and self._is_nt: # pragma: no cover
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'): # pragma: no cover
def _is_shell(self, executable):
"""
Determine if the specified executable is a script
(contains a #! line)
"""
try:
with open(executable) as fp:
return fp.read(2) == '#!'
except (OSError, IOError):
logger.warning('Failed to open %s', executable)
return False
def _fix_jython_executable(self, executable):
if self._is_shell(executable):
# Workaround for Jython is not needed on Linux systems.
import java
if java.lang.System.getProperty('os.name') == 'Linux':
return executable
elif executable.lower().endswith('jython.exe'):
# Use wrapper exe for Jython on Windows
return executable
return '/usr/bin/env %s' % executable
def _build_shebang(self, executable, post_interp):
"""
        Build a shebang line. In the simple case (on Windows, or a shebang line
        which is not too long and contains no spaces) use a simple formulation for
the shebang. Otherwise, use /bin/sh as the executable, with a contrived
shebang which allows the script to run either under Python or sh, using
suitable quoting. Thanks to Harald Nordgren for his input.
See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
https://hg.mozilla.org/mozilla-central/file/tip/mach
"""
if os.name != 'posix':
simple_shebang = True
else:
# Add 3 for '#!' prefix and newline suffix.
shebang_length = len(executable) + len(post_interp) + 3
if sys.platform == 'darwin':
max_shebang_length = 512
else:
max_shebang_length = 127
simple_shebang = ((b' ' not in executable)
and (shebang_length <= max_shebang_length))
if simple_shebang:
result = b'#!' + executable + post_interp + b'\n'
else:
result = b'#!/bin/sh\n'
result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
result += b"' '''"
return result
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv(): # pragma: no cover
executable = os.path.join(
sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else: # pragma: no cover
if os.name == 'nt':
# for Python builds from source on Windows, no Python executables with
# a version suffix are created, so we use python.exe
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s' % (sysconfig.get_config_var('EXE')))
else:
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
if sys.platform.startswith('java'): # pragma: no cover
executable = self._fix_jython_executable(executable)
# Normalise case for Windows - COMMENTED OUT
# executable = os.path.normcase(executable)
# N.B. The normalising operation above has been commented out: See
# issue #124. Although paths in Windows are generally case-insensitive,
# they aren't always. For example, a path containing a ẞ (which is a
# LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
        # LATIN SMALL LETTER SHARP S - U+00DF). The two are not considered by
# Windows as equivalent in path names.
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote:
executable = enquote_executable(executable)
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
and '-X:FullFrames' not in post_interp): # pragma: no cover
post_interp += b' -X:Frames'
shebang = self._build_shebang(executable, post_interp)
# Python parser starts to read a script using UTF-8 until
# it gets a #coding:xxx cookie. The shebang has to be the
# first line of a file, the #coding:xxx cookie cannot be
# written before. So the shebang has to be decodable from
# UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
raise ValueError('The shebang (%r) is not decodable from utf-8' %
shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError: # pragma: no cover
raise ValueError('The shebang (%r) is not decodable '
'from the script encoding (%r)' %
(shebang, encoding))
return shebang
def _get_script_text(self, entry):
return self.script_template % dict(
module=entry.prefix,
import_name=entry.suffix.split('.')[0],
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and self._is_nt
linesep = os.linesep.encode('utf-8')
if not shebang.endswith(linesep):
shebang += linesep
if not use_launcher:
script_bytes = shebang + script_bytes
else: # pragma: no cover
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
if source_date_epoch:
date_time = time.gmtime(int(source_date_epoch))[:6]
zinfo = ZipInfo(filename='__main__.py',
date_time=date_time)
zf.writestr(zinfo, script_bytes)
else:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher: # pragma: no cover
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if self._is_nt and not outname.endswith(
'.' + ext): # pragma: no cover
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
variant_separator = '-'
def get_script_filenames(self, name):
result = set()
if '' in self.variants:
result.add(name)
if 'X' in self.variants:
result.add('%s%s' % (name, self.version_info[0]))
if 'X.Y' in self.variants:
result.add('%s%s%s.%s' %
(name, self.variant_separator, self.version_info[0],
self.version_info[1]))
return result
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
scriptnames = self.get_script_filenames(entry.name)
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError: # pragma: no cover
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line: # pragma: no cover
logger.warning('%s is an empty file (skipping)', script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line: # pragma: no cover
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt' or (os.name == 'java'
and os._name == 'nt'): # pragma: no cover
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
platform_suffix = '-arm' if get_platform() == 'win-arm64' else ''
name = '%s%s%s.exe' % (kind, bits, platform_suffix)
if name not in WRAPPERS:
msg = ('Unable to find resource %s in package %s' %
(name, distlib_package))
raise ValueError(msg)
return WRAPPERS[name]
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
def make_multiple(self, specifications, options=None):
"""
        Take a list of specifications and make scripts from them.
        :param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
|
ScriptMaker
|
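A self-contained sketch of the shebang-length rule implemented in _build_shebang() above: a short, space-free interpreter path gets a plain "#!" line, anything else falls back to the /bin/sh trampoline. The max_len of 127 mirrors the non-macOS POSIX limit used above; the function name is a stand-in.
def build_shebang(executable: bytes, post_interp: bytes = b"", max_len: int = 127) -> bytes:
    # Simple form only when the path is short enough and contains no spaces.
    simple = b" " not in executable and len(executable) + len(post_interp) + 3 <= max_len
    if simple:
        return b"#!" + executable + post_interp + b"\n"
    # Contrived /bin/sh wrapper that re-executes the script under `executable`.
    return (b"#!/bin/sh\n"
            b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
            b"' '''")

print(build_shebang(b"/usr/bin/python3"))  # b'#!/usr/bin/python3\n'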
python
|
getsentry__sentry
|
src/sentry/models/files/abstractfileblob.py
|
{
"start": 1100,
"end": 9707
}
|
class ____(Model, _Parent[BlobOwnerType]):
__relocation_scope__ = RelocationScope.Excluded
path = models.TextField(null=True)
size = WrappingU32IntegerField(null=True)
checksum = models.CharField(max_length=40, unique=True)
timestamp = models.DateTimeField(default=timezone.now, db_index=True)
class Meta:
abstract = True
@abstractmethod
def _create_blob_owner(self, organization_id: int) -> BlobOwnerType: ...
@abstractmethod
def _delete_file_task(self) -> Task[Any, Any]: ...
@classmethod
@abstractmethod
def _storage_config(cls) -> dict[str, Any] | None:
raise NotImplementedError(cls)
@classmethod
@sentry_sdk.tracing.trace
def from_files(cls, files, organization=None, logger=nooplogger) -> None:
"""A faster version of `from_file` for multiple files at the time.
If an organization is provided it will also create `FileBlobOwner`
entries. Files can be a list of files or tuples of file and checksum.
If both are provided then a checksum check is performed.
If the checksums mismatch an `IOError` is raised.
"""
logger.debug("FileBlob.from_files.start")
files_with_checksums = []
for fileobj in files:
if isinstance(fileobj, tuple):
files_with_checksums.append(fileobj)
else:
files_with_checksums.append((fileobj, None))
checksums_seen = set()
blobs_to_save = []
semaphore = Semaphore(value=MULTI_BLOB_UPLOAD_CONCURRENCY)
def _upload_and_pend_chunk(fileobj, size, checksum) -> None:
logger.debug(
"FileBlob.from_files._upload_and_pend_chunk.start",
extra={"checksum": checksum, "size": size},
)
blob = cls(size=size, checksum=checksum)
blob.path = cls.generate_unique_path()
storage = get_storage(cls._storage_config())
storage.save(blob.path, fileobj)
blobs_to_save.append(blob)
metrics.distribution(
"filestore.blob-size", size, tags={"function": "from_files"}, unit="byte"
)
logger.debug(
"FileBlob.from_files._upload_and_pend_chunk.end",
extra={"checksum": checksum, "path": blob.path},
)
def _save_blob(blob: Self) -> None:
logger.debug("FileBlob.from_files._save_blob.start", extra={"path": blob.path})
try:
blob.save()
except IntegrityError:
# this means that there was a race inserting a blob
# with this checksum. we will fetch the other blob that was
# saved, and delete our backing storage to not leave orphaned
# chunks behind.
# we also won't have to worry about concurrent deletes, as deletions
# are only happening for blobs older than 24h.
metrics.incr("filestore.upload_race", sample_rate=1.0)
saved_path = blob.path
blob = cls.objects.get(checksum=blob.checksum)
storage = get_storage(cls._storage_config())
storage.delete(saved_path)
blob._ensure_blob_owned(organization)
logger.debug("FileBlob.from_files._save_blob.end", extra={"path": blob.path})
def _flush_blobs() -> None:
while True:
try:
blob = blobs_to_save.pop()
except IndexError:
break
_save_blob(blob)
semaphore.release()
try:
for fileobj, reference_checksum in files_with_checksums:
logger.debug(
"FileBlob.from_files.executor_start", extra={"checksum": reference_checksum}
)
_flush_blobs()
# Before we go and do something with the files we calculate
# the checksums and compare it against the reference. This
# also deduplicates duplicates uploaded in the same request.
# This is necessary because we acquire multiple locks in one
# go which would let us deadlock otherwise.
size, checksum = get_size_and_checksum(fileobj)
if reference_checksum is not None and checksum != reference_checksum:
raise OSError("Checksum mismatch")
if checksum in checksums_seen:
continue
checksums_seen.add(checksum)
# Check if we need to upload the blob. If we get a result back
# here it means the blob already exists.
existing = get_and_optionally_update_blob(cls, checksum)
if existing is not None:
existing._ensure_blob_owned(organization)
continue
# Otherwise we leave the blob locked and submit the task.
# We use the semaphore to ensure we never schedule too
# many. The upload will be done with a certain amount
# of concurrency controlled by the semaphore and the
# `_flush_blobs` call will take all those uploaded
# blobs and associate them with the database.
semaphore.acquire()
_upload_and_pend_chunk(fileobj, size, checksum)
logger.debug("FileBlob.from_files.end", extra={"checksum": reference_checksum})
_flush_blobs()
finally:
logger.debug("FileBlob.from_files.end")
@classmethod
@sentry_sdk.tracing.trace
def from_file_with_organization(cls, fileobj, organization=None, logger=nooplogger) -> Self:
"""
        Retrieve a single FileBlob instance for the given file and bind it to an organization via a FileBlobOwner entry.
"""
blob = cls.from_file(fileobj, logger=logger)
blob._ensure_blob_owned(organization)
return blob
@classmethod
@sentry_sdk.tracing.trace
def from_file(cls, fileobj, logger=nooplogger) -> Self:
"""
        Retrieve a single FileBlob instance for the given file.
"""
logger.debug("FileBlob.from_file.start")
size, checksum = get_size_and_checksum(fileobj)
existing = get_and_optionally_update_blob(cls, checksum)
if existing is not None:
return existing
blob = cls(size=size, checksum=checksum)
blob.path = cls.generate_unique_path()
storage = get_storage(cls._storage_config())
storage.save(blob.path, fileobj)
try:
blob.save()
except IntegrityError:
# see `_save_blob` above
metrics.incr("filestore.upload_race", sample_rate=1.0)
saved_path = blob.path
blob = cls.objects.get(checksum=checksum)
storage.delete(saved_path)
metrics.distribution("filestore.blob-size", size, unit="byte")
logger.debug("FileBlob.from_file.end")
return blob
@classmethod
def generate_unique_path(cls):
# We intentionally do not use checksums as path names to avoid concurrency issues
# when we attempt concurrent uploads for any reason.
uuid_hex = uuid4().hex
pieces = [uuid_hex[:2], uuid_hex[2:6], uuid_hex[6:]]
return "/".join(pieces)
@sentry_sdk.tracing.trace
def delete(self, *args, **kwargs):
if self.path:
transaction.on_commit(
lambda: self._delete_file_task().delay(path=self.path, checksum=self.checksum),
using=router.db_for_write(self.__class__),
)
return super().delete(*args, **kwargs)
def getfile(self):
"""
Return a file-like object for this File's content.
>>> with blob.getfile() as src, open('/tmp/localfile', 'wb') as dst:
>>> for chunk in src.chunks():
>>> dst.write(chunk)
"""
assert self.path
storage = get_storage(self._storage_config())
return storage.open(self.path)
def _ensure_blob_owned(self, organization):
"""
Ensures that the FileBlob is owned by the given organization.
"""
if organization is None:
return
try:
with transaction.atomic(using=router.db_for_write(self.__class__)):
self._create_blob_owner(organization_id=organization.id)
except IntegrityError:
pass
|
AbstractFileBlob
|
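A self-contained sketch of the size-and-checksum step that from_files() relies on; get_size_and_checksum is assumed to behave roughly like this (byte count plus SHA-1 hex digest, matching the 40-character checksum column above).
import hashlib
from io import BytesIO

def size_and_checksum(fileobj, chunk_size=65536):
    size, sha1 = 0, hashlib.sha1()
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        size += len(chunk)
        sha1.update(chunk)
    fileobj.seek(0)  # leave the stream ready to be uploaded afterwards
    return size, sha1.hexdigest()

print(size_and_checksum(BytesIO(b"hello world")))
# (11, '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed')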
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-minimum-area-to-cover-all-ones-ii.py
|
{
"start": 11771,
"end": 15617
}
|
class ____(object):
def minimumSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def binary_search(left, right, check):
while left <= right:
mid = left + (right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
def binary_search_right(left, right, check):
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
def minimumArea(min_i, max_i, min_j, max_j):
def count(x1, y1, x2, y2):
cnt = grid[x2][y2]
if x1-1 >= 0:
cnt -= grid[x1-1][y2]
if y1-1 >= 0:
cnt -= grid[x2][y1-1]
if x1-1 >= 0 and y1-1 >= 0:
cnt += grid[x1-1][y1-1]
return cnt
min_r = binary_search(min_i, max_i, lambda i: count(min_i, min_j, i, max_j))
max_r = binary_search_right(min_i, max_i, lambda i: count(i, min_j, max_i, max_j))
min_c = binary_search(min_j, max_j, lambda j: count(min_i, min_j, max_i, j))
max_c = binary_search_right(min_j, max_j, lambda j: count(min_i, j, max_i, max_j))
return (max_r-min_r+1)*(max_c-min_c+1) if min_r <= max_i else 0
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if i-1 >= 0:
grid[i][j] += grid[i-1][j]
if j-1 >= 0:
grid[i][j] += grid[i][j-1]
if i-1 >= 0 and j-1 >= 0:
grid[i][j] -= grid[i-1][j-1]
        result = float("inf")
for i in xrange(len(grid)-1):
a = minimumArea(i+1, len(grid)-1, 0, len(grid[0])-1)
for j in xrange(len(grid[0])-1):
b = minimumArea(0, i, 0, j)
c = minimumArea(0, i, j+1, len(grid[0])-1)
result = min(result, a+b+c)
for i in xrange(len(grid)-1):
a = minimumArea(0, i, 0, len(grid[0])-1)
for j in xrange(len(grid[0])-1):
b = minimumArea(i+1, len(grid)-1, 0, j)
c = minimumArea(i+1, len(grid)-1, j+1, len(grid[0])-1)
result = min(result, a+b+c)
for j in xrange(len(grid[0])-1):
a = minimumArea(0, len(grid)-1, j+1, len(grid[0])-1)
for i in xrange(len(grid)-1):
b = minimumArea(0, i, 0, j)
c = minimumArea(i+1, len(grid)-1, 0, j)
result = min(result, a+b+c)
for j in xrange(len(grid[0])-1):
a = minimumArea(0, len(grid)-1, 0, j)
for i in xrange(len(grid)-1):
b = minimumArea(0, i, j+1, len(grid[0])-1)
c = minimumArea(i+1, len(grid)-1, j+1, len(grid[0])-1)
result = min(result, a+b+c)
for i in xrange(len(grid)-2):
a = minimumArea(0, i, 0, len(grid[0])-1)
for j in xrange(i+1, len(grid)-1):
b = minimumArea(i+1, j, 0, len(grid[0])-1)
c = minimumArea(j+1, len(grid)-1, 0, len(grid[0])-1)
result = min(result, a+b+c)
for i in xrange(len(grid[0])-2):
a = minimumArea(0, len(grid)-1, 0, i)
for j in xrange(i+1, len(grid[0])-1):
b = minimumArea(0, len(grid)-1, i+1, j)
c = minimumArea(0, len(grid)-1, j+1, len(grid[0])-1)
result = min(result, a+b+c)
return result
# Time: O(max(n, m)^2 * log(max(n, m)))
# Space: O(n * m)
# prefix sum, binary search
|
Solution4
|
python
|
milvus-io__pymilvus
|
pymilvus/client/types.py
|
{
"start": 2664,
"end": 3793
}
|
class ____(IntEnum):
"""
String of DataType is str of its value, e.g.: str(DataType.BOOL) == "1"
"""
NONE = 0 # schema_pb2.None, this is an invalid representation in python
BOOL = schema_pb2.Bool
INT8 = schema_pb2.Int8
INT16 = schema_pb2.Int16
INT32 = schema_pb2.Int32
INT64 = schema_pb2.Int64
FLOAT = schema_pb2.Float
DOUBLE = schema_pb2.Double
STRING = schema_pb2.String
VARCHAR = schema_pb2.VarChar
ARRAY = schema_pb2.Array
JSON = schema_pb2.JSON
GEOMETRY = schema_pb2.Geometry
TIMESTAMPTZ = schema_pb2.Timestamptz
BINARY_VECTOR = schema_pb2.BinaryVector
FLOAT_VECTOR = schema_pb2.FloatVector
FLOAT16_VECTOR = schema_pb2.Float16Vector
BFLOAT16_VECTOR = schema_pb2.BFloat16Vector
SPARSE_FLOAT_VECTOR = schema_pb2.SparseFloatVector
INT8_VECTOR = schema_pb2.Int8Vector
STRUCT = schema_pb2.Struct
# Internal use only - not exposed to users
_ARRAY_OF_VECTOR = schema_pb2.ArrayOfVector
_ARRAY_OF_STRUCT = schema_pb2.ArrayOfStruct
UNKNOWN = 999
def __str__(self) -> str:
return str(self.value)
|
DataType
|
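A minimal sketch of the __str__ behaviour documented above, using a stand-in IntEnum; BOOL = 1 is assumed to match the docstring's example, while the real values come from schema_pb2.
from enum import IntEnum

class DataTypeSketch(IntEnum):
    NONE = 0
    BOOL = 1

    def __str__(self) -> str:
        # str() of a member is the string of its numeric value, not its name.
        return str(self.value)

assert str(DataTypeSketch.BOOL) == "1"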
python
|
kamyu104__LeetCode-Solutions
|
Python/paint-house-ii.py
|
{
"start": 442,
"end": 1336
}
|
class ____(object):
def minCostII(self, costs):
"""
:type costs: List[List[int]]
:rtype: int
"""
if not costs:
return 0
n = len(costs)
k = len(costs[0])
min_cost = [costs[0], [0] * k]
for i in xrange(1, n):
smallest, second_smallest = float("inf"), float("inf")
for j in xrange(k):
if min_cost[(i - 1) % 2][j] < smallest:
smallest, second_smallest = min_cost[(i - 1) % 2][j], smallest
elif min_cost[(i - 1) % 2][j] < second_smallest:
second_smallest = min_cost[(i - 1) % 2][j]
for j in xrange(k):
min_j = smallest if min_cost[(i - 1) % 2][j] != smallest else second_smallest
min_cost[i % 2][j] = costs[i][j] + min_j
return min(min_cost[(n - 1) % 2])
|
Solution2
|
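A self-contained brute-force check of the recurrence above, written for Python 3 (the class itself targets Python 2's xrange); the sample costs matrix is illustrative.
from itertools import product

def min_cost_brute(costs):
    # Try every colouring where adjacent houses differ and keep the cheapest.
    n, k = len(costs), len(costs[0])
    best = float("inf")
    for colors in product(range(k), repeat=n):
        if all(colors[i] != colors[i + 1] for i in range(n - 1)):
            best = min(best, sum(costs[i][c] for i, c in enumerate(colors)))
    return best

print(min_cost_brute([[1, 5, 3], [2, 9, 4]]))  # 5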
python
|
altair-viz__altair
|
altair/datasets/_exceptions.py
|
{
"start": 230,
"end": 3823
}
|
class ____(Exception):
@classmethod
def from_url(cls, meta: Metadata, /) -> AltairDatasetsError:
if meta["suffix"] == ".parquet":
msg = (
f"{_failed_url(meta)}"
f"{meta['suffix']!r} datasets require `vegafusion`.\n"
"See upstream issue for details: https://github.com/vega/vega/issues/3961"
)
else:
msg = (
f"{cls.from_url.__qualname__}() called for "
f"unimplemented extension: {meta['suffix']}\n\n{meta!r}"
)
raise NotImplementedError(msg)
return cls(msg)
@classmethod
def from_tabular(cls, meta: Metadata, backend_name: str, /) -> AltairDatasetsError:
if meta["is_image"]:
reason = "Image data is non-tabular."
return cls(f"{_failed_tabular(meta)}{reason}{_suggest_url(meta)}")
elif not meta["is_tabular"] or meta["suffix"] in {".arrow", ".parquet"}:
if meta["suffix"] in {".arrow", ".parquet"}:
install: tuple[str, ...] = "pyarrow", "polars"
what = f"{meta['suffix']!r}"
else:
install = ("polars",)
if meta["is_spatial"]:
what = "Geospatial data"
elif meta["is_json"]:
what = "Non-tabular json"
else:
what = f"{meta['file_name']!r}"
reason = _why(what, backend_name)
return cls(f"{_failed_tabular(meta)}{reason}{_suggest_url(meta, *install)}")
else:
return cls(_implementation_not_found(meta))
@classmethod
def from_priority(cls, priority: Sequence[_Backend], /) -> AltairDatasetsError:
msg = f"Found no supported backend, searched:\n{priority!r}"
return cls(msg)
def module_not_found(
backend_name: str, reqs: Sequence[str], missing: str
) -> ModuleNotFoundError:
if len(reqs) == 1:
depends = f"{reqs[0]!r} package"
else:
depends = ", ".join(f"{req!r}" for req in reqs) + " packages"
msg = (
f"Backend {backend_name!r} requires the {depends}, but {missing!r} could not be found.\n"
f"This can be installed with pip using:\n"
f" pip install {missing}\n"
f"Or with conda using:\n"
f" conda install -c conda-forge {missing}"
)
return ModuleNotFoundError(msg, name=missing)
def _failed_url(meta: Metadata, /) -> str:
return f"Unable to load {meta['file_name']!r} via url.\n"
def _failed_tabular(meta: Metadata, /) -> str:
return f"Unable to load {meta['file_name']!r} as tabular data.\n"
def _why(what: str, backend_name: str, /) -> str:
return f"{what} is not supported natively by {backend_name!r}."
def _suggest_url(meta: Metadata, *install_other: str) -> str:
other = ""
if install_other:
others = " or ".join(f"`{other}`" for other in install_other)
other = f" installing {others}, or use"
return (
f"\n\nInstead, try{other}:\n"
" from altair.datasets import data\n"
f" data.{meta['dataset_name']}.url"
)
def _implementation_not_found(meta: Metadata, /) -> str:
"""Search finished without finding a *declared* incompatibility."""
INDENT = " " * 4
record = f",\n{INDENT}".join(
f"{k}={v!r}"
for k, v in meta.items()
if not (k.startswith(("is_", "sha", "bytes", "has_")))
or (v is True and k.startswith("is_"))
)
return f"Found no implementation that supports:\n{INDENT}{record}"
|
AltairDatasetsError
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/ops.py
|
{
"start": 65601,
"end": 67260
}
|
class ____(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
__slots__ = ["_statistic_type", "_value"]
def __init__(self, statistic_type, value=None) -> None:
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self: OpStatsType, other: OpStatsType) -> OpStatsType:
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value # pytype: disable=attribute-error
return self
_stats_registry: registry.Registry = registry.Registry("statistical functions")
|
OpStats
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 745513,
"end": 745753
}
|
class ____(VegaLiteSchema):
"""MarkPropDefnumberArray schema wrapper."""
_schema = {"$ref": "#/definitions/MarkPropDef<number[]>"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
MarkPropDefnumberArray
|
python
|
python-visualization__folium
|
folium/elements.py
|
{
"start": 5025,
"end": 5705
}
|
class ____(MacroElement):
"""Abstract class to add an element to another element."""
_template = Template(
"""
{% macro script(this, kwargs) %}
{{ this.target }}.{{ this.method }}(
{% for arg in this.args %}
{{ arg | tojavascript }},
{% endfor %}
{{ this.kwargs | tojavascript }}
);
{% endmacro %}
"""
)
def __init__(self, target: MacroElement, method: str, *args, **kwargs):
super().__init__()
self.target = target.get_name()
self.method = camelize(method)
self.args = args
self.kwargs = kwargs
|
MethodCall
|
python
|
ray-project__ray
|
python/ray/air/tests/mocked_wandb_integration.py
|
{
"start": 746,
"end": 899
}
|
class ____:
def __init__(self):
self.config = {}
def update(self, config, *args, **kwargs):
self.config.update(config)
|
_FakeConfig
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py
|
{
"start": 341,
"end": 1232
}
|
class ____(BaseModel):
class Config:
extra = Extra.forbid
title: str = Field(..., description="Display title for the documentation link")
url: AnyUrl = Field(..., description="URL to the external documentation")
type: Optional[
Literal[
"api_deprecations",
"api_reference",
"api_release_history",
"authentication_guide",
"data_model_reference",
"developer_community",
"migration_guide",
"openapi_spec",
"other",
"permissions_scopes",
"rate_limits",
"sql_reference",
"status_page",
]
] = Field(None, description="Category of documentation")
requiresLogin: Optional[bool] = Field(
False, description="Whether the URL requires authentication to access"
)
|
ExternalDocumentationUrl
|
python
|
numba__numba
|
numba/tests/test_cfunc.py
|
{
"start": 2725,
"end": 5090
}
|
class ____(TestCase):
def test_basic(self):
"""
Basic usage and properties of a cfunc.
"""
f = cfunc(add_sig)(add_usecase)
self.assertEqual(f.__name__, "add_usecase")
self.assertEqual(f.__qualname__, "add_usecase")
self.assertIs(f.__wrapped__, add_usecase)
symbol = f.native_name
self.assertIsInstance(symbol, str)
self.assertIn("add_usecase", symbol)
addr = f.address
self.assertIsInstance(addr, int)
ct = f.ctypes
self.assertEqual(ctypes.cast(ct, ctypes.c_void_p).value, addr)
self.assertPreciseEqual(ct(2.0, 3.5), 5.5)
@skip_unless_cffi
def test_cffi(self):
from numba.tests import cffi_usecases
ffi, lib = cffi_usecases.load_inline_module()
f = cfunc(square_sig)(square_usecase)
res = lib._numba_test_funcptr(f.cffi)
self.assertPreciseEqual(res, 2.25) # 1.5 ** 2
def test_locals(self):
# By forcing the intermediate result into an integer, we
# truncate the ultimate function result
f = cfunc(div_sig, locals={'c': types.int64})(div_usecase)
self.assertPreciseEqual(f.ctypes(8, 3), 2.0)
def test_errors(self):
f = cfunc(div_sig)(div_usecase)
with captured_stderr() as err:
self.assertPreciseEqual(f.ctypes(5, 2), 2.5)
self.assertEqual(err.getvalue(), "")
with captured_stderr() as err:
res = f.ctypes(5, 0)
# This is just a side effect of Numba zero-initializing
# stack variables, and could change in the future.
self.assertPreciseEqual(res, 0.0)
err = err.getvalue()
self.assertIn("ZeroDivisionError:", err)
self.assertIn("Exception ignored", err)
def test_llvm_ir(self):
f = cfunc(add_sig)(add_usecase)
ir = f.inspect_llvm()
self.assertIn(f.native_name, ir)
self.assertIn("fadd double", ir)
def test_object_mode(self):
"""
Object mode is currently unsupported.
"""
with self.assertRaises(NotImplementedError):
cfunc(add_sig, forceobj=True)(add_usecase)
with self.assertTypingError() as raises:
cfunc(add_sig)(objmode_usecase)
self.assertIn("Untyped global name 'object'", str(raises.exception))
|
TestCFunc
|
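A hedged sketch of the cfunc pattern the tests above exercise; it assumes numba is installed, and the explicit float64 signature stands in for the add_sig defined elsewhere in the test module.
from numba import cfunc, types

@cfunc(types.float64(types.float64, types.float64))
def add(a, b):
    return a + b

# The compiled function is callable through its ctypes wrapper, as in test_basic.
print(add.ctypes(2.0, 3.5))  # 5.5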
python
|
django__django
|
tests/select_related/models.py
|
{
"start": 982,
"end": 1107
}
|
class ____(models.Model):
name = models.CharField(max_length=50)
klass = models.ForeignKey(Klass, models.CASCADE)
|
Order
|
python
|
pypa__warehouse
|
tests/unit/search/test_tasks.py
|
{
"start": 5461,
"end": 5546
}
|
class ____:
def __init__(self):
self.indices = FakeESIndices()
|
FakeESClient
|
python
|
getsentry__sentry
|
src/sentry/integrations/github_enterprise/webhook.py
|
{
"start": 3067,
"end": 3486
}
|
class ____:
@property
def provider(self) -> str:
return IntegrationProviderSlug.GITHUB_ENTERPRISE.value
def get_external_id(self, username: str) -> str:
return f"github_enterprise:{username}"
def get_idp_external_id(self, integration: RpcIntegration, host: str | None = None) -> str:
return "{}:{}".format(host, integration.metadata["installation"]["id"])
|
GitHubEnterpriseWebhook
|
python
|
ansible__ansible
|
test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/ios.py
|
{
"start": 240,
"end": 445
}
|
class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
result['action_plugin'] = 'ios'
return result
|
ActionModule
|
python
|
mkdocstrings__mkdocstrings
|
src/mkdocstrings/_internal/handlers/base.py
|
{
"start": 1996,
"end": 2099
}
|
class ____(Exception):
"""An exception raised when some collection of data failed."""
|
CollectionError
|
python
|
gevent__gevent
|
src/greentest/3.9/test_ssl.py
|
{
"start": 119770,
"end": 193035
}
|
class ____(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
if not has_tls_protocol(protocol):
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
seclevel_workaround(context)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raise ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# TLS 1.3 perform client cert exchange after handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
if has_tls_protocol(ssl.PROTOCOL_TLSv1):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
if has_tls_protocol(ssl.PROTOCOL_TLSv1_1):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
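        # tracks whether the client connection is currently wrapped in TLS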
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
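        # entering the server manually and registering __exit__ as cleanup ensures it is shut down even if the test fails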
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
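        # suppress_ragged_eofs=False makes an unexpected EOF raise an exception instead of being treated as a normal end of stream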
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
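        # the serve() closure polls this flag; setting it to True stops the accept loop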
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.options |= ssl.OP_NO_TLSv1_3
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if IS_OPENSSL_1_1_1 and has_tls_version('TLSv1_3'):
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_minimum_version
@requires_tls_version('TLSv1_2')
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_minimum_version
@requires_tls_version('TLSv1_1')
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_minimum_version
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_minimum_version
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
                # and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
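        # hostname checking is disabled because the SNI callback below swaps in a certificate issued for a different name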
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
|
ThreadedTests
|
python
|
bokeh__bokeh
|
src/bokeh/models/renderers/renderer.py
|
{
"start": 3889,
"end": 4874
}
|
class ____(Renderer):
""" A renderer that allows attaching other renderers and DOM-based UIs.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
renderers = List(Instance(Renderer), default=[], help="""
A collection of renderers attached to this renderer.
.. note::
This property is experimental and may change at any point.
""")
elements = List(
Either(
Instance(".models.ui.UIElement"),
Instance(".models.dom.DOMNode"),
),
)(default=[], help="""
A collection of DOM-based UI elements attached to this renderer.
    This can include floating elements like tooltips, allowing a parent-child
    relationship to be established between this renderer and its UI elements.
.. note::
This property is an equivalent of ``Pane.elements`` in DOM-based UIs.
""")
@abstract
|
CompositeRenderer
|
python
|
huggingface__transformers
|
tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py
|
{
"start": 101497,
"end": 109133
}
|
class ____:
def __init__(
self,
parent,
vocab_size=99,
batch_size=7,
d_model=32,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=50,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
attention_type="original_full",
use_bias=True,
block_size=16,
num_random_blocks=3,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = BigBirdPegasusConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
num_hidden_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
attention_type=self.attention_type,
use_bias=self.use_bias,
block_size=self.block_size,
num_random_blocks=self.num_random_blocks,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = BigBirdPegasusDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
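        # with use_cache=True the outputs additionally contain past_key_values, so the tuple is one element longer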
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = BigBirdPegasusDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
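        # mask out attention for the second half of the input sequence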
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values, use_cache=True)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
        # big bird has extremely high logits which require
# such a high error tolerance here
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=5e-1)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, lm_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
|
BigBirdPegasusStandaloneDecoderModelTester
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/tools/base.py
|
{
"start": 12126,
"end": 12547
}
|
class ____(Exception): # noqa: N818
"""Exception thrown when a tool execution error occurs.
This exception allows tools to signal errors without stopping the agent.
The error is handled according to the tool's handle_tool_error setting,
and the result is returned as an observation to the agent.
"""
ArgsSchema = TypeBaseModel | dict[str, Any]
_EMPTY_SET: frozenset[str] = frozenset()
|
ToolException
|
python
|
hynek__structlog
|
src/structlog/testing.py
|
{
"start": 3760,
"end": 4141
}
|
class ____:
r"""
Produce and cache `ReturnLogger`\ s.
To be used with `structlog.configure`\ 's *logger_factory*.
Positional arguments are silently ignored.
.. versionadded:: 0.4.0
"""
def __init__(self) -> None:
self._logger = ReturnLogger()
def __call__(self, *args: Any) -> ReturnLogger:
return self._logger
|
ReturnLoggerFactory
|
python
|
huggingface__transformers
|
src/transformers/models/luke/modeling_luke.py
|
{
"start": 64868,
"end": 72827
}
|
class ____(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.LongTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
entity_start_positions: Optional[torch.LongTensor] = None,
entity_end_positions: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, EntitySpanClassificationOutput]:
r"""
entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
Indices of entity tokens in the entity vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:
- 1 for entity tokens that are **not masked**,
- 0 for entity tokens that are **masked**.
entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Segment token indices to indicate first and second portions of the entity token inputs. Indices are
selected in `[0, 1]`:
- 0 corresponds to a *portion A* entity token,
- 1 corresponds to a *portion B* entity token.
entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
entity_start_positions (`torch.LongTensor`):
The start positions of entities in the word token sequence.
entity_end_positions (`torch.LongTensor`):
The end positions of entities in the word token sequence.
labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*):
Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross
entropy loss is used for the single-label classification. In this case, labels should contain the indices
that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length,
num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case,
labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively.
Examples:
```python
>>> from transformers import AutoTokenizer, LukeForEntitySpanClassification
>>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
>>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
>>> text = "Beyoncé lives in Los Angeles"
# List all possible entity spans in the text
>>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens
>>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens
>>> entity_spans = []
>>> for i, start_pos in enumerate(word_start_positions):
... for end_pos in word_end_positions[i:]:
... entity_spans.append((start_pos, end_pos))
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_indices = logits.argmax(-1).squeeze().tolist()
>>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices):
... if predicted_class_idx != 0:
... print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx])
Beyoncé PER
Los Angeles LOC
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
hidden_size = outputs.last_hidden_state.size(-1)
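        # gather the hidden states at each entity's start and end tokens to build the span feature vectors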
entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
if entity_start_positions.device != outputs.last_hidden_state.device:
entity_start_positions = entity_start_positions.to(outputs.last_hidden_state.device)
start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions)
entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
if entity_end_positions.device != outputs.last_hidden_state.device:
entity_end_positions = entity_end_positions.to(outputs.last_hidden_state.device)
end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions)
feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2)
feature_vector = self.dropout(feature_vector)
logits = self.classifier(feature_vector)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(logits.device)
# When the number of dimension of `labels` is 2, cross entropy is used as the loss function. The binary
# cross entropy is used otherwise.
if labels.ndim == 2:
loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
else:
loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
if not return_dict:
return tuple(
v
for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions]
if v is not None
)
return EntitySpanClassificationOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
|
LukeForEntitySpanClassification
|
python
|
python__mypy
|
mypy/partially_defined.py
|
{
"start": 10694,
"end": 10771
}
|
class ____:
def __init__(self) -> None:
self.has_break = False
|
Loop
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/postprocessor/llm_rerank.py
|
{
"start": 691,
"end": 4095
}
|
class ____(BaseNodePostprocessor):
"""LLM-based reranker."""
top_n: int = Field(description="Top N nodes to return.")
choice_select_prompt: SerializeAsAny[BasePromptTemplate] = Field(
description="Choice select prompt."
)
choice_batch_size: int = Field(description="Batch size for choice select.")
llm: LLM = Field(description="The LLM to rerank with.")
_format_node_batch_fn: Callable = PrivateAttr()
_parse_choice_select_answer_fn: Callable = PrivateAttr()
def __init__(
self,
llm: Optional[LLM] = None,
choice_select_prompt: Optional[BasePromptTemplate] = None,
choice_batch_size: int = 10,
format_node_batch_fn: Optional[Callable] = None,
parse_choice_select_answer_fn: Optional[Callable] = None,
top_n: int = 10,
) -> None:
choice_select_prompt = choice_select_prompt or DEFAULT_CHOICE_SELECT_PROMPT
llm = llm or Settings.llm
super().__init__(
llm=llm,
choice_select_prompt=choice_select_prompt,
choice_batch_size=choice_batch_size,
top_n=top_n,
)
self._format_node_batch_fn = (
format_node_batch_fn or default_format_node_batch_fn
)
self._parse_choice_select_answer_fn = (
parse_choice_select_answer_fn or default_parse_choice_select_answer_fn
)
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"choice_select_prompt": self.choice_select_prompt}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "choice_select_prompt" in prompts:
self.choice_select_prompt = prompts["choice_select_prompt"]
@classmethod
def class_name(cls) -> str:
return "LLMRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
if query_bundle is None:
raise ValueError("Query bundle must be provided.")
if len(nodes) == 0:
return []
initial_results: List[NodeWithScore] = []
for idx in range(0, len(nodes), self.choice_batch_size):
nodes_batch = [
node.node for node in nodes[idx : idx + self.choice_batch_size]
]
query_str = query_bundle.query_str
fmt_batch_str = self._format_node_batch_fn(nodes_batch)
# call each batch independently
raw_response = self.llm.predict(
self.choice_select_prompt,
context_str=fmt_batch_str,
query_str=query_str,
)
raw_choices, relevances = self._parse_choice_select_answer_fn(
raw_response, len(nodes_batch)
)
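            # the LLM answers with 1-based choice numbers; convert them to 0-based indices into the current batch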
choice_idxs = [int(choice) - 1 for choice in raw_choices]
choice_nodes = [nodes_batch[idx] for idx in choice_idxs]
relevances = relevances or [1.0 for _ in choice_nodes]
initial_results.extend(
[
NodeWithScore(node=node, score=relevance)
for node, relevance in zip(choice_nodes, relevances)
]
)
return sorted(initial_results, key=lambda x: x.score or 0.0, reverse=True)[
: self.top_n
]
|
LLMRerank
|
python
|
django-extensions__django-extensions
|
tests/test_runscript.py
|
{
"start": 2595,
"end": 4451
}
|
class ____(RunScriptTests):
def test_prints_additional_info_on_nonexistent_script_by_default(self):
cmd = self.get_command()
with self.assertRaises(CommandError):
call_command(cmd, "non_existent_script")
self.assertIn(
"No (valid) module for script 'non_existent_script' found",
sys.stdout.getvalue(),
)
self.assertIn(
"Try running with a higher verbosity level like: -v2 or -v3",
sys.stdout.getvalue(),
)
self.assertEqual(cmd.last_exit_code, 1)
def test_prints_import_error_on_script_with_invalid_imports_by_default(self):
cmd = self.get_command()
with self.assertRaises(CommandError):
call_command(cmd, "invalid_import_script")
self.assertIn(
"Cannot import module 'tests.testapp.scripts.invalid_import_script'",
sys.stdout.getvalue(),
)
self.assertRegex(
sys.stdout.getvalue(), "No module named (')?(invalidpackage)\1?"
)
self.assertEqual(cmd.last_exit_code, 1)
def test_prints_import_error_on_script_with_invalid_imports_reliably(self):
cmd = self.get_command()
if hasattr(importlib, "util") and hasattr(importlib.util, "find_spec"):
with self.settings(BASE_DIR=os.path.dirname(os.path.abspath(__file__))):
with self.assertRaises(CommandError):
call_command(cmd, "invalid_import_script")
self.assertIn(
"Cannot import module 'tests.testapp.scripts.invalid_import_script'",
sys.stdout.getvalue(),
)
self.assertRegex(
sys.stdout.getvalue(), "No module named (')?(invalidpackage)\1?"
)
self.assertEqual(cmd.last_exit_code, 1)
|
InvalidImportScriptsTests
|
python
|
mkdocs__mkdocs
|
mkdocs/tests/config/config_options_tests.py
|
{
"start": 66783,
"end": 66938
}
|
class ____(Config):
foo = c.Type(str, default='default foo')
bar = c.Type(int, default=0)
dir = c.Optional(c.Dir(exists=False))
|
_FakePluginConfig
|
python
|
getsentry__sentry
|
src/sentry/integrations/discord/analytics.py
|
{
"start": 316,
"end": 471
}
|
class ____(analytics.Event):
command_name: str
@analytics.eventclass("integrations.discord.identity_linked")
|
DiscordIntegrationCommandInteractionReceived
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_tools_config_union.py
|
{
"start": 617,
"end": 1134
}
|
class ____(BaseModel):
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter, None]
|
McpAllowedToolsMcpToolFilter
|
python
|
ansible__ansible
|
lib/ansible/errors/__init__.py
|
{
"start": 8039,
"end": 8123
}
|
class ____(AnsibleRuntimeError):
"""A module failed somehow."""
|
AnsibleModuleError
|
python
|
huggingface__transformers
|
src/transformers/models/videomae/modeling_videomae.py
|
{
"start": 13563,
"end": 14900
}
|
class ____(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: VideoMAEConfig):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = VideoMAEAttention(config)
self.intermediate = VideoMAEIntermediate(config)
self.output = VideoMAEOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states_norm = self.layernorm_before(hidden_states)
attention_output = self.attention(hidden_states_norm)
# first residual connection
hidden_states = attention_output + hidden_states
# in VideoMAE, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
# second residual connection is done here
layer_output = self.output(layer_output, hidden_states)
return layer_output
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VideoMAE
|
VideoMAELayer
|
python
|
PrefectHQ__prefect
|
src/prefect/serializers.py
|
{
"start": 9225,
"end": 9512
}
|
class ____(CompressedSerializer[D]):
"""
A compressed serializer preconfigured to use the pickle serializer.
"""
type: str = Field(default="compressed/pickle", frozen=True)
serializer: Serializer[D] = Field(default_factory=PickleSerializer)
|
CompressedPickleSerializer
|
python
|
ray-project__ray
|
python/ray/serve/_private/benchmarks/microbenchmark.py
|
{
"start": 730,
"end": 5273
}
|
class ____:
def ready(self):
return "ok"
async def do_queries(self, num, data):
async with aiohttp.ClientSession() as session:
for _ in range(num):
await fetch(session, data)
def build_app(
intermediate_handles: bool,
num_replicas: int,
max_batch_size: int,
max_ongoing_requests: int,
):
@serve.deployment(max_ongoing_requests=1000)
class Upstream:
def __init__(self, handle: DeploymentHandle):
self._handle = handle
# Turn off access log.
logging.getLogger("ray.serve").setLevel(logging.WARNING)
async def __call__(self, req: Request):
return await self._handle.remote(await req.body())
@serve.deployment(
num_replicas=num_replicas,
max_ongoing_requests=max_ongoing_requests,
)
class Downstream:
def __init__(self):
# Turn off access log.
logging.getLogger("ray.serve").setLevel(logging.WARNING)
@serve.batch(max_batch_size=max_batch_size)
async def batch(self, reqs):
return [b"ok"] * len(reqs)
async def __call__(self, req: Union[bytes, Request]):
if max_batch_size > 1:
return await self.batch(req)
else:
return b"ok"
if intermediate_handles:
return Upstream.bind(Downstream.bind())
else:
return Downstream.bind()
async def trial(
intermediate_handles: bool,
num_replicas: int,
max_batch_size: int,
max_ongoing_requests: int,
data_size: str,
) -> Dict[str, float]:
results = {}
trial_key_base = (
f"replica:{num_replicas}/batch_size:{max_batch_size}/"
f"concurrent_queries:{max_ongoing_requests}/"
f"data_size:{data_size}/intermediate_handle:{intermediate_handles}"
)
print(
f"intermediate_handles={intermediate_handles},"
f"num_replicas={num_replicas},"
f"max_batch_size={max_batch_size},"
f"max_ongoing_requests={max_ongoing_requests},"
f"data_size={data_size}"
)
app = build_app(
intermediate_handles, num_replicas, max_batch_size, max_ongoing_requests
)
serve.run(app)
if data_size == "small":
data = None
elif data_size == "large":
data = b"a" * 1024 * 1024
else:
raise ValueError("data_size should be 'small' or 'large'.")
async with aiohttp.ClientSession() as session:
async def single_client():
for _ in range(CALLS_PER_BATCH):
await fetch(session, data)
single_client_avg_tps, single_client_std_tps = await run_throughput_benchmark(
single_client,
multiplier=CALLS_PER_BATCH,
)
print(
"\t{} {} +- {} requests/s".format(
"single client {} data".format(data_size),
single_client_avg_tps,
single_client_std_tps,
)
)
key = f"num_client:1/{trial_key_base}"
results[key] = single_client_avg_tps
clients = [Client.remote() for _ in range(NUM_CLIENTS)]
ray.get([client.ready.remote() for client in clients])
async def many_clients():
ray.get([a.do_queries.remote(CALLS_PER_BATCH, data) for a in clients])
multi_client_avg_tps, _ = await run_throughput_benchmark(
many_clients,
multiplier=CALLS_PER_BATCH * len(clients),
)
results[f"num_client:{len(clients)}/{trial_key_base}"] = multi_client_avg_tps
return results
async def main():
results = {}
for intermediate_handles in [False, True]:
for num_replicas in [1, 8]:
for max_batch_size, max_ongoing_requests in [
(1, 1),
(1, 10000),
(10000, 10000),
]:
# TODO(edoakes): large data causes broken pipe errors.
for data_size in ["small"]:
results.update(
await trial(
intermediate_handles,
num_replicas,
max_batch_size,
max_ongoing_requests,
data_size,
)
)
print("Results from all conditions:")
pprint(results)
return results
if __name__ == "__main__":
ray.init()
serve.start()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(main())
|
Client
|
python
|
openai__openai-python
|
src/openai/types/evals/run_create_params.py
|
{
"start": 9283,
"end": 10076
}
|
class ____(TypedDict, total=False):
format: ResponseFormatTextConfigParam
"""An object specifying the format that the model must output.
Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
ensures the model will match your supplied JSON schema. Learn more in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
The default format is `{ "type": "text" }` with no additional options.
**Not recommended for gpt-4o and newer models:**
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
ensures the message the model generates is valid JSON. Using `json_schema` is
preferred for models that support it.
"""
|
DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText
|
python
|
pytorch__pytorch
|
torch/fx/experimental/meta_tracer.py
|
{
"start": 3590,
"end": 4327
}
|
class ____(MetaProxy):
def __init__(self, root, attr: str):
self.root = root
self.attr = attr
self.tracer = root.tracer
self._node = None
@property
def node(self): # type: ignore[override]
# the node for attributes is added lazily, since most will just be method calls
# which do not rely on the getitem call
if self._node is None:
self._node = self.tracer.create_proxy(
"call_function", getattr, (self.root, self.attr), {}
).node
return self._node
def __call__(self, *args, **kwargs):
return self.tracer.create_proxy(
"call_method", self.attr, (self.root,) + args, kwargs
)
|
MetaAttribute
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/variables/variable_ops_test.py
|
{
"start": 1659,
"end": 12294
}
|
class ____(test.TestCase):
def _initFetch(self, x, tftype, use_gpu=None):
with self.test_session(use_gpu=use_gpu):
p = state_ops.variable_op(x.shape, tftype)
op = state_ops.assign(p, x)
op.op.run()
return self.evaluate(p)
def _testTypes(self, vals):
for dtype in [
np.float16, np.float32, np.float64, np.complex64, np.complex128,
np.int32, np.int64, dtypes.bfloat16.as_numpy_dtype
]:
self.setUp()
x = vals.astype(dtype)
tftype = _NP_TO_TF[dtype]
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
# NOTE(touts): the GPU test should pass for all types, whether the
# Variable op has an implementation for that type on GPU as we expect
# that Variable and Assign have GPU implementations for matching tf.
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
@test_util.run_deprecated_v1
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
@test_util.run_deprecated_v1
def testset_shape(self):
p = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual([1, 2], p.get_shape())
p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
@test_util.run_deprecated_v1
def testAssign(self):
for dtype in [dtypes.float32, dtypes.int64, dtypes.uint32, dtypes.uint8]:
value = np.array([[42, 43]])
var = state_ops.variable_op(value.shape, dtype)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoVarShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
@test_util.run_deprecated_v1
def testAssignNoVarShapeNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def _NewShapelessTensor(self):
tensor = array_ops.placeholder(dtypes.float32)
self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
return tensor
@test_util.run_deprecated_v1
def testAssignNoValueShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
assigned = state_ops.assign(var, value)
self.assertEqual(shape, var.get_shape())
self.assertEqual(shape, assigned.get_shape())
@test_util.run_deprecated_v1
def testAssignNoValueShapeNoValidateShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
self.assertEqual(shape, var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
@test_util.run_deprecated_v1
def testAssignNoShape(self):
with self.cached_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(tensor_shape.unknown_shape(),
state_ops.assign(var, value).get_shape())
@test_util.run_deprecated_v1
def testAssignNoShapeNoValidateShape(self):
with self.cached_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(
tensor_shape.unknown_shape(),
state_ops.assign(var, value, validate_shape=False).get_shape())
@test_util.run_deprecated_v1
def testAssignUpdate(self):
for dtype in [
dtypes.float32, dtypes.int64, dtypes.uint32, dtypes.uint8,
dtypes.bfloat16
]:
var = state_ops.variable_op([1, 2], dtype)
added = state_ops.assign_add(var, [[2, 3]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12, 13]])
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoVarShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoValueShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual([1, 2], subbed.get_shape())
@test_util.run_deprecated_v1
def testAssignUpdateNoShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
@test_util.run_deprecated_v1
def testTemporaryVariable(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2],
dtypes.float32,
var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = gen_state_ops.destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], self.evaluate(final))
@test_util.run_deprecated_v1
def testDestroyNonexistentTemporaryVariable(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops.destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testDuplicateTemporaryVariable(self):
with test_util.use_gpu():
var1 = gen_state_ops.temporary_variable([1, 2],
dtypes.float32,
var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
var2 = gen_state_ops.temporary_variable([1, 2],
dtypes.float32,
var_name="dup")
var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testDestroyTemporaryVariableTwice(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
with self.assertRaises(errors.NotFoundError):
self.evaluate(final)
@test_util.run_deprecated_v1
def testTemporaryVariableNoLeak(self):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2],
dtypes.float32,
var_name="bar")
final = array_ops.identity(var)
self.evaluate(final)
@test_util.run_deprecated_v1
def testTwoTemporaryVariablesNoLeaks(self):
with test_util.use_gpu():
var1 = gen_state_ops.temporary_variable([1, 2],
dtypes.float32,
var_name="var1")
var2 = gen_state_ops.temporary_variable([1, 2],
dtypes.float32,
var_name="var2")
final = var1 + var2
self.evaluate(final)
@test_util.run_deprecated_v1
def testAssignDependencyAcrossDevices(self):
with test_util.use_gpu():
# The variable and an op to increment it are on the GPU.
var = state_ops.variable_op([1], dtypes.float32)
self.evaluate(state_ops.assign(var, [1.0]))
increment = state_ops.assign_add(var, [1.0])
with ops.control_dependencies([increment]):
with test_util.force_cpu():
# This mul op is pinned to the CPU, but reads the variable from the
# GPU. The test ensures that the dependency on 'increment' is still
# honored, i.e., the Send and Recv from GPU to CPU should take place
# only after the increment.
result = math_ops.multiply(var, var)
self.assertAllClose([4.0], self.evaluate(result))
@test_util.run_deprecated_v1
def testIsVariableInitialized(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
v0 = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual(False, variable_v1.is_variable_initialized(v0).eval())
state_ops.assign(v0, [[2.0, 3.0]]).eval()
self.assertEqual(True, variable_v1.is_variable_initialized(v0).eval())
@test_util.run_deprecated_v1
def testString(self):
data = array_ops_stack.stack([b"data"])
buffer_var = variable_v1.VariableV1(
initial_value=array_ops.zeros(shape=(), dtype=dtypes.string),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="buffer",
dtype=dtypes.string,
validate_shape=False,
use_resource=False)
result = state_ops.assign(buffer_var, data, validate_shape=False)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(result.eval(), b"data")
if __name__ == "__main__":
test.main()
|
VariableOpTest
|
python
|
redis__redis-py
|
redis/event.py
|
{
"start": 634,
"end": 1211
}
|
class ____(ABC):
"""
Represents a dispatcher that dispatches events to listeners
associated with given event.
"""
@abstractmethod
def dispatch(self, event: object):
pass
@abstractmethod
async def dispatch_async(self, event: object):
pass
@abstractmethod
def register_listeners(
self,
mappings: Dict[
Type[object],
List[Union[EventListenerInterface, AsyncEventListenerInterface]],
],
):
"""Register additional listeners."""
pass
|
EventDispatcherInterface
|
python
|
django__django
|
tests/generic_relations_regress/models.py
|
{
"start": 1985,
"end": 2135
}
|
class ____(models.Model):
name = models.CharField(max_length=100)
tlinks = GenericRelation(TextLink)
# models for test_q_object_or:
|
OddRelation2
|
python
|
pallets__click
|
src/click/types.py
|
{
"start": 13568,
"end": 15969
}
|
class ____(ParamType):
"""The DateTime type converts date strings into `datetime` objects.
The format strings which are checked are configurable, but default to some
common (non-timezone aware) ISO 8601 formats.
When specifying *DateTime* formats, you should only pass a list or a tuple.
Other iterables, like generators, may lead to surprising results.
The format strings are processed using ``datetime.strptime``, and this
consequently defines the format strings which are allowed.
Parsing is tried using each format, in order, and the first format which
parses successfully is used.
:param formats: A list or tuple of date format strings, in the order in
which they should be tried. Defaults to
``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
``'%Y-%m-%d %H:%M:%S'``.
"""
name = "datetime"
def __init__(self, formats: cabc.Sequence[str] | None = None):
self.formats: cabc.Sequence[str] = formats or [
"%Y-%m-%d",
"%Y-%m-%dT%H:%M:%S",
"%Y-%m-%d %H:%M:%S",
]
def to_info_dict(self) -> dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict["formats"] = self.formats
return info_dict
def get_metavar(self, param: Parameter, ctx: Context) -> str | None:
return f"[{'|'.join(self.formats)}]"
def _try_to_convert_date(self, value: t.Any, format: str) -> datetime | None:
try:
return datetime.strptime(value, format)
except ValueError:
return None
def convert(
self, value: t.Any, param: Parameter | None, ctx: Context | None
) -> t.Any:
if isinstance(value, datetime):
return value
for format in self.formats:
converted = self._try_to_convert_date(value, format)
if converted is not None:
return converted
formats_str = ", ".join(map(repr, self.formats))
self.fail(
ngettext(
"{value!r} does not match the format {format}.",
"{value!r} does not match the formats {formats}.",
len(self.formats),
).format(value=value, format=formats_str, formats=formats_str),
param,
ctx,
)
def __repr__(self) -> str:
return "DateTime"
|
DateTime
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/managed_kafka.py
|
{
"start": 1670,
"end": 1909
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Apache Kafka Clusters link."""
name = "Apache Kafka Cluster List"
key = "cluster_list_conf"
format_str = MANAGED_KAFKA_CLUSTER_LIST_LINK
|
ApacheKafkaClusterListLink
|
python
|
doocs__leetcode
|
solution/0600-0699/0678.Valid Parenthesis String/Solution.py
|
{
"start": 0,
"end": 559
}
|
class ____:
def checkValidString(self, s: str) -> bool:
n = len(s)
dp = [[False] * n for _ in range(n)]
for i, c in enumerate(s):
dp[i][i] = c == '*'
for i in range(n - 2, -1, -1):
for j in range(i + 1, n):
dp[i][j] = (
s[i] in '(*' and s[j] in '*)' and (i + 1 == j or dp[i + 1][j - 1])
)
dp[i][j] = dp[i][j] or any(
dp[i][k] and dp[k + 1][j] for k in range(i, j)
)
return dp[0][-1]
|
Solution
|