Columns: language (string, 1 distinct value) | repo (string, 346 distinct values) | path (string, 6-201 chars) | class_span (dict) | source (string, 21-2.38M chars) | target (string, 1-96 chars)
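Each row pairs a Python class definition whose name has been masked as `____` (the `source` column) with the original class name (`target`), plus the repository, file path, and the character offsets of the class within that file (`class_span`). As a minimal sketch of how a row could be consumed, assuming a hypothetical local JSONL export of these rows (the filename and loading approach below are assumptions, not part of this preview), the masked name can be restored like this:

```python
import json

# Hypothetical local export of the rows shown below, one JSON object per line
# with the fields: language, repo, path, class_span, source, target.
with open("masked_class_rows.jsonl", encoding="utf-8") as fh:
    record = json.loads(fh.readline())

# class_span holds the character offsets of the class inside the original file.
span = record["class_span"]
print(f'{record["repo"]}:{record["path"]} [{span["start"]}:{span["end"]}]')

# The source masks the class name with "____"; substituting the target back in
# reconstructs the snippet as it appears in the repository file.
restored = record["source"].replace("class ____", f'class {record["target"]}', 1)
print(restored.splitlines()[0])
```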

language: python | repo: getsentry__sentry | path: tests/sentry/issues/endpoints/test_organization_group_index.py | class_span: {"start": 114619, "end": 173668}
source:
class ____(APITestCase, SnubaTestCase):
endpoint = "sentry-api-0-organization-group-index"
method = "put"
def setUp(self) -> None:
super().setUp()
self.min_ago = timezone.now() - timedelta(minutes=1)
def get_response(self, *args: Any, **kwargs: Any) -> Response:
if not args:
org = self.project.organization.slug
else:
org = args[0]
return super().get_response(org, **kwargs)
def assertNoResolution(self, group: Group) -> None:
assert not GroupResolution.objects.filter(group=group).exists()
def test_global_resolve(self) -> None:
group1 = self.create_group(status=GroupStatus.RESOLVED)
group2 = self.create_group(status=GroupStatus.UNRESOLVED)
group3 = self.create_group(status=GroupStatus.IGNORED)
group4 = self.create_group(
project=self.create_project(slug="foo"),
status=GroupStatus.UNRESOLVED,
)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={
"status": "unresolved",
"project": self.project.id,
"query": "is:unresolved",
},
status="resolved",
)
assert response.data == {"status": "resolved", "statusDetails": {}, "inbox": None}
# the previously resolved entry should not be included
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.status == GroupStatus.RESOLVED
assert new_group1.resolved_at is None
        # this won't exist because it wasn't affected
assert not GroupSubscription.objects.filter(user_id=self.user.id, group=new_group1).exists()
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.status == GroupStatus.RESOLVED
assert new_group2.resolved_at is not None
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=new_group2, is_active=True
).exists()
# the ignored entry should not be included
new_group3 = Group.objects.get(id=group3.id)
assert new_group3.status == GroupStatus.IGNORED
assert new_group3.resolved_at is None
assert not GroupSubscription.objects.filter(user_id=self.user.id, group=new_group3)
new_group4 = Group.objects.get(id=group4.id)
assert new_group4.status == GroupStatus.UNRESOLVED
assert new_group4.resolved_at is None
assert not GroupSubscription.objects.filter(user_id=self.user.id, group=new_group4)
assert not GroupHistory.objects.filter(
group=group1, status=GroupHistoryStatus.RESOLVED
).exists()
assert GroupHistory.objects.filter(
group=group2, status=GroupHistoryStatus.RESOLVED
).exists()
assert not GroupHistory.objects.filter(
group=group3, status=GroupHistoryStatus.RESOLVED
).exists()
assert not GroupHistory.objects.filter(
group=group4, status=GroupHistoryStatus.RESOLVED
).exists()
def test_resolve_member(self) -> None:
group = self.create_group(status=GroupStatus.UNRESOLVED)
member = self.create_user()
self.create_member(
organization=self.organization, teams=group.project.teams.all(), user=member
)
self.login_as(user=member)
response = self.get_success_response(
qs_params={
"status": "unresolved",
"project": self.project.id,
"query": "is:unresolved",
},
status="resolved",
)
assert response.data == {"status": "resolved", "statusDetails": {}, "inbox": None}
assert response.status_code == 200
def test_resolve_ignored(self) -> None:
group = self.create_group(status=GroupStatus.IGNORED)
snooze = GroupSnooze.objects.create(
group=group, until=timezone.now() - timedelta(minutes=1)
)
member = self.create_user()
self.create_member(
organization=self.organization, teams=group.project.teams.all(), user=member
)
self.login_as(user=member)
response = self.get_success_response(
qs_params={"id": group.id, "project": self.project.id}, status="resolved"
)
assert response.data == {"status": "resolved", "statusDetails": {}, "inbox": None}
assert not GroupSnooze.objects.filter(id=snooze.id).exists()
def test_bulk_resolve(self) -> None:
self.login_as(user=self.user)
for i in range(101):
self.store_event(
data={
"fingerprint": [i],
"timestamp": (self.min_ago - timedelta(seconds=i)).isoformat(),
},
project_id=self.project.id,
)
response = self.get_success_response(query="is:unresolved", sort_by="date", method="get")
assert len(response.data) == 100
response = self.get_success_response(qs_params={"status": "unresolved"}, status="resolved")
assert response.data == {"status": "resolved", "statusDetails": {}, "inbox": None}
response = self.get_success_response(query="is:unresolved", sort_by="date", method="get")
assert len(response.data) == 0
@patch("sentry.integrations.example.integration.ExampleIntegration.sync_status_outbound")
def test_resolve_with_integration(self, mock_sync_status_outbound: MagicMock) -> None:
self.login_as(user=self.user)
org = self.organization
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", name="Example")
integration.add_organization(org, self.user)
event = self.store_event(
data={"timestamp": self.min_ago.isoformat()}, project_id=self.project.id
)
group = event.group
with assume_test_silo_mode(SiloMode.CONTROL):
OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
).update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
)
external_issue = ExternalIssue.objects.get_or_create(
organization_id=org.id, integration_id=integration.id, key="APP-%s" % group.id
)[0]
GroupLink.objects.get_or_create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)[0]
response = self.get_success_response(sort_by="date", query="is:unresolved", method="get")
assert len(response.data) == 1
with self.tasks():
with self.feature({"organizations:integrations-issue-sync": True}):
response = self.get_success_response(
qs_params={"status": "unresolved"}, status="resolved"
)
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
assert response.data == {"status": "resolved", "statusDetails": {}, "inbox": None}
mock_sync_status_outbound.assert_called_once_with(
external_issue, True, group.project_id
)
response = self.get_success_response(sort_by="date", query="is:unresolved", method="get")
assert len(response.data) == 0
@patch("sentry.integrations.example.integration.ExampleIntegration.sync_status_outbound")
def test_set_unresolved_with_integration(self, mock_sync_status_outbound: MagicMock) -> None:
release = self.create_release(project=self.project, version="abc")
group = self.create_group(status=GroupStatus.RESOLVED)
with assume_test_silo_mode(SiloMode.CONTROL):
org = self.organization
integration = self.create_provider_integration(provider="example", name="Example")
integration.add_organization(org, self.user)
OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
).update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
)
GroupResolution.objects.create(group=group, release=release)
external_issue = ExternalIssue.objects.get_or_create(
organization_id=org.id, integration_id=integration.id, key="APP-%s" % group.id
)[0]
GroupLink.objects.get_or_create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)[0]
self.login_as(user=self.user)
with self.tasks():
with self.feature({"organizations:integrations-issue-sync": True}):
response = self.get_success_response(
qs_params={"id": group.id}, status="unresolved"
)
assert response.status_code == 200
assert response.data == {"status": "unresolved", "statusDetails": {}}
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
self.assertNoResolution(group)
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
mock_sync_status_outbound.assert_called_once_with(
external_issue, False, group.project_id
)
def test_self_assign_issue(self) -> None:
group = self.create_group(status=GroupStatus.UNRESOLVED)
user = self.user
with assume_test_silo_mode(SiloMode.CONTROL):
uo1 = UserOption.objects.create(
key="self_assign_issue", value="1", project_id=None, user=user
)
self.login_as(user=user)
response = self.get_success_response(qs_params={"id": group.id}, status="resolved")
assert response.data["assignedTo"]["id"] == str(user.id)
assert response.data["assignedTo"]["type"] == "user"
assert response.data["status"] == "resolved"
assert GroupAssignee.objects.filter(group=group, user_id=user.id).exists()
assert GroupSubscription.objects.filter(
user_id=user.id, group=group, is_active=True
).exists()
with assume_test_silo_mode(SiloMode.CONTROL):
uo1.delete()
def test_self_assign_issue_next_release(self) -> None:
release = Release.objects.create(organization_id=self.project.organization_id, version="a")
release.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
with assume_test_silo_mode(SiloMode.CONTROL):
uo1 = UserOption.objects.create(
key="self_assign_issue", value="1", project_id=None, user=self.user
)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="resolvedInNextRelease"
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inNextRelease"]
assert response.data["assignedTo"]["id"] == str(self.user.id)
assert response.data["assignedTo"]["type"] == "user"
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
assert GroupResolution.objects.filter(group=group, release=release).exists()
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
activity = Activity.objects.get(
group=group, type=ActivityType.SET_RESOLVED_IN_RELEASE.value
)
assert activity.data["version"] == ""
with assume_test_silo_mode(SiloMode.CONTROL):
uo1.delete()
def test_in_semver_projects_group_resolution_stores_current_release_version(self) -> None:
"""
Test that ensures that when we resolve a group in the next release, then
GroupResolution.current_release_version is set to the latest release associated with a
        Group, when the project follows a semantic versioning scheme
"""
release_21_1_0 = self.create_release(version="fake_package@21.1.0")
release_21_1_1 = self.create_release(version="fake_package@21.1.1")
release_21_1_2 = self.create_release(version="fake_package@21.1.2")
self.store_event(
data={
"timestamp": before_now(seconds=10).isoformat(),
"fingerprint": ["group-1"],
"release": release_21_1_1.version,
},
project_id=self.project.id,
)
group = self.store_event(
data={
"timestamp": before_now(seconds=12).isoformat(),
"fingerprint": ["group-1"],
"release": release_21_1_0.version,
},
project_id=self.project.id,
).group
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="resolvedInNextRelease"
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inNextRelease"]
        # The current_release_version should be set to the latest (in semver) release associated
        # with a group
grp_resolution = GroupResolution.objects.get(group=group)
assert grp_resolution.current_release_version == release_21_1_2.version
# "resolvedInNextRelease" with semver releases is considered as "resolvedInRelease"
assert grp_resolution.type == GroupResolution.Type.in_release
assert grp_resolution.status == GroupResolution.Status.resolved
# Add release that is between 2 and 3 to ensure that any release after release 2 should
# not have a resolution
release_21_1_1_plus_1 = self.create_release(version="fake_package@21.1.1+1")
release_21_1_3 = self.create_release(version="fake_package@21.1.3")
for release in [release_21_1_0, release_21_1_1, release_21_1_1_plus_1, release_21_1_2]:
assert GroupResolution.has_resolution(group=group, release=release)
for release in [release_21_1_3]:
assert not GroupResolution.has_resolution(group=group, release=release)
# Ensure that Activity has `current_release_version` set on `Resolved in next release`
activity = Activity.objects.get(
group=grp_resolution.group,
type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
ident=grp_resolution.id,
)
assert activity.data["current_release_version"] == release_21_1_2.version
def test_in_non_semver_projects_group_resolution_stores_current_release_version(self) -> None:
"""
Test that ensures that when we resolve a group in the next release, then
GroupResolution.current_release_version is set to the most recent release associated with a
        Group, when the project does not follow a semantic versioning scheme
"""
release_1 = self.create_release(
date_added=timezone.now() - timedelta(minutes=45), version="foobar 1"
)
release_2 = self.create_release(version="foobar 2")
group = self.store_event(
data={
"timestamp": before_now(seconds=12).isoformat(),
"fingerprint": ["group-1"],
"release": release_1.version,
},
project_id=self.project.id,
).group
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="resolvedInNextRelease"
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inNextRelease"]
        # Add a new release that is between 1 and 2, to make sure that if the same issue/group
        # occurs in that release, then it should not have a resolution
release_3 = self.create_release(
date_added=timezone.now() - timedelta(minutes=30), version="foobar 3"
)
grp_resolution = GroupResolution.objects.filter(group=group)
assert len(grp_resolution) == 1
assert grp_resolution[0].current_release_version == release_1.version
assert GroupResolution.has_resolution(group=group, release=release_1)
for release in [release_2, release_3]:
assert not GroupResolution.has_resolution(group=group, release=release)
def test_in_non_semver_projects_store_actual_current_release_version_not_cached_version(
self,
) -> None:
"""
        Test that ensures that current_release_version is the actual latest version
        associated with a group, not the cached one. `group.get_last_release` fetches the
        latest release associated with a group and caches that value; when resolving in the
        next release we must not rely on that cache, because a newer release may have been
        associated with the group in the meantime, and storing the cached rather than the
        actual latest release could produce unexpected results in the regression algorithm.
"""
release_1 = self.create_release(
date_added=timezone.now() - timedelta(minutes=45), version="foobar 1"
)
release_2 = self.create_release(version="foobar 2")
group = self.store_event(
data={
"timestamp": before_now(seconds=12).isoformat(),
"fingerprint": ["group-1"],
"release": release_1.version,
},
project_id=self.project.id,
).group
# Call this function to cache the `last_seen` release to release_1
# i.e. Set the first last observed by Sentry
assert group.get_last_release() == release_1.version
self.login_as(user=self.user)
self.store_event(
data={
"timestamp": before_now(seconds=0).isoformat(),
"fingerprint": ["group-1"],
"release": release_2.version,
},
project_id=self.project.id,
)
# Cached (i.e. first last observed release by Sentry) is returned here since `use_cache`
# is set to its default of `True`
assert Group.objects.get(id=group.id).get_last_release() == release_1.version
response = self.get_success_response(
qs_params={"id": group.id}, status="resolvedInNextRelease"
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inNextRelease"]
# Changes here to release_2 and actual latest because `resolvedInNextRelease`,
# sets `use_cache` to False when fetching the last release associated with a group
assert Group.objects.get(id=group.id).get_last_release() == release_2.version
grp_resolution = GroupResolution.objects.filter(group=group)
assert len(grp_resolution) == 1
assert grp_resolution[0].current_release_version == release_2.version
def test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release(self) -> None:
"""
        Test that ensures that if the next release is already known when resolving via Resolved
        In Next Release (because that release already exists), we can short-circuit: instead of
        setting GroupResolution to type "inNextRelease" and waiting for `clear_expired_resolutions`
        to run once a new release is created to convert it to in_release and set Activity, we
        treat "ResolvedInNextRelease" as "ResolvedInRelease" whenever a release exists that was
        created after the last release associated with the group being resolved.
"""
release_1 = self.create_release(
date_added=timezone.now() - timedelta(minutes=45), version="foobar 1"
)
release_2 = self.create_release(version="foobar 2")
self.create_release(version="foobar 3")
group = self.store_event(
data={
"timestamp": before_now(seconds=12).isoformat(),
"fingerprint": ["group-1"],
"release": release_1.version,
},
project_id=self.project.id,
).group
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="resolvedInNextRelease"
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inNextRelease"]
grp_resolution = GroupResolution.objects.get(group=group)
assert grp_resolution.current_release_version == release_1.version
assert grp_resolution.release.id == release_2.id
assert grp_resolution.type == GroupResolution.Type.in_release
assert grp_resolution.status == GroupResolution.Status.resolved
activity = Activity.objects.get(
group=grp_resolution.group,
type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
ident=grp_resolution.id,
)
assert activity.data["version"] == release_2.version
def test_selective_status_update(self) -> None:
group1 = self.create_group(status=GroupStatus.RESOLVED)
group1.resolved_at = timezone.now()
group1.save()
group2 = self.create_group(status=GroupStatus.UNRESOLVED)
group3 = self.create_group(status=GroupStatus.IGNORED)
group4 = self.create_group(
project=self.create_project(slug="foo"),
status=GroupStatus.UNRESOLVED,
)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id], "group4": group4.id}, status="resolved"
)
assert response.data == {"status": "resolved", "statusDetails": {}, "inbox": None}
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.resolved_at is not None
assert new_group1.status == GroupStatus.RESOLVED
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.resolved_at is not None
assert new_group2.status == GroupStatus.RESOLVED
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=new_group2, is_active=True
).exists()
new_group3 = Group.objects.get(id=group3.id)
assert new_group3.resolved_at is None
assert new_group3.status == GroupStatus.IGNORED
new_group4 = Group.objects.get(id=group4.id)
assert new_group4.resolved_at is None
assert new_group4.status == GroupStatus.UNRESOLVED
def test_set_resolved_in_current_release(self) -> None:
release = Release.objects.create(organization_id=self.project.organization_id, version="a")
release.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="resolved", statusDetails={"inRelease": "latest"}
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inRelease"] == release.version
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
resolution = GroupResolution.objects.get(group=group)
assert resolution.release == release
assert resolution.type == GroupResolution.Type.in_release
assert resolution.status == GroupResolution.Status.resolved
assert resolution.actor_id == self.user.id
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
activity = Activity.objects.get(
group=group, type=ActivityType.SET_RESOLVED_IN_RELEASE.value
)
assert activity.data["version"] == release.version
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.SET_RESOLVED_IN_RELEASE
).exists()
def test_set_resolved_in_explicit_release(self) -> None:
release = Release.objects.create(organization_id=self.project.organization_id, version="a")
release.add_project(self.project)
release2 = Release.objects.create(organization_id=self.project.organization_id, version="b")
release2.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id},
status="resolved",
statusDetails={"inRelease": release.version},
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inRelease"] == release.version
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
assert "activity" in response.data
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
resolution = GroupResolution.objects.get(group=group)
assert resolution.release == release
assert resolution.type == GroupResolution.Type.in_release
assert resolution.status == GroupResolution.Status.resolved
assert resolution.actor_id == self.user.id
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
activity = Activity.objects.get(
group=group, type=ActivityType.SET_RESOLVED_IN_RELEASE.value
)
assert activity.data["version"] == release.version
def test_in_semver_projects_set_resolved_in_explicit_release(self) -> None:
release_1 = self.create_release(version="fake_package@3.0.0")
release_2 = self.create_release(version="fake_package@2.0.0")
release_3 = self.create_release(version="fake_package@3.0.1")
group = self.store_event(
data={
"timestamp": before_now(seconds=10).isoformat(),
"fingerprint": ["group-1"],
"release": release_1.version,
},
project_id=self.project.id,
).group
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id},
status="resolved",
statusDetails={"inRelease": release_1.version},
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inRelease"] == release_1.version
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
assert "activity" in response.data
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
resolution = GroupResolution.objects.get(group=group)
assert resolution.release == release_1
assert resolution.type == GroupResolution.Type.in_release
assert resolution.status == GroupResolution.Status.resolved
assert resolution.actor_id == self.user.id
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
activity = Activity.objects.get(
group=group, type=ActivityType.SET_RESOLVED_IN_RELEASE.value
)
assert activity.data["version"] == release_1.version
assert GroupResolution.has_resolution(group=group, release=release_2)
assert not GroupResolution.has_resolution(group=group, release=release_3)
def test_set_resolved_in_next_release(self) -> None:
release = Release.objects.create(organization_id=self.project.organization_id, version="a")
release.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="resolved", statusDetails={"inNextRelease": True}
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inNextRelease"]
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
assert "activity" in response.data
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
resolution = GroupResolution.objects.get(group=group)
assert resolution.release == release
assert resolution.type == GroupResolution.Type.in_next_release
assert resolution.status == GroupResolution.Status.pending
assert resolution.actor_id == self.user.id
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
activity = Activity.objects.get(
group=group, type=ActivityType.SET_RESOLVED_IN_RELEASE.value
)
assert activity.data["version"] == ""
def test_set_resolved_in_next_release_legacy(self) -> None:
release = Release.objects.create(organization_id=self.project.organization_id, version="a")
release.add_project(self.project)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="resolvedInNextRelease"
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inNextRelease"]
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
assert "activity" in response.data
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
resolution = GroupResolution.objects.get(group=group)
assert resolution.release == release
assert resolution.type == GroupResolution.Type.in_next_release
assert resolution.status == GroupResolution.Status.pending
assert resolution.actor_id == self.user.id
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.SET_RESOLVED_IN_RELEASE
).exists()
activity = Activity.objects.get(
group=group, type=ActivityType.SET_RESOLVED_IN_RELEASE.value
)
assert activity.data["version"] == ""
def test_set_resolved_in_explicit_commit_unreleased(self) -> None:
repo = self.create_repo(project=self.project, name=self.project.name)
commit = self.create_commit(project=self.project, repo=repo)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id},
status="resolved",
statusDetails={"inCommit": {"commit": commit.key, "repository": repo.name}},
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inCommit"]["id"] == commit.key
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
assert "activity" not in response.data
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
link = GroupLink.objects.get(group_id=group.id)
assert link.linked_type == GroupLink.LinkedType.commit
assert link.relationship == GroupLink.Relationship.resolves
assert link.linked_id == commit.id
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
activity = Activity.objects.get(group=group, type=ActivityType.SET_RESOLVED_IN_COMMIT.value)
assert activity.data["commit"] == commit.id
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.SET_RESOLVED_IN_COMMIT
).exists()
def test_set_resolved_in_explicit_commit_released(self) -> None:
release = self.create_release(project=self.project)
repo = self.create_repo(project=self.project, name=self.project.name)
commit = self.create_commit(project=self.project, repo=repo, release=release)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id},
status="resolved",
statusDetails={"inCommit": {"commit": commit.key, "repository": repo.name}},
)
assert response.data["status"] == "resolved"
assert response.data["statusDetails"]["inCommit"]["id"] == commit.key
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
assert "activity" in response.data
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
link = GroupLink.objects.get(group_id=group.id)
assert link.project_id == self.project.id
assert link.linked_type == GroupLink.LinkedType.commit
assert link.relationship == GroupLink.Relationship.resolves
assert link.linked_id == commit.id
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
activity = Activity.objects.get(group=group, type=ActivityType.SET_RESOLVED_IN_COMMIT.value)
assert activity.data["commit"] == commit.id
resolution = GroupResolution.objects.get(group=group)
assert resolution.type == GroupResolution.Type.in_release
assert resolution.status == GroupResolution.Status.resolved
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.SET_RESOLVED_IN_COMMIT
).exists()
def test_set_resolved_in_explicit_commit_missing(self) -> None:
repo = self.create_repo(project=self.project, name=self.project.name)
group = self.create_group(status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
response = self.get_response(
qs_params={"id": group.id},
status="resolved",
statusDetails={"inCommit": {"commit": "a" * 40, "repository": repo.name}},
)
assert response.status_code == 400
assert (
response.data["statusDetails"]["inCommit"]["commit"][0]
== "Unable to find the given commit."
)
assert not GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.SET_RESOLVED_IN_COMMIT
).exists()
def test_set_unresolved(self) -> None:
release = self.create_release(project=self.project, version="abc")
group = self.create_group(status=GroupStatus.IGNORED)
GroupResolution.objects.create(group=group, release=release)
self.login_as(user=self.user)
response = self.get_success_response(qs_params={"id": group.id}, status="unresolved")
assert response.data == {"status": "unresolved", "statusDetails": {}}
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.UNRESOLVED
).exists()
self.assertNoResolution(group)
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group, is_active=True
).exists()
def test_set_unresolved_on_snooze(self) -> None:
group = self.create_group(status=GroupStatus.IGNORED)
GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(days=1))
self.login_as(user=self.user)
response = self.get_success_response(qs_params={"id": group.id}, status="unresolved")
assert response.data == {"status": "unresolved", "statusDetails": {}}
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.UNRESOLVED
).exists()
def test_basic_ignore(self) -> None:
group = self.create_group(status=GroupStatus.RESOLVED)
snooze = GroupSnooze.objects.create(group=group, until=timezone.now())
self.login_as(user=self.user)
assert not GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.IGNORED
).exists()
response = self.get_success_response(qs_params={"id": group.id}, status="ignored")
# existing snooze objects should be cleaned up
assert not GroupSnooze.objects.filter(id=snooze.id).exists()
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.IGNORED
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.ARCHIVED_FOREVER
).exists()
assert response.data == {"status": "ignored", "statusDetails": {}, "inbox": None}
def test_snooze_duration(self) -> None:
group = self.create_group(status=GroupStatus.RESOLVED)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="ignored", ignoreDuration=30
)
snooze = GroupSnooze.objects.get(group=group)
snooze.until = snooze.until
now = timezone.now()
assert snooze.count is None
assert snooze.until is not None
assert snooze.until > now + timedelta(minutes=29)
assert snooze.until < now + timedelta(minutes=31)
assert snooze.user_count is None
assert snooze.user_window is None
assert snooze.window is None
response.data["statusDetails"]["ignoreUntil"] = response.data["statusDetails"][
"ignoreUntil"
]
assert response.data["status"] == "ignored"
assert response.data["statusDetails"]["ignoreCount"] == snooze.count
assert response.data["statusDetails"]["ignoreWindow"] == snooze.window
assert response.data["statusDetails"]["ignoreUserCount"] == snooze.user_count
assert response.data["statusDetails"]["ignoreUserWindow"] == snooze.user_window
assert response.data["statusDetails"]["ignoreUntil"] == snooze.until
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
def test_snooze_count(self) -> None:
group = self.create_group(status=GroupStatus.RESOLVED, times_seen=1)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="ignored", ignoreCount=100
)
snooze = GroupSnooze.objects.get(group=group)
assert snooze.count == 100
assert snooze.until is None
assert snooze.user_count is None
assert snooze.user_window is None
assert snooze.window is None
assert snooze.state is not None
assert snooze.state["times_seen"] == 1
assert response.data["status"] == "ignored"
assert response.data["statusDetails"]["ignoreCount"] == snooze.count
assert response.data["statusDetails"]["ignoreWindow"] == snooze.window
assert response.data["statusDetails"]["ignoreUserCount"] == snooze.user_count
assert response.data["statusDetails"]["ignoreUserWindow"] == snooze.user_window
assert response.data["statusDetails"]["ignoreUntil"] == snooze.until
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
def test_snooze_user_count(self) -> None:
for i in range(10):
event = self.store_event(
data={
"fingerprint": ["put-me-in-group-1"],
"user": {"id": str(i)},
"timestamp": (self.min_ago + timedelta(seconds=i)).isoformat(),
},
project_id=self.project.id,
)
assert event.group is not None
group = Group.objects.get(id=event.group.id)
group.status = GroupStatus.RESOLVED
group.substatus = None
group.save()
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": group.id}, status="ignored", ignoreUserCount=10
)
snooze = GroupSnooze.objects.get(group=group)
assert snooze.count is None
assert snooze.until is None
assert snooze.user_count == 10
assert snooze.user_window is None
assert snooze.window is None
assert snooze.state is not None
assert snooze.state["users_seen"] == 10
assert response.data["status"] == "ignored"
assert response.data["statusDetails"]["ignoreCount"] == snooze.count
assert response.data["statusDetails"]["ignoreWindow"] == snooze.window
assert response.data["statusDetails"]["ignoreUserCount"] == snooze.user_count
assert response.data["statusDetails"]["ignoreUserWindow"] == snooze.user_window
assert response.data["statusDetails"]["ignoreUntil"] == snooze.until
assert response.data["statusDetails"]["actor"]["id"] == str(self.user.id)
def test_set_bookmarked(self) -> None:
group1 = self.create_group(status=GroupStatus.RESOLVED)
group2 = self.create_group(status=GroupStatus.UNRESOLVED)
group3 = self.create_group(status=GroupStatus.IGNORED)
group4 = self.create_group(
project=self.create_project(slug="foo"),
status=GroupStatus.UNRESOLVED,
)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id], "group4": group4.id}, isBookmarked="true"
)
assert response.data == {"isBookmarked": True}
bookmark1 = GroupBookmark.objects.filter(group=group1, user_id=self.user.id)
assert bookmark1.exists()
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group1, is_active=True
).exists()
bookmark2 = GroupBookmark.objects.filter(group=group2, user_id=self.user.id)
assert bookmark2.exists()
assert GroupSubscription.objects.filter(
user_id=self.user.id, group=group2, is_active=True
).exists()
bookmark3 = GroupBookmark.objects.filter(group=group3, user_id=self.user.id)
assert not bookmark3.exists()
bookmark4 = GroupBookmark.objects.filter(group=group4, user_id=self.user.id)
assert not bookmark4.exists()
def test_subscription(self) -> None:
group1 = self.create_group()
group2 = self.create_group()
group3 = self.create_group()
group4 = self.create_group(project=self.create_project(slug="foo"))
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id], "group4": group4.id}, isSubscribed="true"
)
assert response.data == {"isSubscribed": True, "subscriptionDetails": {"reason": "unknown"}}
assert GroupSubscription.objects.filter(
group=group1, user_id=self.user.id, is_active=True
).exists()
assert GroupSubscription.objects.filter(
group=group2, user_id=self.user.id, is_active=True
).exists()
assert not GroupSubscription.objects.filter(group=group3, user_id=self.user.id).exists()
assert not GroupSubscription.objects.filter(group=group4, user_id=self.user.id).exists()
def test_set_public(self) -> None:
group1 = self.create_group()
group2 = self.create_group()
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id]}, isPublic="true"
)
assert response.data["isPublic"] is True
assert "shareId" in response.data
new_group1 = Group.objects.get(id=group1.id)
assert bool(new_group1.get_share_id())
new_group2 = Group.objects.get(id=group2.id)
assert bool(new_group2.get_share_id())
def test_set_private(self) -> None:
group1 = self.create_group()
group2 = self.create_group()
# Manually mark them as shared
for g in group1, group2:
GroupShare.objects.create(project_id=g.project_id, group=g)
assert bool(g.get_share_id())
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id]}, isPublic="false"
)
assert response.data == {"isPublic": False, "shareId": None}
new_group1 = Group.objects.get(id=group1.id)
assert not bool(new_group1.get_share_id())
new_group2 = Group.objects.get(id=group2.id)
assert not bool(new_group2.get_share_id())
def test_set_has_seen(self) -> None:
group1 = self.create_group(status=GroupStatus.RESOLVED)
group2 = self.create_group(status=GroupStatus.UNRESOLVED)
group3 = self.create_group(status=GroupStatus.IGNORED)
group4 = self.create_group(
project=self.create_project(slug="foo"),
status=GroupStatus.UNRESOLVED,
)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id], "group4": group4.id}, hasSeen="true"
)
assert response.data == {"hasSeen": True}
r1 = GroupSeen.objects.filter(group=group1, user_id=self.user.id)
assert r1.exists()
r2 = GroupSeen.objects.filter(group=group2, user_id=self.user.id)
assert r2.exists()
r3 = GroupSeen.objects.filter(group=group3, user_id=self.user.id)
assert not r3.exists()
r4 = GroupSeen.objects.filter(group=group4, user_id=self.user.id)
assert not r4.exists()
@patch("sentry.issues.merge.uuid4")
@patch("sentry.issues.merge.merge_groups")
@patch("sentry.eventstream.backend")
def test_merge(
self, mock_eventstream: MagicMock, merge_groups: MagicMock, mock_uuid4: MagicMock
) -> None:
eventstream_state = object()
mock_eventstream.start_merge = Mock(return_value=eventstream_state)
mock_uuid4.return_value = self.get_mock_uuid()
today = datetime.now(tz=UTC)
yesterday = today - timedelta(days=1)
group1 = self.create_group(first_seen=today, times_seen=1)
group2 = self.create_group(first_seen=yesterday, times_seen=50)
group3 = self.create_group(first_seen=today, times_seen=2)
self.create_group()
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id, group3.id]}, merge="1"
)
assert response.data["merge"]["parent"] == str(group2.id)
assert sorted(response.data["merge"]["children"]) == sorted(
[str(group1.id), str(group3.id)]
)
mock_eventstream.start_merge.assert_called_once_with(
group1.project_id,
[group3.id, group1.id],
group2.id,
group2.first_seen,
)
assert len(merge_groups.mock_calls) == 1
merge_groups.delay.assert_any_call(
from_object_ids=[group3.id, group1.id],
to_object_id=group2.id,
transaction_id="abc123",
eventstream_state=eventstream_state,
)
@patch("sentry.issues.merge.uuid4")
@patch("sentry.issues.merge.merge_groups")
@patch("sentry.eventstream.backend")
def test_merge_performance_issues(
self, mock_eventstream: MagicMock, merge_groups: MagicMock, mock_uuid4: MagicMock
) -> None:
eventstream_state = object()
mock_eventstream.start_merge = Mock(return_value=eventstream_state)
mock_uuid4.return_value = self.get_mock_uuid()
group1 = self.create_group(times_seen=1, type=PerformanceSlowDBQueryGroupType.type_id)
group2 = self.create_group(times_seen=50, type=PerformanceSlowDBQueryGroupType.type_id)
group3 = self.create_group(times_seen=2, type=PerformanceSlowDBQueryGroupType.type_id)
self.create_group()
self.login_as(user=self.user)
response = self.get_error_response(
qs_params={"id": [group1.id, group2.id, group3.id]}, merge="1"
)
assert response.status_code == 400, response.content
def test_assign(self) -> None:
group1 = self.create_group(is_public=True)
group2 = self.create_group(is_public=True)
user = self.user
self.login_as(user=user)
response = self.get_success_response(qs_params={"id": group1.id}, assignedTo=user.username)
assert response.data["assignedTo"]["id"] == str(user.id)
assert response.data["assignedTo"]["type"] == "user"
assert GroupAssignee.objects.filter(group=group1, user_id=user.id).exists()
assert GroupHistory.objects.filter(
group=group1, status=GroupHistoryStatus.ASSIGNED
).exists()
assert not GroupAssignee.objects.filter(group=group2, user_id=user.id).exists()
assert (
Activity.objects.filter(
group=group1, user_id=user.id, type=ActivityType.ASSIGNED.value
).count()
== 1
)
assert GroupSubscription.objects.filter(
user_id=user.id, group=group1, is_active=True
).exists()
response = self.get_success_response(qs_params={"id": group1.id}, assignedTo="")
assert response.data["assignedTo"] is None
assert not GroupAssignee.objects.filter(group=group1, user_id=user.id).exists()
assert GroupHistory.objects.filter(
group=group1, status=GroupHistoryStatus.UNASSIGNED
).exists()
def test_assign_non_member(self) -> None:
group = self.create_group(is_public=True)
member = self.user
non_member = self.create_user("bar@example.com")
self.login_as(user=member)
response = self.get_response(qs_params={"id": group.id}, assignedTo=non_member.username)
assert not GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.ASSIGNED
).exists()
assert response.status_code == 400, response.content
def test_assign_team(self) -> None:
self.login_as(user=self.user)
group = self.create_group()
other_member = self.create_user("bar@example.com")
team = self.create_team(
organization=group.project.organization, members=[self.user, other_member]
)
group.project.add_team(team)
assert not GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.ASSIGNED
).exists()
response = self.get_success_response(
qs_params={"id": group.id}, assignedTo=f"team:{team.id}"
)
assert response.data["assignedTo"]["id"] == str(team.id)
assert response.data["assignedTo"]["type"] == "team"
assert GroupHistory.objects.filter(group=group, status=GroupHistoryStatus.ASSIGNED).exists()
assert GroupAssignee.objects.filter(group=group, team=team).exists()
assert Activity.objects.filter(group=group, type=ActivityType.ASSIGNED.value).count() == 1
assert GroupSubscription.objects.filter(group=group, is_active=True).count() == 2
response = self.get_success_response(qs_params={"id": group.id}, assignedTo="")
assert response.data["assignedTo"] is None
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.UNASSIGNED
).exists()
def test_discard(self) -> None:
group1 = self.create_group(is_public=True)
group2 = self.create_group(is_public=True)
group_hash = GroupHash.objects.create(hash="x" * 32, project=group1.project, group=group1)
user = self.user
self.login_as(user=user)
with self.tasks():
with self.feature("projects:discard-groups"):
response = self.get_response(qs_params={"id": group1.id}, discard=True)
assert response.status_code == 204
assert not Group.objects.filter(id=group1.id).exists()
assert Group.objects.filter(id=group2.id).exists()
assert GroupHash.objects.filter(id=group_hash.id).exists()
tombstone = GroupTombstone.objects.get(
id=GroupHash.objects.get(id=group_hash.id).group_tombstone_id
)
assert tombstone.message == group1.message
assert tombstone.culprit == group1.culprit
assert tombstone.project == group1.project
assert tombstone.data == group1.data
def test_set_inbox(self) -> None:
group1 = self.create_group()
group2 = self.create_group()
self.login_as(user=self.user)
response = self.get_success_response(qs_params={"id": [group1.id, group2.id]}, inbox="true")
assert response.data == {"inbox": True}
assert GroupInbox.objects.filter(group=group1).exists()
assert GroupInbox.objects.filter(group=group2).exists()
assert not GroupHistory.objects.filter(
group=group1, status=GroupHistoryStatus.REVIEWED
).exists()
assert not GroupHistory.objects.filter(
group=group2, status=GroupHistoryStatus.REVIEWED
).exists()
response = self.get_success_response(qs_params={"id": [group2.id]}, inbox="false")
assert response.data == {"inbox": False}
assert GroupInbox.objects.filter(group=group1).exists()
assert not GroupHistory.objects.filter(
group=group1, status=GroupHistoryStatus.REVIEWED
).exists()
assert GroupHistory.objects.filter(
group=group2, status=GroupHistoryStatus.REVIEWED
).exists()
assert not GroupInbox.objects.filter(group=group2).exists()
def test_set_resolved_inbox(self) -> None:
group1 = self.create_group()
group2 = self.create_group()
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id]}, status="resolved"
)
assert response.data["inbox"] is None
assert not GroupInbox.objects.filter(group=group1).exists()
assert not GroupInbox.objects.filter(group=group2).exists()
self.get_success_response(qs_params={"id": [group2.id]}, status="unresolved")
assert not GroupInbox.objects.filter(group=group1).exists()
assert not GroupInbox.objects.filter(group=group2).exists()
assert not GroupHistory.objects.filter(
group=group1, status=GroupHistoryStatus.UNRESOLVED
).exists()
assert GroupHistory.objects.filter(
group=group2, status=GroupHistoryStatus.REGRESSED
).exists()
def test_update_priority(self) -> None:
"""
Bulk-setting priority successfully changes the priority of the groups
and also creates a GroupHistory and Activity entry for each group.
"""
group1 = self.create_group(priority=PriorityLevel.HIGH.value)
group2 = self.create_group(priority=PriorityLevel.MEDIUM.value)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id]}, priority=PriorityLevel.LOW.to_str()
)
assert response.data["priority"] == PriorityLevel.LOW.to_str()
for group in (group1, group2):
assert Group.objects.get(id=group.id).priority == PriorityLevel.LOW.value
assert GroupHistory.objects.filter(
group=group, status=GroupHistoryStatus.PRIORITY_LOW
).exists()
assert Activity.objects.filter(
group=group, type=ActivityType.SET_PRIORITY.value, user_id=self.user.id
).exists()
def test_update_priority_no_change(self) -> None:
"""
When the priority is the same as the current priority, no changes are made
"""
group1 = self.create_group(priority=PriorityLevel.HIGH.value)
group2 = self.create_group(priority=PriorityLevel.MEDIUM.value)
self.login_as(user=self.user)
response = self.get_success_response(
qs_params={"id": [group1.id, group2.id]}, priority=PriorityLevel.MEDIUM.to_str()
)
assert response.data["priority"] == PriorityLevel.MEDIUM.to_str()
# First group should have medium priority and history/activity entries
assert Group.objects.get(id=group1.id).priority == PriorityLevel.MEDIUM.value
assert GroupHistory.objects.filter(
group=group1, status=GroupHistoryStatus.PRIORITY_MEDIUM
).exists()
assert Activity.objects.filter(
group=group1, type=ActivityType.SET_PRIORITY.value, user_id=self.user.id
).exists()
# Second group should still have medium priority and no history/activity entries
        assert Group.objects.get(id=group2.id).priority == PriorityLevel.MEDIUM.value
assert not GroupHistory.objects.filter(
group=group2,
).exists()
assert not Activity.objects.filter(
group=group2, type=ActivityType.SET_PRIORITY.value, user_id=self.user.id
).exists()
def test_cannot_update_metric_issue_priority(self) -> None:
"""
Users should be prohibited from manually updating the priority of metric issues.
"""
group = self.create_group(priority=PriorityLevel.HIGH.value, type=MetricIssue.type_id)
self.login_as(self.user)
response = self.get_error_response(
qs_params={"id": [group.id]}, priority=PriorityLevel.MEDIUM.to_str()
)
assert response.status_code == 400
assert response.data["detail"] == "Cannot manually set priority of one or more issues."
target: GroupUpdateTest

language: python | repo: PyCQA__pylint | path: tests/functional/ext/docstyle/docstyle_quotes.py | class_span: {"start": 141, "end": 825}
source:
class ____:
def method1(self): # [bad-docstring-quotes]
'''
Test Triple Single Quotes docstring
'''
def method2(self): # [bad-docstring-quotes]
"bad docstring 1"
def method3(self): # [bad-docstring-quotes]
'bad docstring 2'
def method4(self): # [bad-docstring-quotes]
' """bad docstring 3 '
@check_messages("bad-open-mode", "redundant-unittest-assert", "deprecated-module")
def method5(self):
"""Test OK 1 with decorators"""
def method6(self):
r"""Test OK 2 with raw string"""
def method7(self):
u"""Test OK 3 with unicode string"""
def function2():
"""Test Ok"""
target: FFFF

language: python | repo: docker__docker-py | path: docker/errors.py | class_span: {"start": 4843, "end": 5024}
source:
class ____(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return (f"context {self.name} already exists")
target: ContextAlreadyExists

language: python | repo: allegroai__clearml | path: clearml/backend_api/services/v2_13/queues.py | class_span: {"start": 5186, "end": 6761}
source:
class ____(NonStrictDataModel):
"""
:param task: Queued task ID
:type task: str
:param added: Time this entry was added to the queue
:type added: datetime.datetime
"""
_schema = {
"properties": {
"added": {
"description": "Time this entry was added to the queue",
"format": "date-time",
"type": ["string", "null"],
},
"task": {"description": "Queued task ID", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, task: Optional[str] = None, added: Optional[str] = None, **kwargs: Any) -> None:
super(Entry, self).__init__(**kwargs)
self.task = task
self.added = added
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("added")
def added(self) -> Optional[str]:
return self._property_added
@added.setter
def added(self, value: Optional[str]) -> None:
if value is None:
self._property_added = None
return
self.assert_isinstance(value, "added", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_added = value
target: Entry

language: python | repo: openai__openai-python | path: src/openai/pagination.py | class_span: {"start": 1112, "end": 1674}
source:
class ____(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
"""Note: no pagination actually occurs yet, this is for forwards-compatibility."""
data: List[_T]
object: str
@override
def _get_page_items(self) -> List[_T]:
data = self.data
if not data:
return []
return data
@override
def next_page_info(self) -> None:
"""
This page represents a response that isn't actually paginated at the API level
so there will never be a next page.
"""
return None
target: AsyncPage

language: python | repo: huggingface__transformers | path: src/transformers/models/nystromformer/modeling_nystromformer.py | class_span: {"start": 21497, "end": 24467}
source:
class ____(NystromformerPreTrainedModel):
_tied_weights_keys = {
"cls.predictions.decoder.weight": "nystromformer.embeddings.word_embeddings.weight",
"cls.predictions.decoder.bias": "cls.predictions.bias",
}
def __init__(self, config):
super().__init__(config)
self.nystromformer = NystromformerModel(config)
self.cls = NystromformerOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.nystromformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
target: NystromformerForMaskedLM

language: python | repo: facelessuser__pymdown-extensions | path: tests/test_extensions/test_highlight.py | class_span: {"start": 2094, "end": 2865}
source:
class ____(util.MdCase):
"""Test title cases."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'auto_title': True
}
}
def test_auto_tile(self):
"""Test auto title."""
self.check_markdown(
r'''
```pycon
>>> import test
```
''',
r'''
<div class="highlight"><span class="filename">Python Console Session</span><pre><span></span><code><span class="gp">>>> </span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
target: TestHighlightAutoTitle

language: python | repo: FactoryBoy__factory_boy | path: tests/test_mongoengine.py | class_span: {"start": 526, "end": 667}
source:
class ____(MongoEngineFactory):
class Meta:
model = Address
street = factory.Sequence(lambda n: 'street%d' % n)
target: AddressFactory

language: python | repo: getsentry__sentry | path: src/sentry/core/endpoints/scim/members.py | class_span: {"start": 16009, "end": 24119}
source:
class ____(SCIMEndpoint):
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PUBLIC,
}
permission_classes = (OrganizationSCIMMemberPermission,)
@extend_schema(
operation_id="List an Organization's SCIM Members",
parameters=[GlobalParams.ORG_ID_OR_SLUG, SCIMQueryParamSerializer],
responses={
200: inline_sentry_response_serializer(
"SCIMListResponseEnvelopeSCIMMemberIndexResponse", SCIMListMembersResponse
),
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=SCIMExamples.LIST_ORG_MEMBERS,
)
def get(self, request: Request, organization: Organization) -> Response:
"""
        Returns a paginated list of members bound to an organization with a SCIM Users GET Request.
"""
# note that SCIM doesn't care about changing results as they're queried
query_params = self.get_query_parameters(request)
queryset = OrganizationMember.objects.filter(
Q(invite_status=InviteStatus.APPROVED.value),
Q(user_is_active=True, user_id__isnull=False) | Q(user_id__isnull=True),
organization=organization,
).order_by("email", "id")
if query_params["filter"]:
filtered_users = user_service.get_many_by_email(
emails=[query_params["filter"]],
organization_id=organization.id,
is_verified=False,
)
queryset = queryset.filter(
Q(email__iexact=query_params["filter"])
| Q(user_id__in=[u.id for u in filtered_users])
) # not including secondary email vals (dups, etc.)
def data_fn(offset, limit):
return list(queryset[offset : offset + limit])
def on_results(results):
results = serialize(
results,
None,
_scim_member_serializer_with_expansion(organization),
)
return self.list_api_format(results, queryset.count(), query_params["start_index"])
return self.paginate(
request=request,
on_results=on_results,
paginator=GenericOffsetPaginator(data_fn=data_fn),
default_per_page=query_params["count"],
queryset=queryset,
cursor_cls=SCIMCursor,
)
@extend_schema(
operation_id="Provision a New Organization Member",
parameters=[GlobalParams.ORG_ID_OR_SLUG],
request=inline_serializer(
name="SCIMMemberProvision",
fields={
"userName": serializers.EmailField(
help_text="The SAML field used for email.",
required=True,
),
"sentryOrgRole": serializers.ChoiceField(
help_text="""The organization role of the member. If unspecified, this will be
set to the organization's default role. The options are:""",
choices=[role for role in ROLE_CHOICES if role[0] != "owner"],
required=False,
),
},
),
responses={
201: OrganizationMemberSCIMSerializer,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=SCIMExamples.PROVISION_NEW_MEMBER,
)
def post(self, request: Request, organization) -> Response:
"""
Create a new Organization Member via a SCIM Users POST Request.
Note that this API does not support setting secondary emails.
"""
update_role = False
scope = sentry_sdk.get_isolation_scope()
if "sentryOrgRole" in request.data and request.data["sentryOrgRole"]:
role = request.data["sentryOrgRole"].lower()
idp_role_restricted = True
update_role = True
else:
role = organization.default_role
idp_role_restricted = False
scope.set_tag("role_restricted", idp_role_restricted)
# Allow any role as long as it doesn't have `org:admin` permissions
allowed_roles = {role for role in roles.get_all() if not role.has_scope("org:admin")}
# Check for roles not found
# TODO: move this to the serializer verification
if role not in {role.id for role in allowed_roles}:
scope.set_tag("invalid_role_selection", True)
raise SCIMApiError(detail=SCIM_400_INVALID_ORGROLE)
scope.set_tag("invalid_role_selection", False)
serializer = OrganizationMemberRequestSerializer(
data={
"email": request.data.get("userName"),
"role": roles.get(role).id,
},
context={
"organization": organization,
"allowed_roles": allowed_roles,
"allow_existing_invite_request": True,
},
)
if not serializer.is_valid():
if "email" in serializer.errors and any(
("is already a member" in error) for error in serializer.errors["email"]
):
# we include conflict logic in the serializer, check to see if that was
# our error and if so, return a 409 so the scim IDP knows how to handle
raise SCIMApiError(detail=SCIM_409_USER_EXISTS, status_code=409)
if "role" in serializer.errors:
# TODO: Change this to an error pointing to a doc showing the workaround if they
# tried to provision an org admin
raise SCIMApiError(detail=SCIM_400_INVALID_ORGROLE)
raise SCIMApiError(detail=json.dumps(serializer.errors))
result = serializer.validated_data
with transaction.atomic(router.db_for_write(OrganizationMember)):
member_query = OrganizationMember.objects.filter(
organization=organization, email=result["email"], role=result["role"]
)
if member_query.exists():
member = member_query.get()
if member.token_expired:
member.regenerate_token()
member.save()
else:
member = OrganizationMember(
organization=organization,
email=result["email"],
role=result["role"],
inviter_id=request.user.id,
)
# TODO: are invite tokens needed for SAML orgs?
member.flags["idp:provisioned"] = True
member.flags["idp:role-restricted"] = idp_role_restricted
if settings.SENTRY_ENABLE_INVITES:
member.token = member.generate_token()
member.save()
self.create_audit_entry(
request=request,
organization_id=organization.id,
target_object=member.id,
data=member.get_audit_log_data(),
event=(
audit_log.get_event_id("MEMBER_INVITE")
if settings.SENTRY_ENABLE_INVITES
else audit_log.get_event_id("MEMBER_ADD")
),
)
if settings.SENTRY_ENABLE_INVITES and result.get("sendInvite"):
member.send_invite_email()
member_invited.send_robust(
member=member,
user=request.user,
sender=self,
referrer=request.data.get("referrer"),
)
metrics.incr(
"sentry.scim.member.provision",
tags={"organization": organization},
)
if update_role:
metrics.incr("sentry.scim.member.update_role", tags={"organization": organization})
context = serialize(
member,
serializer=_scim_member_serializer_with_expansion(organization),
)
return Response(context, status=201)
|
OrganizationSCIMMemberIndex
|
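The GET handler above follows the usual offset-paginator pattern: a data_fn closure that slices the queryset by (offset, limit). A library-agnostic sketch of that idea, with illustrative names (not Sentry's actual paginator API):
def make_data_fn(items):
    # 'items' stands in for the queryset; the real code slices a lazy Django QuerySet.
    def data_fn(offset, limit):
        return list(items[offset:offset + limit])
    return data_fn

data_fn = make_data_fn(range(100))
print(data_fn(0, 10))   # first page: 0..9
print(data_fn(10, 10))  # second page: 10..19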
python
|
PyCQA__pylint
|
doc/data/messages/n/no-member/bad.py
|
{
"start": 75,
"end": 158
}
|
class ____:
def meow(self):
print("Meow")
Cat().roar() # [no-member]
|
Cat
|
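For contrast, a minimal sketch of how this no-member warning is usually resolved (a hypothetical good.py counterpart; the fix simply calls a method the class actually defines):
class Cat:
    def meow(self):
        print("Meow")

Cat().meow()  # call an attribute that Cat actually defines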
python
|
FactoryBoy__factory_boy
|
factory/base.py
|
{
"start": 685,
"end": 2906
}
|
class ____(type):
"""Factory metaclass for handling ordered declarations."""
def __call__(cls, **kwargs):
"""Override the default Factory() syntax to call the default strategy.
Returns an instance of the associated class.
"""
if cls._meta.strategy == enums.BUILD_STRATEGY:
return cls.build(**kwargs)
elif cls._meta.strategy == enums.CREATE_STRATEGY:
return cls.create(**kwargs)
elif cls._meta.strategy == enums.STUB_STRATEGY:
return cls.stub(**kwargs)
else:
raise errors.UnknownStrategy('Unknown Meta.strategy: {}'.format(
cls._meta.strategy))
def __new__(mcs, class_name, bases, attrs):
"""Record attributes as a pattern for later instance construction.
This is called when a new Factory subclass is defined; it will collect
attribute declaration from the class definition.
Args:
class_name (str): the name of the class being created
bases (list of class): the parents of the class being created
attrs (str => obj dict): the attributes as defined in the class
definition
Returns:
A new class
"""
parent_factories = get_factory_bases(bases)
if parent_factories:
base_factory = parent_factories[0]
else:
base_factory = None
attrs_meta = attrs.pop('Meta', None)
attrs_params = attrs.pop('Params', None)
base_meta = resolve_attribute('_meta', bases)
options_class = resolve_attribute('_options_class', bases, FactoryOptions)
meta = options_class()
attrs['_meta'] = meta
new_class = super().__new__(
mcs, class_name, bases, attrs)
meta.contribute_to_class(
new_class,
meta=attrs_meta,
base_meta=base_meta,
base_factory=base_factory,
params=attrs_params,
)
return new_class
def __str__(cls):
if cls._meta.abstract:
return '<%s (abstract)>' % cls.__name__
else:
return f'<{cls.__name__} for {cls._meta.model}>'
|
FactoryMetaClass
|
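A self-contained sketch of the same metaclass dispatch idea, assuming nothing from factory_boy itself (class, attribute, and method names below are illustrative):
class StrategyMeta(type):
    # Mirror the pattern above: calling the class routes to a strategy method.
    def __call__(cls, **kwargs):
        strategy = getattr(cls, "strategy", "build")
        if strategy == "build":
            return cls.build(**kwargs)
        if strategy == "create":
            return cls.create(**kwargs)
        raise ValueError(f"Unknown strategy: {strategy}")

class UserFactory(metaclass=StrategyMeta):
    strategy = "build"

    @classmethod
    def build(cls, **kwargs):
        return {"saved": False, **kwargs}

    @classmethod
    def create(cls, **kwargs):
        return {"saved": True, **kwargs}

print(UserFactory(name="alice"))  # {'saved': False, 'name': 'alice'}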
python
|
spyder-ide__spyder
|
spyder/plugins/editor/panels/linenumber.py
|
{
"start": 601,
"end": 11927
}
|
class ____(Panel):
"""Line number area (on the left side of the text editor widget)"""
# --- Qt Overrides
# -----------------------------------------------------------------
def __init__(self):
Panel.__init__(self)
self.setMouseTracking(True)
self.scrollable = True
self.linenumbers_color = QColor(Qt.darkGray)
# Markers
self._markers_margin = True
# Icons
self.error_icon = ima.icon('error')
self.warning_icon = ima.icon('warning')
self.info_icon = ima.icon('information')
self.hint_icon = ima.icon('hint')
self.todo_icon = ima.icon('todo')
# Line number area management
self._margin = True
self._pressed = -1
self._released = -1
# This is a tuple composed of (number of digits, current width)
self._width_cache = None
# Cache line numbers
self._static_line_numbers = None
self._static_active_line = None
# Static text must be flushed when dpi changes (qt bug?)
self._static_text_dpi = None
def sizeHint(self):
"""Override Qt method."""
return QSize(self.compute_width(), 0)
def paintEvent(self, event):
"""Override Qt method.
Painting line number area
"""
painter = QPainter(self)
painter.fillRect(event.rect(), self.editor.sideareas_color)
font_height = self.editor.fontMetrics().height()
def draw_pixmap(xleft, ytop, pixmap):
# Scale pixmap height to device independent pixels
pixmap_height = pixmap.height() / pixmap.devicePixelRatio()
painter.drawPixmap(
xleft,
ceil(ytop + (font_height-pixmap_height) / 2),
pixmap
)
size = self.get_markers_margin() - 2
icon_size = QSize(size, size)
if self._margin:
font = self.editor.font()
fm = QFontMetricsF(font)
if (
fm.leading() == 0
and self.editor.lineWrapMode() == QTextOption.NoWrap
):
self.draw_linenumbers(painter)
else:
            # The font has leading or the text is being wrapped, so the fast
            # path can't be used and each line must be drawn independently.
self.draw_linenumbers_slow(painter)
self.paint_cell(painter)
for top, line_number, block in self.editor.visible_blocks:
data = block.userData()
if self._markers_margin and data:
if data.code_analysis:
errors = 0
warnings = 0
infos = 0
hints = 0
for _, _, sev, _ in data.code_analysis:
errors += sev == DiagnosticSeverity.ERROR
warnings += sev == DiagnosticSeverity.WARNING
infos += sev == DiagnosticSeverity.INFORMATION
hints += sev == DiagnosticSeverity.HINT
if errors:
draw_pixmap(1, top, self.error_icon.pixmap(icon_size))
elif warnings:
draw_pixmap(
1, top, self.warning_icon.pixmap(icon_size))
elif infos:
draw_pixmap(1, top, self.info_icon.pixmap(icon_size))
elif hints:
draw_pixmap(1, top, self.hint_icon.pixmap(icon_size))
if data.todo:
draw_pixmap(1, top, self.todo_icon.pixmap(icon_size))
def draw_linenumbers(self, painter):
"""Draw line numbers."""
if len(self.editor.visible_blocks) == 0:
return
active_line_number = self.editor.textCursor().blockNumber() + 1
number_digits = self.compute_width_digits()
width = self.width()
visible_lines = [ln for _, ln, _ in self.editor.visible_blocks]
try:
idx = visible_lines.index(active_line_number)
active_top = self.editor.visible_blocks[idx][0]
except ValueError:
active_top = None
# Right align
line_numbers = [f"{ln:{number_digits}d}" for ln in visible_lines]
# Use non-breaking spaces and <br> returns
lines = "<br>".join(line_numbers).replace(" ", " ")
        # This is needed so that the font size of the line numbers matches
        # the text font size when zooming.
# See spyder-ide/spyder#2296 and spyder-ide/spyder#4811.
font = self.editor.font()
font.setWeight(QFont.Weight.Normal)
painter.setFont(font)
painter.setPen(self.linenumbers_color)
if self.logicalDpiX() != self._static_text_dpi:
self._static_text_dpi = self.logicalDpiX()
self._static_line_numbers = None
self._static_active_line = None
if self._static_line_numbers:
if lines != self._static_line_numbers.text():
self._static_line_numbers.setText(lines)
else:
self._static_line_numbers = QStaticText(lines)
self._static_line_numbers.prepare(font=font)
top = self.editor.visible_blocks[0][0]
left = width - self._static_line_numbers.size().width()
painter.drawStaticText(
QPointF(left, top), self._static_line_numbers)
if active_top is not None:
font.setWeight(QFont.Weight.Bold)
painter.setFont(font)
painter.setPen(self.editor.normal_color)
text = str(active_line_number)
if self._static_active_line:
if text != self._static_active_line.text():
self._static_active_line.setText(text)
else:
self._static_active_line = QStaticText(text)
self._static_active_line.setTextFormat(Qt.PlainText)
self._static_active_line.prepare(font=font)
size = self._static_active_line.size()
left = width - size.width()
# Hide non-bold number
painter.fillRect(
int(left), active_top, int(size.width()), int(size.height()),
self.editor.sideareas_color
)
# Paint bold number
painter.drawStaticText(
QPointF(left, active_top), self._static_active_line)
def draw_linenumbers_slow(self, painter):
"""
Slower way (2x) to draw line numbers.
This is necessary for some fonts and when the wrap lines option
is active.
"""
font = self.editor.font()
font_height = self.editor.fontMetrics().height()
active_block = self.editor.textCursor().block()
active_line_number = active_block.blockNumber() + 1
for top, line_number, block in self.editor.visible_blocks:
if self._margin:
if line_number == active_line_number:
font.setWeight(QFont.Weight.Bold)
painter.setFont(font)
painter.setPen(self.editor.normal_color)
else:
font.setWeight(QFont.Weight.Normal)
painter.setFont(font)
painter.setPen(self.linenumbers_color)
painter.drawText(0, top, self.width(),
font_height,
int(Qt.AlignRight | Qt.AlignBottom),
str(line_number))
def leaveEvent(self, event):
"""Override Qt method."""
self.editor.hide_tooltip()
def mouseMoveEvent(self, event):
"""Override Qt method.
        Show code analysis; if the left button is pressed, select lines.
"""
line_number = self.editor.get_linenumber_from_mouse_event(event)
block = self.editor.document().findBlockByNumber(line_number - 1)
data = block.userData()
# This disables messages if there is an active drag/selection operation
check = self._released == -1
if check and data:
if data.code_analysis:
self.editor.show_code_analysis_results(line_number, data)
elif data.todo:
self.editor.show_todo(line_number, data)
else:
self.editor.hide_tooltip()
if event.buttons() == Qt.LeftButton:
self._released = line_number
self.editor.select_lines(self._pressed, self._released)
def mousePressEvent(self, event):
"""Override Qt method
Select line, and starts selection
"""
line_number = self.editor.get_linenumber_from_mouse_event(event)
self._pressed = line_number
self._released = line_number
self.editor.select_lines(self._pressed,
self._released)
def mouseReleaseEvent(self, event):
"""Override Qt method."""
self._released = -1
self._pressed = -1
def wheelEvent(self, event):
"""Override Qt method."""
self.editor.wheelEvent(event)
# --- Other methods
# -----------------------------------------------------------------
def compute_width_digits(self):
"""Compute and return line number area width in digits."""
number_lines = self.editor.blockCount()
return max(1, math.ceil(math.log10(
number_lines + 1)))
def compute_width(self):
"""Compute and return line number area width."""
if not self._enabled:
return 0
number_digits = self.compute_width_digits()
if (self._width_cache is not None and
self._width_cache[0] == number_digits):
return self._width_cache[1]
if self._margin:
margin = 3 + self.editor.fontMetrics().width('9' * number_digits)
else:
margin = 0
width = margin + self.get_markers_margin()
self._width_cache = (number_digits, width)
return width
def _clear_width_cache(self):
"""Clear width cache."""
self._width_cache = None
def on_install(self, editor):
"""Clear width cache on font change."""
super().on_install(editor)
editor.sig_font_changed.connect(self._clear_width_cache)
def on_uninstall(self):
"""Disconnect signal."""
self.editor.sig_font_changed.disconnect(self._clear_width_cache)
super().on_uninstall()
def get_markers_margin(self):
"""Get marker margins."""
if self._markers_margin:
font_height = self.editor.fontMetrics().height() + 2
return font_height
else:
return 0
def setup_margins(self, linenumbers=True, markers=True):
"""
Setup margin settings
(except font, now set in editor.set_font)
"""
self._width_cache = None
self._margin = linenumbers
self._markers_margin = markers
self.set_enabled(linenumbers or markers)
def set_enabled(self, state):
self._enabled = state
self.setVisible(state)
def get_width(self):
"""Return current line number area width"""
return self.contentsRect().width()
|
LineNumberArea
|
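The gutter width computation above reduces to a digit count; a tiny standalone sketch of that formula (function name is illustrative):
import math

def gutter_digits(line_count: int) -> int:
    # Digits needed to display the largest line number (at least one column).
    return max(1, math.ceil(math.log10(line_count + 1)))

assert gutter_digits(9) == 1
assert gutter_digits(10) == 2
assert gutter_digits(999) == 3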
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/plotting.py
|
{
"start": 473,
"end": 959
}
|
class ____:
params = [["line", "bar", "area", "barh", "hist", "kde", "pie"]]
param_names = ["kind"]
def setup(self, kind):
if kind in ["bar", "barh", "pie"]:
n = 100
elif kind in ["kde"]:
n = 10000
else:
n = 1000000
self.s = Series(np.random.randn(n))
if kind in ["area", "pie"]:
self.s = self.s.abs()
def time_series_plot(self, kind):
self.s.plot(kind=kind)
|
SeriesPlotting
|
python
|
google__jax
|
jaxlib/xla_client.py
|
{
"start": 11712,
"end": 12103
}
|
class ____(enum.IntFlag):
DEFAULT = 0
# Calls to custom call are safe to trace into the command buffer. It means
# that calls to custom call always launch exactly the same device operations
# (can depend on attribute values) that can be captured and then replayed.
#
# Supported only for custom calls implemented with XLA FFI.
COMMAND_BUFFER_COMPATIBLE = 1
|
CustomCallTargetTraits
|
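A small sketch of how an enum.IntFlag like the one above is typically combined and tested (illustrative only, not jaxlib's API surface):
import enum

class Traits(enum.IntFlag):
    DEFAULT = 0
    COMMAND_BUFFER_COMPATIBLE = 1

flags = Traits.COMMAND_BUFFER_COMPATIBLE         # combine additional bits with |
assert flags & Traits.COMMAND_BUFFER_COMPATIBLE  # membership check via bitwise AND
assert int(flags) == 1                           # IntFlag members interoperate with ints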
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/matchClass5.py
|
{
"start": 221,
"end": 1166
}
|
class ____(int): ...
def func1(subj: A | B):
match subj:
# This should generate an error because A accepts only
# one positional pattern.
case A(1, 2):
pass
case A(1):
pass
case A():
pass
case B(1, 2):
pass
# This should generate an error because B accepts only
# two positional patterns.
case B(1, 2, 3):
pass
# This should generate an error because B accepts only
# two positional patterns.
case C(1, 2, 3):
pass
case D(1):
pass
# This should generate an error because D accepts only
# one positional pattern.
case D(1, 2):
pass
case int(1):
pass
# This should generate an error because int accepts only
# one positional pattern.
case int(1, 2):
pass
|
D
|
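The number of positional sub-patterns a class accepts in a match statement is controlled by __match_args__; a minimal sketch (Python 3.10+, names are illustrative):
class Point:
    __match_args__ = ("x", "y")  # permits up to two positional sub-patterns

    def __init__(self, x, y):
        self.x, self.y = x, y

match Point(1, 2):
    case Point(1, 2):       # accepted: two positional patterns
        print("matched")
    # case Point(1, 2, 3):  # would be an error: only two positional patterns allowed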
python
|
lazyprogrammer__machine_learning_examples
|
rnn_class/tf_parity.py
|
{
"start": 1417,
"end": 4474
}
|
class ____:
def __init__(self, M):
self.M = M # hidden layer size
def fit(self, X, Y, batch_sz=20, learning_rate=0.1, mu=0.9, activation=tf.nn.sigmoid, epochs=100, show_fig=False):
N, T, D = X.shape # X is of size N x T(n) x D
K = len(set(Y.flatten()))
M = self.M
self.f = activation
# initial weights
# note: Wx, Wh, bh are all part of the RNN unit and will be created
# by BasicRNNCell
Wo = init_weight(M, K).astype(np.float32)
bo = np.zeros(K, dtype=np.float32)
# make them tf variables
self.Wo = tf.Variable(Wo)
self.bo = tf.Variable(bo)
# tf Graph input
tfX = tf.placeholder(tf.float32, shape=(batch_sz, T, D), name='inputs')
tfY = tf.placeholder(tf.int64, shape=(batch_sz, T), name='targets')
# turn tfX into a sequence, e.g. T tensors all of size (batch_sz, D)
sequenceX = x2sequence(tfX, T, D, batch_sz)
# create the simple rnn unit
rnn_unit = BasicRNNCell(num_units=self.M, activation=self.f)
# Get rnn cell output
# outputs, states = rnn_module.rnn(rnn_unit, sequenceX, dtype=tf.float32)
outputs, states = get_rnn_output(rnn_unit, sequenceX, dtype=tf.float32)
# outputs are now of size (T, batch_sz, M)
# so make it (batch_sz, T, M)
outputs = tf.transpose(outputs, (1, 0, 2))
outputs = tf.reshape(outputs, (T*batch_sz, M))
# Linear activation, using rnn inner loop last output
logits = tf.matmul(outputs, self.Wo) + self.bo
predict_op = tf.argmax(logits, 1)
targets = tf.reshape(tfY, (T*batch_sz,))
cost_op = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=targets
)
)
train_op = tf.train.MomentumOptimizer(learning_rate, momentum=mu).minimize(cost_op)
costs = []
n_batches = N // batch_sz
init = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init)
for i in range(epochs):
X, Y = shuffle(X, Y)
n_correct = 0
cost = 0
for j in range(n_batches):
Xbatch = X[j*batch_sz:(j+1)*batch_sz]
Ybatch = Y[j*batch_sz:(j+1)*batch_sz]
_, c, p = session.run([train_op, cost_op, predict_op], feed_dict={tfX: Xbatch, tfY: Ybatch})
cost += c
for b in range(batch_sz):
idx = (b + 1)*T - 1
n_correct += (p[idx] == Ybatch[b][-1])
if i % 10 == 0:
print("i:", i, "cost:", cost, "classification rate:", (float(n_correct)/N))
if n_correct == N:
print("i:", i, "cost:", cost, "classification rate:", (float(n_correct)/N))
break
costs.append(cost)
if show_fig:
plt.plot(costs)
plt.show()
def parity(B=12, learning_rate=1., epochs=1000):
X, Y = all_parity_pairs_with_sequence_labels(B)
rnn = SimpleRNN(4)
rnn.fit(X, Y,
batch_sz=len(Y),
learning_rate=learning_rate,
epochs=epochs,
activation=tf.nn.sigmoid,
show_fig=False
)
if __name__ == '__main__':
parity()
|
SimpleRNN
|
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/utils/console.py
|
{
"start": 2284,
"end": 2580
}
|
class ____(Enum):
SUCCESS = "success"
INFO = "info"
WARNING = "warning"
ERROR = "error"
SPECIAL = "special"
def message_type_from_return_code(return_code: int) -> MessageType:
if return_code == 0:
return MessageType.SUCCESS
return MessageType.ERROR
|
MessageType
|
python
|
davidhalter__jedi
|
jedi/inference/names.py
|
{
"start": 17859,
"end": 18943
}
|
class ____(_ActualTreeParamName):
@plugin_manager.decorate(name='goto_anonymous_param')
def goto(self):
return super().goto()
@plugin_manager.decorate(name='infer_anonymous_param')
def infer(self):
values = super().infer()
if values:
return values
from jedi.inference.dynamic_params import dynamic_param_lookup
param = self._get_param_node()
values = dynamic_param_lookup(self.function_value, param.position_index)
if values:
return values
if param.star_count == 1:
from jedi.inference.value.iterable import FakeTuple
value = FakeTuple(self.function_value.inference_state, [])
elif param.star_count == 2:
from jedi.inference.value.iterable import FakeDict
value = FakeDict(self.function_value.inference_state, {})
elif param.default is None:
return NO_VALUES
else:
return self.function_value.parent_context.infer_node(param.default)
return ValueSet({value})
|
AnonymousParamName
|
python
|
django__django
|
tests/known_related_objects/models.py
|
{
"start": 147,
"end": 224
}
|
class ____(models.Model):
name = models.CharField(max_length=30)
|
Tournament
|
python
|
python-attrs__attrs
|
tests/test_functional.py
|
{
"start": 700,
"end": 798
}
|
class ____:
x = attr.ib(default=foo)
y = attr.ib(default=attr.Factory(list))
@attr.s
|
C2Slots
|
python
|
huggingface__transformers
|
src/transformers/models/rt_detr/modeling_rt_detr.py
|
{
"start": 51548,
"end": 60847
}
|
class ____(nn.Module):
"""
    Hybrid encoder consisting of a projection layer, a set of `RTDetrEncoder`, a top-down Feature Pyramid Network
(FPN) and a bottom-up Path Aggregation Network (PAN). More details on the paper: https://huggingface.co/papers/2304.08069
Args:
config: RTDetrConfig
"""
def __init__(self, config: RTDetrConfig):
super().__init__()
self.config = config
self.in_channels = config.encoder_in_channels
self.feat_strides = config.feat_strides
self.encoder_hidden_dim = config.encoder_hidden_dim
self.encode_proj_layers = config.encode_proj_layers
self.positional_encoding_temperature = config.positional_encoding_temperature
self.eval_size = config.eval_size
self.out_channels = [self.encoder_hidden_dim for _ in self.in_channels]
self.out_strides = self.feat_strides
self.num_fpn_stages = len(self.in_channels) - 1
self.num_pan_stages = len(self.in_channels) - 1
activation = config.activation_function
# encoder transformer
self.encoder = nn.ModuleList([RTDetrEncoder(config) for _ in range(len(self.encode_proj_layers))])
# top-down FPN
self.lateral_convs = nn.ModuleList()
self.fpn_blocks = nn.ModuleList()
for _ in range(self.num_fpn_stages):
lateral_conv = RTDetrConvNormLayer(
config,
in_channels=self.encoder_hidden_dim,
out_channels=self.encoder_hidden_dim,
kernel_size=1,
stride=1,
activation=activation,
)
fpn_block = RTDetrCSPRepLayer(config)
self.lateral_convs.append(lateral_conv)
self.fpn_blocks.append(fpn_block)
# bottom-up PAN
self.downsample_convs = nn.ModuleList()
self.pan_blocks = nn.ModuleList()
for _ in range(self.num_pan_stages):
downsample_conv = RTDetrConvNormLayer(
config,
in_channels=self.encoder_hidden_dim,
out_channels=self.encoder_hidden_dim,
kernel_size=3,
stride=2,
activation=activation,
)
pan_block = RTDetrCSPRepLayer(config)
self.downsample_convs.append(downsample_conv)
self.pan_blocks.append(pan_block)
@staticmethod
def build_2d_sincos_position_embedding(
width, height, embed_dim=256, temperature=10000.0, device="cpu", dtype=torch.float32
):
grid_w = torch.arange(torch_int(width), device=device).to(dtype)
grid_h = torch.arange(torch_int(height), device=device).to(dtype)
grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="xy")
if embed_dim % 4 != 0:
raise ValueError("Embed dimension must be divisible by 4 for 2D sin-cos position embedding")
pos_dim = embed_dim // 4
omega = torch.arange(pos_dim, device=device).to(dtype) / pos_dim
omega = 1.0 / (temperature**omega)
out_w = grid_w.flatten()[..., None] @ omega[None]
out_h = grid_h.flatten()[..., None] @ omega[None]
return torch.concat([out_h.sin(), out_h.cos(), out_w.sin(), out_w.cos()], dim=1)[None, :, :]
def forward(
self,
inputs_embeds=None,
attention_mask=None,
position_embeddings=None,
spatial_shapes=None,
level_start_index=None,
valid_ratios=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# encoder
if self.config.encoder_layers > 0:
for i, enc_ind in enumerate(self.encode_proj_layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states[enc_ind],)
height, width = hidden_states[enc_ind].shape[2:]
# flatten [batch, channel, height, width] to [batch, height*width, channel]
src_flatten = hidden_states[enc_ind].flatten(2).permute(0, 2, 1)
if self.training or self.eval_size is None:
pos_embed = self.build_2d_sincos_position_embedding(
width,
height,
self.encoder_hidden_dim,
self.positional_encoding_temperature,
device=src_flatten.device,
dtype=src_flatten.dtype,
)
else:
pos_embed = None
layer_outputs = self.encoder[i](
src_flatten,
pos_embed=pos_embed,
output_attentions=output_attentions,
)
hidden_states[enc_ind] = (
layer_outputs[0].permute(0, 2, 1).reshape(-1, self.encoder_hidden_dim, height, width).contiguous()
)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states[enc_ind],)
# top-down FPN
fpn_feature_maps = [hidden_states[-1]]
for idx, (lateral_conv, fpn_block) in enumerate(zip(self.lateral_convs, self.fpn_blocks)):
backbone_feature_map = hidden_states[self.num_fpn_stages - idx - 1]
top_fpn_feature_map = fpn_feature_maps[-1]
# apply lateral block
top_fpn_feature_map = lateral_conv(top_fpn_feature_map)
fpn_feature_maps[-1] = top_fpn_feature_map
# apply fpn block
top_fpn_feature_map = F.interpolate(top_fpn_feature_map, scale_factor=2.0, mode="nearest")
fused_feature_map = torch.concat([top_fpn_feature_map, backbone_feature_map], dim=1)
new_fpn_feature_map = fpn_block(fused_feature_map)
fpn_feature_maps.append(new_fpn_feature_map)
fpn_feature_maps.reverse()
# bottom-up PAN
pan_feature_maps = [fpn_feature_maps[0]]
for idx, (downsample_conv, pan_block) in enumerate(zip(self.downsample_convs, self.pan_blocks)):
top_pan_feature_map = pan_feature_maps[-1]
fpn_feature_map = fpn_feature_maps[idx + 1]
downsampled_feature_map = downsample_conv(top_pan_feature_map)
fused_feature_map = torch.concat([downsampled_feature_map, fpn_feature_map], dim=1)
new_pan_feature_map = pan_block(fused_feature_map)
pan_feature_maps.append(new_pan_feature_map)
if not return_dict:
return tuple(v for v in [pan_feature_maps, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=pan_feature_maps, hidden_states=encoder_states, attentions=all_attentions
)
|
RTDetrHybridEncoder
|
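A standalone sketch of the 2D sin-cos position embedding used by the encoder above (the general technique, not the exact RT-DETR implementation; assumes PyTorch is installed):
import torch

def sincos_2d(width: int, height: int, embed_dim: int = 256, temperature: float = 10000.0):
    # Per-axis frequencies, then sin/cos of the h and w grids, concatenated on the channel dim.
    assert embed_dim % 4 == 0, "embed_dim must be divisible by 4"
    grid_w = torch.arange(width, dtype=torch.float32)
    grid_h = torch.arange(height, dtype=torch.float32)
    grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="xy")
    pos_dim = embed_dim // 4
    omega = 1.0 / (temperature ** (torch.arange(pos_dim, dtype=torch.float32) / pos_dim))
    out_w = grid_w.flatten()[:, None] * omega[None, :]
    out_h = grid_h.flatten()[:, None] * omega[None, :]
    return torch.cat([out_h.sin(), out_h.cos(), out_w.sin(), out_w.cos()], dim=1)[None]

print(sincos_2d(4, 4, embed_dim=8).shape)  # torch.Size([1, 16, 8])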
python
|
getsentry__sentry
|
tests/sentry/seer/endpoints/test_organization_trace_summary.py
|
{
"start": 417,
"end": 5317
}
|
class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.org = self.create_organization(owner=self.user)
self.login_as(user=self.user)
self.trace_id = "trace123"
self.mock_trace_tree = [
SerializedSpan(
description="http.request",
name="GET *",
event_id="span1",
event_type="span",
project_id=1,
project_slug="test-project",
start_timestamp=datetime.datetime(2023, 1, 1, 0, 0, 0),
transaction="test_transaction",
children=[],
errors=[],
occurrences=[],
duration=100.0,
end_timestamp=datetime.datetime(2023, 1, 1, 0, 0, 1),
measurements={},
op="http.request",
parent_span_id=None,
profile_id="",
profiler_id="",
sdk_name="test_sdk",
is_transaction=True,
transaction_id="1" * 32,
),
SerializedSpan(
description="db.query",
name="SELECT users",
event_id="span2",
event_type="span",
project_id=1,
project_slug="test-project",
start_timestamp=datetime.datetime(2023, 1, 1, 0, 0, 0),
transaction="test_transaction",
children=[],
errors=[],
occurrences=[],
duration=50.0,
end_timestamp=datetime.datetime(2023, 1, 1, 0, 0, 1),
measurements={},
op="db.query",
parent_span_id=None,
profile_id="",
profiler_id="",
sdk_name="test_sdk",
is_transaction=False,
transaction_id="1" * 32,
),
]
self.mock_summary_response = {
"trace_id": self.trace_id,
"summary": "Test summary of the trace",
"key_observations": "Test key observations of the trace",
"performance_characteristics": "Test performance characteristics of the trace",
"suggested_investigations": "Test suggested investigations of the trace",
}
self.url = self._get_url()
def _get_url(self) -> str:
return f"/api/0/organizations/{self.org.slug}/trace-summary/"
@patch("sentry.seer.endpoints.organization_trace_summary.get_trace_summary")
@patch("sentry.seer.endpoints.organization_trace_summary.OrganizationTraceEndpoint")
def test_endpoint_calls_get_trace_summary(
self, mock_trace_endpoint_class, mock_get_trace_summary
):
mock_trace_endpoint_class.return_value.query_trace_data.return_value = self.mock_trace_tree
mock_get_trace_summary.return_value = (self.mock_summary_response, 200)
response = self.client.post(
self.url,
data={"traceSlug": self.trace_id},
format="json",
)
assert response.status_code == 200
assert response.data == self.mock_summary_response
mock_trace_endpoint_class.assert_called_once()
mock_get_trace_summary.assert_called_once_with(
traceSlug=self.trace_id,
traceTree=self.mock_trace_tree,
organization=self.org,
user=ANY,
onlyTransaction=False,
)
def test_endpoint_without_trace_slug(self) -> None:
response = self.client.post(self.url, format="json")
assert response.status_code == 400
assert response.data == {"detail": "Missing traceSlug parameter"}
@patch("sentry.seer.endpoints.organization_trace_summary.OrganizationTraceEndpoint")
def test_endpoint_with_error_response(self, mock_trace_endpoint_class: MagicMock) -> None:
mock_trace_endpoint_class.return_value.query_trace_data.side_effect = Exception(
"Test exception"
)
response = self.client.post(self.url, data={"traceSlug": self.trace_id}, format="json")
assert response.status_code == 400
assert response.data == {"detail": "Error fetching trace"}
@patch("sentry.seer.endpoints.organization_trace_summary.OrganizationTraceEndpoint")
def test_endpoint_with_missing_trace_tree(
self, mock_organization_trace_endpoint: MagicMock
) -> None:
mock_organization_trace_endpoint.return_value.get_snuba_params.return_value = {}
mock_organization_trace_endpoint.return_value.query_trace_data.return_value = []
response = self.client.post(self.url, data={"traceSlug": self.trace_id}, format="json")
assert response.status_code == 400
assert response.data == {"detail": "Missing trace_tree data"}
|
OrganizationTraceSummaryEndpointTest
|
python
|
huggingface__transformers
|
src/transformers/integrations/bitsandbytes.py
|
{
"start": 3300,
"end": 4432
}
|
class ____(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, list[torch.Tensor]],
model: torch.nn.Module | None = None,
full_layer_name: str | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
value = list(input_dict.values())[0]
value = value[0] if isinstance(value, list) else value
module, _ = get_module_from_name(model, full_layer_name)
# Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, Conv1D):
value = value.T
value_device = value.device
kwargs = model.get_parameter_or_buffer(full_layer_name).__dict__
kwargs.pop("SCB", None)
new_value = bnb.nn.Int8Params(value.to("cpu"), requires_grad=False, **kwargs).to(value_device)
return {full_layer_name: new_value}
|
Bnb8bitQuantize
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance2.py
|
{
"start": 139,
"end": 277
}
|
class ____:
def get_value(self) -> int:
if isinstance(self, ChildB):
return self.calculate()
return 7
|
ClassA
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/resource_variable_ops.py
|
{
"start": 64496,
"end": 92050
}
|
class ____(BaseResourceVariable, composite_tensor.CompositeTensor):
"""Variable based on resource handles.
See the [Variables How To](https://tensorflow.org/guide/variables)
for a high level overview.
A `ResourceVariable` allows you to maintain state across subsequent calls to
session.run.
The `ResourceVariable` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
Just like any `Tensor`, variables created with
`tf.Variable(use_resource=True)` can be used as inputs for other Ops in the
graph. Additionally, all the operators overloaded for the `Tensor` class are
carried over to variables, so you can also add nodes to the graph by just
doing arithmetic on variables.
Unlike ref-based variable, a ResourceVariable has well-defined semantics. Each
usage of a ResourceVariable in a TensorFlow graph adds a read_value operation
to the graph. The Tensors returned by a read_value operation are guaranteed to
see all modifications to the value of the variable which happen in any
    operation on which the read_value depends (either directly, indirectly, or
via a control dependency) and guaranteed to not see any modification to the
value of the variable from operations that depend on the read_value operation.
Updates from operations that have no dependency relationship to the read_value
operation might or might not be visible to read_value.
For example, if there is more than one assignment to a ResourceVariable in
a single session.run call there is a well-defined value for each operation
which uses the variable's value if the assignments and the read are connected
by edges in the graph. Consider the following example, in which two writes
can cause tf.Variable and tf.ResourceVariable to behave differently:
```python
a = tf.Variable(1.0, use_resource=True)
a.initializer.run()
assign = a.assign(2.0)
with tf.control_dependencies([assign]):
b = a.read_value()
with tf.control_dependencies([b]):
other_assign = a.assign(3.0)
with tf.control_dependencies([other_assign]):
# Will print 2.0 because the value was read before other_assign ran. If
# `a` was a tf.Variable instead, 2.0 or 3.0 could be printed.
tf.compat.v1.Print(b, [b]).eval()
```
"""
def __init__(
self, # pylint: disable=super-init-not-called
initial_value=None,
trainable=None,
collections=None,
validate_shape=True, # pylint: disable=unused-argument
caching_device=None,
name=None,
dtype=None,
variable_def=None,
import_scope=None,
constraint=None,
distribute_strategy=None,
synchronization=None,
aggregation=None,
shape=None,
handle=None,
experimental_enable_variable_lifting=None,
):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. Can also be a callable with
no argument that returns the initial value when called. (Note that
initializer functions from init_ops.py must first be bound to a shape
before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
Defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type. If None,
either the datatype will be kept (if initial_value is a Tensor) or
float32 will be used (if it is a Python object convertible to a Tensor).
variable_def: `VariableDef` protocol buffer. If not None, recreates the
`ResourceVariable` object with its contents. `variable_def` and other
arguments (except for import_scope) are mutually exclusive.
import_scope: Optional `string`. Name scope to add to the
ResourceVariable. Only used when `variable_def` is provided.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
distribute_strategy: The tf.distribute.Strategy this variable is being
created inside of.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
shape: (optional) The shape of this variable. If None, the shape of
`initial_value` will be used. When setting this argument to
`tf.TensorShape(None)` (representing an unspecified shape), the variable
can be assigned with values of different shapes.
handle: (optional) The handle of a `tf.Variable`. If provided, only
`trainable`, `shape`, `dtype`, and `handle` will be used to construct
this `tf.Variable`.
experimental_enable_variable_lifting: Whether to lift the variable out if
it's in a `tf.function`. Default is `True`. When this argument
is `True`, variable creation will follow the behavior and
restrictions described
[here](https://www.tensorflow.org/guide/function#creating_tfvariables).
If this argument is `False`, that description doesn't apply,
and you can freely create and use the variable in the
`tf.function`, as if it's a "mutable `tf.Tensor`". You can't
return the variable though.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
@compatibility(eager)
When Eager Execution is enabled, the default for the `collections` argument
is `None`, which signifies that this `Variable` will not be added to any
collections.
@end_compatibility
"""
if variable_def:
if initial_value is not None:
raise ValueError(f"The variable_def and initial_value args to "
f"`tf.Variable` are mutually exclusive, but got both: "
f"variable_def={variable_def},\n"
f"initial_value={initial_value}")
if context.executing_eagerly():
raise ValueError(f"Creating a `tf.Variable` with a `variable_def` arg "
f"is not supported when eager execution is enabled. "
f"Got: variable_def={variable_def}")
self._init_from_proto(
variable_def,
import_scope=import_scope,
validate_shape=validate_shape)
elif handle is not None:
self._init_from_handle(trainable=trainable,
shape=shape,
dtype=dtype,
handle=handle)
else:
self._init_from_args(
initial_value=initial_value,
trainable=trainable,
collections=collections,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation,
shape=shape,
distribute_strategy=distribute_strategy,
validate_shape=validate_shape,
experimental_enable_variable_lifting=experimental_enable_variable_lifting,
)
# CompositeTensor method
@property
def _type_spec(self):
return VariableSpec.from_value(self)
# CompositeTensor method
def _shape_invariant_to_type_spec(self, shape):
return VariableSpec(shape, self.dtype, self.trainable)
# CompositeTensorGradient protocol
__composite_gradient__ = ResourceVariableGradient()
def _init_from_args(
self,
initial_value=None,
trainable=None,
collections=None,
caching_device=None,
name=None,
dtype=None,
constraint=None,
synchronization=None,
aggregation=None,
distribute_strategy=None,
shape=None,
validate_shape=True,
experimental_enable_variable_lifting=None,
):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound to
a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
Defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type. If None,
either the datatype will be kept (if initial_value is a Tensor) or
float32 will be used (if it is a Python object convertible to a Tensor).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
distribute_strategy: DistributionStrategy under which this variable was
created.
shape: (optional) The shape of this variable. If None, the shape of
`initial_value` will be used. When setting this argument to
`tf.TensorShape(None)` (representing an unspecified shape), the variable
can be assigned with values of different shapes.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
experimental_enable_variable_lifting: Whether to lift the variable out if
it's in a `tf.function`. Default is `True`. When this argument
is `True`, variable creation will follow the behavior and
restrictions described
[here](https://www.tensorflow.org/guide/function#creating_tfvariables).
If this argument is `False`, that description doesn't apply,
and you can freely create and use the variable in the
`tf.function`, as if it's a "mutable `tf.Tensor`". You can't
return the variable though.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
@compatibility(eager)
When Eager Execution is enabled, variables are never added to collections.
It is not implicitly added to the `GLOBAL_VARIABLES` or
`TRAINABLE_VARIABLES` collections, and the `collections` argument is
ignored.
@end_compatibility
"""
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if experimental_enable_variable_lifting is None:
experimental_enable_variable_lifting = True
if initial_value is None:
raise ValueError("The `initial_value` arg to `tf.Variable` must "
"be specified except when you are not providing a "
"`variable_def`. You provided neither.")
init_from_fn = callable(initial_value)
if isinstance(initial_value, tensor_module.Tensor) and hasattr(
initial_value, "graph") and initial_value.graph.building_function:
raise ValueError(f"Argument `initial_value` ({initial_value}) could not "
"be lifted out of a `tf.function`. "
f"(Tried to create variable with name='{name}'). "
"To avoid this error, when constructing `tf.Variable`s "
"inside of `tf.function` you can create the "
"`initial_value` tensor in a "
"`tf.init_scope` or pass a callable `initial_value` "
"(e.g., `tf.Variable(lambda : "
"tf.truncated_normal([10, 40]))`). "
"Please file a feature request if this "
"restriction inconveniences you.")
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
f"collections argument to Variable constructor must be a list, "
f"tuple, or set. Got {collections} of type {type(collections)}")
if constraint is not None and not callable(constraint):
raise ValueError(f"Argument `constraint` must be None or a callable. "
f"a callable. Got a {type(constraint)}: {constraint}")
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.init_scope():
self._in_graph_mode = not context.executing_eagerly()
if experimental_enable_variable_lifting:
maybe_init_scope = ops.init_scope
else:
maybe_init_scope = contextlib.nullcontext
with maybe_init_scope():
with ops.name_scope(
name,
"Variable", [] if init_from_fn else [initial_value],
skip_on_eager=False) as name:
# pylint: disable=protected-access
handle_name = ops.name_from_scope_name(name)
if self._in_graph_mode:
shared_name = handle_name
unique_id = shared_name
else:
# When in eager mode, use a uid for the shared_name, to prevent
# accidental sharing.
unique_id = "%s_%d" % (handle_name, ops.uid())
shared_name = None # Never shared
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
device_context_manager = (
ops.device if self._in_graph_mode else ops.NullContextmanager)
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % handle_name)]))
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), device_context_manager(None):
if init_from_fn:
initial_value = initial_value()
if isinstance(initial_value, trackable.CheckpointInitialValue):
self._maybe_initialize_trackable()
self._update_uid = initial_value.checkpoint_position.restore_uid
initial_value = initial_value.wrapped_value
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
if shape is not None:
if not initial_value.shape.is_compatible_with(shape):
raise ValueError(
f"In this `tf.Variable` creation, the initial value's shape "
f"({initial_value.shape}) is not compatible with "
f"the explicitly supplied `shape` argument ({shape}).")
else:
shape = initial_value.shape
handle = eager_safe_variable_handle(
initial_value=initial_value,
shape=shape,
shared_name=shared_name,
name=name,
graph_mode=self._in_graph_mode)
handle._parent_trackable = weakref.ref(self)
handle._name = handle_name + ":0"
handle._unique_id = unique_id
# pylint: disable=protected-access
if (self._in_graph_mode and initial_value is not None and
initial_value.op._get_control_flow_context() is not None):
raise ValueError(
f"The `initial_value` passed to `tf.Variable` {name} is from "
f"inside a control-flow construct, such as a loop or "
f"conditional. When creating a "
f"`tf.Variable` inside a loop or conditional, use a lambda as "
f"the `initial_value`. Got: initial_value=({initial_value})")
# pylint: enable=protected-access
dtype = initial_value.dtype.base_dtype
if self._in_graph_mode:
with ops.name_scope("IsInitialized"):
is_initialized_op = (
gen_resource_variable_ops.var_is_initialized_op(handle))
if initial_value is not None:
# pylint: disable=g-backslash-continuation
with ops.name_scope("Assign") as n, \
ops.colocate_with(None, ignore_existing=True), \
ops.device(handle.device):
# pylint: disable=protected-access
initializer_op = (
gen_resource_variable_ops.assign_variable_op(
handle,
variables._try_guard_against_uninitialized_dependencies(
name, initial_value),
name=n))
# pylint: enable=protected-access
# pylint: enable=g-backslash-continuation
with ops.name_scope("Read"):
# Manually assign reads to the handle's device to avoid log
# messages.
with ops.device(handle.device):
value = gen_resource_variable_ops.read_variable_op(handle, dtype)
_maybe_set_handle_data(dtype, handle, value)
graph_element = value
if caching_device is not None:
# Variables may be created in a tf.device() or ops.colocate_with()
# context. At the same time, users would expect caching device to
# be independent of this context, and/or would not expect the
# current device context to be merged with the caching device
# spec. Therefore we reset the colocation stack before creating
# the cached value. Note that resetting the colocation stack will
# also reset the device stack.
with ops.colocate_with(None, ignore_existing=True):
with ops.device(caching_device):
cached_value = array_ops.identity(value)
else:
cached_value = None
else:
gen_resource_variable_ops.assign_variable_op(handle, initial_value)
is_initialized_op = None
initializer_op = None
graph_element = None
if caching_device:
with ops.device(caching_device):
cached_value = gen_resource_variable_ops.read_variable_op(
handle, dtype)
_maybe_set_handle_data(dtype, handle, cached_value)
else:
cached_value = None
if cached_value is not None:
# Store the variable object so that the original variable can be
# accessed to generate functions that are compatible with SavedModel.
cached_value._cached_variable = weakref.ref(self) # pylint: disable=protected-access
if self._in_graph_mode:
# Eager variables are only added to collections if they are part of an
# eager variable store (otherwise in an interactive session they would
# hog memory and cause OOM). This is done in ops/variable_scope.py.
ops.add_to_collections(collections, self)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)
initial_value = initial_value if self._in_graph_mode else None
super(ResourceVariable, self).__init__(
trainable=trainable,
shape=shape,
dtype=dtype,
handle=handle,
synchronization=synchronization,
constraint=constraint,
aggregation=aggregation,
distribute_strategy=distribute_strategy,
name=name,
unique_id=unique_id,
handle_name=handle_name,
graph_element=graph_element,
initial_value=initial_value,
initializer_op=initializer_op,
is_initialized_op=is_initialized_op,
cached_value=cached_value,
caching_device=caching_device,
validate_shape=validate_shape,
)
def _init_from_proto(self,
variable_def,
import_scope=None,
validate_shape=True):
"""Initializes from `VariableDef` proto."""
# Note that init_from_proto is currently not supported in Eager mode.
assert not context.executing_eagerly()
self._in_graph_mode = True
assert isinstance(variable_def, variable_pb2.VariableDef)
if not variable_def.is_resource:
raise ValueError(f"The `variable_def` you passed to `tf.Variable` is "
f"Trying to restore a TF 1.x Reference Variable "
f"as a TF 2.x ResourceVariable. This is unsupported. "
f"Got variable_def={variable_def}")
# Create from variable_def.
g = ops.get_default_graph()
self._handle = g.as_graph_element(
ops.prepend_name_scope(
variable_def.variable_name, import_scope=import_scope),
allow_operation=False)
self._shape = tensor_shape.TensorShape(self._handle.op.get_attr("shape"))
self._handle_name = self._handle.name
self._unique_id = self._handle_name
self._initializer_op = g.as_graph_element(
ops.prepend_name_scope(
variable_def.initializer_name, import_scope=import_scope))
# Check whether initial_value_name exists for backwards compatibility.
if (hasattr(variable_def, "initial_value_name") and
variable_def.initial_value_name):
self._initial_value = g.as_graph_element(
ops.prepend_name_scope(
variable_def.initial_value_name, import_scope=import_scope))
else:
self._initial_value = None
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
variable_def.synchronization, variable_def.aggregation,
variable_def.trainable, variable_def.variable_name))
self._synchronization = synchronization
self._aggregation = aggregation
self._trainable = trainable
if variable_def.snapshot_name:
snapshot = g.as_graph_element(
ops.prepend_name_scope(
variable_def.snapshot_name, import_scope=import_scope))
if snapshot.op.type != "ReadVariableOp":
self._cached_value = snapshot
else:
self._cached_value = None
while snapshot.op.type != "ReadVariableOp":
snapshot = snapshot.op.inputs[0]
self._graph_element = snapshot
else:
self._cached_value = None
# Legacy case for protos without the snapshot name; assume it's the
# following.
self._graph_element = g.get_tensor_by_name(self._handle.op.name +
"/Read/ReadVariableOp:0")
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = variables.Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def,
import_scope=import_scope)
else:
self._save_slice_info = None
self._caching_device = None
self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype"))
self._constraint = None
self._validate_shape = validate_shape
def _init_from_handle(self,
trainable=None,
shape=None,
dtype=None,
handle=None):
handle_data = get_eager_safe_handle_data(handle)
if not handle_data.is_set:
# The handle may not have the handle shape and dtype if it was created
# using tf.placeholder.
handle_data = handle_data_util.create_handle_data(shape, dtype)
handle_data_util.set_handle_data(handle, handle_data)
# pylint: disable=protected-access
if hasattr(handle, "_name") and isinstance(handle._name, str):
handle_name = handle._name.rstrip(":0")
else:
handle_name = None
# pylint: enable=protected-access
unique_id = getattr(handle, "_unique_id", None)
super().__init__(
trainable=trainable, shape=shape, dtype=dtype, handle=handle,
unique_id=unique_id, handle_name=handle_name)
|
ResourceVariable
|
python
|
MongoEngine__mongoengine
|
mongoengine/base/fields.py
|
{
"start": 11844,
"end": 21897
}
|
class ____(BaseField):
"""Handles complex fields, such as lists / dictionaries.
Allows for nesting of embedded documents inside complex types.
Handles the lazy dereferencing of a queryset by lazily dereferencing all
items in a list / dict rather than one at a time.
"""
def __init__(self, field=None, **kwargs):
if field is not None and not isinstance(field, BaseField):
raise TypeError(
f"field argument must be a Field instance (e.g {self.__class__.__name__}(StringField()))"
)
self.field = field
super().__init__(**kwargs)
@staticmethod
def _lazy_load_refs(instance, name, ref_values, *, max_depth):
_dereference = _import_class("DeReference")()
documents = _dereference(
ref_values,
max_depth=max_depth,
instance=instance,
name=name,
)
return documents
def __set__(self, instance, value):
# Some fields, e.g. EnumField, are converted upon __set__
# So it is fair to mimic the same behavior when using e.g. ListField(EnumField)
EnumField = _import_class("EnumField")
if self.field and isinstance(self.field, EnumField):
if isinstance(value, (list, tuple)):
value = [self.field.to_python(sub_val) for sub_val in value]
elif isinstance(value, dict):
value = {key: self.field.to_python(sub) for key, sub in value.items()}
return super().__set__(instance, value)
def __get__(self, instance, owner):
"""Descriptor to automatically dereference references."""
if instance is None:
# Document class being used rather than a document object
return self
ReferenceField = _import_class("ReferenceField")
GenericReferenceField = _import_class("GenericReferenceField")
EmbeddedDocumentListField = _import_class("EmbeddedDocumentListField")
auto_dereference = instance._fields[self.name]._auto_dereference
dereference = auto_dereference and (
self.field is None
or isinstance(self.field, (GenericReferenceField, ReferenceField))
)
if (
instance._initialised
and dereference
and instance._data.get(self.name)
and not getattr(instance._data[self.name], "_dereferenced", False)
):
ref_values = instance._data.get(self.name)
instance._data[self.name] = self._lazy_load_refs(
ref_values=ref_values, instance=instance, name=self.name, max_depth=1
)
if hasattr(instance._data[self.name], "_dereferenced"):
instance._data[self.name]._dereferenced = True
value = super().__get__(instance, owner)
# Convert lists / values so we can watch for any changes on them
if isinstance(value, (list, tuple)):
if issubclass(type(self), EmbeddedDocumentListField) and not isinstance(
value, EmbeddedDocumentList
):
value = EmbeddedDocumentList(value, instance, self.name)
elif not isinstance(value, BaseList):
value = BaseList(value, instance, self.name)
instance._data[self.name] = value
elif isinstance(value, dict) and not isinstance(value, BaseDict):
value = BaseDict(value, instance, self.name)
instance._data[self.name] = value
if (
auto_dereference
and instance._initialised
and isinstance(value, (BaseList, BaseDict))
and not value._dereferenced
):
value = self._lazy_load_refs(
ref_values=value, instance=instance, name=self.name, max_depth=1
)
value._dereferenced = True
instance._data[self.name] = value
return value
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type."""
if isinstance(value, str):
return value
if hasattr(value, "to_python"):
return value.to_python()
BaseDocument = _import_class("BaseDocument")
if isinstance(value, BaseDocument):
# Something is wrong, return the value as it is
return value
is_list = False
if not hasattr(value, "items"):
try:
is_list = True
value = {idx: v for idx, v in enumerate(value)}
except TypeError: # Not iterable; return the value as-is
return value
if self.field:
self.field.set_auto_dereferencing(self._auto_dereference)
value_dict = {
key: self.field.to_python(item) for key, item in value.items()
}
else:
Document = _import_class("Document")
value_dict = {}
for k, v in value.items():
if isinstance(v, Document):
# We need the id from the saved object to create the DBRef
if v.pk is None:
self.error(
"You can only reference documents once they"
" have been saved to the database"
)
collection = v._get_collection_name()
value_dict[k] = DBRef(collection, v.pk)
elif hasattr(v, "to_python"):
value_dict[k] = v.to_python()
else:
value_dict[k] = self.to_python(v)
if is_list: # Convert back to a list
return [
v for _, v in sorted(value_dict.items(), key=operator.itemgetter(0))
]
return value_dict
def to_mongo(self, value, use_db_field=True, fields=None):
"""Convert a Python type to a MongoDB-compatible type."""
Document = _import_class("Document")
EmbeddedDocument = _import_class("EmbeddedDocument")
GenericReferenceField = _import_class("GenericReferenceField")
if isinstance(value, str):
return value
if hasattr(value, "to_mongo"):
if isinstance(value, Document):
return GenericReferenceField().to_mongo(value)
cls = value.__class__
val = value.to_mongo(use_db_field, fields)
# If it's a document that is not inherited add _cls
if isinstance(value, EmbeddedDocument):
val["_cls"] = cls.__name__
return val
is_list = False
if not hasattr(value, "items"):
try:
is_list = True
value = {k: v for k, v in enumerate(value)}
except TypeError: # Not iterable; return the value as-is
return value
if self.field:
value_dict = {
key: self.field._to_mongo_safe_call(item, use_db_field, fields)
for key, item in value.items()
}
else:
value_dict = {}
for k, v in value.items():
if isinstance(v, Document):
# We need the id from the saved object to create the DBRef
if v.pk is None:
self.error(
"You can only reference documents once they"
" have been saved to the database"
)
# If it's a document that is not inheritable it won't have
# any _cls data, so store it as a generic reference, which
# allows us to dereference it later
meta = getattr(v, "_meta", {})
allow_inheritance = meta.get("allow_inheritance")
if not allow_inheritance:
value_dict[k] = GenericReferenceField().to_mongo(v)
else:
collection = v._get_collection_name()
value_dict[k] = DBRef(collection, v.pk)
elif hasattr(v, "to_mongo"):
cls = v.__class__
val = v.to_mongo(use_db_field, fields)
# If it's a document that is not inherited add _cls
if isinstance(v, (Document, EmbeddedDocument)):
val["_cls"] = cls.__name__
value_dict[k] = val
else:
value_dict[k] = self.to_mongo(v, use_db_field, fields)
if is_list: # Convert back to a list
return [
v for _, v in sorted(value_dict.items(), key=operator.itemgetter(0))
]
return value_dict
def validate(self, value):
"""If field is provided ensure the value is valid."""
errors = {}
if self.field:
if hasattr(value, "items"):
sequence = value.items()
else:
sequence = enumerate(value)
for k, v in sequence:
try:
self.field._validate(v)
except ValidationError as error:
errors[k] = error.errors or error
except (ValueError, AssertionError) as error:
errors[k] = error
if errors:
field_class = self.field.__class__.__name__
self.error(f"Invalid {field_class} item ({value})", errors=errors)
# Don't allow empty values if required
if self.required and not value:
self.error("Field is required and cannot be empty")
def prepare_query_value(self, op, value):
return self.to_mongo(value)
def lookup_member(self, member_name):
if self.field:
return self.field.lookup_member(member_name)
return None
def _set_owner_document(self, owner_document):
if self.field:
self.field.owner_document = owner_document
self._owner_document = owner_document
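As a hedged usage sketch, concrete container fields such as `ListField` build on this base class. The example below sticks to the public mongoengine API and assumes a reachable MongoDB instance on localhost; the database name is a placeholder.
```python
from mongoengine import Document, ListField, StringField, connect

class Post(Document):
    tags = ListField(StringField())   # ListField derives from ComplexBaseField

connect("complexfield_demo")          # assumes MongoDB on localhost:27017

Post(tags=["python", "odm"]).save()
reloaded = Post.objects.first()
print(reloaded.tags)                  # values come back wrapped (BaseList) for change tracking
```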
|
ComplexBaseField
|
python
|
huggingface__transformers
|
src/transformers/models/videomae/configuration_videomae.py
|
{
"start": 785,
"end": 6600
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VideoMAEModel`]. It is used to instantiate a
VideoMAE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VideoMAE
[MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_frames (`int`, *optional*, defaults to 16):
The number of frames in each video.
tubelet_size (`int`, *optional*, defaults to 2):
The number of tubelets.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
use_mean_pooling (`bool`, *optional*, defaults to `True`):
Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token.
decoder_num_attention_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the decoder.
decoder_hidden_size (`int`, *optional*, defaults to 384):
Dimensionality of the decoder.
decoder_num_hidden_layers (`int`, *optional*, defaults to 4):
Number of hidden layers in the decoder.
decoder_intermediate_size (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder.
norm_pix_loss (`bool`, *optional*, defaults to `True`):
Whether to normalize the target patch pixels.
Example:
```python
>>> from transformers import VideoMAEConfig, VideoMAEModel
>>> # Initializing a VideoMAE videomae-base style configuration
>>> configuration = VideoMAEConfig()
>>> # Randomly initializing a model from the configuration
>>> model = VideoMAEModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "videomae"
def __init__(
self,
image_size=224,
patch_size=16,
num_channels=3,
num_frames=16,
tubelet_size=2,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-12,
qkv_bias=True,
use_mean_pooling=True,
decoder_num_attention_heads=6,
decoder_hidden_size=384,
decoder_num_hidden_layers=4,
decoder_intermediate_size=1536,
norm_pix_loss=True,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.use_mean_pooling = use_mean_pooling
self.decoder_num_attention_heads = decoder_num_attention_heads
self.decoder_hidden_size = decoder_hidden_size
self.decoder_num_hidden_layers = decoder_num_hidden_layers
self.decoder_intermediate_size = decoder_intermediate_size
self.norm_pix_loss = norm_pix_loss
__all__ = ["VideoMAEConfig"]
|
VideoMAEConfig
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/tools/types.py
|
{
"start": 2932,
"end": 4591
}
|
class ____(BaseModel):
"""Tool output."""
blocks: List[ContentBlock]
tool_name: str
raw_input: Dict[str, Any]
raw_output: Any
is_error: bool = False
_exception: Optional[Exception] = PrivateAttr(default=None)
def __init__(
self,
tool_name: str,
content: Optional[str] = None,
blocks: Optional[List[ContentBlock]] = None,
raw_input: Optional[Dict[str, Any]] = None,
raw_output: Optional[Any] = None,
is_error: bool = False,
exception: Optional[Exception] = None,
):
if content and blocks:
raise ValueError("Cannot provide both content and blocks.")
if content:
blocks = [TextBlock(text=content)]
elif blocks:
pass
else:
blocks = []
super().__init__(
tool_name=tool_name,
blocks=blocks,
raw_input=raw_input,
raw_output=raw_output,
is_error=is_error,
)
self._exception = exception
@property
def content(self) -> str:
"""Get the content of the tool output."""
return "\n".join(
[block.text for block in self.blocks if isinstance(block, TextBlock)]
)
@content.setter
def content(self, content: str) -> None:
"""Set the content of the tool output."""
self.blocks = [TextBlock(text=content)]
@property
def exception(self) -> Optional[Exception]:
"""Get the exception of the tool output."""
return self._exception
def __str__(self) -> str:
"""String."""
return self.content
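A minimal construction sketch for the class above; the import path simply mirrors the file shown, and the tool name and payloads are placeholders.
```python
from llama_index.core.tools.types import ToolOutput

out = ToolOutput(
    tool_name="search",
    content="3 results found",        # stored internally as a single TextBlock
    raw_input={"query": "llamas"},
    raw_output=["r1", "r2", "r3"],
)
print(str(out))        # "3 results found" -- __str__ returns the text content
print(out.is_error)    # False
# Passing both `content` and `blocks` raises ValueError, per __init__ above.
```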
|
ToolOutput
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_grpc_action.py
|
{
"start": 383,
"end": 4721
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'port': 'int',
'service': 'str'
}
attribute_map = {
'port': 'port',
'service': 'service'
}
def __init__(self, port=None, service=None, local_vars_configuration=None): # noqa: E501
"""V1GRPCAction - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._port = None
self._service = None
self.discriminator = None
self.port = port
if service is not None:
self.service = service
@property
def port(self):
"""Gets the port of this V1GRPCAction. # noqa: E501
Port number of the gRPC service. Number must be in the range 1 to 65535. # noqa: E501
:return: The port of this V1GRPCAction. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this V1GRPCAction.
Port number of the gRPC service. Number must be in the range 1 to 65535. # noqa: E501
:param port: The port of this V1GRPCAction. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
@property
def service(self):
"""Gets the service of this V1GRPCAction. # noqa: E501
Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. # noqa: E501
:return: The service of this V1GRPCAction. # noqa: E501
:rtype: str
"""
return self._service
@service.setter
def service(self, service):
"""Sets the service of this V1GRPCAction.
Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. # noqa: E501
:param service: The service of this V1GRPCAction. # noqa: E501
:type: str
"""
self._service = service
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1GRPCAction):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1GRPCAction):
return True
return self.to_dict() != other.to_dict()
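A short usage sketch of the generated model via the public `kubernetes` client package; the port and service name are placeholders.
```python
from kubernetes.client import V1GRPCAction

grpc_check = V1GRPCAction(port=50051, service="liveness")
print(grpc_check.to_dict())   # {'port': 50051, 'service': 'liveness'}

# Equality is structural: __eq__ compares the to_dict() representations.
print(grpc_check == V1GRPCAction(port=50051, service="liveness"))  # True
```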
|
V1GRPCAction
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/dependency.py
|
{
"start": 31243,
"end": 43887
}
|
class ____:
@staticmethod
def from_definitions(
nodes: Mapping[str, Node], dep_dict: DependencyMapping[str]
) -> "DependencyStructure":
return DependencyStructure(
list(dep_dict.keys()),
_create_handle_dict(nodes, dep_dict),
dep_dict,
)
_node_input_index: defaultdict[str, dict[NodeInput, list[NodeOutput]]]
_node_output_index: dict[str, defaultdict[NodeOutput, list[NodeInput]]]
_dynamic_fan_out_index: dict[str, NodeOutput]
_collect_index: dict[str, set[NodeOutput]]
_deps_by_node_name: DependencyMapping[str]
def __init__(
self,
node_names: Sequence[str],
input_to_output_map: InputToOutputMap,
deps_by_node_name: DependencyMapping[str],
):
self._node_names = node_names
self._input_to_output_map = input_to_output_map
self._deps_by_node_name = deps_by_node_name
# Building up a couple indexes here so that one can look up all the upstream output handles
# or downstream input handles in O(1). Without this, this can become O(N^2) where N is node
# count during the GraphQL query in particular
# node_name => input_handle => list[output_handle]
self._node_input_index = defaultdict(dict)
# node_name => output_handle => list[input_handle]
self._node_output_index = defaultdict(lambda: defaultdict(list))
# node_name => dynamic output_handle that this node will dupe for
self._dynamic_fan_out_index = {}
# node_name => set of dynamic output_handle this collects over
self._collect_index = defaultdict(set)
for node_input, (dep_type, node_output_or_list) in self._input_to_output_map.items():
if dep_type == DependencyType.FAN_IN:
node_output_list: list[NodeOutput] = []
for node_output in node_output_or_list:
if not isinstance(node_output, NodeOutput):
continue
if node_output.is_dynamic:
raise DagsterInvalidDefinitionError(
"Currently, items in a fan-in dependency cannot be downstream of"
" dynamic outputs. Problematic dependency on dynamic output"
f' "{node_output.describe()}".'
)
if self._dynamic_fan_out_index.get(node_output.node_name):
raise DagsterInvalidDefinitionError(
"Currently, items in a fan-in dependency cannot be downstream of"
" dynamic outputs. Problematic dependency on output"
f' "{node_output.describe()}", downstream of'
f' "{self._dynamic_fan_out_index[node_output.node_name].describe()}".'
)
node_output_list.append(node_output)
elif dep_type == DependencyType.DIRECT:
node_output = cast("NodeOutput", node_output_or_list)
if node_output.is_dynamic:
self._validate_and_set_fan_out(node_input, node_output)
if self._dynamic_fan_out_index.get(node_output.node_name):
self._validate_and_set_fan_out(
node_input, self._dynamic_fan_out_index[node_output.node_name]
)
node_output_list = [node_output]
elif dep_type == DependencyType.DYNAMIC_COLLECT:
node_output = cast("NodeOutput", node_output_or_list)
if node_output.is_dynamic:
self._validate_and_set_collect(node_input, node_output)
elif self._dynamic_fan_out_index.get(node_output.node_name):
self._validate_and_set_collect(
node_input,
self._dynamic_fan_out_index[node_output.node_name],
)
else:
check.failed(
f"Unexpected dynamic fan in dep created {node_output} -> {node_input}"
)
node_output_list = [node_output]
else:
check.failed(f"Unexpected dep type {dep_type}")
self._node_input_index[node_input.node.name][node_input] = node_output_list
for node_output in node_output_list:
self._node_output_index[node_output.node.name][node_output].append(node_input)
def _validate_and_set_fan_out(self, node_input: NodeInput, node_output: NodeOutput) -> None:
"""Helper function for populating _dynamic_fan_out_index."""
if not node_input.node.definition.input_supports_dynamic_output_dep(node_input.input_name):
raise DagsterInvalidDefinitionError(
f"{node_input.node.describe_node()} cannot be downstream of dynamic output"
f' "{node_output.describe()}" since input "{node_input.input_name}" maps to a'
" node that is already downstream of another dynamic output. Nodes cannot be"
" downstream of more than one dynamic output"
)
if self._collect_index.get(node_input.node_name):
raise DagsterInvalidDefinitionError(
f"{node_input.node.describe_node()} cannot be both downstream of dynamic output "
f"{node_output.describe()} and collect over dynamic output "
f"{next(iter(self._collect_index[node_input.node_name])).describe()}."
)
if self._dynamic_fan_out_index.get(node_input.node_name) is None:
self._dynamic_fan_out_index[node_input.node_name] = node_output
return
if self._dynamic_fan_out_index[node_input.node_name] != node_output:
raise DagsterInvalidDefinitionError(
f"{node_input.node.describe_node()} cannot be downstream of more than one dynamic"
f' output. It is downstream of both "{node_output.describe()}" and'
f' "{self._dynamic_fan_out_index[node_input.node_name].describe()}"'
)
def _validate_and_set_collect(
self,
node_input: NodeInput,
node_output: NodeOutput,
) -> None:
if self._dynamic_fan_out_index.get(node_input.node_name):
raise DagsterInvalidDefinitionError(
f"{node_input.node.describe_node()} cannot both collect over dynamic output "
f"{node_output.describe()} and be downstream of the dynamic output "
f"{self._dynamic_fan_out_index[node_input.node_name].describe()}."
)
self._collect_index[node_input.node_name].add(node_output)
# if the output is already fanned out
if self._dynamic_fan_out_index.get(node_output.node_name):
raise DagsterInvalidDefinitionError(
f"{node_input.node.describe_node()} cannot be downstream of more than one dynamic"
f' output. It is downstream of both "{node_output.describe()}" and'
f' "{self._dynamic_fan_out_index[node_output.node_name].describe()}"'
)
def all_upstream_outputs_from_node(self, node_name: str) -> Sequence[NodeOutput]:
check.str_param(node_name, "node_name")
# flatten out all outputs that feed into the inputs of this node
return [
output_handle
for output_handle_list in self._node_input_index[node_name].values()
for output_handle in output_handle_list
]
def input_to_upstream_outputs_for_node(
self, node_name: str
) -> Mapping[NodeInput, Sequence[NodeOutput]]:
"""Returns a Dict[NodeInput, List[NodeOutput]] that encodes
where all the inputs are sourced from upstream. Usually the
List[NodeOutput] will be a list of one, except for the
multi-dependency case.
"""
check.str_param(node_name, "node_name")
return self._node_input_index[node_name]
def output_to_downstream_inputs_for_node(
self, node_name: str
) -> Mapping[NodeOutput, Sequence[NodeInput]]:
"""Returns a Dict[NodeOutput, List[NodeInput]] that
represents all the downstream inputs for each output in the
dictionary.
"""
check.str_param(node_name, "node_name")
return self._node_output_index[node_name]
def has_direct_dep(self, node_input: NodeInput) -> bool:
check.inst_param(node_input, "node_input", NodeInput)
if node_input not in self._input_to_output_map:
return False
dep_type, _ = self._input_to_output_map[node_input]
return dep_type == DependencyType.DIRECT
def get_direct_dep(self, node_input: NodeInput) -> NodeOutput:
check.inst_param(node_input, "node_input", NodeInput)
dep_type, dep = self._input_to_output_map[node_input]
check.invariant(
dep_type == DependencyType.DIRECT,
f"Cannot call get_direct_dep when dep is not singular, got {dep_type}",
)
return cast("NodeOutput", dep)
def get_dependency_definition(self, node_input: NodeInput) -> Optional[IDependencyDefinition]:
return self._deps_by_node_name[node_input.node_name].get(node_input.input_name)
def has_fan_in_deps(self, node_input: NodeInput) -> bool:
check.inst_param(node_input, "node_input", NodeInput)
if node_input not in self._input_to_output_map:
return False
dep_type, _ = self._input_to_output_map[node_input]
return dep_type == DependencyType.FAN_IN
def get_fan_in_deps(
self, node_input: NodeInput
) -> Sequence[Union[NodeOutput, type["MappedInputPlaceholder"]]]:
check.inst_param(node_input, "node_input", NodeInput)
dep_type, deps = self._input_to_output_map[node_input]
check.invariant(
dep_type == DependencyType.FAN_IN,
f"Cannot call get_multi_dep when dep is not fan in, got {dep_type}",
)
return cast("list[Union[NodeOutput, type[MappedInputPlaceholder]]]", deps)
def has_dynamic_fan_in_dep(self, node_input: NodeInput) -> bool:
check.inst_param(node_input, "node_input", NodeInput)
if node_input not in self._input_to_output_map:
return False
dep_type, _ = self._input_to_output_map[node_input]
return dep_type == DependencyType.DYNAMIC_COLLECT
def get_dynamic_fan_in_dep(self, node_input: NodeInput) -> NodeOutput:
check.inst_param(node_input, "node_input", NodeInput)
dep_type, dep = self._input_to_output_map[node_input]
check.invariant(
dep_type == DependencyType.DYNAMIC_COLLECT,
f"Cannot call get_dynamic_fan_in_dep when dep is not, got {dep_type}",
)
return cast("NodeOutput", dep)
def has_deps(self, node_input: NodeInput) -> bool:
check.inst_param(node_input, "node_input", NodeInput)
return node_input in self._input_to_output_map
def get_deps_list(self, node_input: NodeInput) -> Sequence[NodeOutput]:
check.inst_param(node_input, "node_input", NodeInput)
check.invariant(self.has_deps(node_input))
dep_type, handle_or_list = self._input_to_output_map[node_input]
if dep_type == DependencyType.DIRECT:
return [cast("NodeOutput", handle_or_list)]
elif dep_type == DependencyType.DYNAMIC_COLLECT:
return [cast("NodeOutput", handle_or_list)]
elif dep_type == DependencyType.FAN_IN:
return [handle for handle in handle_or_list if isinstance(handle, NodeOutput)]
else:
check.failed(f"Unexpected dep type {dep_type}")
def inputs(self) -> Sequence[NodeInput]:
return list(self._input_to_output_map.keys())
def get_upstream_dynamic_output_for_node(self, node_name: str) -> Optional[NodeOutput]:
return self._dynamic_fan_out_index.get(node_name)
def get_dependency_type(self, node_input: NodeInput) -> Optional[DependencyType]:
result = self._input_to_output_map.get(node_input)
if result is None:
return None
dep_type, _ = result
return dep_type
def is_dynamic_mapped(self, node_name: str) -> bool:
return node_name in self._dynamic_fan_out_index
def has_dynamic_downstreams(self, node_name: str) -> bool:
for node_output in self._dynamic_fan_out_index.values():
if node_output.node_name == node_name:
return True
return False
|
DependencyStructure
|
python
|
scipy__scipy
|
scipy/cluster/tests/test_hierarchy.py
|
{
"start": 17877,
"end": 21859
}
|
class ____:
@pytest.mark.parametrize("nrow, ncol, valid", [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)])
def test_is_valid_linkage_various_size(self, nrow, ncol, valid, xp):
# Tests is_valid_linkage(Z) with linkage matrices of various sizes
Z = xp.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=xp.float64)
Z = Z[:nrow, :ncol]
xp_assert_equal(is_valid_linkage(Z), valid, check_namespace=False)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self, xp):
# Tests is_valid_linkage(Z) with integer type.
Z = xp.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=xp.int64)
xp_assert_equal(is_valid_linkage(Z), False, check_namespace=False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self, xp):
# Tests is_valid_linkage(Z) with empty linkage.
Z = xp.zeros((0, 4), dtype=xp.float64)
xp_assert_equal(is_valid_linkage(Z), False, check_namespace=False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = xp.asarray(linkage(y))
y = xp.asarray(y)
xp_assert_equal(is_valid_linkage(Z), True, check_namespace=False)
def test_is_valid_linkage_4_and_up_neg_index_left(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = xp.asarray(linkage(y))
y = xp.asarray(y)
Z = xpx.at(Z)[i//2, 0].set(-2)
xp_assert_equal(is_valid_linkage(Z), False, check_namespace=False)
with pytest.raises(ValueError):
eager.is_valid_linkage(Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = xp.asarray(linkage(y))
y = xp.asarray(y)
Z = xpx.at(Z)[i//2, 1].set(-2)
xp_assert_equal(is_valid_linkage(Z), False, check_namespace=False)
with pytest.raises(ValueError):
eager.is_valid_linkage(Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = xp.asarray(linkage(y))
y = xp.asarray(y)
Z = xpx.at(Z)[i//2, 2].set(-0.5)
xp_assert_equal(is_valid_linkage(Z), False, check_namespace=False)
with pytest.raises(ValueError):
eager.is_valid_linkage(Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self, xp):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = xp.asarray(linkage(y))
y = xp.asarray(y)
Z = xpx.at(Z)[i//2, 3].set(-2)
xp_assert_equal(is_valid_linkage(Z), False, check_namespace=False)
with pytest.raises(ValueError):
eager.is_valid_linkage(Z, throw=True)
@make_xp_test_case(is_valid_im)
|
TestIsValidLinkage
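For context, a small sketch of the public SciPy API these tests exercise; the random observations are illustrative.
```python
import numpy as np
from scipy.cluster.hierarchy import linkage, is_valid_linkage

y = np.random.rand(6)            # condensed distances for 4 observations
Z = linkage(y)
print(is_valid_linkage(Z))       # True for a well-formed float64 linkage matrix

Z_bad = Z.copy()
Z_bad[0, 2] = -1.0               # a negative merge distance is invalid
print(is_valid_linkage(Z_bad))   # False (and raises when called with throw=True)
```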
|
python
|
joke2k__faker
|
tests/providers/test_ssn.py
|
{
"start": 23317,
"end": 24969
}
|
class ____:
def setup_method(self):
self.fake = Faker("es_MX")
Faker.seed(0)
def test_ssn(self):
for _ in range(100):
ssn = self.fake.ssn()
assert len(ssn) == 11
assert ssn.isnumeric()
assert mx_ssn_checksum(map(int, ssn[:-1])) == int(ssn[-1])
def test_curp(self):
for _ in range(100):
curp = self.fake.curp()
assert len(curp) == 18
assert re.search(r"^[A-Z]{4}\d{6}[A-Z]{6}[0A]\d$", curp)
assert mx_curp_checksum(curp[:-1]) == int(curp[-1])
def test_rfc_natural(self):
for _ in range(100):
rfc = self.fake.rfc()
assert len(rfc) == 13
assert re.search(r"^[A-Z]{4}\d{6}[0-9A-Z]{3}$", rfc)
def test_rfc_legal(self):
for _ in range(100):
rfc = self.fake.rfc(natural=False)
assert len(rfc) == 12
assert re.search(r"^[A-Z]{3}\d{6}[0-9A-Z]{3}$", rfc)
@pytest.mark.parametrize(
"gender,pattern",
[
("M", r"^[A-Z]{6}\d{8}M\d{3}$"),
("H", r"^[A-Z]{6}\d{8}H\d{3}$"),
(None, r"^[A-Z]{6}\d{8}[HM]\d{3}$"),
],
ids=["woman", "man", "any"],
)
def test_elector_code(self, gender, pattern):
for _ in range(100):
elector_code = self.fake.elector_code(gender=gender)
assert len(elector_code) == 18
assert re.search(pattern, elector_code)
def test_elector_code_unsupported_gender(self):
with pytest.raises(ValueError, match="Gender must be"):
self.fake.elector_code("Z")
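The calls exercised above belong to Faker's public `es_MX` locale provider; a quick sketch (output values are random):
```python
from faker import Faker

fake = Faker("es_MX")
print(fake.ssn())                     # 11-digit NSS with a valid check digit
print(fake.curp())                    # 18-character CURP
print(fake.rfc())                     # 13-character RFC for a natural person
print(fake.elector_code(gender="M"))  # 18-character elector code
```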
|
TestEsMX
|
python
|
ray-project__ray
|
doc/source/ray-overview/examples/e2e-rag/notebooks/rag_utils.py
|
{
"start": 2624,
"end": 3270
}
|
class ____:
def __init__(self, model_name: str = "intfloat/multilingual-e5-large-instruct"):
self.model_name = model_name
self.model = SentenceTransformer(
self.model_name, device="cuda" if torch.cuda.is_available() else "cpu"
)
def embed_single(self, text: str) -> np.ndarray:
"""Generate an embedding for a single text string."""
return self.model.encode(text, convert_to_numpy=True)
def embed_batch(self, texts: List[str]) -> np.ndarray:
"""Generate embeddings for a batch (list) of text strings."""
return self.model.encode(texts, convert_to_numpy=True)
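A minimal usage sketch of the helper above. Downloading `intfloat/multilingual-e5-large-instruct` and having CUDA available are environment assumptions, and the embedding dimensionality shown is indicative only.
```python
embedder = Embedder()   # falls back to CPU when CUDA is unavailable

single = embedder.embed_single("query: what is retrieval-augmented generation?")
batch = embedder.embed_batch(["first passage", "second passage"])
print(single.shape, batch.shape)   # e.g. (1024,) and (2, 1024)
```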
|
Embedder
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_alpha.py
|
{
"start": 1378,
"end": 1675
}
|
class ____(scale_alpha_ordinal):
"""
Discrete Alpha Scale
"""
def __post_init__(self, range):
warn(
"Using alpha for a discrete variable is not advised.",
PlotnineWarning,
)
super().__post_init__(range)
@dataclass
|
scale_alpha_discrete
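To see the warning above fire, a hedged sketch with the public plotnine API (the `mpg` dataset ships with plotnine):
```python
from plotnine import ggplot, aes, geom_point, scale_alpha_discrete
from plotnine.data import mpg

p = (
    ggplot(mpg, aes("displ", "hwy", alpha="factor(cyl)"))
    + geom_point()
    + scale_alpha_discrete()   # PlotnineWarning: alpha for a discrete variable is not advised
)
p.draw()
```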
|
python
|
getsentry__sentry
|
src/social_auth/backends/bitbucket.py
|
{
"start": 2503,
"end": 4263
}
|
class ____(BaseOAuth1):
"""Bitbucket OAuth authentication mechanism"""
AUTHORIZATION_URL = BITBUCKET_AUTHORIZATION_URL
REQUEST_TOKEN_URL = BITBUCKET_REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = BITBUCKET_ACCESS_TOKEN_URL
AUTH_BACKEND = BitbucketBackend
SETTINGS_KEY_NAME = "BITBUCKET_CONSUMER_KEY"
SETTINGS_SECRET_NAME = "BITBUCKET_CONSUMER_SECRET"
DEFAULT_SCOPE = ["webhook", "repository", "issue"]
def user_data(self, access_token):
"""Return user data provided"""
# Bitbucket has a bit of an indirect route to obtain user data from an
# authenticated query: First obtain the user's email via an
# authenticated GET
url = BITBUCKET_EMAIL_DATA_URL
request = self.oauth_request(access_token, url)
response = self.fetch_response(request)
try:
email = None
# Then retrieve the user's primary email address or the top email
email_addresses = json.loads(response)
for email_address in reversed(email_addresses):
if email_address["active"]:
email = email_address["email"]
if email_address["primary"]:
break
if email is None:
return None
# Then return the user data using a normal GET with the
# BITBUCKET_USER_DATA_URL and the user's email
response = dsa_urlopen(BITBUCKET_USER_DATA_URL + email)
user_details = json.load(response)["user"]
user_details["email"] = email
return user_details
except ValueError:
return None
# Backend definition
BACKENDS = {IntegrationProviderSlug.BITBUCKET.value: BitbucketAuth}
|
BitbucketAuth
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/_index.py
|
{
"start": 312,
"end": 1035
}
|
class ____(NamedTuple):
"""An index entry.
.. note::
The *qualifier* and *description* are not rendered for some output formats,
such as LaTeX.
"""
#: The name of the index entry to be displayed.
name: str
#: The sub-entry related type. One of:
#:
#: ``0``
#: A normal entry.
#: ``1``
#: An entry with sub-entries.
#: ``2``
#: A sub-entry.
subtype: int
#: *docname* where the entry is located.
docname: str
#: Anchor for the entry within `docname`
anchor: str
#: Extra info for the entry.
extra: str
#: Qualifier for the description.
qualifier: str
#: Description for the entry.
descr: str
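A construction sketch for the named tuple above; the import path is an assumption (the class lives in `sphinx/domains/_index.py` and is re-exported from `sphinx.domains`), and the field values are placeholders.
```python
from sphinx.domains import IndexEntry

entry = IndexEntry(
    name="serialize",
    subtype=0,                 # 0 = a normal entry
    docname="usage/api",
    anchor="module.serialize",
    extra="",
    qualifier="",
    descr="Serialize an object to JSON",
)
print(entry.name, entry.docname)
```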
|
IndexEntry
|
python
|
aio-libs__aiohttp
|
aiohttp/web_urldispatcher.py
|
{
"start": 27627,
"end": 28627
}
|
class ____(AbstractRoute):
"""A route with resource"""
def __init__(
self,
method: str,
handler: Handler | type[AbstractView],
resource: AbstractResource,
*,
expect_handler: _ExpectHandler | None = None,
) -> None:
super().__init__(
method, handler, expect_handler=expect_handler, resource=resource
)
def __repr__(self) -> str:
return f"<ResourceRoute [{self.method}] {self._resource} -> {self.handler!r}"
@property
def name(self) -> str | None:
if self._resource is None:
return None
return self._resource.name
def url_for(self, *args: str, **kwargs: str) -> URL:
"""Construct url for route with additional params."""
assert self._resource is not None
return self._resource.url_for(*args, **kwargs)
def get_info(self) -> _InfoDict:
assert self._resource is not None
return self._resource.get_info()
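Application code rarely constructs this class directly; routes registered on a router come back as `ResourceRoute` instances. A small sketch using the public aiohttp API:
```python
from aiohttp import web

async def ping(request: web.Request) -> web.Response:
    return web.Response(text="ok")

app = web.Application()
route = app.router.add_get("/ping", ping)   # returns a ResourceRoute
print(route.method, route.get_info())       # GET {'path': '/ping'}
```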
|
ResourceRoute
|
python
|
aio-libs__aiohttp
|
aiohttp/web_exceptions.py
|
{
"start": 11300,
"end": 11371
}
|
class ____(HTTPServerError):
status_code = 503
|
HTTPServiceUnavailable
|
python
|
PyCQA__pylint
|
pylint/config/callback_actions.py
|
{
"start": 11038,
"end": 11411
}
|
class ____(_XableAction):
"""Callback action for disabling a message."""
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = "--disable",
) -> None:
self._call(self.linter.disable, values, option_string)
|
_DisableAction
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/organization_profiling_functions.py
|
{
"start": 1626,
"end": 2098
}
|
class ____(serializers.Field):
def to_representation(self, trend_type: TrendType):
return trend_type.value
def to_internal_value(self, data: Any) -> TrendType | None:
for trend_type in TrendType:
if data == trend_type.value:
return trend_type
expected = " or ".join(trend_type.value for trend_type in TrendType)
raise serializers.ValidationError(f"Unknown trend type. Expected {expected}")
|
TrendTypeField
|
python
|
HIPS__autograd
|
autograd/numpy/numpy_wrapper.py
|
{
"start": 4186,
"end": 5666
}
|
class ____:
def __getitem__(self, args):
raw_array = _np.c_[args]
return wrap_if_boxes_inside(raw_array, slow_op_name="c_")
c_ = c_class()
# ----- misc -----
@primitive
def make_diagonal(D, offset=0, axis1=0, axis2=1):
# Numpy doesn't offer a complement to np.diagonal: a function to create new
# diagonal arrays with extra dimensions. We need such a function for the
# gradient of np.diagonal and it's also quite handy to have. So here it is.
if not (offset == 0 and axis1 == -1 and axis2 == -2):
raise NotImplementedError("Currently make_diagonal only supports offset=0, axis1=-1, axis2=-2")
# We use a trick: calling np.diagonal returns a view on the original array,
# so we can modify it in-place. (only valid for numpy version >= 1.10.)
new_array = _np.zeros(D.shape + (D.shape[-1],))
new_array_diag = _np.diagonal(new_array, offset=0, axis1=-1, axis2=-2)
new_array_diag.flags.writeable = True
new_array_diag[:] = D
return new_array
@notrace_primitive
def metadata(A):
return _np.shape(A), _np.ndim(A), _np.result_type(A), _np.iscomplexobj(A)
@notrace_primitive
def parse_einsum_input(*args):
return _parse_einsum_input(args)
if _np.lib.NumpyVersion(_np.__version__) >= "2.0.0":
# Wrapped above
_astype = astype
else:
@primitive
def _astype(A, dtype, order="K", casting="unsafe", subok=True, copy=True):
return A.astype(dtype, order, casting, subok, copy)
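For reference, the wrapped `c_` helper above behaves like NumPy's `np.c_` column-stacking shorthand, with a slower fallback when autograd boxes are involved; a quick sketch:
```python
import autograd.numpy as anp

stacked = anp.c_[anp.array([1.0, 2.0]), anp.array([3.0, 4.0])]
print(stacked)
# [[1. 3.]
#  [2. 4.]]
```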
|
c_class
|
python
|
conda__conda
|
tests/plugins/test_pre_solves.py
|
{
"start": 504,
"end": 2144
}
|
class ____:
def pre_solve_action(self) -> None:
pass
@plugins.hookimpl
def conda_pre_solves(self):
yield plugins.CondaPreSolve(
name="custom-pre-solve",
action=self.pre_solve_action,
)
@pytest.fixture
def pre_solve_plugin(
mocker: MockerFixture,
plugin_manager_with_reporter_backends: CondaPluginManager,
) -> PreSolvePlugin:
mocker.patch.object(PreSolvePlugin, "pre_solve_action")
pre_solve_plugin = PreSolvePlugin()
plugin_manager_with_reporter_backends.register(pre_solve_plugin)
# register solvers
plugin_manager_with_reporter_backends.load_plugins(solvers)
return pre_solve_plugin
def test_pre_solve_invoked(
pre_solve_plugin: PreSolvePlugin,
tmp_env: TmpEnvFixture,
path_factory: PathFactoryFixture,
):
with pytest.raises(DryRunExit):
with tmp_env("zlib", "--solver=classic", "--dry-run"):
pass
assert pre_solve_plugin.pre_solve_action.mock_calls
def test_pre_solve_not_invoked(
pre_solve_plugin: PreSolvePlugin,
conda_cli: CondaCLIFixture,
):
conda_cli("config")
assert not pre_solve_plugin.pre_solve_action.mock_calls
def test_pre_solve_action_raises_exception(
pre_solve_plugin: PreSolvePlugin,
tmp_env: TmpEnvFixture,
path_factory: PathFactoryFixture,
):
exc_message = "💥"
pre_solve_plugin.pre_solve_action.side_effect = [Exception(exc_message)]
with pytest.raises(Exception, match=exc_message):
with tmp_env("zlib", "--solver=classic", "--dry-run"):
pass
assert pre_solve_plugin.pre_solve_action.mock_calls
|
PreSolvePlugin
|
python
|
wntrblm__nox
|
nox/sessions.py
|
{
"start": 4306,
"end": 4763
}
|
class ____:
def __init__(self, dir: str | os.PathLike[str]) -> None:
self._prev_working_dir = os.getcwd()
os.chdir(dir)
def __enter__(self) -> _WorkingDirContext: # noqa: PYI034
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
os.chdir(self._prev_working_dir)
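A minimal sketch of the context manager above (a private nox helper, shown purely for illustration; the directory is a throwaway temp dir):
```python
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    with _WorkingDirContext(tmp):
        print(os.getcwd())    # inside the temporary directory
    print(os.getcwd())        # previous working directory restored on exit
```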
|
_WorkingDirContext
|
python
|
ray-project__ray
|
rllib/policy/torch_mixins.py
|
{
"start": 366,
"end": 1906
}
|
class ____:
"""Mixin for TorchPolicy that adds a learning rate schedule."""
def __init__(self, lr, lr_schedule, lr2=None, lr2_schedule=None):
self._lr_schedule = None
self._lr2_schedule = None
# Disable any scheduling behavior related to learning if Learner API is active.
# Schedules are handled by Learner class.
if lr_schedule is None:
self.cur_lr = lr
else:
self._lr_schedule = PiecewiseSchedule(
lr_schedule, outside_value=lr_schedule[-1][-1], framework=None
)
self.cur_lr = self._lr_schedule.value(0)
if lr2_schedule is None:
self.cur_lr2 = lr2
else:
self._lr2_schedule = PiecewiseSchedule(
lr2_schedule, outside_value=lr2_schedule[-1][-1], framework=None
)
self.cur_lr2 = self._lr2_schedule.value(0)
def on_global_var_update(self, global_vars):
super().on_global_var_update(global_vars)
if self._lr_schedule:
self.cur_lr = self._lr_schedule.value(global_vars["timestep"])
for opt in self._optimizers:
for p in opt.param_groups:
p["lr"] = self.cur_lr
if self._lr2_schedule:
assert len(self._optimizers) == 2
self.cur_lr2 = self._lr2_schedule.value(global_vars["timestep"])
opt = self._optimizers[1]
for p in opt.param_groups:
p["lr"] = self.cur_lr2
@OldAPIStack
|
LearningRateSchedule
|
python
|
bokeh__bokeh
|
src/bokeh/server/contexts.py
|
{
"start": 4523,
"end": 12613
}
|
class ____:
''' Server-side holder for ``bokeh.application.Application`` plus any associated data.
This holds data that's global to all sessions, while ``ServerSession`` holds
data specific to an "instance" of the application.
'''
_sessions: dict[ID, ServerSession]
_pending_sessions: dict[ID, gen.Future[ServerSession]]
_session_contexts: dict[ID, SessionContext]
_server_context: BokehServerContext
def __init__(self, application: Application, io_loop: IOLoop | None = None,
url: str | None = None, logout_url: str | None = None):
self._application = application
self._loop = io_loop
self._sessions = {}
self._pending_sessions = {}
self._session_contexts = {}
self._server_context = BokehServerContext(self)
self._url = url
self._logout_url = logout_url
@property
def io_loop(self) -> IOLoop | None:
return self._loop
@property
def application(self) -> Application:
return self._application
@property
def url(self) -> str | None:
return self._url
@property
def server_context(self) -> BokehServerContext:
return self._server_context
@property
def sessions(self) -> Iterable[ServerSession]:
return self._sessions.values()
def run_load_hook(self) -> None:
try:
self._application.on_server_loaded(self.server_context)
except Exception as e:
log.error(f"Error in server loaded hook {e!r}", exc_info=True)
def run_unload_hook(self) -> None:
try:
self._application.on_server_unloaded(self.server_context)
except Exception as e:
log.error(f"Error in server unloaded hook {e!r}", exc_info=True)
async def create_session_if_needed(self, session_id: ID, request: HTTPServerRequest | None = None,
token: str | None = None) -> ServerSession:
# this is because empty session_ids would be "falsey" and
# potentially open up a way for clients to confuse us
if len(session_id) == 0:
raise ProtocolError("Session ID must not be empty")
if session_id not in self._sessions and \
session_id not in self._pending_sessions:
future = self._pending_sessions[session_id] = gen.Future()
doc = Document()
session_context = BokehSessionContext(session_id,
self.server_context,
doc,
logout_url=self._logout_url)
if request is not None:
payload = get_token_payload(token) if token else {}
if ('cookies' in payload and 'headers' in payload
and 'Cookie' not in payload['headers']):
# Restore Cookie header from cookies dictionary
payload['headers']['Cookie'] = '; '.join([
f'{k}={v}' for k, v in payload['cookies'].items()
])
# using private attr so users only have access to a read-only property
session_context._request = _RequestProxy(request,
arguments=payload.get('arguments'),
cookies=payload.get('cookies'),
headers=payload.get('headers'))
session_context._token = token
# expose the session context to the document
# use the _attribute to set the public property .session_context
doc._session_context = weakref.ref(session_context)
try:
await self._application.on_session_created(session_context)
except Exception as e:
log.error("Failed to run session creation hooks %r", e, exc_info=True)
self._application.initialize_document(doc)
session = ServerSession(session_id, doc, io_loop=self._loop, token=token)
del self._pending_sessions[session_id]
self._sessions[session_id] = session
session_context._set_session(session)
self._session_contexts[session_id] = session_context
# notify anyone waiting on the pending session
future.set_result(session)
if session_id in self._pending_sessions:
# another create_session_if_needed is working on
# creating this session
session = await self._pending_sessions[session_id]
else:
session = self._sessions[session_id]
return session
def get_session(self, session_id: ID) -> ServerSession:
if session_id in self._sessions:
session = self._sessions[session_id]
return session
else:
raise ProtocolError("No such session " + session_id)
async def _discard_session(self, session: ServerSession, should_discard: Callable[[ServerSession], bool]) -> None:
if session.connection_count > 0:
raise RuntimeError("Should not be discarding a session with open connections")
log.debug("Discarding session %r last in use %r milliseconds ago", session.id, session.milliseconds_since_last_unsubscribe)
session_context = self._session_contexts[session.id]
# session.destroy() wants the document lock so it can shut down the document
# callbacks.
def do_discard() -> None:
# while we awaited for the document lock, the discard-worthiness of the
# session may have changed.
# However, since we have the document lock, our own lock will cause the
# block count to be 1. If there's any other block count besides our own,
# we want to skip session destruction though.
if should_discard(session) and session.expiration_blocked_count == 1:
session.destroy()
del self._sessions[session.id]
del self._session_contexts[session.id]
log.trace(f"Session {session.id!r} was successfully discarded")
else:
log.warning(f"Session {session.id!r} was scheduled to discard but came back to life")
await session.with_document_locked(do_discard)
# session lifecycle hooks are supposed to be called outside the document lock,
# we only run these if we actually ended up destroying the session.
if session_context.destroyed:
try:
await self._application.on_session_destroyed(session_context)
except Exception as e:
log.error("Failed to run session destroy hooks %r", e, exc_info=True)
return None
async def _cleanup_sessions(self, unused_session_linger_milliseconds: int) -> None:
def should_discard_ignoring_block(session: ServerSession) -> bool:
return session.connection_count == 0 and \
(session.milliseconds_since_last_unsubscribe > unused_session_linger_milliseconds or \
session.expiration_requested)
# build a temp list to avoid trouble from self._sessions changes
to_discard: list[ServerSession] = []
for session in self._sessions.values():
if should_discard_ignoring_block(session) and not session.expiration_blocked:
to_discard.append(session)
if len(to_discard) > 0:
log.debug(f"Scheduling {len(to_discard)} sessions to discard")
# asynchronously reconsider each session
for session in to_discard:
if should_discard_ignoring_block(session) and not session.expiration_blocked:
await self._discard_session(session, should_discard_ignoring_block)
return None
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
|
ApplicationContext
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/genericType9.py
|
{
"start": 489,
"end": 531
}
|
class ____(ClassA[_T2]):
pass
|
ClassASub1
|
python
|
great-expectations__great_expectations
|
great_expectations/core/expectation_validation_result.py
|
{
"start": 18938,
"end": 30176
}
|
class ____(SerializableDictDot):
"""The result of a batch of data validated against an Expectation Suite.
When a Checkpoint is run, it produces an instance of this class. The primary property
of this class is `results`, which contains the individual ExpectationValidationResult
instances which were produced by the Checkpoint run.
ExpectationSuiteValidationResult.success will be True if all Expectations passed, otherwise it will be False.
ExpectationSuiteValidationResult.statistics contains information about the Checkpoint run.:
```python
{
"evaluated_expectations": 14,
"success_percent": 71.42857142857143,
"successful_expectations": 10,
"unsuccessful_expectations": 4
}
```
The meta property is an instance of ExpectationSuiteValidationResultMeta, and
contains information identifying the resources used during the Checkpoint run.:
```python
{
"active_batch_definition": {
"batch_identifiers": {},
"data_asset_name": "taxi_data_1.csv",
"data_connector_name": "default_inferred_data_connector_name",
"datasource_name": "pandas"
},
"batch_markers": {
"ge_load_time": "20220727T154327.630107Z",
"pandas_data_fingerprint": "c4f929e6d4fab001fedc9e075bf4b612"
},
"batch_spec": {
"path": "/Users/username/work/gx_example_projects/great_expectations/../data/taxi_data_1.csv"
},
"checkpoint_name": "single_validation_checkpoint",
"expectation_suite_name": "taxi_suite_1",
"great_expectations_version": "0.15.15",
"run_id": {
"run_name": "20220727-114327-my-run-name-template",
"run_time": "2022-07-27T11:43:27.625252+00:00"
},
"validation_time": "20220727T154327.701100Z"
}
```
Args:
success: Boolean indicating the success or failure of this collection of results, or None.
results: List of ExpectationValidationResults, or None.
suite_parameters: Dict of Suite Parameters used to produce these results, or None.
statistics: Dict of values describing the results.
meta: Instance of ExpectationSuiteValidationResult, a Dict of meta values, or None.
batch_id: A unique identifier for the batch of data that was validated.
result_url: A URL where the results are stored.
""" # noqa: E501 # FIXME CoP
def __init__( # noqa: PLR0913 # FIXME CoP
self,
success: bool,
results: list[ExpectationValidationResult],
suite_name: str,
suite_parameters: Optional[dict] = None,
statistics: Optional[dict] = None,
meta: Optional[ExpectationSuiteValidationResultMeta | dict] = None,
batch_id: Optional[str] = None,
result_url: Optional[str] = None,
id: Optional[str] = None,
) -> None:
self.success = success
self.results = results
self.suite_name = suite_name
self.suite_parameters = suite_parameters or {}
self.statistics = statistics or {}
meta = meta or {}
ensure_json_serializable(meta) # We require meta information to be serializable.
self.meta = meta
self.batch_id = batch_id
self.result_url = result_url
self.id = id
self._metrics: dict = {}
@property
def asset_name(self) -> str | None:
if "active_batch_definition" in self.meta:
return self.meta["active_batch_definition"].get("data_asset_name")
return None
def __eq__(self, other): # type: ignore[explicit-override] # FIXME
"""ExpectationSuiteValidationResult equality ignores instance identity, relying only on properties.""" # noqa: E501 # FIXME CoP
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return all(
(
self.success == other.success,
self.results == other.results,
self.suite_parameters == other.suite_parameters,
self.statistics == other.statistics,
self.meta == other.meta,
)
)
@override
def __hash__(self) -> int:
return hash(
(
self.success,
tuple(sorted(hash(result) for result in self.results)),
tuple(sorted(self.suite_parameters.items())) if self.suite_parameters else (),
tuple(sorted(self.statistics.items())) if self.statistics else (),
tuple(sorted(self.meta.items())) if self.meta else (),
)
)
def __repr__(self): # type: ignore[explicit-override] # FIXME
return json.dumps(self.to_json_dict(), indent=2)
@override
def __str__(self):
return json.dumps(self.to_json_dict(), indent=2)
@public_api
@override
def to_json_dict(self):
"""Returns a JSON-serializable dict representation of this ExpectationSuiteValidationResult.
Returns:
A JSON-serializable dict representation of this ExpectationSuiteValidationResult.
"""
myself = deepcopy(self)
# NOTE - JPC - 20191031: migrate to expectation-specific schemas that subclass result with properly-typed # noqa: E501 # FIXME CoP
# schemas to get serialization all-the-way down via dump
myself["suite_parameters"] = convert_to_json_serializable(myself["suite_parameters"])
myself["statistics"] = convert_to_json_serializable(myself["statistics"])
myself["meta"] = convert_to_json_serializable(myself["meta"])
myself["results"] = [convert_to_json_serializable(result) for result in myself["results"]]
myself = expectationSuiteValidationResultSchema.dump(myself)
return myself
def get_metric(self, metric_name, **kwargs): # noqa: C901 # too complex
metric_name_parts = metric_name.split(".")
metric_kwargs_id = get_metric_kwargs_id(metric_kwargs=kwargs)
metric_value = None
# Expose overall statistics
if metric_name_parts[0] == "statistics":
if len(metric_name_parts) == 2: # noqa: PLR2004 # FIXME CoP
return self.statistics.get(metric_name_parts[1])
else:
raise gx_exceptions.UnavailableMetricError(f"Unrecognized metric {metric_name}") # noqa: TRY003 # FIXME CoP
# Expose expectation-defined metrics
elif metric_name_parts[0].lower().startswith("expect_"):
# Check our cache first
if (metric_name, metric_kwargs_id) in self._metrics:
return self._metrics[(metric_name, metric_kwargs_id)]
else:
for result in self.results:
try:
if metric_name_parts[0] == result.expectation_config.type:
metric_value = result.get_metric(metric_name, **kwargs)
break
except gx_exceptions.UnavailableMetricError:
pass
if metric_value is not None:
self._metrics[(metric_name, metric_kwargs_id)] = metric_value
return metric_value
raise gx_exceptions.UnavailableMetricError( # noqa: TRY003 # FIXME CoP
f"Metric {metric_name} with metric_kwargs_id {metric_kwargs_id} is not available."
)
def get_failed_validation_results(
self,
) -> ExpectationSuiteValidationResult:
validation_results = [result for result in self.results if not result.success]
successful_expectations = sum(exp.success or False for exp in validation_results)
evaluated_expectations = len(validation_results)
unsuccessful_expectations = evaluated_expectations - successful_expectations
success = successful_expectations == evaluated_expectations
try:
success_percent = successful_expectations / evaluated_expectations * 100
except ZeroDivisionError:
success_percent = None
statistics = {
"successful_expectations": successful_expectations,
"evaluated_expectations": evaluated_expectations,
"unsuccessful_expectations": unsuccessful_expectations,
"success_percent": success_percent,
"success": success,
}
return ExpectationSuiteValidationResult(
success=success,
results=validation_results,
suite_name=self.suite_name,
suite_parameters=self.suite_parameters,
statistics=statistics,
meta=self.meta,
)
def describe_dict(self) -> dict:
return convert_to_json_serializable(
{
"success": self.success,
"statistics": self.statistics,
"expectations": [expectation.describe_dict() for expectation in self.results],
"result_url": self.result_url,
}
)
@public_api
def describe(self) -> str:
"""JSON string description of this ExpectationSuiteValidationResult"""
return json.dumps(self.describe_dict(), indent=4)
@public_api
def get_max_severity_failure(self) -> FailureSeverity | None:
"""Get the maximum severity failure for Expectations in the validation result.
Returns the maximum severity level among failed expectations. The severity levels
are ordered as: CRITICAL > WARNING > INFO. If no failures exist, returns None.
Returns:
The maximum severity failure level, or None if no failures exist.
"""
from great_expectations.expectations import metadata_types
if not self.results:
return None
max_severity = None
for result in self.results:
# Only consider failed expectations
if not result.success:
if result.expectation_config is None:
logger.error(
f"Expectation configuration is None for failed expectation "
f"(Validation Result ID: {self.id}). "
f"Skipping this result."
)
continue
severity_str = result.expectation_config.get("severity")
try:
severity = metadata_types.FailureSeverity(severity_str)
# Short-circuit: highest possible severity level found
if severity == metadata_types.FailureSeverity.CRITICAL:
return severity
if max_severity is None or severity > max_severity:
max_severity = severity
except ValueError:
logger.exception(
f"Invalid severity value '{severity_str}' found in expectation "
f"'{result.expectation_config.type}' "
f"(Validation Result ID: {self.id}). "
f"Skipping this result."
)
return max_severity
|
ExpectationSuiteValidationResult
|
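For context on the short-circuit comparison used in get_max_severity_failure above, here is a minimal, self-contained sketch of the same pattern; Severity and max_severity below are hypothetical stand-ins, not the real great_expectations FailureSeverity API.

from enum import IntEnum

class Severity(IntEnum):
    # Hypothetical stand-in for FailureSeverity; ordering is INFO < WARNING < CRITICAL.
    INFO = 1
    WARNING = 2
    CRITICAL = 3

def max_severity(raw_values):
    best = None
    for raw in raw_values:
        try:
            sev = Severity(raw)
        except ValueError:
            continue  # skip unknown values, as the method above logs and skips them
        if sev is Severity.CRITICAL:
            return sev  # nothing can outrank CRITICAL, so stop early
        if best is None or sev > best:
            best = sev
    return best

assert max_severity([1, 2, 1]) is Severity.WARNING
assert max_severity([2, 3, 1]) is Severity.CRITICAL
assert max_severity([]) is None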
python
|
spack__spack
|
lib/spack/spack/cmd/create.py
|
{
"start": 9453,
"end": 9802
}
|
class ____(PackageTemplate):
"""Provides appropriate override for Waf-based packages"""
base_class_name = "WafPackage"
package_class_import = "from spack_repo.builtin.build_systems.waf import WafPackage"
body_def = """\
# FIXME: Override configure_args(), build_args(),
# or install_args() if necessary."""
|
WafPackageTemplate
|
python
|
ray-project__ray
|
python/ray/air/util/data_batch_conversion.py
|
{
"start": 970,
"end": 12539
}
|
class ____(str, Enum):
"""Internal Dataset block format enum."""
PANDAS = "pandas"
ARROW = "arrow"
SIMPLE = "simple"
def _convert_batch_type_to_pandas(
data: DataBatchType,
cast_tensor_columns: bool = False,
) -> "pd.DataFrame":
"""Convert the provided data to a Pandas DataFrame.
Args:
data: Data of type DataBatchType
cast_tensor_columns: Whether tensor columns should be cast to NumPy ndarrays.
Returns:
A pandas Dataframe representation of the input data.
"""
pd = _lazy_import_pandas()
if isinstance(data, np.ndarray):
data = pd.DataFrame({TENSOR_COLUMN_NAME: _ndarray_to_column(data)})
elif isinstance(data, dict):
tensor_dict = {}
for col_name, col in data.items():
if not isinstance(col, np.ndarray):
raise ValueError(
"All values in the provided dict must be of type "
f"np.ndarray. Found type {type(col)} for key {col_name} "
f"instead."
)
tensor_dict[col_name] = _ndarray_to_column(col)
data = pd.DataFrame(tensor_dict)
elif pyarrow is not None and isinstance(data, pyarrow.Table):
data = data.to_pandas()
elif not isinstance(data, pd.DataFrame):
raise ValueError(
f"Received data of type: {type(data)}, but expected it to be one "
f"of {DataBatchType}"
)
if cast_tensor_columns:
data = _cast_tensor_columns_to_ndarrays(data)
return data
def _convert_pandas_to_batch_type(
data: "pd.DataFrame",
type: BatchFormat,
cast_tensor_columns: bool = False,
) -> DataBatchType:
"""Convert the provided Pandas dataframe to the provided ``type``.
Args:
data: A Pandas DataFrame
type: The specific ``BatchFormat`` to convert to.
cast_tensor_columns: Whether tensor columns should be cast to our tensor
extension type.
Returns:
The input data represented with the provided type.
"""
if cast_tensor_columns:
data = _cast_ndarray_columns_to_tensor_extension(data)
if type == BatchFormat.PANDAS:
return data
elif type == BatchFormat.NUMPY:
if len(data.columns) == 1:
# If just a single column, return as a single numpy array.
return data.iloc[:, 0].to_numpy()
else:
# Else return as a dict of numpy arrays.
output_dict = {}
for column in data:
output_dict[column] = data[column].to_numpy()
return output_dict
elif type == BatchFormat.ARROW:
if not pyarrow:
raise ValueError(
"Attempted to convert data to Pyarrow Table but Pyarrow "
"is not installed. Please do `pip install pyarrow` to "
"install Pyarrow."
)
return pyarrow.Table.from_pandas(data)
else:
raise ValueError(
f"Received type {type}, but expected it to be one of {DataBatchType}"
)
@Deprecated
def convert_batch_type_to_pandas(
data: DataBatchType,
cast_tensor_columns: bool = False,
):
"""Convert the provided data to a Pandas DataFrame.
This API is deprecated from Ray 2.4.
Args:
data: Data of type DataBatchType
cast_tensor_columns: Whether tensor columns should be cast to NumPy ndarrays.
Returns:
A pandas Dataframe representation of the input data.
"""
warnings.warn(
"`convert_batch_type_to_pandas` is deprecated as a developer API "
"starting from Ray 2.4. All batch format conversions should be "
"done manually instead of relying on this API.",
PendingDeprecationWarning,
)
return _convert_batch_type_to_pandas(
data=data, cast_tensor_columns=cast_tensor_columns
)
@Deprecated
def convert_pandas_to_batch_type(
data: "pd.DataFrame",
type: BatchFormat,
cast_tensor_columns: bool = False,
):
"""Convert the provided Pandas dataframe to the provided ``type``.
Args:
data: A Pandas DataFrame
type: The specific ``BatchFormat`` to convert to.
cast_tensor_columns: Whether tensor columns should be cast to our tensor
extension type.
Returns:
The input data represented with the provided type.
"""
warnings.warn(
"`convert_pandas_to_batch_type` is deprecated as a developer API "
"starting from Ray 2.4. All batch format conversions should be "
"done manually instead of relying on this API.",
PendingDeprecationWarning,
)
return _convert_pandas_to_batch_type(
data=data, type=type, cast_tensor_columns=cast_tensor_columns
)
def _convert_batch_type_to_numpy(
data: DataBatchType,
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""Convert the provided data to a NumPy ndarray or dict of ndarrays.
Args:
data: Data of type DataBatchType
Returns:
A numpy representation of the input data.
"""
pd = _lazy_import_pandas()
if isinstance(data, np.ndarray):
return data
elif isinstance(data, dict):
for col_name, col in data.items():
if not isinstance(col, np.ndarray):
raise ValueError(
"All values in the provided dict must be of type "
f"np.ndarray. Found type {type(col)} for key {col_name} "
f"instead."
)
return data
elif pyarrow is not None and isinstance(data, pyarrow.Table):
from ray.air.util.tensor_extensions.arrow import (
get_arrow_extension_fixed_shape_tensor_types,
)
from ray.data._internal.arrow_ops import transform_pyarrow
column_values_ndarrays = []
for col in data.columns:
# Combine columnar values arrays to make these contiguous
# (making them compatible with numpy format)
combined_array = transform_pyarrow.combine_chunked_array(col)
column_values_ndarrays.append(
transform_pyarrow.to_numpy(combined_array, zero_copy_only=False)
)
arrow_fixed_shape_tensor_types = get_arrow_extension_fixed_shape_tensor_types()
# NOTE: This branch is here for backwards-compatibility
if data.column_names == [TENSOR_COLUMN_NAME] and (
isinstance(data.schema.types[0], arrow_fixed_shape_tensor_types)
):
return column_values_ndarrays[0]
return dict(zip(data.column_names, column_values_ndarrays))
elif isinstance(data, pd.DataFrame):
return _convert_pandas_to_batch_type(data, BatchFormat.NUMPY)
else:
raise ValueError(
f"Received data of type: {type(data)}, but expected it to be one "
f"of {DataBatchType}"
)
def _ndarray_to_column(arr: np.ndarray) -> Union["pd.Series", List[np.ndarray]]:
"""Convert a NumPy ndarray into an appropriate column format for insertion into a
pandas DataFrame.
If conversion to a pandas Series fails (e.g. if the ndarray is multi-dimensional),
fall back to a list of NumPy ndarrays.
"""
pd = _lazy_import_pandas()
try:
# Try to convert to Series, falling back to a list conversion if this fails
# (e.g. if the ndarray is multi-dimensional).
return pd.Series(arr)
except ValueError:
return list(arr)
def _unwrap_ndarray_object_type_if_needed(arr: np.ndarray) -> np.ndarray:
"""Unwrap an object-dtyped NumPy ndarray containing ndarray pointers into a single
contiguous ndarray, if needed/possible.
"""
if arr.dtype.type is np.object_:
try:
# Try to convert the NumPy ndarray to a non-object dtype.
arr = np.array([np.asarray(v) for v in arr])
except Exception:
            # This may fail if the sub-ndarrays are of heterogeneous shape
pass
return arr
def _cast_ndarray_columns_to_tensor_extension(df: "pd.DataFrame") -> "pd.DataFrame":
"""
Cast all NumPy ndarray columns in df to our tensor extension type, TensorArray.
"""
pd = _lazy_import_pandas()
try:
SettingWithCopyWarning = pd.core.common.SettingWithCopyWarning
except AttributeError:
# SettingWithCopyWarning was moved to pd.errors in Pandas 1.5.0.
SettingWithCopyWarning = pd.errors.SettingWithCopyWarning
from ray.air.util.tensor_extensions.pandas import (
TensorArray,
column_needs_tensor_extension,
)
# Try to convert any ndarray columns to TensorArray columns.
# TODO(Clark): Once Pandas supports registering extension types for type
# inference on construction, implement as much for NumPy ndarrays and remove
# this. See https://github.com/pandas-dev/pandas/issues/41848
# TODO(Clark): Optimize this with propagated DataFrame metadata containing a list of
# column names containing tensor columns, to make this an O(# of tensor columns)
# check rather than the current O(# of columns) check.
for col_name, col in df.items():
if column_needs_tensor_extension(col):
try:
# Suppress Pandas warnings:
# https://github.com/ray-project/ray/issues/29270
                # We actually want in-place operations so we suppress this warning.
# https://stackoverflow.com/a/74193599
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=FutureWarning)
warnings.simplefilter("ignore", category=SettingWithCopyWarning)
df[col_name] = TensorArray(col)
except Exception as e:
raise ValueError(
f"Tried to cast column {col_name} to the TensorArray tensor "
"extension type but the conversion failed. To disable "
"automatic casting to this tensor extension, set "
"ctx = DataContext.get_current(); "
"ctx.enable_tensor_extension_casting = False."
) from e
return df
def _cast_tensor_columns_to_ndarrays(df: "pd.DataFrame") -> "pd.DataFrame":
"""Cast all tensor extension columns in df to NumPy ndarrays."""
pd = _lazy_import_pandas()
try:
SettingWithCopyWarning = pd.core.common.SettingWithCopyWarning
except AttributeError:
# SettingWithCopyWarning was moved to pd.errors in Pandas 1.5.0.
SettingWithCopyWarning = pd.errors.SettingWithCopyWarning
from ray.air.util.tensor_extensions.pandas import TensorDtype
# Try to convert any tensor extension columns to ndarray columns.
# TODO(Clark): Optimize this with propagated DataFrame metadata containing a list of
# column names containing tensor columns, to make this an O(# of tensor columns)
# check rather than the current O(# of columns) check.
for col_name, col in df.items():
if isinstance(col.dtype, TensorDtype):
# Suppress Pandas warnings:
# https://github.com/ray-project/ray/issues/29270
            # We actually want in-place operations so we suppress this warning.
# https://stackoverflow.com/a/74193599
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=FutureWarning)
warnings.simplefilter("ignore", category=SettingWithCopyWarning)
df[col_name] = list(col.to_numpy())
return df
|
BlockFormat
|
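As a rough illustration of what the conversions above amount to, the following plain pandas/NumPy sketch (no Ray, and without the tensor-extension casting) shows the round trip that _convert_batch_type_to_pandas and the BatchFormat.NUMPY branch of _convert_pandas_to_batch_type perform.

import numpy as np
import pandas as pd

# dict of ndarrays -> DataFrame (the dict branch above, minus tensor-column handling)
batch = {"x": np.arange(4), "y": np.arange(4) ** 2}
df = pd.DataFrame({name: col for name, col in batch.items()})

# DataFrame -> dict of ndarrays (the multi-column BatchFormat.NUMPY branch)
numpy_batch = {column: df[column].to_numpy() for column in df}
assert sorted(numpy_batch) == ["x", "y"]

# A single-column DataFrame comes back as a bare ndarray rather than a dict.
single = pd.DataFrame({"x": np.arange(4)})
assert single.iloc[:, 0].to_numpy().shape == (4,)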
python
|
tensorflow__tensorflow
|
tensorflow/python/data/kernel_tests/rebatch_test.py
|
{
"start": 1335,
"end": 14481
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
##############################################################################
# The following tests exercise our static computation of output_shapes.
##############################################################################
@combinations.generate(test_base.default_test_combinations())
def testShapeInferenceNotAllBatchSizesEqual(self):
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=True)
rebatched_dataset = dataset.rebatch(batch_size=[2, 1, 1])
expected_shapes = [[None]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testShapeInferenceInputBatchDimDivisible(self, drop_remainder):
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=True)
rebatched_dataset = dataset.rebatch(
batch_size=[2, 2], drop_remainder=drop_remainder)
expected_shapes = [[2]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
@combinations.generate(
combinations.times(test_base.default_test_combinations()))
def testShapeInferenceInputBatchDimUnknown(self):
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=False)
rebatched_dataset = dataset.rebatch(batch_size=[2, 2], drop_remainder=False)
expected_shapes = [[None]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
@combinations.generate(
combinations.times(test_base.default_test_combinations()))
def testShapeInferenceInputBatchDimUnknownWithDropRemainder(self):
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=False)
rebatched_dataset = dataset.rebatch(batch_size=[2, 2], drop_remainder=True)
expected_shapes = [[2]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
@combinations.generate(
combinations.times(test_base.default_test_combinations()))
def testShapeInferenceInputBatchDimIndivisible(self):
dataset = dataset_ops.Dataset.range(10).batch(5, drop_remainder=True)
rebatched_dataset = dataset.rebatch(batch_size=[2, 2], drop_remainder=False)
expected_shapes = [[None]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
@combinations.generate(
combinations.times(test_base.default_test_combinations()))
def testShapeInferenceInputBatchDimIndivisibleWithDropRemainder(self):
dataset = dataset_ops.Dataset.range(10).batch(5, drop_remainder=True)
rebatched_dataset = dataset.rebatch(batch_size=[2, 2], drop_remainder=True)
expected_shapes = [[2]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
##############################################################################
# The following tests check `tf.data.Dataset.rebatch`'s output.
##############################################################################
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testBasic(self, drop_remainder):
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=True)
rebatched_dataset = dataset.rebatch(batch_size=[2, 2],
drop_remainder=drop_remainder)
expected_shapes = [[2]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
expected_output = [[0, 1], [2, 3], [4, 5], [6, 7]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations()))
def testPartialBatch(self):
dataset = dataset_ops.Dataset.range(5).batch(4, drop_remainder=False)
rebatched_dataset = dataset.rebatch(batch_size=[2, 2], drop_remainder=False)
expected_shapes = [[None]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
expected_output = [[0, 1], [2, 3], [4]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations()))
def testPartialBatchWithDropRemainder(self):
dataset = dataset_ops.Dataset.range(5).batch(4, drop_remainder=False)
rebatched_dataset = dataset.rebatch(batch_size=[2, 2], drop_remainder=True)
expected_shapes = [[2]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
expected_output = [[0, 1], [2, 3]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testBatchSizeGreaterThanOriginal(self, drop_remainder):
dataset = dataset_ops.Dataset.range(12).batch(4, drop_remainder=False)
rebatched_dataset = dataset.rebatch(batch_size=[6],
drop_remainder=drop_remainder)
expected_output = [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
batch_size=[2, 3, 4], drop_remainder=[True, False]
),
)
)
def testBatchSizeEqualToOriginal(self, batch_size, drop_remainder):
# `drop_remainder` needs to be `False` in `rebatch` call
# so that the remainder batch is preserved.
#
# For example:
    # d = range(3).batch(2, drop_remainder=False)
# d2 = d.rebatch(2, drop_remainder=True)
# d becomes [[0, 1], [2]] and d2 becomes [[0, 1]],
# which is a mismatch we do not want.
dataset = dataset_ops.Dataset.range(11).batch(
batch_size, drop_remainder=drop_remainder
)
expected_output = self.getDatasetOutput(dataset)
rebatched_dataset = dataset.rebatch(
batch_size=batch_size, drop_remainder=False
)
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False]),
)
)
def testEmptySplits(self, drop_remainder):
# It's possible for splits to be empty if the batch size is smaller than
# the number of replicas. Here, we use an example with batch_size == 4
# and num_replicas == 5.
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=True)
rebatched_dataset = dataset.rebatch(batch_size=[1, 1, 1, 1, 0],
drop_remainder=drop_remainder)
expected_shapes = [[None]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
expected_output = [[0], [1], [2], [3], [], [4], [5], [6], [7], []]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testEmptyFirstSplits(self, drop_remainder):
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=True)
rebatched_dataset = dataset.rebatch(batch_size=[0, 1],
drop_remainder=drop_remainder)
expected_shapes = [[None]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
# We have an extra element at the end because if the desired batch size is
# zero, then we never read any inputs from the input_dataset at all, so we
# will keep producing empty outputs until we reach a non zero desired batch
# size split.
expected_output = [[], [0], [], [1], [], [2], [], [3], [], [4], [], [5], [],
[6], [], [7], []]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testEmptyLastSplits(self, drop_remainder):
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=True)
rebatched_dataset = dataset.rebatch(batch_size=[1, 0],
drop_remainder=drop_remainder)
expected_shapes = [[None]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
expected_output = [[0], [], [1], [], [2], [], [3], [], [4], [], [5], [],
[6], [], [7], []]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testEmptyTensors(self, drop_remainder):
"""Tests empty tensors case.
Args:
drop_remainder: whether to drop the remainder.
The implementation of rebatch might move the input data.
This test ensures the empty buffer is handled correctly.
"""
new_batch_size = 4
dataset = dataset_ops.Dataset.range(8)
dataset = dataset.map(lambda x: array_ops.reshape((), (5, 0)))
dataset = dataset.batch(2)
rebatched_dataset = dataset.rebatch(
batch_size=new_batch_size, drop_remainder=drop_remainder
)
expected_output = [
array_ops.reshape((), (new_batch_size, 5, 0))
for _ in range(8 // new_batch_size)
]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False]),
)
)
def testScalarBatchSizeInput(self, drop_remainder):
dataset = dataset_ops.Dataset.range(8).batch(4, drop_remainder=True)
rebatched_dataset = dataset.rebatch(batch_size=2,
drop_remainder=drop_remainder)
expected_shapes = [[2]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
expected_output = [[0, 1], [2, 3], [4, 5], [6, 7]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testMultipleBatches(self):
dataset = dataset_ops.Dataset.range(16).batch(
2, drop_remainder=True).batch(
4, drop_remainder=True)
self.assertEqual([[4, 2]], _flat_shapes(dataset))
rebatched_dataset = dataset.rebatch([2, 2])
self.assertEqual([[2, 2]], _flat_shapes(rebatched_dataset))
# Each element is a list of 2 elements where each element is a list of 2.
expected_output = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]],
[[12, 13], [14, 15]]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testNestedDictionaryOutput(self):
def map_fn(x):
return {"a": x, "b": {"c": x + 1}}
dataset = dataset_ops.Dataset.range(8).map(map_fn).batch(
4, drop_remainder=True)
rebatched_dataset = dataset.rebatch([2, 2])
self.assertEqual([[2], [2]], _flat_shapes(rebatched_dataset))
expected_output = [{
"a": [0, 1],
"b": {
"c": [1, 2]
}
}, {
"a": [2, 3],
"b": {
"c": [3, 4]
}
}, {
"a": [4, 5],
"b": {
"c": [5, 6]
}
}, {
"a": [6, 7],
"b": {
"c": [7, 8]
}
}]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testRaggedDataset(self, drop_remainder):
# Set up a dataset that produces ragged tensors with a static batch size.
dataset = dataset_ops.Dataset.from_tensor_slices(
ragged_tensor.RaggedTensor.from_row_lengths(
list(range(10)), [1, 2, 3, 4]))
# The map changes the internal representation of the ragged tensor.
# This test will fail if we don't normalize the tensor representation.
dataset = dataset.batch(4, drop_remainder=True).map(lambda x: x)
rebatched_dataset = dataset.rebatch(batch_size=[2, 2])
expected_output = [
ragged_tensor.RaggedTensor.from_row_lengths(list(range(3)), [1, 2]),
ragged_tensor.RaggedTensor.from_row_lengths(list(range(3, 10)), [3, 4]),
]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testNoneDataset(self):
# Some datasets, e.g. datasets with None tensors, have components without
# output shapes. Test that this doesn't break rebatching shape inference
# logic.
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.map(lambda x: (x, None))
dataset = dataset.batch(4, drop_remainder=True)
_ = dataset.rebatch(batch_size=[2, 2])
|
RebatchTest
|
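The splitting behaviour these tests pin down can be illustrated without tf.data; the pure-Python sketch below is my own simplification (it ignores the empty-split and per-replica cases) and reproduces the outputs of testBasic, testPartialBatch and testPartialBatchWithDropRemainder.

def rebatch(batches, batch_size, drop_remainder=False):
    # Flatten the incoming batches, then re-chunk at the new batch size.
    flat = [x for batch in batches for x in batch]
    out = [flat[i:i + batch_size] for i in range(0, len(flat), batch_size)]
    if drop_remainder and out and len(out[-1]) < batch_size:
        out.pop()  # drop a short final batch, mirroring drop_remainder=True
    return out

assert rebatch([[0, 1, 2, 3], [4, 5, 6, 7]], 2) == [[0, 1], [2, 3], [4, 5], [6, 7]]
assert rebatch([[0, 1, 2, 3], [4]], 2) == [[0, 1], [2, 3], [4]]
assert rebatch([[0, 1, 2, 3], [4]], 2, drop_remainder=True) == [[0, 1], [2, 3]]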
python
|
getsentry__sentry
|
src/sentry/notifications/platform/types.py
|
{
"start": 7314,
"end": 8267
}
|
class ____[T: NotificationData](abc.ABC):
category: NotificationCategory
"""
The category that a notification belongs to. This will be used to determine which settings a
user needs to modify to manage receipt of these notifications (if applicable).
"""
example_data: T
"""
The example data for this notification.
"""
@abc.abstractmethod
def render(self, data: T) -> NotificationRenderedTemplate:
"""
Produce a rendered template given the notification data. Usually, this will involve
formatting the data into user-friendly strings of text.
"""
...
def render_example(self) -> NotificationRenderedTemplate:
"""
Used to produce a debugging example rendered template for this notification. This
implementation should be pure, and not populate with any live data.
"""
return self.render(data=self.example_data)
|
NotificationTemplate
|
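To make the render/render_example contract concrete, here is a self-contained analogue of the pattern; GreetingData, Rendered and GreetingTemplate are hypothetical stand-ins, not Sentry's NotificationData or NotificationRenderedTemplate types.

import abc
from dataclasses import dataclass

@dataclass
class GreetingData:
    username: str

@dataclass
class Rendered:
    subject: str
    body: str

class Template(abc.ABC):
    example_data: GreetingData

    @abc.abstractmethod
    def render(self, data: GreetingData) -> Rendered: ...

    def render_example(self) -> Rendered:
        # Pure: renders only the canned example data, never live data.
        return self.render(data=self.example_data)

class GreetingTemplate(Template):
    example_data = GreetingData(username="example-user")

    def render(self, data: GreetingData) -> Rendered:
        return Rendered(subject="Welcome", body=f"Hello, {data.username}!")

assert GreetingTemplate().render_example().body == "Hello, example-user!"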
python
|
huggingface__transformers
|
src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py
|
{
"start": 3076,
"end": 6522
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.num_experts = config.num_experts
self.intermediate_size = config.moe_intermediate_size
self.hidden_size = config.hidden_size
self.expert_dim = self.intermediate_size
self.gate_up_proj = nn.Parameter(torch.zeros(self.num_experts, self.hidden_size, 2 * self.expert_dim))
self.down_proj = nn.Parameter(torch.empty((self.num_experts, self.expert_dim, self.hidden_size)))
self.act_fn = ACT2FN[config.hidden_act]
def forward(
self, hidden_states: torch.Tensor, routing_weights: torch.Tensor, router_indices: torch.Tensor
) -> torch.Tensor:
"""
        When training, it is more efficient to just loop over the experts and compute the output for each expert
        as otherwise the memory would explode.
        For inference, we can sacrifice some memory and compute the output for all experts at once by repeating the inputs.
Args:
hidden_states (torch.Tensor): (batch_size * token_num, hidden_size)
routing_weights (torch.Tensor): (batch_size * token_num, num_experts)
router_indices (torch.Tensor): (batch_size * token_num, top_k)
Returns:
torch.Tensor
"""
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size) # (num_tokens, hidden_size)
if self.training:
next_states = torch.zeros_like(hidden_states, dtype=hidden_states.dtype, device=hidden_states.device)
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(router_indices, num_classes=self.num_experts)
expert_mask = expert_mask.permute(2, 1, 0)
# we sum on the top_k and on the sequence length to get which experts
# are hit this time around
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit[:]:
with torch.no_grad():
_, token_idx = torch.where(expert_mask[expert_idx[0]])
current_state = hidden_states[token_idx]
gate_up = current_state @ self.gate_up_proj[expert_idx]
gate, up = gate_up.chunk(2, dim=-1)
gated_output = up * self.act_fn(gate)
out = gated_output @ self.down_proj[expert_idx]
weighted_output = out[0] * routing_weights[token_idx, expert_idx, None]
next_states.index_add_(0, token_idx, weighted_output.to(hidden_states.dtype))
next_states = next_states.view(batch_size, -1, self.hidden_size)
else:
hidden_states = hidden_states.repeat(self.num_experts, 1)
hidden_states = hidden_states.view(self.num_experts, -1, self.hidden_size)
gate_up = torch.bmm(hidden_states, self.gate_up_proj)
gate, up = gate_up.chunk(2, dim=-1) # not supported for DTensors
next_states = torch.bmm((up * self.act_fn(gate)), self.down_proj)
next_states = next_states.reshape(self.num_experts, batch_size, -1, self.hidden_size)
next_states = (
next_states * routing_weights.transpose(0, 1).view(self.num_experts, batch_size, -1)[..., None]
)
next_states = next_states.sum(dim=0)
return next_states
|
Qwen3VLMoeTextExperts
|
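A rough NumPy sketch of the inference-path math above: run every token through every expert, weight each expert's output by the routing weights, and sum. The shapes are tiny and each "expert" is a single matrix, so this only illustrates the combination step, not the gate/up/down projections.

import numpy as np

num_experts, tokens, hidden = 2, 3, 4
rng = np.random.default_rng(0)
x = rng.normal(size=(tokens, hidden))
expert_w = rng.normal(size=(num_experts, hidden, hidden))    # one weight matrix per expert
routing = rng.dirichlet(np.ones(num_experts), size=tokens)   # (tokens, num_experts), rows sum to 1

per_expert = np.einsum("th,ehk->etk", x, expert_w)           # every token through every expert
combined = (per_expert * routing.T[:, :, None]).sum(axis=0)  # weight and sum over experts
assert combined.shape == (tokens, hidden)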
python
|
kamyu104__LeetCode-Solutions
|
Python/all-paths-from-source-to-target.py
|
{
"start": 142,
"end": 669
}
|
class ____(object):
def allPathsSourceTarget(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[List[int]]
"""
def dfs(graph, curr, path, result):
if curr == len(graph)-1:
result.append(path[:])
return
for node in graph[curr]:
path.append(node)
dfs(graph, node, path, result)
path.pop()
result = []
dfs(graph, 0, [0], result)
return result
|
Solution
|
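A quick usage sketch, assuming the masked class above is named Solution as the target column indicates; the graph below is the standard example for this problem.

graph = [[1, 2], [3], [3], []]                 # node 0 -> 1,2; nodes 1,2 -> 3; node 3 is the target
print(Solution().allPathsSourceTarget(graph))  # [[0, 1, 3], [0, 2, 3]]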
python
|
nedbat__coveragepy
|
tests/test_cmdline.py
|
{
"start": 858,
"end": 7379
}
|
class ____(CoverageTest):
"""Tests of execution paths through the command line interpreter."""
run_in_temp_dir = False
# Make a dict mapping function names to the default values that cmdline.py
# uses when calling the function.
_defaults = mock.Mock()
_defaults.Coverage().annotate(
directory=None,
ignore_errors=None,
include=None,
omit=None,
morfs=[],
contexts=None,
)
_defaults.Coverage().html_report(
directory=None,
ignore_errors=None,
include=None,
omit=None,
morfs=[],
skip_covered=None,
show_contexts=None,
title=None,
contexts=None,
skip_empty=None,
precision=None,
)
_defaults.Coverage().report(
ignore_errors=None,
include=None,
omit=None,
morfs=[],
show_missing=None,
skip_covered=None,
contexts=None,
skip_empty=None,
precision=None,
sort=None,
output_format=None,
)
_defaults.Coverage().xml_report(
ignore_errors=None,
include=None,
omit=None,
morfs=[],
outfile=None,
contexts=None,
skip_empty=None,
)
_defaults.Coverage().json_report(
ignore_errors=None,
include=None,
omit=None,
morfs=[],
outfile=None,
contexts=None,
pretty_print=None,
show_contexts=None,
)
_defaults.Coverage().lcov_report(
ignore_errors=None,
include=None,
omit=None,
morfs=[],
outfile=None,
contexts=None,
)
_defaults.Coverage(
data_file=DEFAULT_DATAFILE,
cover_pylib=None,
data_suffix=None,
timid=None,
branch=None,
config_file=True,
source=None,
include=None,
omit=None,
debug=None,
concurrency=None,
check_preimported=True,
context=None,
messages=True,
)
DEFAULT_KWARGS = {name: kw for name, _, kw in _defaults.mock_calls}
def model_object(self) -> mock.Mock:
"""Return a Mock suitable for use in CoverageScript."""
mk = mock.Mock()
cov = mk.Coverage.return_value
# The mock needs options.
mk.config = CoverageConfig()
cov.get_option = mk.config.get_option
cov.set_option = mk.config.set_option
# Get the type right for the result of reporting.
cov.report.return_value = 50.0
cov.html_report.return_value = 50.0
cov.xml_report.return_value = 50.0
cov.json_report.return_value = 50.0
cov.lcov_report.return_value = 50.0
return mk
# Global names in cmdline.py that will be mocked during the tests.
MOCK_GLOBALS = ["Coverage", "PyRunner", "show_help"]
def mock_command_line(
self,
args: str,
options: Mapping[str, TConfigValueIn] | None = None,
) -> tuple[mock.Mock, int]:
"""Run `args` through the command line, with a Mock.
`options` is a dict of names and values to pass to `set_option`.
Returns the Mock it used and the status code returned.
"""
mk = self.model_object()
if options is not None:
for name, value in options.items():
mk.config.set_option(name, value)
patchers = [
mock.patch("coverage.cmdline." + name, getattr(mk, name)) for name in self.MOCK_GLOBALS
]
for patcher in patchers:
patcher.start()
try:
ret = command_line(args)
finally:
for patcher in patchers:
patcher.stop()
return mk, ret
def cmd_executes(
self,
args: str,
code: str,
ret: int = OK,
options: Mapping[str, TConfigValueIn] | None = None,
) -> None:
"""Assert that the `args` end up executing the sequence in `code`."""
called, status = self.mock_command_line(args, options=options)
assert status == ret, f"Wrong status: got {status!r}, wanted {ret!r}"
# Remove all indentation, and execute with mock globals
code = textwrap.dedent(code)
expected = self.model_object()
globs = {n: getattr(expected, n) for n in self.MOCK_GLOBALS}
code_obj = compile(code, "<code>", "exec", dont_inherit=True)
eval(code_obj, globs, {}) # pylint: disable=eval-used
# Many of our functions take a lot of arguments, and cmdline.py
# calls them with many. But most of them are just the defaults, which
# we don't want to have to repeat in all tests. For each call, apply
# the defaults. This lets the tests just mention the interesting ones.
for name, _, kwargs in expected.mock_calls:
for k, v in self.DEFAULT_KWARGS.get(name, {}).items():
kwargs.setdefault(k, v)
self.assert_same_mock_calls(expected, called)
def cmd_executes_same(self, args1: str, args2: str) -> None:
"""Assert that the `args1` executes the same as `args2`."""
m1, r1 = self.mock_command_line(args1)
m2, r2 = self.mock_command_line(args2)
assert r1 == r2
self.assert_same_mock_calls(m1, m2)
def assert_same_mock_calls(self, m1: mock.Mock, m2: mock.Mock) -> None:
"""Assert that `m1.mock_calls` and `m2.mock_calls` are the same."""
# Use a real equality comparison, but if it fails, use a nicer assert
# so we can tell what's going on. We have to use the real == first due
# to CmdOptionParser.__eq__
if m1.mock_calls != m2.mock_calls:
pp1 = pprint.pformat(m1.mock_calls)
pp2 = pprint.pformat(m2.mock_calls)
assert pp1 + "\n" == pp2 + "\n"
def cmd_help(
self,
args: str,
help_msg: str | None = None,
topic: str | None = None,
ret: int = ERR,
) -> None:
"""Run a command line, and check that it prints the right help.
Only the last function call in the mock is checked, which should be the
help message that we want to see.
"""
mk, status = self.mock_command_line(args)
assert status == ret, f"Wrong status: got {status}, wanted {ret}"
if help_msg:
assert mk.mock_calls[-1] == ("show_help", (help_msg,), {})
else:
assert mk.mock_calls[-1] == ("show_help", (), {"topic": topic})
|
BaseCmdLineTest
|
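The core trick behind cmd_executes above (exec the expected source against a mock and compare the recorded calls) can be shown in isolation; this is a minimal sketch, not coverage.py's actual test harness.

from unittest import mock

expected, actual = mock.Mock(), mock.Mock()

# Drive one mock with real calls...
actual.Coverage(data_file=".coverage").start()

# ...and the other by exec-ing the "expected" source with the mock injected as a global.
exec("Coverage(data_file='.coverage').start()", {"Coverage": expected.Coverage})

assert expected.mock_calls == actual.mock_calls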
python
|
ApeWorX__ape
|
src/ape/cli/paramtype.py
|
{
"start": 91,
"end": 397
}
|
class ____(click.Path):
"""
This class exists to encourage the consistent usage
of ``pathlib.Path`` for path_type.
"""
def __init__(self, *args, **kwargs):
if "path_type" not in kwargs:
kwargs["path_type"] = PathLibPath
super().__init__(*args, **kwargs)
|
Path
|
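A short usage sketch of the wrapper above with click; the import below assumes it is importable from ape.cli.paramtype (inferred from this row's path field), which may differ in practice.

import pathlib

import click

from ape.cli.paramtype import Path  # hypothetical import path, inferred from the file path above

@click.command()
@click.argument("config", type=Path(exists=False))
def show(config):
    # Because path_type defaults to pathlib.Path, click hands the callback a Path object.
    assert isinstance(config, pathlib.Path)
    click.echo(config.resolve())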
python
|
ray-project__ray
|
python/ray/llm/_internal/batch/stages/vllm_engine_stage.py
|
{
"start": 13586,
"end": 21604
}
|
class ____(StatefulStageUDF):
def __init__(
self,
data_column: str,
expected_input_keys: List[str],
batch_size: int,
max_concurrent_batches: int,
model: str,
engine_kwargs: Dict[str, Any],
task_type: vLLMTaskType = vLLMTaskType.GENERATE,
max_pending_requests: Optional[int] = None,
dynamic_lora_loading_path: Optional[str] = None,
):
"""
Initialize the vLLMEngineStageUDF.
Args:
data_column: The data column name.
expected_input_keys: The expected input keys of the stage.
model: The model to use for the vLLM engine.
engine_kwargs: The kwargs to pass to the vLLM engine.
task_type: The task to use for the vLLM engine (e.g., "generate", "embed", etc).
max_pending_requests: The maximum number of pending requests. If None,
it will be set to 1.1 * max_num_seqs * pipeline_parallel_size.
dynamic_lora_loading_path: The path to the dynamic LoRA adapter. It is expected
to hold subfolders each for a different lora checkpoint.
"""
super().__init__(data_column, expected_input_keys)
self.model = model
# Setup vLLM engine kwargs.
self.task_type = task_type
self.engine_kwargs = self.normalize_engine_kwargs(task_type, engine_kwargs)
# Set up the max pending requests.
pp_size = self.engine_kwargs.get("pipeline_parallel_size", 1)
self.max_pending_requests = max_pending_requests or math.ceil(
self.engine_kwargs.get("max_num_seqs", 128) * pp_size * 1.1
)
if self.max_pending_requests > 0:
logger.info("Max pending requests is set to %d", self.max_pending_requests)
exclude_safetensors = (
self.engine_kwargs.get("load_format") in STREAMING_LOAD_FORMATS
)
if exclude_safetensors:
logger.info("Excluding safetensors files when downloading the model.")
download_model = NodeModelDownloadable.EXCLUDE_SAFETENSORS
else:
logger.info("Downloading model and tokenizer.")
download_model = NodeModelDownloadable.MODEL_AND_TOKENIZER
# Download the model if needed.
model_source = download_model_files(
model_id=self.model,
mirror_config=None,
download_model=download_model,
download_extra_files=False,
)
# If we are using streaming load formats, we need to pass in self.model which is a remote cloud storage path.
source = model_source if not exclude_safetensors else self.model
self.llm = vLLMEngineWrapper(
model=self.model,
model_source=source,
idx_in_batch_column=self.IDX_IN_BATCH_COLUMN,
enable_log_requests=False,
max_pending_requests=self.max_pending_requests,
dynamic_lora_loading_path=dynamic_lora_loading_path,
**self.engine_kwargs,
)
max_num_seqs = self.llm.get_scheduler_config().max_num_seqs
if batch_size * max_concurrent_batches < max_num_seqs:
logger.warning(
f"The product of batch_size ({batch_size}) and "
f"max_concurrent_batches ({max_concurrent_batches}) is too small "
"to saturate vLLM engine. This may lead to suboptimal "
"throughput. Please increase max_concurrent_batches to at least "
f"{math.ceil(max_num_seqs / batch_size)}."
)
def normalize_engine_kwargs(
self,
task_type: vLLMTaskType,
engine_kwargs: Dict[str, Any],
) -> Dict[str, Any]:
"""
Normalize the engine kwargs.
Args:
task_type: The task to use for the vLLM engine (e.g., "generate", "embed", etc).
engine_kwargs: The kwargs to normalize.
Returns:
The normalized kwargs.
"""
# Remove model from engine kwargs if set.
model = engine_kwargs.pop("model", None)
if model is not None and model != self.model:
logger.warning(
"The model set in engine kwargs (%s) is different from the "
"stage (%s). Please remove 'model' from engine kwargs.",
model,
self.model,
)
# Override the task if it is different from the stage.
task = vLLMTaskType(engine_kwargs.get("task", task_type))
if task != task_type:
logger.warning(
"The task set in engine kwargs (%s) is different from the "
"stage (%s). Overriding the task in engine kwargs to %s.",
task,
task_type,
task_type,
)
engine_kwargs["task"] = task_type
return engine_kwargs
async def udf(self, batch: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]:
"""Run the vLLM engine.
Args:
batch: A list of rows to run the vLLM engine on.
Returns:
The response of the vLLM engine.
"""
batch_uuid = uuid.uuid4()
batch_start_time = time.perf_counter()
tasks = [asyncio.create_task(self.llm.generate_async(row)) for row in batch]
for resp in asyncio.as_completed(tasks):
request, output, time_taken_llm = await resp
yield {
**output,
"request_id": request.request_id,
self.IDX_IN_BATCH_COLUMN: request.idx_in_batch,
"batch_uuid": batch_uuid.hex,
"time_taken_llm": time_taken_llm,
"params": str(request.params),
}
batch_time_taken = time.perf_counter() - batch_start_time
        # TODO: Add metrics to the UDF wrapper so that we don't need
        # a timer in UDFs anymore.
logger.info(
"[vLLM] Elapsed time for batch %s with size %d: %s",
batch_uuid.hex,
len(batch),
batch_time_taken,
)
# Log engine stats after each batch is done conditioned on the flag
# passed to the engine.
if not self.engine_kwargs.get("disable_log_stats", False):
await self.llm.engine.do_log_stats()
def __del__(self):
if hasattr(self, "llm"):
# Kill the engine processes.
self.llm.shutdown()
def _ray_scheduling_strategy_fn(
num_bundles_per_replica: int,
accelerator_type: Optional[str] = None,
placement_group_config: Optional[Dict[str, Any]] = None,
):
"""Create a Ray scheduling strategy for the engine.
Args:
num_bundles_per_replica: The number of device bundles per
engine replica.
accelerator_type: The accelerator type. If None, the
accelerator_type label will not be set.
placement_group_config: The custom placement group configuration.
If None, we use the default placement group configuration.
Returns:
The Ray scheduling strategy.
"""
def _get_bundle() -> Dict[str, float]:
# GPU bundles
bundle = {"GPU": 1, "CPU": 1}
# Accelerator type
if accelerator_type:
bundle[f"accelerator_type:{accelerator_type}"] = 0.001
return bundle
if placement_group_config:
placement_group_config = copy.deepcopy(placement_group_config)
if accelerator_type:
for bundle in placement_group_config["bundles"]:
bundle[f"accelerator_type:{accelerator_type}"] = 0.001
pg = ray.util.placement_group(**placement_group_config)
else:
pg = ray.util.placement_group(
[_get_bundle()] * num_bundles_per_replica,
strategy="PACK",
)
return dict(
scheduling_strategy=PlacementGroupSchedulingStrategy(
pg, placement_group_capture_child_tasks=True
)
)
|
vLLMEngineStageUDF
|
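For reference, the default bundle list that _ray_scheduling_strategy_fn builds (when no placement_group_config is supplied) looks like this; the pure-Python sketch below just reproduces the dict construction, with "A10G" as an illustrative accelerator type.

def default_bundles(num_bundles_per_replica, accelerator_type=None):
    bundle = {"GPU": 1, "CPU": 1}
    if accelerator_type:
        bundle[f"accelerator_type:{accelerator_type}"] = 0.001
    return [dict(bundle) for _ in range(num_bundles_per_replica)]

print(default_bundles(2, "A10G"))
# [{'GPU': 1, 'CPU': 1, 'accelerator_type:A10G': 0.001},
#  {'GPU': 1, 'CPU': 1, 'accelerator_type:A10G': 0.001}]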
python
|
lazyprogrammer__machine_learning_examples
|
rl3/a2c/atari_wrappers.py
|
{
"start": 186,
"end": 1222
}
|
class ____(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
|
NoopResetEnv
|
python
|
python__mypy
|
mypy/modulefinder.py
|
{
"start": 4442,
"end": 5354
}
|
class ____:
"""A single source file."""
def __init__(
self,
path: str | None,
module: str | None,
text: str | None = None,
base_dir: str | None = None,
followed: bool = False,
) -> None:
self.path = path # File where it's found (e.g. 'xxx/yyy/foo/bar.py')
self.module = module or "__main__" # Module name (e.g. 'foo.bar')
self.text = text # Source code, if initially supplied, else None
self.base_dir = base_dir # Directory where the package is rooted (e.g. 'xxx/yyy')
self.followed = followed # Was this found by following imports?
def __repr__(self) -> str:
return (
"BuildSource(path={!r}, module={!r}, has_text={}, base_dir={!r}, followed={})".format(
self.path, self.module, self.text is not None, self.base_dir, self.followed
)
)
|
BuildSource
|
python
|
pytorch__pytorch
|
test/distributed/test_c10d_functional_native.py
|
{
"start": 21220,
"end": 23409
}
|
class ____(TestCase):
"""
Native functional collectives have some interesting interactions with
PyProcessGroup due to Python reference counting and pybind trampoline
classes with C++ types. This validates that PyProcessGroup and PyWork
aren't getting prematurely freed.
"""
def test_wait_tensor(self) -> None:
wait_called = False
class MyWork(dist.Work):
def wait(self, _):
nonlocal wait_called
wait_called = True
# check registration and implicit unregistration
tensor = torch.rand(2, 2)
work = MyWork()
torch._C._distributed_c10d._register_work(tensor, work)
        # Force GC collection of the MyWork object; if we're not doing correct
        # reference counting, we'll deadlock in wait_tensor.
del work
gc.collect()
torch.ops._c10d_functional.wait_tensor(tensor)
self.assertTrue(wait_called)
def test_collectives(self) -> None:
dummy_init_pg()
pg = ProcessGroupDummy().register()
x = torch.rand(2, 2)
x = funcol.all_reduce(x, "sum", group=pg)
gc.collect()
self.assertEqual(pg.dels, 0)
x.wait()
self.assertEqual(pg.waits, 1)
self.assertEqual(pg.dels, 1)
x = torch.rand(2, 2)
x = funcol.broadcast(x, 0, group=pg)
gc.collect()
self.assertEqual(pg.dels, 1)
x.wait()
self.assertEqual(pg.waits, 2)
self.assertEqual(pg.dels, 2)
x = torch.rand(2, 2)
x = funcol.all_gather_tensor(x, 0, group=pg)
gc.collect()
self.assertEqual(pg.dels, 2)
x.wait()
self.assertEqual(pg.waits, 3)
self.assertEqual(pg.dels, 3)
x = torch.rand(2, 2)
x = funcol.reduce_scatter_tensor(x, "sum", 0, group=pg)
gc.collect()
self.assertEqual(pg.dels, 3)
x.wait()
self.assertEqual(pg.waits, 4)
self.assertEqual(pg.dels, 4)
def find_buffer_assignments(code):
pattern = r"buf(\d+) = empty_strided_"
matches = re.finditer(pattern, code)
return tuple(f"buf{match.group(1)}" for match in matches)
|
PyWorkTest
|
python
|
nedbat__coveragepy
|
tests/test_arcs.py
|
{
"start": 56232,
"end": 58229
}
|
class ____(CoverageTest):
"""Tests of lambdas"""
def test_multiline_lambda(self) -> None:
self.check_coverage(
"""\
fn = (lambda x:
x + 2
)
assert fn(4) == 6
""",
branchz="",
branchz_missing="",
)
self.check_coverage(
"""\
fn = \\
(
lambda
x:
x
+
8
)
assert fn(10) == 18
""",
branchz="",
branchz_missing="",
)
def test_unused_lambdas_are_confusing_bug_90(self) -> None:
self.check_coverage(
"""\
a = 1
fn = lambda x: x
b = 3
""",
branchz="",
branchz_missing="",
)
def test_raise_with_lambda_looks_like_partial_branch(self) -> None:
self.check_coverage(
"""\
def ouch(fn):
2/0
a = b = c = d = 3
try:
a = ouch(lambda: 5)
if a:
b = 7
except ZeroDivisionError:
c = 9
d = 10
assert (a, b, c, d) == (3, 3, 9, 10)
""",
lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
missing="6-7",
branchz="67 6A",
branchz_missing="67 6A",
)
def test_lambda_in_dict(self) -> None:
self.check_coverage(
"""\
x = 1
x = 2
d = {
4: lambda: [],
5: lambda: [],
6: lambda: [],
7: lambda: [],
}
for k, v in d.items(): # 10
if k & 1:
v()
""",
branchz="AB A. BA BC",
branchz_missing="",
)
|
LambdaArcTest
|
python
|
sympy__sympy
|
sympy/physics/biomechanics/tests/test_musculotendon.py
|
{
"start": 1520,
"end": 2434
}
|
class ____:
@staticmethod
def test_rigid_tendon_member():
assert MusculotendonFormulation(0) == 0
assert MusculotendonFormulation.RIGID_TENDON == 0
@staticmethod
def test_fiber_length_explicit_member():
assert MusculotendonFormulation(1) == 1
assert MusculotendonFormulation.FIBER_LENGTH_EXPLICIT == 1
@staticmethod
def test_tendon_force_explicit_member():
assert MusculotendonFormulation(2) == 2
assert MusculotendonFormulation.TENDON_FORCE_EXPLICIT == 2
@staticmethod
def test_fiber_length_implicit_member():
assert MusculotendonFormulation(3) == 3
assert MusculotendonFormulation.FIBER_LENGTH_IMPLICIT == 3
@staticmethod
def test_tendon_force_implicit_member():
assert MusculotendonFormulation(4) == 4
assert MusculotendonFormulation.TENDON_FORCE_IMPLICIT == 4
|
TestMusculotendonFormulation
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py
|
{
"start": 1698,
"end": 1799
}
|
class ____(Generic[T], Generic[S]):
var: T
var: S
# These cases are not handled
|
TooManyGenerics
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_views.py
|
{
"start": 6440,
"end": 7585
}
|
class ____(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username="eric", password="test")
self.pip = Project.objects.get(slug="pip")
self.pip.privacy_level = PUBLIC
self.pip.external_builds_privacy_level = PUBLIC
self.pip.save()
self.pip.versions.update(privacy_level=PUBLIC)
def test_build_list_includes_external_versions(self):
external_version = get(
Version,
project=self.pip,
active=True,
type=EXTERNAL,
privacy_level=PUBLIC,
)
external_version_build = get(Build, project=self.pip, version=external_version)
self.pip.privacy_level = PUBLIC
self.pip.save()
response = self.client.get(
reverse("builds_project_list", args=[self.pip.slug]),
)
self.assertEqual(response.status_code, 200)
self.assertIn(external_version_build, response.context["build_qs"])
@override_settings(
RTD_DEFAULT_FEATURES=dict(
[RTDProductFeature(type=TYPE_SEARCH_ANALYTICS, value=90).to_item()]
),
)
|
BuildViewTests
|
python
|
apache__airflow
|
airflow-core/src/airflow/executors/workloads.py
|
{
"start": 1863,
"end": 2786
}
|
class ____(BaseModel):
"""Schema for TaskInstance with minimal required fields needed for Executors and Task SDK."""
id: uuid.UUID
dag_version_id: uuid.UUID
task_id: str
dag_id: str
run_id: str
try_number: int
map_index: int = -1
pool_slots: int
queue: str
priority_weight: int
executor_config: dict | None = Field(default=None, exclude=True)
parent_context_carrier: dict | None = None
context_carrier: dict | None = None
    # TODO: Task-SDK: Can we replace TaskInstanceKey with just the uuid across the codebase?
@property
def key(self) -> TaskInstanceKey:
from airflow.models.taskinstancekey import TaskInstanceKey
return TaskInstanceKey(
dag_id=self.dag_id,
task_id=self.task_id,
run_id=self.run_id,
try_number=self.try_number,
map_index=self.map_index,
)
|
TaskInstance
|
python
|
falconry__falcon
|
examples/ws_tutorial/ws_tutorial/app.py
|
{
"start": 2052,
"end": 2159
}
|
class ____:
async def on_get(self, req, resp):
resp.media = {'hello': 'world'}
|
HelloWorldResource
|
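A minimal wiring sketch for the resource above, assuming the masked class is HelloWorldResource as the target column indicates and that Falcon's ASGI flavor is used; the /hello route is illustrative, and the app would be served by any ASGI server (e.g. uvicorn).

import falcon.asgi

app = falcon.asgi.App()
app.add_route('/hello', HelloWorldResource())
# GET /hello -> {"hello": "world"}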
python
|
pydata__xarray
|
xarray/tests/test_backends.py
|
{
"start": 193917,
"end": 203150
}
|
class ____(NetCDF4Base):
engine: T_NetcdfEngine = "h5netcdf"
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
yield backends.H5NetCDFStore.open(tmp_file, "w")
@pytest.mark.skipif(
has_h5netcdf_1_4_0_or_above, reason="only valid for h5netcdf < 1.4.0"
)
def test_complex(self) -> None:
expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))})
save_kwargs = {"invalid_netcdf": True}
with pytest.warns(UserWarning, match="You are writing invalid netcdf features"):
with self.roundtrip(expected, save_kwargs=save_kwargs) as actual:
assert_equal(expected, actual)
@pytest.mark.skipif(
has_h5netcdf_1_4_0_or_above, reason="only valid for h5netcdf < 1.4.0"
)
@pytest.mark.parametrize("invalid_netcdf", [None, False])
def test_complex_error(self, invalid_netcdf) -> None:
import h5netcdf
expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))})
save_kwargs = {"invalid_netcdf": invalid_netcdf}
with pytest.raises(
h5netcdf.CompatibilityError, match="are not a supported NetCDF feature"
):
with self.roundtrip(expected, save_kwargs=save_kwargs) as actual:
assert_equal(expected, actual)
def test_numpy_bool_(self) -> None:
        # h5netcdf loads booleans as numpy.bool_; this type needs to be supported
# when writing invalid_netcdf datasets in order to support a roundtrip
expected = Dataset({"x": ("y", np.ones(5), {"numpy_bool": np.bool_(True)})})
save_kwargs = {"invalid_netcdf": True}
with pytest.warns(UserWarning, match="You are writing invalid netcdf features"):
with self.roundtrip(expected, save_kwargs=save_kwargs) as actual:
assert_identical(expected, actual)
def test_cross_engine_read_write_netcdf4(self) -> None:
# Drop dim3, because its labels include strings. These appear to be
# not properly read with python-netCDF4, which converts them into
# unicode instead of leaving them as bytes.
data = create_test_data().drop_vars("dim3")
data.attrs["foo"] = "bar"
valid_engines: list[T_NetcdfEngine] = ["netcdf4", "h5netcdf"]
for write_engine in valid_engines:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file, engine=read_engine) as actual:
assert_identical(data, actual)
def test_read_byte_attrs_as_unicode(self) -> None:
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, "w") as nc:
nc.foo = b"bar"
with open_dataset(tmp_file) as actual:
expected = Dataset(attrs={"foo": "bar"})
assert_identical(expected, actual)
def test_compression_encoding_h5py(self) -> None:
ENCODINGS: tuple[tuple[dict[str, Any], dict[str, Any]], ...] = (
# h5py style compression with gzip codec will be converted to
# NetCDF4-Python style on round-trip
(
{"compression": "gzip", "compression_opts": 9},
{"zlib": True, "complevel": 9},
),
# What can't be expressed in NetCDF4-Python style is
# round-tripped unaltered
(
{"compression": "lzf", "compression_opts": None},
{"compression": "lzf", "compression_opts": None},
),
# If both styles are used together, h5py format takes precedence
(
{
"compression": "lzf",
"compression_opts": None,
"zlib": True,
"complevel": 9,
},
{"compression": "lzf", "compression_opts": None},
),
)
for compr_in, compr_out in ENCODINGS:
data = create_test_data()
compr_common = {
"chunksizes": (5, 5),
"fletcher32": True,
"shuffle": True,
"original_shape": data.var2.shape,
}
data["var2"].encoding.update(compr_in)
data["var2"].encoding.update(compr_common)
compr_out.update(compr_common)
data["scalar"] = ("scalar_dim", np.array([2.0]))
data["scalar"] = data["scalar"][0]
with self.roundtrip(data) as actual:
for k, v in compr_out.items():
assert v == actual["var2"].encoding[k]
def test_compression_check_encoding_h5py(self) -> None:
"""When mismatched h5py and NetCDF4-Python encodings are expressed
in to_netcdf(encoding=...), must raise ValueError
"""
data = Dataset({"x": ("y", np.arange(10.0))})
# Compatible encodings are graciously supported
with create_tmp_file() as tmp_file:
data.to_netcdf(
tmp_file,
engine="h5netcdf",
encoding={
"x": {
"compression": "gzip",
"zlib": True,
"compression_opts": 6,
"complevel": 6,
}
},
)
with open_dataset(tmp_file, engine="h5netcdf") as actual:
assert actual.x.encoding["zlib"] is True
assert actual.x.encoding["complevel"] == 6
# Incompatible encodings cause a crash
with create_tmp_file() as tmp_file:
with pytest.raises(
ValueError, match=r"'zlib' and 'compression' encodings mismatch"
):
data.to_netcdf(
tmp_file,
engine="h5netcdf",
encoding={"x": {"compression": "lzf", "zlib": True}},
)
with create_tmp_file() as tmp_file:
with pytest.raises(
ValueError,
match=r"'complevel' and 'compression_opts' encodings mismatch",
):
data.to_netcdf(
tmp_file,
engine="h5netcdf",
encoding={
"x": {
"compression": "gzip",
"compression_opts": 5,
"complevel": 6,
}
},
)
def test_dump_encodings_h5py(self) -> None:
# regression test for #709
ds = Dataset({"x": ("y", np.arange(10.0))})
kwargs = {"encoding": {"x": {"compression": "gzip", "compression_opts": 9}}}
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert actual.x.encoding["zlib"]
assert actual.x.encoding["complevel"] == 9
kwargs = {"encoding": {"x": {"compression": "lzf", "compression_opts": None}}}
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert actual.x.encoding["compression"] == "lzf"
assert actual.x.encoding["compression_opts"] is None
def test_decode_utf8_warning(self) -> None:
title = b"\xc3"
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, "w") as f:
f.title = title
with pytest.warns(UnicodeWarning, match="returning bytes undecoded") as w:
ds = xr.load_dataset(tmp_file, engine="h5netcdf")
assert ds.title == title
assert "attribute 'title' of h5netcdf object '/'" in str(w[0].message)
def test_byte_attrs(self, byte_attrs_dataset: dict[str, Any]) -> None:
with pytest.raises(ValueError, match=byte_attrs_dataset["h5netcdf_error"]):
super().test_byte_attrs(byte_attrs_dataset)
@requires_h5netcdf_1_4_0_or_above
def test_roundtrip_complex(self):
expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))})
with self.roundtrip(expected) as actual:
assert_equal(expected, actual)
def test_phony_dims_warning(self) -> None:
import h5py
foo_data = np.arange(125).reshape(5, 5, 5)
bar_data = np.arange(625).reshape(25, 5, 5)
var = {"foo1": foo_data, "foo2": bar_data, "foo3": foo_data, "foo4": bar_data}
with create_tmp_file() as tmp_file:
with h5py.File(tmp_file, "w") as f:
grps = ["bar", "baz"]
for grp in grps:
fx = f.create_group(grp)
for k, v in var.items():
fx.create_dataset(k, data=v)
with pytest.warns(UserWarning, match="The 'phony_dims' kwarg"):
with xr.open_dataset(tmp_file, engine="h5netcdf", group="bar") as ds:
assert ds.sizes == {
"phony_dim_0": 5,
"phony_dim_1": 5,
"phony_dim_2": 5,
"phony_dim_3": 25,
}
@requires_h5netcdf
@requires_netCDF4
|
TestH5NetCDFData
|
python
|
pytorch__pytorch
|
test/distributed/checkpoint/_experimental/test_checkpoint_process.py
|
{
"start": 5111,
"end": 5888
}
|
class ____(TestCase):
"""Test CheckpointProcessConfig configuration."""
def test_default_options(self) -> None:
"""Test default CheckpointProcessConfig."""
options = CheckpointProcessConfig()
# Test default values
self.assertEqual(options.subprocess_init_timeout_secs, 30)
self.assertEqual(options.subprocess_shutdown_timeout_secs, 60)
def test_custom_options(self) -> None:
"""Test custom CheckpointProcessConfig."""
options = CheckpointProcessConfig(
subprocess_init_timeout_secs=10, subprocess_shutdown_timeout_secs=30
)
self.assertEqual(options.subprocess_init_timeout_secs, 10)
self.assertEqual(options.subprocess_shutdown_timeout_secs, 30)
|
TestCheckpointProcessConfig
|
python
|
pytorch__pytorch
|
torch/__init__.py
|
{
"start": 71643,
"end": 71867
}
|
class ____(_LegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal(stacklevel=3)
return self._dtype
@classproperty
def _dtype(self):
return torch.short
|
ShortStorage
|
python
|
zostera__django-bootstrap4
|
tests/test_buttons.py
|
{
"start": 174,
"end": 1546
}
|
class ____(TestCase):
def test_button(self):
self.assertEqual(render_button("button"), '<button class="btn btn-primary">button</button>')
def test_button_with_illegal_type(self):
try:
self.assertEqual(
render_button("button", button_type="illegal"), '<button class="btn btn-primary">button</button>'
)
except BootstrapError as e:
self.assertEqual(
str(e),
'Parameter "button_type" should be "submit", "reset", "button", "link" or empty ("illegal" given).',
)
def test_bootstrap_button_tag(self):
res = render_template_with_form("{% bootstrap_button 'button' size='lg' %}")
self.assertEqual(res.strip(), '<button class="btn btn-primary btn-lg">button</button>')
link_button = '<a class="btn btn-primary btn-lg" href="#" role="button">button</a>'
res = render_template_with_form("{% bootstrap_button 'button' size='lg' href='#' %}")
self.assertIn(res.strip(), link_button)
res = render_template_with_form("{% bootstrap_button 'button' button_type='link' size='lg' href='#' %}")
self.assertIn(res.strip(), link_button)
with self.assertRaises(BootstrapError):
res = render_template_with_form("{% bootstrap_button 'button' button_type='button' href='#' %}")
|
ButtonsTest
|
python
|
huggingface__transformers
|
tests/models/led/test_modeling_led.py
|
{
"start": 20316,
"end": 96283
}
|
class ____(unittest.TestCase):
"""All the below results were obtained with the original checkpoints and code
base from https://github.com/allenai/longformer.
IMPORTANT: Note that the original checkpoints include a `position_embeddings` "hack"
and have to be cut to have the correct shape.
See: https://github.com/huggingface/transformers/pull/9278#issue-544709661.
"""
@cached_property
def default_tokenizer(self):
return LEDTokenizer.from_pretrained("allenai/led-base-16384")
def test_inference_no_head(self):
model = LEDModel.from_pretrained("allenai/led-base-16384").to(torch_device)
# change to intended input
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
with torch.no_grad():
output = model(**inputs_dict).last_hidden_state
expected_shape = torch.Size((1, 1024, 768))
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = torch.tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], device=torch_device
)
torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_inference_head(self):
model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").to(torch_device)
# change to intended input
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
with torch.no_grad():
output = model(**inputs_dict, use_cache=False).logits
expected_shape = torch.Size((1, 1024, model.config.vocab_size))
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = torch.tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], device=torch_device
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_seq_to_seq_generation(self):
# this test requires 16GB of RAM
hf = LEDForConditionalGeneration.from_pretrained("allenai/led-large-16384-arxiv").to(torch_device)
tok = LEDTokenizer.from_pretrained("allenai/led-large-16384-arxiv")
ARTICLE_LEP = r"""the lep experiments at the resonance of @xmath1-boson have tested the standard model ( sm ) at quantum level , measuring the @xmath1-decay into fermion pairs with an accuracy of one part in ten thousands . the good agreement of the lep data with the sm predictions have severely constrained the behavior of new physics at the @xmath1-pole . taking these achievements into account one can imagine that the physics of @xmath1-boson will again play the central role in the frontier of particle physics if the next generation @xmath1 factory comes true with the generated @xmath1 events several orders of magnitude higher than that of the lep . this factory can be realized in the gigaz option of the international linear collider ( ilc)@xcite . the ilc is a proposed electron - positron collider with tunable energy ranging from @xmath12 to @xmath13 and polarized beams in its first phase , and the gigaz option corresponds to its operation on top of the resonance of @xmath1 boson by adding a bypass to its main beam line . given the high luminosity , @xmath14 , and the cross section at the resonance of @xmath1 boson , @xmath15 , about @xmath16 @xmath1 events can be generated in an operational year of @xmath17 of gigaz , which implies that the expected sensitivity to the branching ratio of @xmath1-decay can be improved from @xmath18 at the lep to @xmath19 at the gigaz@xcite . in light of this , the @xmath1-boson properties , especially its exotic or rare decays which are widely believed to be sensitive to new physics , should be investigated comprehensively to evaluate their potential in probing new physics . among the rare @xmath1-decays , the flavor changing ( fc ) processes were most extensively studied to explore the flavor texture in new physics @xcite , and it was found that , although these processes are severely suppressed in the sm , their branching ratios in new physics models can be greatly enhanced to @xmath19 for lepton flavor violation decays @xcite and @xmath20 for quark flavor violation decays @xcite . besides the fc processes , the @xmath1-decay into light higgs boson(s ) is another type of rare process that was widely studied , e.g. the decay @xmath21 ( @xmath22 ) with the particle @xmath0 denoting a light higgs boson was studied in @xcite , the decay @xmath23 was studied in the two higgs doublet model ( 2hdm)@xcite and the minimal supersymmetric standard model ( mssm)@xcite , and the decay @xmath4 was studied in a model independent way @xcite , in 2hdm@xcite and also in mssm@xcite . these studies indicate that , in contrast with the kinematic forbidden of these decays in the sm , the rates of these decays can be as large as @xmath18 in new physics models , which lie within the expected sensitivity of the gigaz . in this work , we extend the previous studies of these decays to some new models and investigate these decays altogether . we are motivated by some recent studies on the singlet extension of the mssm , such as the next - to - minimal supersymmetric standard model ( nmssm ) @xcite and the nearly minimal supersymmetric standard model ( nmssm ) @xcite , where a light cp - odd higgs boson @xmath0 with singlet - dominant component may naturally arise from the spontaneous breaking of some approximate global symmetry like @xmath24 or peccei - quuin symmetry @xcite . these non - minimal supersymmetric models can not only avoid the @xmath25-problem , but also alleviate the little hierarchy by having such a light higgs boson @xmath0 @xcite . 
we are also motivated by that , with the latest experiments , the properties of the light higgs boson are more stringently constrained than before . so it is worth updating the previous studies . so far there is no model - independent lower bound on the lightest higgs boson mass . in the sm , it must be heavier than @xmath26 gev , obtained from the null observation of the higgs boson at lep experiments . however , due to the more complex structure of the higgs sector in the extensions of the sm , this lower bound can be significantly relaxed according to recent studies , e.g. , for the cp - odd higgs boson @xmath0 we have @xmath27 gev in the nmssm @xcite , @xmath28 gev in the nmssm @xcite , and @xmath29 gev in the lepton - specific 2hdm ( l2hdm ) @xcite . with such a light cp - odd higgs boson , the z - decay into one or more @xmath0 is open up . noting that the decay @xmath30 is forbidden due to bose symmetry , we in this work study the rare @xmath1-decays @xmath6 ( @xmath22 ) , @xmath31 and @xmath4 in a comparative way for four models , namely the type - ii 2hdm@xcite , the l2hdm @xcite , the nmssm and the nmssm . in our study , we examine carefully the constraints on the light @xmath0 from many latest experimental results . this work is organized as follows . in sec . ii we briefly describe the four new physics models . in sec . iii we present the calculations of the rare @xmath1-decays . in sec . iv we list the constraints on the four new physics models . in sec . v we show the numerical results for the branching ratios of the rare @xmath1-decays in various models . finally , the conclusion is given in sec . as the most economical way , the sm utilizes one higgs doublet to break the electroweak symmetry . as a result , the sm predicts only one physical higgs boson with its properties totally determined by two free parameters . in new physics models , the higgs sector is usually extended by adding higgs doublets and/or singlets , and consequently , more physical higgs bosons are predicted along with more free parameters involved in . the general 2hdm contains two @xmath32 doublet higgs fields @xmath33 and @xmath34 , and with the assumption of cp - conserving , its scalar potential can be parameterized as@xcite : @xmath35,\end{aligned}\ ] ] where @xmath36 ( @xmath37 ) are free dimensionless parameters , and @xmath38 ( @xmath39 ) are the parameters with mass dimension . after the electroweak symmetry breaking , the spectrum of this higgs sector includes three massless goldstone modes , which become the longitudinal modes of @xmath40 and @xmath1 bosons , and five massive physical states : two cp - even higgs bosons @xmath41 and @xmath42 , one neutral cp - odd higgs particle @xmath0 and a pair of charged higgs bosons @xmath43 . noting the constraint @xmath44 with @xmath45 and @xmath46 denoting the vacuum expectation values ( vev ) of @xmath33 and @xmath34 respectively , we choose @xmath47 as the input parameters with @xmath48 , and @xmath49 being the mixing angle that diagonalizes the mass matrix of the cp - even higgs fields . the difference between the type - ii 2hdm and the l2hdm comes from the yukawa coupling of the higgs bosons to quark / lepton . in the type - ii 2hdm , one higgs doublet @xmath34 generates the masses of up - type quarks and the other doublet @xmath33 generates the masses of down - type quarks and charged leptons ; while in the l2hdm one higgs doublet @xmath33 couples only to leptons and the other doublet @xmath34 couples only to quarks . 
so the yukawa interactions of @xmath0 to fermions in these two models are given by @xcite @xmath50 with @xmath51 denoting generation index . obviously , in the type - ii 2hdm the @xmath52 coupling and the @xmath53 coupling can be simultaneously enhanced by @xmath54 , while in the l2hdm only the @xmath53 coupling is enhanced by @xmath55 . the structures of the nmssm and the nmssm are described by their superpotentials and corresponding soft - breaking terms , which are given by @xcite @xmath56 where @xmath57 is the superpotential of the mssm without the @xmath25 term , @xmath58 and @xmath59 are higgs doublet and singlet superfields with @xmath60 and @xmath61 being their scalar component respectively , @xmath62 , @xmath63 , @xmath64 , @xmath65 , @xmath66 and @xmath67 are soft breaking parameters , and @xmath68 and @xmath69 are coefficients of the higgs self interactions . with the superpotentials and the soft - breaking terms , one can get the higgs potentials of the nmssm and the nmssm respectively . like the 2hdm , the higgs bosons with same cp property will mix and the mass eigenstates are obtained by diagonalizing the corresponding mass matrices : @xmath70 where the fields on the right hands of the equations are component fields of @xmath71 , @xmath72 and @xmath61 defined by @xmath73 @xmath74 and @xmath75 are respectively the cp - even and cp - odd neutral higgs bosons , @xmath76 and @xmath77 are goldstone bosons eaten by @xmath1 and @xmath78 , and @xmath79 is the charged higgs boson . so both the nmssm and nmssm predict three cp - even higgs bosons , two cp - odd higgs bosons and one pair of charged higgs bosons . in general , the lighter cp - odd higgs @xmath0 in these model is the mixture of the singlet field @xmath80 and the doublet field combination , @xmath81 , i.e. @xmath82 and its couplings to down - type quarks are then proportional to @xmath83 . so for singlet dominated @xmath0 , @xmath84 is small and the couplings are suppressed . as a comparison , the interactions of @xmath0 with the squarks are given by@xcite @xmath85 i.e. the interaction does not vanish when @xmath86 approaches zero . just like the 2hdm where we use the vevs of the higgs fields as fundamental parameters , we choose @xmath68 , @xmath69 , @xmath87 , @xmath88 , @xmath66 and @xmath89 as input parameters for the nmssm@xcite and @xmath68 , @xmath54 , @xmath88 , @xmath65 , @xmath90 and @xmath91 as input parameters for the nmssm@xcite . about the nmssm and the nmssm , three points should be noted . the first is for the two models , there is no explicit @xmath92term , and the effective @xmath25 parameter ( @xmath93 ) is generated when the scalar component of @xmath59 develops a vev . the second is , the nmssm is actually same as the nmssm with @xmath94@xcite , because the tadpole terms @xmath95 and its soft breaking term @xmath96 in the nmssm do not induce any interactions , except for the tree - level higgs boson masses and the minimization conditions . and the last is despite of the similarities , the nmssm has its own peculiarity , which comes from its neutralino sector . in the basis @xmath97 , its neutralino mass matrix is given by @xcite @xmath98 where @xmath99 and @xmath100 are @xmath101 and @xmath102 gaugino masses respectively , @xmath103 , @xmath104 , @xmath105 and @xmath106 . 
after diagonalizing this matrix one can get the mass eigenstate of the lightest neutralino @xmath107 with mass taking the following form @xcite @xmath108 this expression implies that @xmath107 must be lighter than about @xmath109 gev for @xmath110 ( from lower bound on chargnio mass ) and @xmath111 ( perturbativity bound ) . like the other supersymmetric models , @xmath107 as the lightest sparticle acts as the dark matter in the universe , but due to its singlino - dominated nature , it is difficult to annihilate sufficiently to get the correct density in the current universe . so the relic density of @xmath107 plays a crucial way in selecting the model parameters . for example , as shown in @xcite , for @xmath112 , there is no way to get the correct relic density , and for the other cases , @xmath107 mainly annihilates by exchanging @xmath1 boson for @xmath113 , or by exchanging a light cp - odd higgs boson @xmath0 with mass satisfying the relation @xmath114 for @xmath115 . for the annihilation , @xmath54 and @xmath25 are required to be less than 10 and @xmath116 respectively because through eq.([mass - exp ] ) a large @xmath87 or @xmath25 will suppress @xmath117 to make the annihilation more difficult . the properties of the lightest cp - odd higgs boson @xmath0 , such as its mass and couplings , are also limited tightly since @xmath0 plays an important role in @xmath107 annihilation . the phenomenology of the nmssm is also rather special , and this was discussed in detail in @xcite . in the type - ii 2hdm , l2hdm , nmssm and nmssm , the rare @xmath1-decays @xmath118 ( @xmath22 ) , @xmath3 and @xmath4 may proceed by the feynman diagrams shown in fig.[fig1 ] , fig.[fig2 ] and fig.[fig3 ] respectively . for these diagrams , the intermediate state @xmath119 represents all possible cp - even higgs bosons in the corresponding model , i.e. @xmath41 and @xmath42 in type - ii 2hdm and l2hdm and @xmath41 , @xmath42 and @xmath120 in nmssm and nmssm . in order to take into account the possible resonance effects of @xmath119 in fig.[fig1](c ) for @xmath2 and fig.[fig3 ] ( a ) for @xmath11 , we have calculated all the decay modes of @xmath119 and properly included the width effect in its propagator . as to the decay @xmath121 , two points should be noted . one is , unlike the decays @xmath6 and @xmath11 , this process proceeds only through loops mediated by quarks / leptons in the type - ii 2hdm and l2hdm , and additionally by sparticles in the nmssm and nmssm . so in most cases its rate should be much smaller than the other two . the other is due to cp - invariance , loops mediated by squarks / sleptons give no contribution to the decay@xcite . in actual calculation , this is reflected by the fact that the coupling coefficient of @xmath122 differs from that of @xmath123 by a minus sign ( see eq.([asqsq ] ) ) , and as a result , the squark - mediated contributions to @xmath121 are completely canceled out . with regard to the rare decay @xmath11 , we have more explanations . in the lowest order , this decay proceeds by the diagram shown in fig.[fig3 ] ( a ) , and hence one may think that , as a rough estimate , it is enough to only consider the contributions from fig.[fig3](a ) . however , we note that in some cases of the type - ii 2hdm and l2hdm , due to the cancelation of the contributions from different @xmath119 in fig.[fig3 ] ( a ) and also due to the potentially largeness of @xmath124 couplings ( i.e. 
larger than the electroweak scale @xmath125 ) , the radiative correction from the higgs - mediated loops may dominate over the tree level contribution even when the tree level prediction of the rate , @xmath126 , exceeds @xmath20 . on the other hand , we find the contribution from quark / lepton - mediated loops can be safely neglected if @xmath127 in the type - ii 2hdm and the l2hdm . in the nmssm and the nmssm , besides the corrections from the higgs- and quark / lepton - mediated loops , loops involving sparticles such as squarks , charginos and neutralinos can also contribute to the decay . we numerically checked that the contributions from squarks and charginos can be safely neglected if @xmath127 . we also calculated part of potentially large neutralino correction ( note that there are totally about @xmath128 diagrams for such correction ! ) and found they can be neglected too . since considering all the radiative corrections will make our numerical calculation rather slow , we only include the most important correction , namely that from higgs - mediated loops , in presenting our results for the four models . one can intuitively understand the relative smallness of the sparticle contribution to @xmath11 as follows . first consider the squark contribution which is induced by the @xmath129 interaction ( @xmath130 denotes the squark in chirality state ) and the @xmath131 interaction through box diagrams . because the @xmath132 interaction conserves the chirality of the squarks while the @xmath133 interaction violates the chirality , to get non - zero contribution to @xmath11 from the squark loops , at least four chiral flippings are needed , with three of them provided by @xmath131 interaction and the rest provided by the left - right squark mixing . this means that , if one calculates the amplitude in the chirality basis with the mass insertion method , the amplitude is suppressed by the mixing factor @xmath134 with @xmath135 being the off diagonal element in squark mass matrix . next consider the chargino / neutralino contributions . since for a light @xmath0 , its doublet component , parameterized by @xmath84 in eq.([mixing ] ) , is usually small , the couplings of @xmath0 with the sparticles will never be tremendously large@xcite . so the chargino / neutralino contributions are not important too . in our calculation of the decays , we work in the mass eigenstates of sparticles instead of in the chirality basis . for the type - ii 2hdm and the l2hdm , we consider the following constraints @xcite : * theoretical constraints on @xmath136 from perturbativity , unitarity and requirements that the scalar potential is finit at large field values and contains no flat directions @xcite , which imply that @xmath137 * the constraints from the lep search for neutral higgs bosons . we compute the signals from the higgs - strahlung production @xmath138 ( @xmath139 ) with @xmath140 @xcite and from the associated production @xmath141 with @xmath142 @xcite , and compare them with the corresponding lep data which have been inputted into our code . we also consider the constraints from @xmath138 by looking for a peak of @xmath143 recoil mass distribution of @xmath1-boson @xcite and the constraint of @xmath144 mev when @xmath145 @xcite . + these constraints limit the quantities such as @xmath146 \times br ( h_i \to \bar{b } b ) $ ] on the @xmath147 plane with the the subscript @xmath148 denoting the coupling coefficient of the @xmath149 interaction . 
they also impose a model - dependent lower bound on @xmath150 , e.g. , @xmath151 for the type - ii 2hdm ( from our scan results ) , @xmath152 for the l2hdm@xcite , and @xmath153 for the nmssm @xcite . these bounds are significantly lower than that of the sm , i.e. @xmath154 , partially because in new physics models , unconventional decay modes of @xmath155 such as @xmath156 are open up . as to the nmssm , another specific reason for allowing a significantly lighter cp - even higgs boson is that the boson may be singlet - dominated in this model . + with regard to the lightest cp - odd higgs boson @xmath0 , we checked that there is no lower bound on its mass so long as the @xmath157 interaction is weak or @xmath155 is sufficiently heavy . * the constraints from the lep search for a light higgs boson via the yukawa process @xmath158 with @xmath22 and @xmath61 denoting a scalar @xcite . these constraints can limit the @xmath159 coupling versus @xmath160 in new physics models . * the constraints from the cleo - iii limit on @xmath161 and the latest babar limits on @xmath162 . these constraints will put very tight constraints on the @xmath163 coupling for @xmath164 . in our analysis , we use the results of fig.8 in the second paper of @xcite to excluded the unfavored points . * the constraints from @xmath165 couplings . since the higgs sector can give sizable higher order corrections to @xmath165 couplings , we calculate them to one loop level and require the corrected @xmath165 couplings to lie within the @xmath166 range of their fitted value . the sm predictions for the couplings at @xmath1-pole are given by @xmath167 and @xmath168 @xcite , and the fitted values are given by @xmath169 and @xmath170 , respectively@xcite . we adopt the formula in @xcite to the 2hdm in our calculation . * the constraints from @xmath171 leptonic decay . we require the new physics correction to the branching ratio @xmath172 to be in the range of @xmath173 @xcite . we use the formula in @xcite in our calculation . + about the constraints ( 5 ) and ( 6 ) , two points should be noted . one is all higgs bosons are involved in the constraints by entering the self energy of @xmath171 lepton , the @xmath174 vertex correction or the @xmath175 vertex correction , and also the box diagrams for @xmath176@xcite . since the yukawa couplings of the higgs bosons to @xmath171 lepton get enhanced by @xmath54 and so do the corrections , @xmath54 must be upper bounded for given spectrum of the higgs sector . generally speaking , the lighter @xmath0 is , the more tightly @xmath54 is limited@xcite . the other point is in the type - ii 2hdm , @xmath177 , b - physics observables as well as @xmath178 decays discussed above can constraint the model in a tighter way than the constraints ( 5 ) and ( 6 ) since the yukawa couplings of @xmath171 lepton and @xmath179 quark are simultaneously enhanced by @xmath54 . but for the l2hdm , because only the yukawa couplings of @xmath171 lepton get enhanced ( see eq.[yukawa ] ) , the constraints ( 5 ) and ( 6 ) are more important in limiting @xmath54 . * indirect constraints from the precision electroweak observables such as @xmath180 , @xmath181 and @xmath182 , or their combinations @xmath183 @xcite . we require @xmath184 to be compatible with the lep / sld data at @xmath185 confidence level@xcite . we also require new physics prediction of @xmath186 is within the @xmath187 range of its experimental value . 
the latest results for @xmath188 are @xmath189 ( measured value ) and @xmath190 ( sm prediction ) for @xmath191 gev @xcite . in our code , we adopt the formula for these observables presented in @xcite to the type - ii 2hdm and the l2hdm respectively . + in calculating @xmath180 , @xmath181 and @xmath182 , we note that these observables get dominant contributions from the self energies of the gauge bosons @xmath1 , @xmath192 and @xmath193 . since there is no @xmath194 coupling or @xmath195 coupling , @xmath0 must be associated with the other higgs bosons to contribute to the self energies . so by the uv convergence of these quantities , one can infer that , for the case of a light @xmath0 and @xmath196 , these quantities depend on the spectrum of the higgs sector in a way like @xmath197 at leading order , which implies that a light @xmath0 can still survive the constraints from the precision electroweak observables given the splitting between @xmath150 and @xmath198 is moderate@xcite . * the constraints from b physics observables such as the branching ratios for @xmath199 , @xmath200 and @xmath201 , and the mass differences @xmath202 and @xmath203 . we require their theoretical predications to agree with the corresponding experimental values at @xmath187 level . + in the type - ii 2hdm and the l2hdm , only the charged higgs boson contributes to these observables by loops , so one can expect that @xmath198 versus @xmath54 is to be limited . combined analysis of the limits in the type - ii 2hdm has been done by the ckmfitter group , and the lower bound of @xmath204 as a function of @xmath87 was given in fig.11 of @xcite . this analysis indicates that @xmath198 must be heavier than @xmath205 at @xmath185 c.l . regardless the value of @xmath54 . in this work , we use the results of fig.11 in @xcite to exclude the unfavored points . as for the l2hdm , b physics actually can not put any constraints@xcite because in this model the couplings of the charged higgs boson to quarks are proportional to @xmath206 and in the case of large @xmath54 which we are interested in , they are suppressed . in our analysis of the l2hdm , we impose the lep bound on @xmath198 , i.e. @xmath207@xcite . * the constraints from the muon anomalous magnetic moment @xmath208 . now both the theoretical prediction and the experimental measured value of @xmath208 have reached a remarkable precision , but a significant deviation still exists : @xmath209 @xcite . in the 2hdm , @xmath208 gets additional contributions from the one - loop diagrams induced by the higgs bosons and also from the two - loop barr - zee diagrams mediated by @xmath0 and @xmath155@xcite . if the higgs bosons are much heavier than @xmath25 lepton mass , the contributions from the barr - zee diagrams are more important , and to efficiently alleviate the discrepancy of @xmath208 , one needs a light @xmath0 along with its enhanced couplings to @xmath25 lepton and also to heavy fermions such as bottom quark and @xmath171 lepton to push up the effects of the barr - zee diagram@xcite . the cp - even higgs bosons are usually preferred to be heavy since their contributions to @xmath208 are negative . + in the type - ii 2hdm , because @xmath54 is tightly constrained by the process @xmath210 at the lep@xcite and the @xmath178 decay@xcite , the barr - zee diagram contribution is insufficient to enhance @xmath208 to @xmath187 range around its measured value@xcite . so in our analysis , we require the type - ii 2hdm to explain @xmath208 at @xmath211 level . 
while for the l2hdm , @xmath54 is less constrained compared with the type - ii 2hdm , and the barr - zee diagram involving the @xmath171-loop is capable to push up greatly the theoretical prediction of @xmath208@xcite . therefore , we require the l2hdm to explain the discrepancy at @xmath187 level . + unlike the other constraints discussed above , the @xmath208 constraint will put a two - sided bound on @xmath54 since on the one hand , it needs a large @xmath54 to enhance the barr - zee contribution , but on the other hand , too large @xmath54 will result in an unacceptable large @xmath208 . * since this paper concentrates on a light @xmath0 , the decay @xmath212 is open up with a possible large decay width . we require the width of any higgs boson to be smaller than its mass to avoid a too fat higgs boson@xcite . we checked that for the scenario characterized by @xmath213 , the coefficient of @xmath214 interaction is usually larger than the electroweak scale @xmath125 , and consequently a large decay width is resulted . for the nmssm and nmssm , the above constraints become more complicated because in these models , not only more higgs bosons are involved in , but also sparticles enter the constraints . so it is not easy to understand some of the constraints intuitively . take the process @xmath199 as an example . in the supersymmetric models , besides the charged higgs contribution , chargino loops , gluino loops as well as neutralino loops also contribute to the process@xcite , and depending on the susy parameters , any of these contributions may become dominated over or be canceled by other contributions . as a result , although the charged higgs affects the process in the same way as that in the type - ii 2hdm , charged higgs as light as @xmath215 is still allowed even for @xmath216@xcite . since among the constraints , @xmath208 is rather peculiar in that it needs new physics to explain the discrepancy between @xmath217 and @xmath218 , we discuss more about its dependence on susy parameters . in the nmssm and the nmssm , @xmath208 receives contributions from higgs loops and neutralino / chargino loops . for the higgs contribution , it is quite similar to that of the type - ii 2hdm except that more higgs bosons are involved in@xcite . for the neutralino / chargino contribution , in the light bino limit ( i.e. @xmath219 ) , it can be approximated by@xcite @xmath220 for @xmath221 with @xmath222 being smuon mass . so combining the two contributions together , one can learn that a light @xmath0 along with large @xmath54 and/or light smuon with moderate @xmath87 are favored to dilute the discrepancy . because more parameters are involved in the constraints on the supersymmetric models , we consider following additional constraints to further limit their parameters : * direct bounds on sparticle masses from the lep1 , the lep2 and the tevatron experiments @xcite . * the lep1 bound on invisible z decay @xmath223 ; the lep2 bound on neutralino production @xmath224 and @xmath225@xcite . * dark matter constraints from the wmap relic density 0.0975 @xmath226 0.1213 @xcite . note that among the above constraints , the constraint ( 2 ) on higgs sector and the constraint ( c ) on neutralino sector are very important . this is because in the supersymmetric models , the sm - like higgs is upper bounded by about @xmath227 at tree level and by about @xmath228 at loop level , and that the relic density restricts the lsp annihilation cross section in a certain narrow range . 
in our analysis of the nmssm , we calculate the constraints ( 3 ) and ( 5 - 7 ) by ourselves and utilize the code nmssmtools @xcite to implement the rest constraints . we also extend nmssmtools to the nmssm to implement the constraints . for the extension , the most difficult thing we faced is how to adapt the code micromegas@xcite to the nmssm case . we solve this problem by noting the following facts : * as we mentioned before , the nmssm is actually same as the nmssm with the trilinear singlet term setting to zero . so we can utilize the model file of the nmssm as the input of the micromegas and set @xmath229 . * since in the nmssm , the lsp is too light to annihilate into higgs pairs , there is no need to reconstruct the effective higgs potential to calculate precisely the annihilation channel @xmath230 with @xmath61 denoting any of higgs bosons@xcite . we thank the authors of the nmssmtools for helpful discussion on this issue when we finish such extension@xcite . with the above constraints , we perform four independent random scans over the parameter space of the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively . we vary the parameters in following ranges : @xmath231 for the type - ii 2hdm , @xmath232 for the l2hdm , @xmath233 for the nmssm , and @xmath234 for the nmssm . in performing the scans , we note that for the nmssm and the nmssm , some constraints also rely on the gaugino masses and the soft breaking parameters in the squark sector and the slepton sector . since these parameters affect little on the properties of @xmath0 , we fix them to reduce the number of free parameters in our scan . for the squark sector , we adopt the @xmath235 scenario which assumes that the soft mass parameters for the third generation squarks are degenerate : @xmath236 800 gev , and that the trilinear couplings of the third generation squarks are also degenerate , @xmath237 with @xmath238 . for the slepton sector , we assume all the soft - breaking masses and trilinear parameters to be 100 gev . this setting is necessary for the nmssm since this model is difficult to explain the muon anomalous moment at @xmath239 level for heavy sleptons@xcite . finally , we assume the grand unification relation @xmath240 for the gaugino masses with @xmath241 being fine structure constants of the different gauge group . with large number of random points in the scans , we finally get about @xmath242 , @xmath243 , @xmath244 and @xmath242 samples for the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively which survive the constraints and satisfy @xmath245 . analyzing the properties of the @xmath0 indicates that for most of the surviving points in the nmssm and the nmssm , its dominant component is the singlet field ( numerically speaking , @xmath246 ) so that its couplings to the sm fermions are suppressed@xcite . our analysis also indicates that the main decay products of @xmath0 are @xmath247 for the l2hdm@xcite , @xmath248 ( dominant ) and @xmath247 ( subdominant ) for the type - ii 2hdm , the nmssm and the nmssm , and in some rare cases , neutralino pairs in the nmssm@xcite . in fig.[fig4 ] , we project the surviving samples on the @xmath249 plane . this figure shows that the allowed range of @xmath54 is from @xmath250 to @xmath251 in the type - ii 2hdm , and from @xmath252 to @xmath253 in the l2hdm . 
just as we introduced before , the lower bounds of @xmath254 come from the fact that we require the models to explain the muon anomalous moment , while the upper bound is due to we have imposed the constraint from the lep process @xmath255 , which have limited the upper reach of the @xmath256 coupling for light @xmath61 @xcite(for the dependence of @xmath256 coupling on @xmath54 , see sec . this figure also indicates that for the nmssm and the nmssm , @xmath54 is upper bounded by @xmath257 . for the nmssm , this is because large @xmath87 can suppress the dark matter mass to make its annihilation difficult ( see @xcite and also sec . ii ) , but for the nmssm , this is because we choose a light slepton mass so that large @xmath54 can enhance @xmath208 too significantly to be experimentally unacceptable . we checked that for the slepton mass as heavy as @xmath258 , @xmath259 is still allowed for the nmssm . in fig.[fig5 ] and fig.[fig6 ] , we show the branching ratios of @xmath260 and @xmath261 respectively . fig.[fig5 ] indicates , among the four models , the type - ii 2hdm predicts the largest ratio for @xmath260 with its value varying from @xmath262 to @xmath263 . the underlying reason is in the type - ii 2hdm , the @xmath264 coupling is enhanced by @xmath54 ( see fig.[fig4 ] ) , while in the other three model , the coupling is suppressed either by @xmath265 or by the singlet component of the @xmath0 . fig.[fig6 ] shows that the l2hdm predicts the largest rate for @xmath266 with its value reaching @xmath5 in optimum case , and for the other three models , the ratio of @xmath261 is at least about one order smaller than that of @xmath267 . this feature can be easily understood from the @xmath268 coupling introduced in sect . we emphasize that , if the nature prefers a light @xmath0 , @xmath260 and/or @xmath269 in the type - ii 2hdm and the l2hdm will be observable at the gigaz . then by the rates of the two decays , one can determine whether the type - ii 2hdm or the l2hdm is the right theory . on the other hand , if both decays are observed with small rates or fail to be observed , the singlet extensions of the mssm are favored . in fig.[fig7 ] , we show the rate of @xmath3 as the function of @xmath270 . this figure indicates that the branching ratio of @xmath121 can reach @xmath271 , @xmath272 , @xmath273 and @xmath274 for the optimal cases of the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively , which implies that the decay @xmath121 will never be observable at the gigaz if the studied model is chosen by nature . the reason for the smallness is , as we pointed out before , that the decay @xmath121 proceeds only at loop level . comparing the optimum cases of the type - ii 2hdm , the nmssm and the nmssm shown in fig.5 - 7 , one may find that the relation @xmath275 holds for any of the decays . this is because the decays are all induced by the yukawa couplings with similar structure for the models . in the supersymmetric models , the large singlet component of the light @xmath0 is to suppress the yukawa couplings , and the @xmath0 in the nmssm has more singlet component than that in the nmssm . next we consider the decay @xmath11 , which , unlike the above decays , depends on the higgs self interactions . in fig.[fig8 ] we plot its rate as a function of @xmath270 and this figure indicates that the @xmath276 may be the largest among the ratios of the exotic @xmath1 decays , reaching @xmath277 in the optimum cases of the type - ii 2hdm , the l2hdm and the nmssm . 
the underlying reason is , in some cases , the intermediate state @xmath119 in fig.[fig3 ] ( a ) may be on - shell . in fact , we find this is one of the main differences between the nmssm and the nmssm , that is , in the nmssm , @xmath119 in fig.[fig3 ] ( a ) may be on - shell ( corresponds to the points with large @xmath278 ) while in the nmssm , this seems impossible . so we conclude that the decay @xmath11 may serve as an alternative channel to test new physics models , especially it may be used to distinguish the nmssm from the nmssm if the supersymmetry is found at the lhc and the @xmath11 is observed at the gigaz with large rate . before we end our discussion , we note that in the nmssm , the higgs boson @xmath0 may be lighter than @xmath279 without conflicting with low energy data from @xmath178 decays and the other observables ( see fig.[fig4]-[fig8 ] ) . in this case , @xmath0 is axion - like as pointed out in @xcite . we checked that , among the rare @xmath1 decays discussed in this paper , the largest branching ratio comes from @xmath280 which can reach @xmath281 . since in this case , the decay product of @xmath0 is highly collinear muon pair , detecting the decay @xmath280 may need some knowledge about detectors , which is beyond our discussion . in this paper , we studied the rare @xmath1-decays @xmath2 ( @xmath7 ) , @xmath282 and @xmath4 in the type - ii 2hdm , lepton - specific 2hdm , nmssm and nmssm , which predict a light cp - odd higgs boson @xmath0 . in the parameter space allowed by current experiments , the branching ratio can be as large as @xmath5 for @xmath118 , @xmath8 for @xmath3 and @xmath9 for @xmath4 , which implies that the decays @xmath2 and @xmath283 may be accessible at the gigaz option . since different models predict different size of branching ratios , these decays can be used to distinguish different model through the measurement of these rare decays . this work was supported in part by hastit under grant no . 2009hastit004 , by the national natural science foundation of china ( nnsfc ) under grant nos . 10821504 , 10725526 , 10635030 , 10775039 , 11075045 and by the project of knowledge innovation program ( pkip ) of chinese academy of sciences under grant no . . for some reviews , see , e.g. , m. a. perez , g. tavares - velasco and j. j. toscano , int . j. mod . a * 19 * , 159 ( 2004 ) ; j. m. yang , arxiv:1006.2594 . j. i. illana , m. masip , 67 , 035004 ( 2003 ) ; j. cao , z. xiong , j. m. yang , 32 , 245 ( 2004 ) . d. atwood _ et al_. , 66 , 093005 ( 2002 ) . j. kalinowski , and s. pokorski , 219 , 116 ( 1989 ) ; a. djouadi , p. m. zerwas and j. zunft , 259 , 175 ( 1991 ) ; a. djouadi , j. kalinowski , and p. m. zerwas , z. phys . c * 54 * , 255 ( 1992 ) . m. krawczyk , _ et al . _ , 19 , 463 ( 2001 ) ; 8 , 495 ( 1999 ) . j. f. gunion , g. gamberini and s. f. novaes , 38 , 3481 ( 1988 ) ; thomas j. weiler and tzu - chiang yuan , 318 , 337 ( 1989 ) ; a. djouadi , _ et al . _ , 1 , 163 ( 1998)[hep - ph/9701342 ] . d. chang and w. y. keung , phys . lett . * 77 * , 3732 ( 1996 ) . e. keith and e. ma , 57 , 2017 ( 1998 ) ; m. a. perez , g. tavares - velasco and j. j. toscano , int . j. mod.phys . a * 19 * , 159 ( 2004 ) . f. larios , g. tavares - velasco and c. p. yuan , 64 , 055004 ( 2001 ) ; 66 , 075006 ( 2002 ) . a. djouadi , _ et al . _ , 10 , 27 ( 1999 ) [ hep - ph/9903229 ] . for a detailed introduction of the nmssm , see f. franke and h. fraas , int . j. mod . a * 12 * ( 1997 ) 479 ; for a recent review of the nmssm , see for example , u. 
ellwanger , c. hugonie , and a. m. teixeira , arxiv : 0910.1785 . see , e.g. , j. r. ellis , j. f. gunion , h. e. haber , l. roszkowski and f. zwirner , phys . rev . d * 39 * ( 1989 ) 844 ; m. drees , int . j. mod . phys . a * 4 * ( 1989 ) 3635 ; u. ellwanger , m. rausch de traubenberg and c. a. savoy , phys . b * 315 * ( 1993 ) 331 ; nucl . b * 492 * ( 1997 ) 21 ; d.j . miller , r. nevzorov , p.m. zerwas , 681 , 3 ( 2004 ) . c. panagiotakopoulos , k. tamvakis , 446 , 224 ( 1999 ) ; 469 , 145 ( 1999 ) ; c. panagiotakopoulos , a. pilaftsis , 63 , 055003 ( 2001 ) ; a. dedes , _ et al . _ , 63 , 055009 ( 2001 ) ; a. menon , _ et al . _ , 70 , 035005 ( 2004 ) ; v. barger , _ et al . _ , 630 , 85 ( 2005 ) . c. balazs , _ et al . _ , 0706 , 066 ( 2007 ) . b. a. dobrescu , k. t. matchev , 0009 , 031 ( 2000 ) ; a. arhrib , k. cheung , t. j. hou , k. w. song , hep - ph/0611211 ; 0703 , 073 ( 2007 ) ; x. g. he , j. tandean , and g. valencia , 98 , 081802 ( 2007 ) ; 0806 , 002 ( 2008 ) ; f. domingo _ et al_. , 0901 , 061 ( 2009 ) ; gudrun hiller , 70 , 034018 ( 2004 ) ; r. dermisek , and john f. gunion , 75 , 075019 ( 2007 ) ; 79 , 055014 ( 2009 ) ; 81 , 055001 ( 2010 ) ; r. dermisek , john f. gunion , and b. mcelrath , 76 , 051105 ( 2007 ) ; z. heng , _ et al_. , 77 , 095012 ( 2008 ) ; a. belyaev _ et al_. , 81 , 075021 ( 2010 ) ; d. das and u. ellwanger , arxiv:1007.1151 [ hep - ph ] . s. andreas , o. lebedev , s. ramos - sanchez and a. ringwald , arxiv:1005.3978 [ hep - ph ] . j. f. gunion , jhep * 0908 * , 032 ( 2009 ) ; r. dermisek and j. f. gunion , phys . rev . d * 81 * , 075003 ( 2010 ) . r. dermisek and j. f. gunion , phys . lett . * 95 * , 041801 ( 2005 ) ; phys . d * 73 * , 111701 ( 2006 ) . j. cao , h. e. logan , j. m. yang , 79 , 091701 ( 2009 ) . j. cao , p. wan , l. wu , j. m. yang , 80 , 071701 ( 2009 ) . j. f. gunion and h. e. haber , 67 , 075019 ( 2003 ) . r. m. barnett , _ et al . _ , phys . b * 136 * , 191 ( 1984 ) ; r. m. barnett , g. senjanovic and d. wyler , phys . d * 30 * , 1529 ( 1984 ) ; y. grossman , nucl . b * 426 * , 355 ( 1994 ) . h. s. goh , l. j. hall and p. kumar , jhep * 0905 * , 097 ( 2009 ) ; a. g. akeroyd and w. j. stirling , nucl . b * 447 * , 3 ( 1995 ) ; a. g. akeroyd , phys . b * 377 * , 95 ( 1996 ) ; h. e. logan and d. maclennan , phys . rev . d * 79 * , 115022 ( 2009 ) ; m. aoki , _ et al . _ , arxiv:0902.4665 [ hep - ph ] . v. barger , p. langacker , h. s. lee and g. shaughnessy , phys . d * 73 * , 115010 ( 2006 ) . s. hesselbach , _ et . _ , arxiv:0810.0511v2 [ hep - ph ] . de vivie and p. janot [ aleph collaboration ] , pa13 - 027 contribution to the international conference on high energy physics , warsaw , poland , 2531 july 1996 ; j. kurowska , o. grajek and p. zalewski [ delphi collaboration ] , cern - open-99 - 385 . [ aleph collaboration and delphi collaboration and l3 collaboration ] , phys . rept . * 427 * , 257 ( 2006 ) . j. cao and j. m. yang , jhep * 0812 * , 006 ( 2008 ) . m. krawczyk and d. temes , eur . j. c * 44 * , 435 ( 2005 ) . g. altarelli and r. barbieri , 253 , 161 ( 1991 ) ; m. e. peskin , t. takeuchi , 46 , 381 ( 1992 ) . c. amsler , _ et al . _ , ( particle data group ) , 667 , 1 ( 2008 ) . o. deschamps , s. descotes - genon , s. monteil , v. niess , s. tjampens and v. tisserand , arxiv:0907.5135 [ hep - ph ] . s. su and b. thomas , phys . d * 79 * , 095014 ( 2009 ) . g. abbiendi , _ et al . _ , eur . phys . j. c * 32 * , 453 ( 2004 ) . m. davier , _ et al . _ , 66 , 1 ( 2010 ) . k. cheung , _ et al . _ , phys . 
d * 64 * , 111301 ( 2001 ) . k. cheung and o. c. w. kong , phys . d * 68 * , 053003 ( 2003 ) . t. besmer , c. greub , t.hurth , 609 , 359 ( 2001 ) ; f. borzumati , _ et al . _ , 62 , 075005(2000 ) . j. cao , k. i. hikasa , w. wang , j. m. yang and l. x. yu , phys . d * 82 * , 051701 ( 2010 ) [ arxiv:1006.4811 [ hep - ph ] ] . j. f. gunion , _ et . d * 73 * , 015011 ( 2006 ) . martin and j. d. wells , phys . d * 64 * , 035003 ( 2001 ) . j. abdallah _ et al . _ , eur . j. c * 31 * , 421 ( 2004 ) ; g. abbiendi _ et al . _ , eur . j. c * 35 * , 1 ( 2004 ) . j. dunkley _ et al . _ [ wmap collaboration ] , astrophys . j. suppl . * 180 * , 306 ( 2009 ) [ arxiv:0803.0586 [ astro - ph ] ] . u. ellwanger _ et al . _ , 02 , 066 ( 2005 ) . g. belanger , f. boudjema , a. pukhov and a. semenov , comput . commun . * 174 * , 577 ( 2006 ) ; comput . phys . commun . * 176 * , 367 ( 2007 ) . g. belanger , f. boudjema , c. hugonie , a. pukhov and a. semenov , jcap * 0509 * , 001 ( 2005 ) ."""
ARTICLE_MAGNET = r"""it is well known that the classical magnetoresistance ( mr ) in metals or semiconductors with a closed free electron fermi surface increases quadratically with increasing magnetic field @xmath2 for @xmath3 and saturates when @xmath4 . here @xmath5 is the zero - magnetic - field mobility . hence , the extraordinarily high and linear mr ( lmr ) , which breaks this familiar rule , has been gaining much attention as soon as its discovery . in the past decade , this unexpected lmr has been reported in silver chalcogenide,@xcite indium antimonide,@xcite silicon,@xcite mnas - gaas composite material,@xcite and graphene.@xcite kapitza s linear law@xcite indicates that the metal shows a magnetoresistance linear in perpendicular magnetic field when it has an open fermi surface and a mean free path longer than the electronic larmor radius . recently , another two models , irrespective of the open fermi surface , have been constructed to provide possible mechanisms for the lmr phenomenon . abrikosov suggested a quantum - limit origin of lmr for the homogenous system with a gapless linear energy spectrum.@xcite his model requires that landau levels are well formed and the carrier concentration is small that all electrons occupy only the lowest landau band . alternatively , parish and littlewood developed a classical model without involving linear spectrum.@xcite ignoring the concrete microscopic mechanism , they attributed this unusual mr to the mobility fluctuations in a strongly inhomogenous system . topological insulators@xcite ( tis ) are novel materials with a full energy gap in bulk , while there are gapless surface states . due to its unique band structure with only one helical dirac cone and linear energy dispersion,@xcite the surface states of the ti bi@xmath0se@xmath1 become an excellent platform for the study of quantum - limit lmr . the recent experiment in this flat surface system , however , reported that a large positive mr , which becomes very linear above a characteristic field of @xmath6@xmath7@xmath8 t , was observed even in an opposite situation where the carrier sheet density is high that electrons occupy more than one landau levels.@xcite moreover , they found that raising temperature to room temperature almost has no influence on the observed lmr . it is striking that this observation is in conflict with abrikosov s model and also with the classical parish - littlewood model . so far a reliable theoretical scheme capable of explaining this novel experiment has still been lacking . in this paper , we generalize the balance - equation approach@xcite to a system modeling the surface states of a three - dimensional ti to investigate the two - dimensional magnetotransport in it . we find that a positive , nonsaturating and dominantly linear magnetoresistance can appear within quite wide magnetic - field range in the ti surface state having a positive and finite effective g - factor . 
this linear magnetoresistance shows up in the system of high carrier concentration and low mobility when electrons are in extended states and spread over many smeared landau levels , and persists up to room temperature , providing a possible mechanism for the recently observed linear magnetoresistance in topological insulator bi@xmath0se@xmath1 nanoribbons.@xcite we consider the surface state of a bi@xmath0se@xmath1-type large bulk gap ti in the @xmath9-@xmath10 plane under the influence of a uniform magnetic field @xmath11 applied along the @xmath12 direction.@xcite following the experimental observation,@xcite we assume that the fermi energy locates in the gap of the bulk band and above the dirac point , i.e. the surface carriers are electrons . further , the separations of the fermi energy from the bottom of bulk band and dirac point are much larger than the highest temperature ( @xmath13 ) considered in this work . hence , the contribution from the bulk band to the magnetotransport is negligible . these electrons , scattered by randomly distributed impurities and by phonons , are driven by a uniform in - plane electric field @xmath14 in the topological surface . the hamiltonian of this many - electron and phonon system consists of an electron part @xmath15 , a phonon part @xmath16 , and electron - impurity and electron - phonon interactions @xmath17 and @xmath18 : @xmath19 here , the electron hamiltonian is taken in the form @xmath20 , \ ] ] in which @xmath21 , @xmath22 , @xmath23 and @xmath24 , stand , respectively , for the canonical momentum , coordinate , momentum and spin operators of the @xmath25th electron having charge @xmath26 , @xmath27 is the vector potential of the perpendicular magnetic field @xmath28 in the landau gauge , @xmath29 is the fermi velocity , @xmath30 is the effective g - factor of the surface electron , and @xmath31 is the bohr magneton with @xmath32 the free electron mass . the sum index @xmath25 in eq.([helectron ] ) goes over all electrons of total number @xmath33 in the surface state of unit area . in the frame work of balance equation approach,@xcite the two - dimensional center - of - mass ( c.m . ) momentum and coordinate @xmath34 and @xmath35 , and the relative - electron momenta and coordinates @xmath36 and @xmath37 are introduced to write the hamiltonian @xmath15 into the sum of a single - particle c.m . part @xmath38 and a many - particle relative - electron part @xmath39 : @xmath40 , with @xmath41.\end{aligned}\ ] ] in this , @xmath42 is the canonical momentum of the center - of - mass and @xmath43 is the canonical momentum for the @xmath25th relative electron . here we have also introduced c.m . spin operators @xmath44 and @xmath45 . the commutation relations between the c.m . spin operators @xmath46 and @xmath47 and the spin operators @xmath48 , @xmath49 and @xmath50 of the @xmath25th electron are of order of @xmath51 : @xmath52= n^{-1}2\,{\rm i}\,\varepsi lon_{\beta_1\beta_2\beta_3}\sigma_j^{\beta_3}$ ] with @xmath53 . therefore , for a macroscopic large @xmath33 system , the c.m . part @xmath38 actually commutes with the relative - electron part @xmath54 in the hamiltonian , i.e. the c.m . motion and the relative motion of electrons are truly separated from each other . the couplings between the two emerge only through the electron impurity and electron phonon interactions . furthermore , the electric field @xmath55 shows up only in @xmath38 . 
and , in view of @xmath56={\rm i}\delta_{\alpha \beta}(\delta_{ij}-1/n)\simeq { \rm i}\delta_{\alpha\beta}\delta_{ij}$ ] , i.e. the relative - electron momenta and coordinates can be treated as canonical conjugate variables , the relative - motion part @xmath54 is just the hamiltonian of @xmath33 electrons in the surface state of ti in the magnetic field without the presence of the electric field . in terms of the c.m . coordinate @xmath57 and the relative electron density operator @xmath58 , the electron impurity and electron phonon interactions can be written as@xcite @xmath59 here @xmath60 and @xmath61 are respectively the impurity potential ( an impurity at randomly distributed position @xmath62 ) and electron phonon coupling matrix element in the plane - wave representation , and @xmath63 with @xmath64 and @xmath65 being the creation and annihilation operators for a phonon of wavevector @xmath66 in branch @xmath67 having frequency @xmath68 . velocity ( operator ) @xmath69 is the time variation of its coordinate : @xmath70= v_{\rm f}(\sigma_{\rm c}^y\ , \hat{i}-\sigma_{\rm c}^x\ , \hat{j})$ ] . to derive a force - balance equation for steady state transport we consider the heisenberg equation for the rate of change of the c.m . canonical momentum @xmath71 : @xmath72= - n e({\bm v}\times { \bm b})- n e{\bm e}+{\bm { f}}_{\rm i}+{\bm { f}}_{\rm p},\ ] ] in which the frictional forces @xmath73 and @xmath74 share the same expressions as given in ref .. the statistical average of the operator equation can be determined to linear order in the electron impurity and electron phonon interactions @xmath17 and @xmath18 with the initial density matrix @xmath75 at temperature @xmath76 when the in - plane electric field @xmath77 is not strong . for steady - transport states we have @xmath78 , leading to a force - balance equation of the form @xmath79 here @xmath80 , the statistically averaged velocity of the moving center - of - mass , is identified as the average rate of change of its position , i.e. the drift velocity of the electron system driven by the electric field @xmath77 , and @xmath81 and @xmath82 are frictional forces experienced by the center - of - mass due to impurity and phonon scatterings : @xmath83,\label{fp}\end{aligned}\ ] ] in which @xmath84 is the bose distribution function , @xmath85 , and @xmath86 stands for the imaginary part of the fourier spectrum of the relative - electron density correlation function defined by @xmath87\big\rangle_{0},\ ] ] where @xmath88 and @xmath89 denotes the statistical averaging over the initial density matrix @xmath90.@xcite the force - balance equation describes the steady - state two - dimensional magnetotransport in the surface state of a ti . note that the frictional forces @xmath81 and @xmath82 are in the opposite direction of the drift velocity @xmath91 and their magnitudes are functions of @xmath92 only . with the drift velocity @xmath93 in the @xmath9 direction , the force - balance equation eq . yields a transverse resistivity @xmath94 , and a longitudinal resistivity @xmath95 . 
the linear one is in the form @xmath96 for calculating the electron density correlation function @xmath97 we proceed in the landau representation.@xcite the landau levels of the single - particle hamiltonian @xmath98 of the relative - electron system in the absence of electric field are composed of a positive `` @xmath99 '' and a negative `` @xmath100 '' branch@xcite @xmath101 with @xmath102 and @xmath103 , and a zero ( @xmath104 ) level @xmath105 the corresponding landau wave functions are @xmath106 and @xmath107 for @xmath108 ; and @xmath109 for @xmath104 . here @xmath110 is the wavevector of the system along @xmath9 direction ; @xmath111 with @xmath112 ; and @xmath113 is the harmonic oscillator eigenfunction with @xmath114 being the hermite polynomial , @xmath115 , and @xmath116 . each landau level contains @xmath117 electron states for system of unit surface area . the positive branch @xmath118 and the @xmath104 level @xmath119 of the above energy spectra are indeed quite close to those of the surface states in the bulk gap of bi@xmath0se@xmath1-family materials derived from microscopic band calculation.@xcite the landau levels are broadened due to impurity , phonon and electron - electron scatterings . we model the imaginary part of the retarded green s function , or the density - of - states , of the broadened landau level @xmath120 ( written for `` + ' ' -branch and @xmath104 levels ) , using a gaussian - type form:@xcite @xmath121,\ ] ] with a half - width @xmath122 of the form:@xcite @xmath123^{1/2}$ ] . here @xmath124 is the single - particle lifetime and @xmath125 is the cyclotron frequency of linear - energy - dispersion system with @xmath126 being the zero - temperature fermi level . using a semi - empirical parameter @xmath127 to relate @xmath124 with the transport scattering time @xmath128 , and expressing @xmath129 with the zero - field mobility @xmath5 at finite temperature,@xcite we can write the landau - level broadening as @xmath130^{1/2}.\ ] ] in the present study we consider the case of @xmath120-doping , i.e. the fermi level is high enough above the energy zero of the dirac cone in the range of `` + ' ' -branch levels and the states of `` @xmath100''-branch levels are completely filled , that they are irrelevant to electron transport . special attention has to be paid to the @xmath104 level , since , depending on the direction of exchange potential the effective g - factor of a ti surface state , @xmath30 , can be positive , zero or negative.@xcite the sign and magnitude of the effective g - factor determines how many states of the zero level should be included in or excluded from the available states for electron occupation in the case of @xmath120-doping at a magnetic field . ( i ) if @xmath131 , the @xmath104 level center is exactly at @xmath132 and the system is electron - hole symmetric . the total number of negative energy states ( including the states of the lower half of the @xmath104 level and states of the @xmath100"-branch levels ) and that of positive energy states ( including the states of the upper half of the @xmath104 level and states of the @xmath99"-branch levels ) do not change when changing magnetic field . therefore , the lower - half negative energy states of this level are always filled and the upper - half positive - energy states of it are available for the occupation of particles which are counted as electrons participating in transport in the case of @xmath120-doping . 
( ii ) for a finite positive @xmath133 , the @xmath104 level @xmath134 moves downward to negative energy and its distance to the nearest @xmath100"-branch level is @xmath135 closer than to the nearest + " -branch level at finite magnetic field strength @xmath2 . this is equivalent to the opening of an increasingly enlarged ( with increasing @xmath2 ) energy gap between the + " -branch states and the states of the zero - level and the @xmath100"-branch levels . the opening of a sufficient energy gap implies that with increasing magnetic field the states in the + " -branch levels would no longer shrink into the zero - level , and thus the @xmath104 level should be completely excluded from the conduction band , i.e. only particles occupying the + " -branch states are counted as electrons participating in transport in the case of @xmath120-doping , when the magnetic field @xmath2 gets larger than a certain value ( depending on the magnitude of @xmath30 ) . ( iii ) for a finite negative @xmath136 , the @xmath104 level @xmath134 moves upward to positive energy and an increasingly enlarged energy gap will be opened between the states of the zero - level and the + " -branch and the states of @xmath100"-branch levels , and particles occupying the @xmath104 level and + " -branch states are electrons participating in transport when the magnetic field @xmath2 gets larger than a certain value . as a result , the experimentally accessible sheet density @xmath33 of electrons participating in transport is related to the fermi energy @xmath137 by the following equation valid at finite @xmath30 for the magnetic field @xmath2 larger than a certain value : @xmath138 in which @xmath139 + 1\}^{-1}$ ] is the fermi distribution function at temperature @xmath76 and the summation index @xmath120 goes over @xmath140 for @xmath133 , or @xmath141 for @xmath136 . in the case of @xmath131 , @xmath142\ ] ] valid for arbitrary magnetic field , in which @xmath143 . the imaginary part of relative - electron density correlation function in the presence of a magnetic field , @xmath86 , can be expressed in the landau representation as@xcite @xmath144 in which the transform factor @xmath145 ^ 2,\end{aligned}\ ] ] with @xmath146 , @xmath147 , @xmath148 , and @xmath149 being associated laguerre polynomials . the landau - representation correlation function @xmath150 in eq.([piqw ] ) can be constructed with the imaginary part of the retarded green s function @xmath151 , or the density - of - states , of the @xmath120th landau level as@xcite @xmath152\nonumber\\ & \hspace{1.2cm}\times{\rm im}g_n(\epsilon+\omega){\rm im}g_{n'}(\epsilon).\end{aligned}\ ] ] the summation indices @xmath120 and @xmath153 in eq.([piqw ] ) are taken over @xmath140 for @xmath133 , or @xmath154 for @xmath136 . in the case of @xmath131 , eq.([piqw ] ) still works and the summation indices @xmath120 and @xmath153 go over @xmath154 but with @xmath155 replaced by @xmath156 in eq.([p2nn ] ) . numerical calculations are performed for the magnetoresistivity @xmath157 of surface state in a uniform ti bi@xmath0se@xmath1 . at zero temperature the elastic scattering contributing to the resistivity is modeled by a coulomb potential due to charged impurities:@xcite @xmath158 with @xmath159 being the impurity density , which is determined by the zero - magnetic - field mobility @xmath5 . at temperatures higher than @xmath160,@xcite phonon scatterings play increasingly important role and the dominant inelastic contribution comes from optical phonons . 
for this polar material , the scattering by optical phonons via the deformation potential can be neglected . hence , we take account of inelastic scattering from optical phonons via frhlich coupling : @xmath161 . in the numerical calculation we use the following parameters:@xcite fermi velocity @xmath162 , static dielectric constant @xmath163 , optical dielectric constant @xmath164 , and phonon energy @xmath165 . the broadening parameter is taken to be @xmath166 . as a function of the magnetic field @xmath2 having different effective g - factors : @xmath167 and @xmath168 for a ti surface system with electron sheet density @xmath169 in the cases of zero - magnetic - field mobility @xmath170 ( a ) and @xmath171 ( b ) . several integer - number positions of filling factor @xmath172 are marked in ( b).,scaledwidth=40.0% ] fig.[diffg ] shows the calculated magnetoresistivity @xmath157 versus the magnetic field strength @xmath2 for a ti surface system with electron sheet density @xmath169 but having different effective g - factors : @xmath167 and @xmath168 for two values of zero - magnetic - field mobility @xmath170 and @xmath171 , representing different degree of landau - level broadening . in the case without zeeman splitting ( @xmath131 ) the resistivity @xmath157 exhibits almost no change with changing magnetic field up to 10 t , except the shubnikov - de haas ( sdh ) oscillation showing up in the case of @xmath171 . this kind of magnetoresistance behavior was indeed seen experimentally in the electron - hole symmetrical massless system of single - layer graphene.@xcite in the case of a positive g - factor , @xmath173 , the magnetoresistivity increases linearly with increasing magnetic field ; while for a negative g - factor , @xmath174 , the magnetoresistivity decreases linearly with increasing magnetic field . is shown as a function of the magnetic field @xmath2 for different values of zero - magnetic - field mobility : ( a ) @xmath175 , ( b ) @xmath176 , ( c ) @xmath177 , ( d ) @xmath178 , ( e ) @xmath179 , and ( f ) @xmath180 . the inset of ( a ) illustrates the same for a larger magnetic - field range @xmath181 . the filling factor @xmath182 is plotted versus the magnetic field in ( f ) ; and several integer - number positions of @xmath182 are also marked in ( d ) and ( e ) . here the surface electron density @xmath169 and the lattice temperature @xmath183.,scaledwidth=47.0% ] in the following we will give more detailed examination on the linearly increasing magnetoresistance in the positive @xmath30 case . fig.[rhob ] shows the calculated resistivity @xmath157 versus the magnetic field strength @xmath2 at lattice temperature @xmath183 for system of carrier sheet density @xmath169 and @xmath173 , having different zero - field mobility @xmath184 and @xmath180 . all resistivity curves for mobility @xmath185 exhibit clear linearity in the magnetic - field range and appear no tendency of saturation at the highest field shown in the figure . especially , for the case @xmath170 , the linear behavior extends even up to the magnetic field of @xmath186 , as illustrated in the inset of fig.[rhob](a ) . this feature contradicts the classical mr which saturates at sufficiently large magnetic field @xmath187 . 
note that here we only present the calculated @xmath157 for magnetic field @xmath2 larger than @xmath188 t , for which a sufficient energy gap @xmath135 is assumed to open that with further increase of the magnetic field the states in the `` + ' ' -branch levels no longer shrink into the zero level and thus it should be excluded from the conduction band . this is of course not true for very weak magnetic field . when @xmath189 the energy gap @xmath190 , the situation becomes similar to the case of @xmath131 : the whole upper half of the zero - level states are available to electron occupation and we should have a flat resistivity @xmath157 when changing magnetic field . with increasing @xmath2 the portion of the zero - level states available to conduction electrons decreases until the magnetic field reaches @xmath191 . as a result the resistivity @xmath157 should exhibit a crossover from a flat changing at small @xmath2 to positively linear increasing at @xmath192 . this is just the behavior observed in the ti bi@xmath0se@xmath1.@xcite note that in the case of @xmath170 , the broadened landau - level widths are always larger than the neighboring level interval : @xmath193 , which requires @xmath194 ^ 2 $ ] , even for the lowest landau level @xmath195 , i.e. the whole landau - level spectrum is smeared . with increasing the zero - field mobility the magnitude of resistivity @xmath157 decreases , and when the broadened landau - level width becomes smaller than the neighboring level interval , @xmath196 , a weak sdh oscillation begin to occur around the linearly - dependent average value of @xmath157 at higher portion of the magnetic field range , as seen in fig.[rhob](c ) , ( d ) and ( e ) for @xmath197 and @xmath198 . on the other hand , in the case of large mobility , e.g. @xmath199 , where the broadened landau - level widths @xmath200 are much smaller than the neighboring level interval even for level index @xmath120 as large as @xmath201 , the magnetoresistivity shows pronounced sdh oscillation and the linear - dependent behavior disappears , before the appearance of quantum hall effect,@xcite as shown in fig.[rhob](f ) . abrikosov s model for the lmr requires the applied magnetic field large enough to reach the quantum limit at which all the carriers are within the lowest landau level,@xcite while it is obvious that more than one landau levels are occupied in the experimental samples in the field range in which the linear and non - saturating magnetoresistivity was observed.@xcite for the given electron surface density @xmath202 , the number of occupied landau levels , or the filling factor @xmath172 , at different magnetic fields is shown in fig.[rhob](f ) , as well as in the fig.[rhob](d ) and ( e ) , where the integer - number positions of @xmath203 , i.e. filling up to entire @xmath182 landau levels , coincide with the minima of the density - of - states or the dips of sdh oscillation . this is in contrast with @xmath131 case , where the integer number of @xmath203 , which implies a filling up to the center position of the @xmath182th landau levels , locates at a peak of sdh oscillation , as shown in fig.[diffg]b . the observed sdh oscillations in the bi@xmath0se@xmath1 nanoribbon exhibiting nonsaturating surface lmr in the experiment@xcite favor the former case : a finite positive effective @xmath133 . 
is plotted as a function of the surface electron density @xmath33 at magnetic field @xmath204 : ( a ) at different values of zero - field mobility @xmath5 , and ( b ) at different values of zero - field conductivity @xmath205.,scaledwidth=40.0% ] at various lattice temperatures . here the zero - magnetic - field mobility at zero temperature is @xmath206.,scaledwidth=35.0% ] next , we examine the density - dependence of the linear magnetoresistivity . to compare with abrikosov s quantum magnetoresistance which suggests a @xmath207 behavior,@xcite we show the calculated @xmath208 for above lmr versus the carrier sheet density @xmath33 in fig.[rhon ] at fixed magnetic field @xmath209 t . the mobility is taken respectively to be @xmath210 and @xmath211m@xmath212/vs to make the resistivity in the lmr regime . a clearly linear dependence of @xmath213 on the surface density @xmath33 is seen in all cases , indicating that this non - saturating linear resistivity is almost inversely proportional to the carrier density . in the figure we also show @xmath208 versus @xmath33 under the condition of different given conductivity @xmath214 and @xmath215 . in this case the half - width @xmath216 is independent of surface density . the linear dependence still holds , indicating that this linear behavior is not sensitive to the modest @xmath33-dependence of landau level broadening @xmath216 as long as the system is in the overlapped landau level regime . from the above discussion , it is obvious that lmr shows up in the system having overlapped landau levels and the separation of landau levels makes the mr departure from the linear increase . at high temperature , the thermal energy would smear the level separation and phonon scatterings further broaden landau levels . hence , it is believed that this lmr will be robust against raising temperature . this is indeed the case as seen in fig.[rhot ] , where we plot the calculated magnetoresistivity @xmath157 for the above system with zero - temperature linear mobility @xmath217m@xmath212/vs versus the magnetic field at different lattice temperatures . we can see that raising temperature to room temperature has little effect on the linearity of mr . due to the decreased mobility at higher temperature from phonon scattering , the weak sdh oscillation on the linear background tends to vanish . these features are in good agreement with the experimental report.@xcite in summary , we have studied the two - dimensional magnetotransport in the flat surface of a three - dimensional ti , which arises from the surface states with a wavevector - linear energy dispersion and a finite , positive zeeman splitting within the bulk energy gap . when the level broadening is comparable to or larger than the landau - level separation and the conduction electrons spread over many landau levels , a positive , dominantly linear and non - saturating magnetoresistance appears within a quite wide range of magnetic field and persists up to room temperature . 
this remarkable lmr provides a possible mechanism for the recently observed linear magnetoresistance in topological insulator bi@xmath0se@xmath1 nanoribbons.@xcite in contrast to quantum hall effect which appears in the case of well formed landau levels and to abrikosov s quantum magnetotransport,@xcite which is limited to the extreme quantum limit that all electrons coalesce into the lowest landau level , the discussed lmr is a phenomena of pure classical two - dimensional magnetotransport in a system having linear - energy - dispersion , appearing in the regime of overlapped landau levels , irrespective of its showing up in relatively high magnetic field range . furthermore , the present scheme deals with spatially uniform case without invoking the mobility fluctuation in a strongly inhomogeneous system , which is required in the classical parish and littlewood model to produce a lmr.@xcite the appearance of this significant positive - increasing linear magnetoresistance depends on the existence of a positive and sizable effective g - factor . if the zeeman energy splitting is quite small the resistivity @xmath157 would exhibit little change with changing magnetic field . in the case of a negative and sizable effective g - factor the magnetoresistivity would decrease linearly with increasing magnetic field . therefore , the behavior of the longitudinal resistivity versus magnetic field may provide a useful way for judging the direction and the size of the effective zeeman energy splitting in ti surface states . this work was supported by the national science foundation of china ( grant no . 11104002 ) , the national basic research program of china ( grant no . 2012cb927403 ) and by the program for science&technology innovation talents in universities of henan province ( grant no . 2012hastit029 ) ."""
dct = tok.batch_encode_plus(
[ARTICLE_LEP, ARTICLE_MAGNET],
max_length=6144,
padding="max_length",
truncation=True,
return_tensors="pt",
)
hypotheses_batch = hf.generate(
input_ids=dct["input_ids"].to(torch_device),
attention_mask=dct["attention_mask"].to(torch_device),
num_beams=4,
max_length=512,
early_stopping=True,
no_repeat_ngram_size=3,
)
EXPECTED_LEP = (
" the physics of @xmath0-boson will again play the central role in the frontier of particle physics if the"
" gigaz option of the international linear collider ( ilc ) can be realized in its first phase. \n the"
" expected sensitivity to the branching ratio of rare decays, especially its exotic or rare processes,"
" should be investigated comprehensively to evaluate their potential in probing new physics. in this work"
" \n, we study the rare decay into light higgs boson(s ) in the framework of the minimal supersymmetric"
" standard model ( mssm ), where a light cp - odd higgs - boson with singlet - dominant component may"
" naturally arise from the spontaneous breaking of some approximate global symmetry. "
)
EXPECTED_MAGNET = (
" the recent experiment in the surface states of the topological insulator bi@xmath0se @xmath1, however,"
" reported that a large positive magnetoresistance becomes very linear in perpendicular magnetic field"
" even in an opposite situation where the carrier sheet density is high that all electrons occupy more"
" than one landau levels. \n it is striking that this observation is in conflict with abrikosov s model"
" and also with the classical parish - littlewood model. "
)
generated = tok.batch_decode(
hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
)
assert generated == [EXPECTED_LEP, EXPECTED_MAGNET]
|
LEDModelIntegrationTests
|
python
|
numpy__numpy
|
numpy/_core/tests/test_scalarinherit.py
|
{
"start": 257,
"end": 281
}
|
class ____(B0):
pass
|
C0
|
python
|
PrefectHQ__prefect
|
src/prefect/flow_engine.py
|
{
"start": 29461,
"end": 61774
}
|
class ____(BaseFlowRunEngine[P, R]):
"""
Async version of the flow run engine.
NOTE: This engine has not been fully asyncified yet, which may lead to async
flows not running fully asynchronously.
"""
_client: Optional[PrefectClient] = None
parameters: dict[str, Any] | None = None
flow_run: FlowRun | None = None
@property
def client(self) -> PrefectClient:
if not self._is_started or self._client is None:
raise RuntimeError("Engine has not started.")
return self._client
def _resolve_parameters(self):
if not self.parameters:
return
resolved_parameters = {}
for parameter, value in self.parameters.items():
try:
resolved_parameters[parameter] = visit_collection(
value,
visit_fn=resolve_to_final_result,
return_data=True,
max_depth=-1,
remove_annotations=True,
context={"parameter_name": parameter},
)
except UpstreamTaskError:
raise
except Exception as exc:
raise PrefectException(
f"Failed to resolve inputs in parameter {parameter!r}. If your"
" parameter type is not supported, consider using the `quote`"
" annotation to skip resolution of inputs."
) from exc
self.parameters = resolved_parameters
def _wait_for_dependencies(self):
if not self.wait_for:
return
visit_collection(
self.wait_for,
visit_fn=resolve_to_final_result,
return_data=False,
max_depth=-1,
remove_annotations=True,
context={},
)
async def begin_run(self) -> State:
try:
self._resolve_parameters()
self._wait_for_dependencies()
except UpstreamTaskError as upstream_exc:
state = await self.set_state(
Pending(
name="NotReady",
message=str(upstream_exc),
),
# if orchestrating a run already in a pending state, force orchestration to
# update the state name
force=self.state.is_pending(),
)
return state
# validate prior to context so that context receives validated params
if self.flow.should_validate_parameters:
try:
self.parameters = self.flow.validate_parameters(self.parameters or {})
except Exception as exc:
message = "Validation of flow parameters failed with error:"
self.logger.error("%s %s", message, exc)
await self.handle_exception(
exc,
msg=message,
result_store=get_result_store().update_for_flow(
self.flow, _sync=True
),
)
self.short_circuit = True
new_state = Running()
state = await self.set_state(new_state)
while state.is_pending():
await asyncio.sleep(0.2)
state = await self.set_state(new_state)
return state
async def set_state(self, state: State, force: bool = False) -> State:
""" """
# prevents any state-setting activity
if self.short_circuit:
return self.state
state = await propose_state(
self.client, state, flow_run_id=self.flow_run.id, force=force
) # type: ignore
self.flow_run.state = state # type: ignore
self.flow_run.state_name = state.name # type: ignore
self.flow_run.state_type = state.type # type: ignore
self._telemetry.update_state(state)
await self.call_hooks(state)
return state
async def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]":
if self._return_value is not NotSet and not isinstance(
self._return_value, State
):
_result = self._return_value
link_state_to_flow_run_result(self.state, _result)
if asyncio.iscoroutine(_result):
# getting the value for a BaseResult may return an awaitable
# depending on whether the parent frame is sync or not
_result = await _result
return _result
if self._raised is not NotSet:
if raise_on_failure:
raise self._raised
return self._raised
# This is a fall through case which leans on the existing state result mechanics to get the
# return value. This is necessary because we currently will return a State object if the
# the State was Prefect-created.
# TODO: Remove the need to get the result from a State except in cases where the return value
# is a State object.
return await self.state.aresult(raise_on_failure=raise_on_failure) # type: ignore
async def handle_success(self, result: R) -> R:
result_store = getattr(FlowRunContext.get(), "result_store", None)
if result_store is None:
raise ValueError("Result store is not set")
resolved_result = resolve_futures_to_states(result)
terminal_state = await return_value_to_state(
resolved_result,
result_store=result_store,
write_result=should_persist_result(),
)
await self.set_state(terminal_state)
self._return_value = resolved_result
self._telemetry.end_span_on_success()
return result
async def handle_exception(
self,
exc: Exception,
msg: Optional[str] = None,
result_store: Optional[ResultStore] = None,
) -> State:
context = FlowRunContext.get()
terminal_state = cast(
State,
await exception_to_failed_state(
exc,
message=msg or "Flow run encountered an exception:",
result_store=result_store or getattr(context, "result_store", None),
write_result=True,
),
)
state = await self.set_state(terminal_state)
if self.state.is_scheduled():
self.logger.info(
(
f"Received non-final state {state.name!r} when proposing final"
f" state {terminal_state.name!r} and will attempt to run again..."
),
)
state = await self.set_state(Running())
self._raised = exc
self._telemetry.record_exception(exc)
self._telemetry.end_span_on_failure(state.message)
return state
async def handle_timeout(self, exc: TimeoutError) -> None:
if isinstance(exc, FlowRunTimeoutError):
message = (
f"Flow run exceeded timeout of {self.flow.timeout_seconds} second(s)"
)
else:
message = f"Flow run failed due to timeout: {exc!r}"
self.logger.error(message)
state = Failed(
data=exc,
message=message,
name="TimedOut",
)
await self.set_state(state)
self._raised = exc
self._telemetry.record_exception(exc)
self._telemetry.end_span_on_failure(message)
async def handle_crash(self, exc: BaseException) -> None:
# need to shield from asyncio cancellation to ensure we update the state
# on the server before exiting
with CancelScope(shield=True):
state = await exception_to_crashed_state(exc)
self.logger.error(f"Crash detected! {state.message}")
self.logger.debug("Crash details:", exc_info=exc)
await self.set_state(state, force=True)
self._raised = exc
self._telemetry.record_exception(exc)
self._telemetry.end_span_on_failure(state.message)
async def load_subflow_run(
self,
parent_task_run: TaskRun,
client: PrefectClient,
context: FlowRunContext,
) -> Union[FlowRun, None]:
"""
This method attempts to load an existing flow run for a subflow task
run, if appropriate.
If the parent task run is in a final but not COMPLETED state, and not
being rerun, then we attempt to load an existing flow run instead of
creating a new one. This will prevent the engine from running the
subflow again.
If no existing flow run is found, or if the subflow should be rerun,
then no flow run is returned.
"""
# check if the parent flow run is rerunning
rerunning = (
context.flow_run.run_count > 1
if getattr(context, "flow_run", None)
and isinstance(context.flow_run, FlowRun)
else False
)
# if the parent task run is in a final but not completed state, and
# not rerunning, then retrieve the most recent flow run instead of
# creating a new one. This effectively loads a cached flow run for
# situations where we are confident the flow should not be run
# again.
assert isinstance(parent_task_run.state, State)
if parent_task_run.state.is_final() and not (
rerunning and not parent_task_run.state.is_completed()
):
# return the most recent flow run, if it exists
flow_runs = await client.read_flow_runs(
flow_run_filter=FlowRunFilter(
parent_task_run_id={"any_": [parent_task_run.id]}
),
sort=FlowRunSort.EXPECTED_START_TIME_ASC,
limit=1,
)
if flow_runs:
loaded_flow_run = flow_runs[-1]
self._return_value = loaded_flow_run.state
return loaded_flow_run
async def create_flow_run(self, client: PrefectClient) -> FlowRun:
flow_run_ctx = FlowRunContext.get()
parameters = self.parameters or {}
parent_task_run = None
# this is a subflow run
if flow_run_ctx:
# add a task to a parent flow run that represents the execution of a subflow run
parent_task = Task(
name=self.flow.name, fn=self.flow.fn, version=self.flow.version
)
parent_task_run = await parent_task.create_run(
flow_run_context=flow_run_ctx,
parameters=self.parameters,
wait_for=self.wait_for,
)
# check if there is already a flow run for this subflow
if subflow_run := await self.load_subflow_run(
parent_task_run=parent_task_run, client=client, context=flow_run_ctx
):
return subflow_run
return await client.create_flow_run(
flow=self.flow,
parameters=self.flow.serialize_parameters(parameters),
state=Pending(),
parent_task_run_id=getattr(parent_task_run, "id", None),
tags=TagsContext.get().current_tags,
)
async def call_hooks(self, state: Optional[State] = None) -> None:
if state is None:
state = self.state
flow = self.flow
flow_run = self.flow_run
if not flow_run:
raise ValueError("Flow run is not set")
enable_cancellation_and_crashed_hooks = (
os.environ.get(
"PREFECT__ENABLE_CANCELLATION_AND_CRASHED_HOOKS", "true"
).lower()
== "true"
)
if state.is_failed() and flow.on_failure_hooks:
hooks = flow.on_failure_hooks
elif state.is_completed() and flow.on_completion_hooks:
hooks = flow.on_completion_hooks
elif (
enable_cancellation_and_crashed_hooks
and state.is_cancelling()
and flow.on_cancellation_hooks
):
hooks = flow.on_cancellation_hooks
elif (
enable_cancellation_and_crashed_hooks
and state.is_crashed()
and flow.on_crashed_hooks
):
hooks = flow.on_crashed_hooks
elif state.is_running() and flow.on_running_hooks:
hooks = flow.on_running_hooks
else:
hooks = None
for hook in hooks or []:
hook_name = get_hook_name(hook)
try:
self.logger.info(
f"Running hook {hook_name!r} in response to entering state"
f" {state.name!r}"
)
result = hook(flow, flow_run, state)
if asyncio.iscoroutine(result):
await result
except Exception:
self.logger.error(
f"An error was encountered while running hook {hook_name!r}",
exc_info=True,
)
else:
self.logger.info(f"Hook {hook_name!r} finished running successfully")
@asynccontextmanager
async def setup_run_context(self, client: Optional[PrefectClient] = None):
from prefect.utilities.engine import (
should_log_prints,
)
if client is None:
client = self.client
if not self.flow_run:
raise ValueError("Flow run not set")
self.flow_run = await client.read_flow_run(self.flow_run.id)
log_prints = should_log_prints(self.flow)
async with AsyncExitStack() as stack:
# TODO: Explore closing task runner before completing the flow to
# wait for futures to complete
stack.enter_context(capture_sigterm())
if log_prints:
stack.enter_context(patch_print())
task_runner = stack.enter_context(self.flow.task_runner.duplicate())
stack.enter_context(
FlowRunContext(
flow=self.flow,
log_prints=log_prints,
flow_run=self.flow_run,
parameters=self.parameters,
client=client,
result_store=get_result_store().update_for_flow(
self.flow, _sync=True
),
task_runner=task_runner,
persist_result=self.flow.persist_result
if self.flow.persist_result is not None
else should_persist_result(),
)
)
# Set deployment context vars only if this is the top-level deployment run
# (nested flows will inherit via ContextVar propagation)
if self.flow_run.deployment_id and not _deployment_id.get():
id_token = _deployment_id.set(self.flow_run.deployment_id)
params_token = _deployment_parameters.set(self.flow_run.parameters)
stack.callback(_deployment_id.reset, id_token)
stack.callback(_deployment_parameters.reset, params_token)
stack.enter_context(ConcurrencyContextV1())
stack.enter_context(ConcurrencyContext())
if lease_id := self.state.state_details.deployment_concurrency_lease_id:
await stack.enter_async_context(
amaintain_concurrency_lease(
lease_id, 300, raise_on_lease_renewal_failure=True
)
)
# set the logger to the flow run logger
self.logger: "logging.Logger" = flow_run_logger(
flow_run=self.flow_run, flow=self.flow
)
# update the flow run name if necessary
if not self._flow_run_name_set and self.flow.flow_run_name:
flow_run_name = resolve_custom_flow_run_name(
flow=self.flow, parameters=self.parameters
)
await self.client.set_flow_run_name(
flow_run_id=self.flow_run.id, name=flow_run_name
)
self.logger.extra["flow_run_name"] = flow_run_name
self.logger.debug(
f"Renamed flow run {self.flow_run.name!r} to {flow_run_name!r}"
)
self.flow_run.name = flow_run_name
self._flow_run_name_set = True
self._telemetry.update_run_name(name=flow_run_name)
if self.flow_run.parent_task_run_id:
_logger = get_run_logger(FlowRunContext.get())
run_type = "subflow"
else:
_logger = self.logger
run_type = "flow"
_logger.info(
f"Beginning {run_type} run {self.flow_run.name!r} for flow {self.flow.name!r}"
)
if flow_run_url := url_for(self.flow_run):
self.logger.info(
f"View at {flow_run_url}", extra={"send_to_api": False}
)
yield
@asynccontextmanager
async def initialize_run(self):
"""
Enters a client context and creates a flow run if needed.
"""
with hydrated_context(self.context):
async with AsyncClientContext.get_or_create() as client_ctx:
self._client = client_ctx.client
self._is_started = True
if not self.flow_run:
self.flow_run = await self.create_flow_run(self.client)
flow_run_url = url_for(self.flow_run)
if flow_run_url:
self.logger.info(
f"View at {flow_run_url}", extra={"send_to_api": False}
)
else:
# Update the empirical policy to match the flow if it is not set
if self.flow_run.empirical_policy.retry_delay is None:
self.flow_run.empirical_policy.retry_delay = (
self.flow.retry_delay_seconds
)
if self.flow_run.empirical_policy.retries is None:
self.flow_run.empirical_policy.retries = self.flow.retries
await self.client.update_flow_run(
flow_run_id=self.flow_run.id,
flow_version=self.flow.version,
empirical_policy=self.flow_run.empirical_policy,
)
await self._telemetry.async_start_span(
run=self.flow_run,
client=self.client,
parameters=self.parameters,
)
try:
yield self
except TerminationSignal as exc:
self.cancel_all_tasks()
await self.handle_crash(exc)
raise
except Exception:
# regular exceptions are caught and re-raised to the user
raise
except (Abort, Pause) as exc:
if getattr(exc, "state", None):
# we set attribute explicitly because
# internals will have already called the state change API
self.flow_run.state = exc.state
raise
except GeneratorExit:
# Do not capture generator exits as crashes
raise
except BaseException as exc:
# We don't want to crash a flow run if the user code finished executing
if self.flow_run.state and not self.flow_run.state.is_final():
# BaseExceptions are caught and handled as crashes
await self.handle_crash(exc)
raise
else:
self.logger.debug(
"BaseException was raised after user code finished executing",
exc_info=exc,
)
finally:
# If debugging, use the more complete `repr` than the usual `str` description
display_state = (
repr(self.state) if PREFECT_DEBUG_MODE else str(self.state)
)
self.logger.log(
level=logging.INFO
if self.state.is_completed()
else logging.ERROR,
msg=f"Finished in state {display_state}",
)
self._is_started = False
self._client = None
# --------------------------
#
# The following methods compose the main task run loop
#
# --------------------------
@asynccontextmanager
async def start(self) -> AsyncGenerator[None, None]:
async with self.initialize_run():
with (
trace.use_span(self._telemetry.span)
if self._telemetry.span
else nullcontext()
):
await self.begin_run()
yield
@asynccontextmanager
async def run_context(self):
timeout_context = timeout_async if self.flow.isasync else timeout
# reenter the run context to ensure it is up to date for every run
async with self.setup_run_context():
try:
with timeout_context(
seconds=self.flow.timeout_seconds,
timeout_exc_type=FlowRunTimeoutError,
):
self.logger.debug(
f"Executing flow {self.flow.name!r} for flow run {self.flow_run.name!r}..."
)
yield self
except TimeoutError as exc:
await self.handle_timeout(exc)
except Exception as exc:
self.logger.exception("Encountered exception during execution: %r", exc)
await self.handle_exception(exc)
async def call_flow_fn(self) -> Coroutine[Any, Any, R]:
"""
Convenience method to call the flow function. Returns a coroutine if the
flow is async.
"""
assert self.flow.isasync, "Flow must be async to be run with AsyncFlowRunEngine"
result = await call_with_parameters(self.flow.fn, self.parameters)
await self.handle_success(result)
return result
def run_flow_sync(
flow: Flow[P, R],
flow_run: Optional[FlowRun] = None,
parameters: Optional[Dict[str, Any]] = None,
wait_for: Optional[Iterable[PrefectFuture[Any]]] = None,
return_type: Literal["state", "result"] = "result",
context: Optional[dict[str, Any]] = None,
) -> Union[R, State, None]:
engine = FlowRunEngine[P, R](
flow=flow,
parameters=parameters,
flow_run=flow_run,
wait_for=wait_for,
context=context,
)
with engine.start():
while engine.is_running():
with engine.run_context():
engine.call_flow_fn()
return engine.state if return_type == "state" else engine.result()
async def run_flow_async(
flow: Flow[P, R],
flow_run: Optional[FlowRun] = None,
parameters: Optional[Dict[str, Any]] = None,
wait_for: Optional[Iterable[PrefectFuture[Any]]] = None,
return_type: Literal["state", "result"] = "result",
context: Optional[dict[str, Any]] = None,
) -> Union[R, State, None]:
engine = AsyncFlowRunEngine[P, R](
flow=flow,
parameters=parameters,
flow_run=flow_run,
wait_for=wait_for,
context=context,
)
async with engine.start():
while engine.is_running():
async with engine.run_context():
await engine.call_flow_fn()
return engine.state if return_type == "state" else await engine.result()
def run_generator_flow_sync(
flow: Flow[P, R],
flow_run: Optional[FlowRun] = None,
parameters: Optional[Dict[str, Any]] = None,
wait_for: Optional[Iterable[PrefectFuture[Any]]] = None,
return_type: Literal["state", "result"] = "result",
context: Optional[dict[str, Any]] = None,
) -> Generator[R, None, None]:
if return_type != "result":
raise ValueError("The return_type for a generator flow must be 'result'")
engine = FlowRunEngine[P, R](
flow=flow,
parameters=parameters,
flow_run=flow_run,
wait_for=wait_for,
context=context,
)
with engine.start():
while engine.is_running():
with engine.run_context():
call_args, call_kwargs = parameters_to_args_kwargs(
flow.fn, engine.parameters or {}
)
gen = flow.fn(*call_args, **call_kwargs)
try:
while True:
gen_result = next(gen)
# link the current state to the result for dependency tracking
link_state_to_flow_run_result(engine.state, gen_result)
yield gen_result
except StopIteration as exc:
engine.handle_success(exc.value)
except GeneratorExit as exc:
engine.handle_success(None)
gen.throw(exc)
return engine.result()
async def run_generator_flow_async(
flow: Flow[P, R],
flow_run: Optional[FlowRun] = None,
parameters: Optional[Dict[str, Any]] = None,
wait_for: Optional[Iterable[PrefectFuture[R]]] = None,
return_type: Literal["state", "result"] = "result",
context: Optional[dict[str, Any]] = None,
) -> AsyncGenerator[R, None]:
if return_type != "result":
raise ValueError("The return_type for a generator flow must be 'result'")
engine = AsyncFlowRunEngine[P, R](
flow=flow,
parameters=parameters,
flow_run=flow_run,
wait_for=wait_for,
context=context,
)
async with engine.start():
while engine.is_running():
async with engine.run_context():
call_args, call_kwargs = parameters_to_args_kwargs(
flow.fn, engine.parameters or {}
)
gen = flow.fn(*call_args, **call_kwargs)
try:
while True:
# can't use anext in Python < 3.10
gen_result = await gen.__anext__()
# link the current state to the result for dependency tracking
link_state_to_flow_run_result(engine.state, gen_result)
yield gen_result
except (StopAsyncIteration, GeneratorExit) as exc:
await engine.handle_success(None)
if isinstance(exc, GeneratorExit):
gen.throw(exc)
# async generators can't return, but we can raise failures here
if engine.state.is_failed():
await engine.result()
def run_flow(
flow: Flow[P, R],
flow_run: Optional[FlowRun] = None,
parameters: Optional[Dict[str, Any]] = None,
wait_for: Optional[Iterable[PrefectFuture[R]]] = None,
return_type: Literal["state", "result"] = "result",
error_logger: Optional[logging.Logger] = None,
context: Optional[dict[str, Any]] = None,
) -> (
R
| State
| None
| Coroutine[Any, Any, R | State | None]
| Generator[R, None, None]
| AsyncGenerator[R, None]
):
ret_val: Union[
R,
State,
None,
Coroutine[Any, Any, R | State | None],
Generator[R, None, None],
AsyncGenerator[R, None],
] = None
try:
kwargs: dict[str, Any] = dict(
flow=flow,
flow_run=flow_run,
parameters=_flow_parameters(
flow=flow, flow_run=flow_run, parameters=parameters
),
wait_for=wait_for,
return_type=return_type,
context=context,
)
if flow.isasync and flow.isgenerator:
ret_val = run_generator_flow_async(**kwargs)
elif flow.isgenerator:
ret_val = run_generator_flow_sync(**kwargs)
elif flow.isasync:
ret_val = run_flow_async(**kwargs)
else:
ret_val = run_flow_sync(**kwargs)
except (Abort, Pause):
raise
except:
if error_logger:
error_logger.error(
"Engine execution exited with unexpected exception", exc_info=True
)
raise
return ret_val
def _flow_parameters(
flow: Flow[P, R], flow_run: Optional[FlowRun], parameters: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
if parameters:
# This path is taken when a flow is being called directly with
# parameters, in that case just return the parameters as-is.
return parameters
# Otherwise the flow is being executed indirectly and we may need to grab
# the parameters from the flow run. We also need to resolve any default
# parameters that are defined on the flow function itself.
parameters = flow_run.parameters if flow_run else {}
call_args, call_kwargs = parameters_to_args_kwargs(flow.fn, parameters)
return get_call_parameters(flow.fn, call_args, call_kwargs)
def run_flow_in_subprocess(
flow: "Flow[..., Any]",
flow_run: "FlowRun | None" = None,
parameters: dict[str, Any] | None = None,
wait_for: Iterable[PrefectFuture[Any]] | None = None,
context: dict[str, Any] | None = None,
) -> multiprocessing.context.SpawnProcess:
"""
Run a flow in a subprocess.
Note the result of the flow will only be accessible if the flow is configured to
persist its result.
Args:
flow: The flow to run.
flow_run: The flow run object containing run metadata.
parameters: The parameters to use when invoking the flow.
wait_for: The futures to wait for before starting the flow.
context: A serialized context to hydrate before running the flow. If not provided,
the current context will be used. A serialized context should be provided if
this function is called in a separate memory space from the parent run (e.g.
in a subprocess or on another machine).
Returns:
A multiprocessing.context.SpawnProcess representing the process that is running the flow.
"""
from prefect.flow_engine import run_flow
@wraps(run_flow)
def run_flow_with_env(
*args: Any,
env: dict[str, str] | None = None,
**kwargs: Any,
):
"""
Wrapper function to update environment variables and settings before running the flow.
"""
os.environ.update(env or {})
settings_context = get_settings_context()
# Create a new settings context with a new settings object to pick up the updated
# environment variables
with SettingsContext(
profile=settings_context.profile,
settings=Settings(),
):
with handle_engine_signals(getattr(flow_run, "id", None)):
maybe_coro = run_flow(*args, **kwargs)
if asyncio.iscoroutine(maybe_coro):
# This is running in a brand new process, so there won't be an existing
# event loop.
asyncio.run(maybe_coro)
ctx = multiprocessing.get_context("spawn")
context = context or serialize_context()
process = ctx.Process(
target=cloudpickle_wrapped_call(
run_flow_with_env,
env=get_current_settings().to_environment_variables(exclude_unset=True)
| os.environ
| {
# TODO: make this a thing we can pass into the engine
"PREFECT__ENABLE_CANCELLATION_AND_CRASHED_HOOKS": "false",
},
flow=flow,
flow_run=flow_run,
parameters=parameters,
wait_for=wait_for,
context=context,
),
)
process.start()
return process
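# Hedged usage sketch (editor's addition, not part of the engine module): one
# plausible way to launch a flow with `run_flow_in_subprocess`. The flow `double`
# below is hypothetical and exists only for illustration; as noted in the
# docstring above, its return value is only retrievable afterwards if the flow
# persists its result.
if __name__ == "__main__":
    from prefect import flow

    @flow(persist_result=True)
    def double(x: int) -> int:
        return x * 2

    process = run_flow_in_subprocess(flow=double, parameters={"x": 21})
    process.join()  # block until the spawned subprocess exits
    print(f"Subprocess exited with code {process.exitcode}")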
|
AsyncFlowRunEngine
|
python
|
ray-project__ray
|
rllib/core/models/tests/test_catalog.py
|
{
"start": 1470,
"end": 15265
}
|
class ____(unittest.TestCase):
def _check_model_outputs(self, model, framework, model_config_dict, input_space):
"""Checks the model's outputs for the given input space.
Args:
model: The model to check.
framework: The framework to use (torch).
model_config_dict: The model config dict to use.
input_space: The input space to use.
"""
convert_method = convert_to_torch_tensor
expected_latent_dim = model_config_dict.get("latent_dim")
if expected_latent_dim is None:
# For CNNEncoders, `output_dims` are computed automatically.
if isinstance(model.config, CNNEncoderConfig):
expected_latent_dim = model.config.output_dims[0]
# In order to stay backward compatible, we default to fcnet_hiddens[-1].
# See MODEL_DEFAULTS for more details
else:
expected_latent_dim = model_config_dict["fcnet_hiddens"][-1]
observations = convert_method(
get_dummy_batch_for_space(input_space, batch_size=32)
)
states = tree.map_structure(
lambda s: convert_method(32 * [s]), model.get_initial_state()
)
seq_lens = convert_method([32])
inputs = {
Columns.OBS: observations,
Columns.STATE_IN: states,
Columns.SEQ_LENS: seq_lens,
}
outputs = model(inputs)
self.assertEqual(outputs[ENCODER_OUT].shape, (32, expected_latent_dim))
if Columns.STATE_OUT in outputs:
tree.map_structure_with_path(
lambda p, v: (
True if v is None else self.assertEqual(v.shape, states[p].shape)
),
outputs[Columns.STATE_OUT],
)
def test_atari_help(self):
"""Tests that we raise an appropriate error message if a user tries to create a
Catalog object for an Atari game's observation space without providing a cnn
config.
"""
import pytest
with pytest.raises(ValueError, match="This is the default atari obs shape."):
Catalog(
observation_space=Box(-np.inf, np.inf, [210, 160, 3], dtype=np.float32),
action_space=Box(-1, 1, (1,)),
model_config_dict={},
)
def test_get_encoder_config(self):
"""Tests if we can create a bunch of encoders from the base catalog class."""
input_spaces_and_config_types = [
(Box(-1.0, 1.0, (5,), dtype=np.float32), MLPEncoderConfig),
(Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32), CNNEncoderConfig),
]
model_configs = [
# This should produce an MLPEncoder with three hidden layers
DefaultModelConfig(
fcnet_activation="relu",
fcnet_hiddens=[256, 256, 256],
),
# This should produce an MLPEncoder with one hidden layer
DefaultModelConfig(
fcnet_hiddens=[512, 512],
fcnet_activation="relu",
),
]
frameworks = ["torch"]
# First check if encoders can be created for non-composite spaces
print("Testing encoders for non-composite input spaces...")
config_combinations = [
frameworks,
input_spaces_and_config_types,
model_configs,
]
for config in itertools.product(*config_combinations):
framework, input_space_and_config_type, model_config = config
model_config_dict = dataclasses.asdict(model_config)
input_space, model_config_type = input_space_and_config_type
print(
f"Testing framework: \n{framework}\n, input space: \n{input_space}\n "
f"and config: \n{model_config}\n"
)
catalog = Catalog(
observation_space=input_space,
# Action space does not matter for encoders
action_space=gym.spaces.Box(1, 1, (1,)),
model_config_dict=model_config_dict,
)
model_config = catalog._get_encoder_config(
observation_space=input_space, model_config_dict=model_config_dict
)
self.assertEqual(type(model_config), model_config_type)
model = model_config.build(framework=framework)
# Do a forward pass and check if the output has the correct shape
self._check_model_outputs(model, framework, model_config_dict, input_space)
def test_get_dist_cls_from_action_space(self):
"""Tests if we can create a bunch of action distributions.
Action distributions are created from the base catalog class. Things this
test checks:
- Whether we output the correct action distributions classes.
- Whether we can instantiate the action distributions, query their
required input dimensions and sample from them.
"""
TestConfig = namedtuple(
"TestConfig", ("action_space", "expected_dist_cls_dict")
)
test_configs = [
# Box
TestConfig(
Box(-np.inf, np.inf, (7,), dtype=np.float32),
{
"torch": TorchDiagGaussian,
},
),
# Discrete
TestConfig(Discrete(5), {"torch": TorchCategorical}),
# Nested Dict
TestConfig(
Dict(
{
"a": Box(-np.inf, np.inf, (7,), dtype=np.float32),
"b": Dict({"c": Discrete(5)}),
}
),
{
"torch": TorchMultiDistribution,
},
),
# Nested Tuple
TestConfig(
Tuple(
(
Box(-np.inf, np.inf, (7,), dtype=np.float32),
Tuple((Discrete(5), Discrete(5))),
)
),
{
"torch": TorchMultiDistribution,
},
),
# Tuple nested inside Dict
TestConfig(
Dict(
{
"a": Box(-np.inf, np.inf, (7,), dtype=np.float32),
"b": Dict(
{
"c": Tuple(
(
Box(-np.inf, np.inf, (7,), dtype=np.float32),
Tuple((Discrete(5), Discrete(5))),
)
)
}
),
}
),
{
"torch": TorchMultiDistribution,
},
),
# Dict nested inside Tuple
TestConfig(
Tuple(
(
Box(-np.inf, np.inf, (7,), dtype=np.float32),
Tuple(
(
Discrete(5),
Dict(
{
"a": Box(
-np.inf, np.inf, (7,), dtype=np.float32
),
"b": Dict({"c": Discrete(5)}),
}
),
)
),
)
),
{
"torch": TorchMultiDistribution,
},
),
# MultiDiscrete
TestConfig(
MultiDiscrete([5, 5, 5]),
{"torch": TorchMultiCategorical},
),
]
for (
action_space,
expected_cls_dict,
) in test_configs:
print(f"Testing action space: {action_space}:")
catalog = Catalog(
observation_space=Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32),
action_space=action_space,
model_config_dict=MODEL_DEFAULTS.copy(),
)
dist_cls = catalog._get_dist_cls_from_action_space(
action_space=action_space,
framework="torch",
)
# Check if we can query the required input dimensions
expected_cls = expected_cls_dict["torch"]
if expected_cls is TorchMultiDistribution:
# For these special cases, we need to create partials of the
# expected classes so that we can calculate the required inputs
expected_cls = _multi_action_dist_partial_helper(
catalog_cls=catalog,
action_space=action_space,
framework="torch",
)
elif expected_cls is TorchMultiCategorical:
# For these special cases, we need to create partials of the
# expected classes so that we can calculate the required inputs
expected_cls = _multi_categorical_dist_partial_helper(
action_space=action_space, framework="torch"
)
# Now that we have sorted out special cases, we can finally get the
# input_dim
input_dim = expected_cls.required_input_dim(action_space)
logits = np.ones((32, input_dim), dtype=np.float32)
logits = torch.from_numpy(logits)
# We don't need a model if we input tensors
dist = dist_cls.from_logits(logits=logits)
self.assertTrue(
isinstance(dist, expected_cls_dict["torch"]),
msg=f"Expected {expected_cls_dict['torch']}, " f"got {type(dist)}",
)
# Test if sampling works
actions = dist.sample()
# Test if logp works
dist.logp(actions)
# For any array of actions in a possibly nested space, convert to
# numpy and pick the first one to check if it is in the action space.
action = tree.map_structure(lambda a: convert_to_numpy(a)[0], actions)
self.assertTrue(action_space.contains(action))
def test_customize_catalog_from_algorithm_config(self):
"""Test if we can pass catalog to algorithm config and it ends up inside
RLModule and is used to build models there."""
class MyCatalog(PPOCatalog):
def build_vf_head(self, framework):
return torch.nn.Linear(self.latent_dims[0], 1)
config = (
PPOConfig()
.rl_module(
rl_module_spec=RLModuleSpec(catalog_class=MyCatalog),
)
.framework("torch")
)
algo = config.build(env="CartPole-v0")
self.assertEqual(type(algo.get_module("default_policy").catalog), MyCatalog)
# Test if we can pass custom catalog to algorithm config and train with it.
config = (
PPOConfig()
.rl_module(
rl_module_spec=RLModuleSpec(
module_class=PPOTorchRLModule, catalog_class=MyCatalog
)
)
.framework("torch")
)
algo = config.build(env="CartPole-v0")
algo.train()
def test_post_init_overwrite(self):
"""Test if we can overwrite post_init method of a catalog class.
This tests:
- Defines a custom encoder and its config.
- Defines a custom catalog class that uses the custom encoder by
overwriting the __post_init__ method and defining a custom
Catalog.encoder_config.
- Defines a custom RLModule that uses the custom catalog.
- Runs a forward pass through the custom RLModule to check if
everything is working together as expected.
"""
env = gym.make("CartPole-v0")
class MyCostumTorchEncoderConfig(ModelConfig):
def build(self, framework):
return MyCostumTorchEncoder(self)
class MyCostumTorchEncoder(TorchModel, Encoder):
def __init__(self, config):
super().__init__(config)
self.net = torch.nn.Linear(env.observation_space.shape[0], 10)
def _forward(self, input_dict, **kwargs):
return {
ENCODER_OUT: (self.net(input_dict["obs"])),
Columns.STATE_OUT: None,
}
class MyCustomCatalog(PPOCatalog):
def _determine_components(self):
self._action_dist_class_fn = functools.partial(
self._get_dist_cls_from_action_space, action_space=self.action_space
)
self.latent_dims = (10,)
self.encoder_config = MyCostumTorchEncoderConfig(
input_dims=self.observation_space.shape,
)
spec = RLModuleSpec(
module_class=PPOTorchRLModule,
observation_space=env.observation_space,
action_space=env.action_space,
model_config=MODEL_DEFAULTS.copy(),
catalog_class=MyCustomCatalog,
)
module = spec.build()
module.forward_inference(
batch={"obs": torch.ones((32, *env.observation_space.shape))}
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
TestCatalog
|
python
|
kamyu104__LeetCode-Solutions
|
Python/binary-search-tree-iterator-ii.py
|
{
"start": 232,
"end": 1253
}
|
class ____(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.__stk = []  # stack for the iterative inorder traversal (current left spine)
self.__traversalLeft(root)
self.__vals = []  # cache of values already emitted, which is what makes prev() possible
self.__pos = -1  # index into __vals of the value most recently returned
def hasNext(self):
"""
:rtype: bool
"""
return self.__pos+1 != len(self.__vals) or self.__stk
def next(self):
"""
:rtype: int
"""
self.__pos += 1
if self.__pos == len(self.__vals):
node = self.__stk.pop()
self.__traversalLeft(node.right)
self.__vals.append(node.val)
return self.__vals[self.__pos]
def hasPrev(self):
"""
:rtype: bool
"""
return self.__pos-1 >= 0
def prev(self):
"""
:rtype: int
"""
self.__pos -= 1
return self.__vals[self.__pos]
def __traversalLeft(self, node):
while node is not None:
self.__stk.append(node)
node = node.left
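# Hedged usage sketch (editor's addition): the iterator class above is labelled
# `BSTIterator` in this row's target. The TreeNode class below is a minimal
# stand-in for the node definition LeetCode normally supplies, included only so
# the sketch is self-contained.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    # Inorder traversal of this tree is 3, 7, 9, 15, 20.
    root = TreeNode(7, TreeNode(3), TreeNode(15, TreeNode(9), TreeNode(20)))
    it = BSTIterator(root)
    print(it.next())     # 3
    print(it.next())     # 7
    print(it.prev())     # 3, replayed from the cached values
    print(it.next())     # 7, also served from the cache
    print(it.hasNext())  # True, since 9, 15 and 20 are still ahead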
|
BSTIterator
|
python
|
python-attrs__attrs
|
tests/test_filters.py
|
{
"start": 573,
"end": 1735
}
|
class ____:
"""
Tests for `include`.
"""
@pytest.mark.parametrize(
("incl", "value"),
[
((int,), 42),
((str,), "hello"),
((str, fields(C).a), 42),
((str, fields(C).b), "hello"),
(("a",), 42),
(("a",), "hello"),
(("a", str), 42),
(("a", fields(C).b), "hello"),
],
)
def test_allow(self, incl, value):
"""
Return True if a class or attribute is included.
"""
i = include(*incl)
assert i(fields(C).a, value) is True
@pytest.mark.parametrize(
("incl", "value"),
[
((str,), 42),
((int,), "hello"),
((str, fields(C).b), 42),
((int, fields(C).b), "hello"),
(("b",), 42),
(("b",), "hello"),
(("b", str), 42),
(("b", fields(C).b), "hello"),
],
)
def test_drop_class(self, incl, value):
"""
Return False on non-included classes and attributes.
"""
i = include(*incl)
assert i(fields(C).a, value) is False
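# Hedged usage sketch (editor's addition): in application code, `include` is most
# often passed as the `filter` argument of `attr.asdict`/`attr.astuple`. `Point` is
# an illustrative class, not the `C` fixture used by the tests above; the string
# form ("y") selects an attribute by name, exactly as exercised in the tests.
import attr
from attr.filters import include


@attr.s
class Point:
    x = attr.ib()
    y = attr.ib()


point = Point(x=1, y="label")
# Keep attributes whose *value* is an int, plus the attribute named "y".
assert attr.asdict(point, filter=include(int, "y")) == {"x": 1, "y": "label"}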
|
TestInclude
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/init_ops_v2.py
|
{
"start": 16020,
"end": 18448
}
|
class ____(Initializer):
"""Initializer that generates a truncated normal distribution.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
These values are similar to values from a `tf.initializers.RandomNormal`
except that values more than two standard deviations from the mean are
discarded and re-drawn. This is the recommended initializer for neural network
weights and filters.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(
... 3, tf.initializers.TruncatedNormal(mean=1., stddev=2.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
...
>>> make_variables(4, tf.initializers.RandomUniform(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
"""
self._validate_kwargs(kwargs)
dtype = _assert_float_dtype(dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.truncated_normal(shape, self.mean,
self.stddev, dtype)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed
}
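# Hedged usage sketch (editor's addition): `get_config` makes the initializer
# serializable, so an equivalent instance can be rebuilt from the returned dict.
# The class above is referenced here by its target label, `TruncatedNormal`.
if __name__ == "__main__":
    init = TruncatedNormal(mean=0.0, stddev=0.1, seed=42)
    config = init.get_config()           # {'mean': 0.0, 'stddev': 0.1, 'seed': 42}
    rebuilt = TruncatedNormal(**config)  # seeded identically, so it behaves the same
    values = rebuilt(shape=(2, 3))       # float32 tensor of shape (2, 3)
    print(values.shape)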
|
TruncatedNormal
|
python
|
keras-team__keras
|
guides/writing_your_own_callbacks.py
|
{
"start": 6457,
"end": 8835
}
|
class ____(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
print(
"Up to batch {}, the average loss is {:7.2f}.".format(
batch, logs["loss"]
)
)
def on_test_batch_end(self, batch, logs=None):
print(
"Up to batch {}, the average loss is {:7.2f}.".format(
batch, logs["loss"]
)
)
def on_epoch_end(self, epoch, logs=None):
print(
"The average loss for epoch {} is {:7.2f} "
"and mean absolute error is {:7.2f}.".format(
epoch, logs["loss"], logs["mean_absolute_error"]
)
)
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=2,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
res = model.evaluate(
x_test,
y_test,
batch_size=128,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
"""
## Usage of `self.model` attribute
In addition to receiving log information when one of their methods is called,
callbacks have access to the model associated with the current round of
training/evaluation/inference: `self.model`.
Here are a few of the things you can do with `self.model` in a callback:
- Set `self.model.stop_training = True` to immediately interrupt training.
- Mutate hyperparameters of the optimizer (available as `self.model.optimizer`),
such as `self.model.optimizer.learning_rate`.
- Save the model at periodic intervals.
- Record the output of `model.predict()` on a few test samples at the end of each
epoch, to use as a sanity check during training.
- Extract visualizations of intermediate features at the end of each epoch, to monitor
what the model is learning over time.
- etc.
Let's see this in action in a couple of examples.
"""
"""
## Examples of Keras callback applications
### Early stopping at minimum loss
This first example shows the creation of a `Callback` that stops training when the
minimum of loss has been reached, by setting the attribute `self.model.stop_training`
(boolean). Optionally, you can provide an argument `patience` to specify how many
epochs we should wait before stopping after having reached a local minimum.
`keras.callbacks.EarlyStopping` provides a more complete and general implementation.
"""
|
LossAndErrorPrintingCallback
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 239317,
"end": 241045
}
|
class ____(GeneratedAirbyteSource):
class OAuth20:
@public
def __init__(self, client_id: str, client_secret: str, access_token: str):
self.auth_method = "oauth2.0"
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.access_token = check.str_param(access_token, "access_token")
class APIToken:
@public
def __init__(self, api_token: str, email: str):
self.auth_method = "api_token"
self.api_token = check.str_param(api_token, "api_token")
self.email = check.str_param(email, "email")
@public
def __init__(
self,
name: str,
subdomain: str,
start_date: str,
credentials: Union["ZendeskSunshineSource.OAuth20", "ZendeskSunshineSource.APIToken"],
):
"""Airbyte Source for Zendesk Sunshine.
Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-sunshine
Args:
name (str): The name of the destination.
subdomain (str): The subdomain for your Zendesk Account.
start_date (str): The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z.
"""
self.subdomain = check.str_param(subdomain, "subdomain")
self.start_date = check.str_param(start_date, "start_date")
self.credentials = check.inst_param(
credentials,
"credentials",
(ZendeskSunshineSource.OAuth20, ZendeskSunshineSource.APIToken),
)
super().__init__("Zendesk Sunshine", name)
|
ZendeskSunshineSource
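A hypothetical instantiation of the source defined above, using the APIToken credential class from the same record; the module path follows the record's file location and every value is a placeholder.

from dagster_airbyte.managed.generated.sources import ZendeskSunshineSource

zendesk_sunshine_source = ZendeskSunshineSource(
    name="my_zendesk_sunshine_source",
    subdomain="mycompany",
    start_date="2023-01-01T00:00:00Z",
    credentials=ZendeskSunshineSource.APIToken(
        api_token="<api-token>",
        email="ops@example.com",
    ),
)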
|
python
|
getsentry__sentry
|
tests/sentry/snuba/test_transactions_timeseries_query.py
|
{
"start": 497,
"end": 1687
}
|
class ____(SnubaTestCase, TestCase):
def setUp(self) -> None:
super().setUp()
self.one_min_ago = before_now(minutes=1)
self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
# transaction event
data = load_data("transaction", timestamp=self.day_ago + timedelta(hours=1))
data["event_id"] = "a" * 32
data["transaction"] = "very bad"
data["user"] = {"id": 1}
data["tags"] = {"important": "yes"}
self.store_event(data=data, project_id=self.project.id)
data = load_data("transaction", timestamp=self.day_ago + timedelta(hours=1, minutes=1))
data["event_id"] = "b" * 32
data["transaction"] = "oh my"
data["user"] = {}
data["tags"] = {"important": "no"}
self.store_event(data=data, project_id=self.project.id)
data = load_data("transaction", timestamp=self.day_ago + timedelta(hours=2, minutes=1))
data["event_id"] = "c" * 32
data["transaction"] = "very bad"
data["user"] = {}
data["tags"] = {"important": "yes"}
self.store_event(data=data, project_id=self.project.id)
|
TimeseriesBase
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_axis38.py
|
{
"start": 315,
"end": 1428
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis38.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [45642496, 45644416]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_y_axis({"line": {"color": "yellow"}, "fill": {"color": "red"}})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/tasks.py
|
{
"start": 1792,
"end": 3827
}
|
class ____(BaseModel):
"""Task serializer for responses."""
task_id: str | None
task_display_name: str | None
owner: str | None
start_date: datetime | None
end_date: datetime | None
trigger_rule: str | None
depends_on_past: bool
wait_for_downstream: bool
retries: float | None
queue: str | None
pool: str | None
pool_slots: float | None
execution_timeout: TimeDeltaWithValidation | None
retry_delay: TimeDeltaWithValidation | None
retry_exponential_backoff: float
priority_weight: float | None
weight_rule: str | None
ui_color: str | None
ui_fgcolor: str | None
template_fields: list[str] | None
downstream_task_ids: list[str] | None
doc_md: str | None
operator_name: str | None
params: abc.MutableMapping | None
class_ref: dict | None
is_mapped: bool | None
@model_validator(mode="before")
@classmethod
def validate_model(cls, task: Any) -> Any:
task.__dict__.update({"class_ref": _get_class_ref(task), "is_mapped": task.is_mapped})
return task
@field_validator("weight_rule", mode="before")
@classmethod
def validate_weight_rule(cls, wr: str | PriorityWeightStrategy | None) -> str | None:
"""Validate the weight_rule property."""
if wr is None:
return None
if isinstance(wr, str):
return wr
return encode_priority_weight_strategy(wr)
@field_validator("params", mode="before")
@classmethod
def get_params(cls, params: SerializedParamsDict | None) -> dict | None:
"""Convert params attribute to dict representation."""
if params is None:
return None
return {k: v.dump() for k, v in params.items()}
# Mypy issue https://github.com/python/mypy/issues/1362
@computed_field # type: ignore[prop-decorator]
@property
def extra_links(self) -> list[str]:
"""Extract and return extra_links."""
return getattr(self, "operator_extra_links", [])
|
TaskResponse
|
python
|
keras-team__keras
|
keras/src/ops/node_test.py
|
{
"start": 158,
"end": 194
}
|
class ____(Layer):
pass
|
DummyLayer
|
python
|
getsentry__sentry
|
src/sentry/api/bases/organization.py
|
{
"start": 8899,
"end": 11830
}
|
class ____(Endpoint):
"""
A base class for endpoints that use organization scoping but live in the control silo
"""
permission_classes: tuple[type[BasePermission], ...] = (OrganizationPermission,)
def convert_args(
self,
request: Request,
*args: Any,
**kwargs: Any,
) -> tuple[tuple[Any, ...], dict[str, Any]]:
organization_id_or_slug: int | str | None = None
if args and args[0] is not None:
organization_id_or_slug = args[0]
# Required so it behaves like the original convert_args, where organization_id_or_slug was another parameter
# TODO: Remove this once we remove the old `organization_slug` parameter from getsentry
args = args[1:]
else:
organization_id_or_slug = kwargs.pop("organization_id_or_slug", None) or kwargs.pop(
"organization_slug", None
)
if not organization_id_or_slug:
raise ResourceDoesNotExist
if not subdomain_is_region(request):
subdomain = getattr(request, "subdomain", None)
if subdomain is not None and subdomain != organization_id_or_slug:
raise ResourceDoesNotExist
if str(organization_id_or_slug).isdecimal():
# It is ok that `get_organization_by_id` doesn't check for visibility as we
# don't check the visibility in `get_organization_by_slug` either (only_active=False).
organization_context = organization_service.get_organization_by_id(
id=int(organization_id_or_slug), user_id=request.user.id
)
else:
organization_context = organization_service.get_organization_by_slug(
slug=str(organization_id_or_slug), only_visible=False, user_id=request.user.id
)
if organization_context is None:
raise ResourceDoesNotExist
with sentry_sdk.start_span(op="check_object_permissions_on_organization"):
self.check_object_permissions(request, organization_context)
bind_organization_context(organization_context.organization)
# Track the 'active' organization when the request came from
# a cookie based agent (react app)
# Never track any org (regardless of whether the user does or doesn't have
# membership in that org) when the user is in active superuser mode
if request.auth is None and request.user and not is_active_superuser(request):
auth.set_active_org(request, organization_context.organization.slug)
kwargs["organization_context"] = organization_context
kwargs["organization"] = organization_context.organization
# Used for API access logs
request._request.organization = organization_context.organization # type: ignore[attr-defined]
return (args, kwargs)
|
ControlSiloOrganizationEndpoint
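A hypothetical subclass sketch showing how the kwargs injected by convert_args above (organization_context and organization) would reach a handler; the endpoint name and response shape are invented for illustration and are not part of the record.

from rest_framework.request import Request
from rest_framework.response import Response

class OrganizationPingEndpoint(ControlSiloOrganizationEndpoint):
    def get(self, request: Request, organization_context, organization, **kwargs) -> Response:
        # `organization_context` and `organization` are supplied by convert_args above.
        return Response({"organization": organization.slug})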
|
python
|
pallets__werkzeug
|
tests/test_datastructures.py
|
{
"start": 39267,
"end": 42087
}
|
class ____:
storage_class = ds.FileStorage
def test_mimetype_always_lowercase(self):
file_storage = self.storage_class(content_type="APPLICATION/JSON")
assert file_storage.mimetype == "application/json"
@pytest.mark.parametrize("data", [io.StringIO("one\ntwo"), io.BytesIO(b"one\ntwo")])
def test_bytes_proper_sentinel(self, data):
# iterate over new lines and don't enter an infinite loop
storage = self.storage_class(data)
idx = -1
for idx, _line in enumerate(storage):
assert idx < 2
assert idx == 1
@pytest.mark.parametrize("stream", (tempfile.SpooledTemporaryFile, io.BytesIO))
def test_proxy_can_access_stream_attrs(self, stream):
"""``SpooledTemporaryFile`` doesn't implement some of
``IOBase``. Ensure that ``FileStorage`` can still access the
attributes from the backing file object.
https://github.com/pallets/werkzeug/issues/1344
https://github.com/python/cpython/pull/3249
"""
file_storage = self.storage_class(stream=stream())
for name in ("fileno", "writable", "readable", "seekable"):
assert hasattr(file_storage, name)
file_storage.close()
def test_save_to_pathlib_dst(self, tmp_path):
src = tmp_path / "src.txt"
src.write_text("test")
dst = tmp_path / "dst.txt"
with src.open("rb") as f:
storage = self.storage_class(f)
storage.save(dst)
assert dst.read_text() == "test"
def test_save_to_bytes_io(self):
storage = self.storage_class(io.BytesIO(b"one\ntwo"))
dst = io.BytesIO()
storage.save(dst)
assert dst.getvalue() == b"one\ntwo"
def test_save_to_file(self, tmp_path):
path = tmp_path / "file.data"
storage = self.storage_class(io.BytesIO(b"one\ntwo"))
with path.open("wb") as dst:
storage.save(dst)
with path.open("rb") as src:
assert src.read() == b"one\ntwo"
@pytest.mark.parametrize("ranges", ([(0, 1), (-5, None)], [(5, None)]))
def test_range_to_header(ranges):
header = ds.Range("bytes", ranges).to_header()
r = http.parse_range_header(header)
assert r.ranges == ranges
@pytest.mark.parametrize(
"ranges", ([(0, 0)], [(None, 1)], [(1, 0)], [(0, 1), (-5, 10)])
)
def test_range_validates_ranges(ranges):
with pytest.raises(ValueError):
ds.Range("bytes", ranges)
@pytest.mark.parametrize(
("value", "expect"),
[
({"a": "ab"}, [("a", "ab")]),
({"a": ["a", "b"]}, [("a", "a"), ("a", "b")]),
({"a": b"ab"}, [("a", b"ab")]),
],
)
def test_iter_multi_data(value: t.Any, expect: list[tuple[t.Any, t.Any]]) -> None:
assert list(ds.iter_multi_items(value)) == expect
|
TestFileStorage
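A compact sketch of the FileStorage behaviors exercised by the tests above: wrapping a byte stream and saving it to another stream (or a filesystem path). The filename and contents are placeholders.

import io
from werkzeug import datastructures as ds

storage = ds.FileStorage(io.BytesIO(b"one\ntwo"), filename="notes.txt")

dst = io.BytesIO()
storage.save(dst)                     # save() accepts a writable stream...
assert dst.getvalue() == b"one\ntwo"

# ...or a filesystem destination, as in test_save_to_pathlib_dst above:
# storage.save(pathlib.Path("dst.txt"))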
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_us_state_abbreviation.py
|
{
"start": 1832,
"end": 4453
}
|
class ____(ColumnMapExpectation):
"""Expect values in this column to be valid state abbreviations.
See https://pypi.org/project/us/ for more information. \
DC statehood is a perennial issue in data science, and the owners of the us repo addressed it differently than we have: https://github.com/unitedstates/python-us/issues/50. \
dc_statehood defaults to True, though it can be overridden by end users
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_state_abbreviation": ["KS", "MI", "AL", "NE", "ND"],
"invalid_state_abbreviation": ["", "1234", "WVV", "AA", "WX"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_state_abbreviation"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_state_abbreviation"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_state_abbreviation"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["us"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidUSStateAbbreviation().print_diagnostic_checklist()
|
ExpectColumnValuesToBeValidUSStateAbbreviation
|
python
|
lazyprogrammer__machine_learning_examples
|
rl3/a2c/atari_wrappers.py
|
{
"start": 4420,
"end": 5160
}
|
class ____(gym.RewardWrapper):
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
# class WarpFrame(gym.ObservationWrapper):
# def __init__(self, env):
# """Warp frames to 84x84 as done in the Nature paper and later work."""
# gym.ObservationWrapper.__init__(self, env)
# self.width = 84
# self.height = 84
# self.observation_space = spaces.Box(low=0, high=255, shape=(self.height, self.width, 1))
# def _observation(self, frame):
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
# return frame[:, :, None]
|
ClipRewardEnv
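A small usage sketch of the wrapper above, which replaces every raw reward by its sign. It assumes ClipRewardEnv is in scope (it is defined in the record's module); the environment id and the classic gym step API are assumptions.

import gym
import numpy as np

env = ClipRewardEnv(gym.make("PongNoFrameskip-v4"))
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
assert reward in (-1.0, 0.0, 1.0)  # np.sign keeps only the sign of the raw reward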
|
python
|
jazzband__django-formtools
|
tests/wizard/test_forms.py
|
{
"start": 1975,
"end": 2175
}
|
class ____(TestWizard):
form_list = [Step1, Step2]
condition_dict = {'step2': True}
initial_dict = {'start': {'name': 'value1'}}
instance_dict = {'start': User()}
|
TestWizardWithInitAttrs
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backend_tools.py
|
{
"start": 14524,
"end": 18755
}
|
class ____(ToolBase):
"""
Auxiliary Tool to handle changes in views and positions.
Runs in the background and should get used by all the tools that
need to access the figure's history of views and positions, e.g.
* `ToolZoom`
* `ToolPan`
* `ToolHome`
* `ToolBack`
* `ToolForward`
"""
def __init__(self, *args, **kwargs):
self.views = WeakKeyDictionary()
self.positions = WeakKeyDictionary()
self.home_views = WeakKeyDictionary()
super().__init__(*args, **kwargs)
def add_figure(self, figure):
"""Add the current figure to the stack of views and positions."""
if figure not in self.views:
self.views[figure] = cbook._Stack()
self.positions[figure] = cbook._Stack()
self.home_views[figure] = WeakKeyDictionary()
# Define Home
self.push_current(figure)
# Make sure we add a home view for new Axes as they're added
figure.add_axobserver(lambda fig: self.update_home_views(fig))
def clear(self, figure):
"""Reset the Axes stack."""
if figure in self.views:
self.views[figure].clear()
self.positions[figure].clear()
self.home_views[figure].clear()
self.update_home_views()
def update_view(self):
"""
Update the view limits and position for each Axes from the current
stack position. If any Axes are present in the figure that aren't in
the current stack position, use the home view limits for those Axes and
don't update *any* positions.
"""
views = self.views[self.figure]()
if views is None:
return
pos = self.positions[self.figure]()
if pos is None:
return
home_views = self.home_views[self.figure]
all_axes = self.figure.get_axes()
for a in all_axes:
if a in views:
cur_view = views[a]
else:
cur_view = home_views[a]
a._set_view(cur_view)
if set(all_axes).issubset(pos):
for a in all_axes:
# Restore both the original and modified positions
a._set_position(pos[a][0], 'original')
a._set_position(pos[a][1], 'active')
self.figure.canvas.draw_idle()
def push_current(self, figure=None):
"""
Push the current view limits and position onto their respective stacks.
"""
if not figure:
figure = self.figure
views = WeakKeyDictionary()
pos = WeakKeyDictionary()
for a in figure.get_axes():
views[a] = a._get_view()
pos[a] = self._axes_pos(a)
self.views[figure].push(views)
self.positions[figure].push(pos)
def _axes_pos(self, ax):
"""
Return the original and modified positions for the specified Axes.
Parameters
----------
ax : matplotlib.axes.Axes
The `.Axes` to get the positions for.
Returns
-------
original_position, modified_position
A tuple of the original and modified positions.
"""
return (ax.get_position(True).frozen(),
ax.get_position().frozen())
def update_home_views(self, figure=None):
"""
Make sure that ``self.home_views`` has an entry for all Axes present
in the figure.
"""
if not figure:
figure = self.figure
for a in figure.get_axes():
if a not in self.home_views[figure]:
self.home_views[figure][a] = a._get_view()
def home(self):
"""Recall the first view and position from the stack."""
self.views[self.figure].home()
self.positions[self.figure].home()
def back(self):
"""Back one step in the stack of views and positions."""
self.views[self.figure].back()
self.positions[self.figure].back()
def forward(self):
"""Forward one step in the stack of views and positions."""
self.views[self.figure].forward()
self.positions[self.figure].forward()
|
ToolViewsPositions
|
python
|
dagster-io__dagster
|
scripts/run-pyright.py
|
{
"start": 3476,
"end": 3538
}
|
class ____(TypedDict):
line: int
character: int
|
Position
|
python
|
ray-project__ray
|
rllib/core/models/torch/primitives.py
|
{
"start": 8853,
"end": 15305
}
|
class ____(nn.Module):
"""A model containing a CNN with N Conv2D layers.
All layers share the same activation function, bias setup (use bias or not),
and LayerNorm setup (use layer normalization or not).
Note that there is no flattening nor an additional dense layer at the end of the
stack. The output of the network is a 3D tensor of dimensions
[width x height x num output filters].
"""
def __init__(
self,
*,
input_dims: Union[List[int], Tuple[int, ...]],
cnn_filter_specifiers: List[List[Union[int, List]]],
cnn_use_bias: bool = True,
cnn_use_layernorm: bool = False,
cnn_activation: str = "relu",
cnn_kernel_initializer: Optional[Union[str, Callable]] = None,
cnn_kernel_initializer_config: Optional[Dict] = None,
cnn_bias_initializer: Optional[Union[str, Callable]] = None,
cnn_bias_initializer_config: Optional[Dict] = None,
):
"""Initializes a TorchCNN instance.
Args:
input_dims: The 3D input dimensions of the network (incoming image).
cnn_filter_specifiers: A list in which each element is another (inner) list
of either the following forms:
`[number of channels/filters, kernel, stride]`
OR:
`[number of channels/filters, kernel, stride, padding]`, where `padding`
can either be "same" or "valid".
When using the first format w/o the `padding` specifier, `padding` is
"same" by default. Also, `kernel` and `stride` may be provided either as
single ints (square) or as a tuple/list of two ints (width- and height
dimensions) for non-squared kernel/stride shapes.
A good rule of thumb for constructing CNN stacks is:
When using padding="same", the input "image" will be reduced in size by
the factor `stride`, e.g. input=(84, 84, 3) stride=2 kernel=x
padding="same" filters=16 -> output=(42, 42, 16).
For example, if you would like to reduce an Atari image from its
original (84, 84, 3) dimensions down to (6, 6, F), you can construct the
following stack and reduce the w x h dimension of the image by 2 in each
layer:
[[16, 4, 2], [32, 4, 2], [64, 4, 2], [128, 4, 2]] -> output=(6, 6, 128)
cnn_use_bias: Whether to use bias on all Conv2D layers.
cnn_activation: The activation function to use after each Conv2D layer.
cnn_use_layernorm: Whether to insert a LayerNormalization functionality
in between each Conv2D layer's outputs and its activation.
cnn_kernel_initializer: The initializer function or class to use for kernel
initialization in the CNN layers. If `None` the default initializer of
the respective CNN layer is used. Note, only the in-place
initializers, i.e. ending with an underscore "_" are allowed.
cnn_kernel_initializer_config: Configuration to pass into the initializer
defined in `cnn_kernel_initializer`.
cnn_bias_initializer: The initializer function or class to use for bias
initialization in the CNN layers. If `None` the default initializer of
the respective CNN layer is used. Note, only the in-place initializers,
i.e. ending with an underscore "_" are allowed.
cnn_bias_initializer_config: Configuration to pass into the initializer
defined in `cnn_bias_initializer`.
"""
super().__init__()
assert len(input_dims) == 3
cnn_activation = get_activation_fn(cnn_activation, framework="torch")
cnn_kernel_initializer = get_initializer_fn(
cnn_kernel_initializer, framework="torch"
)
cnn_bias_initializer = get_initializer_fn(
cnn_bias_initializer, framework="torch"
)
layers = []
# Add user-specified hidden convolutional layers first
width, height, in_depth = input_dims
in_size = [width, height]
for filter_specs in cnn_filter_specifiers:
# Padding information not provided -> Use "same" as default.
if len(filter_specs) == 3:
out_depth, kernel_size, strides = filter_specs
padding = "same"
# Padding information provided.
else:
out_depth, kernel_size, strides, padding = filter_specs
# Pad like in tensorflow's SAME/VALID mode.
if padding == "same":
padding_size, out_size = same_padding(in_size, kernel_size, strides)
layers.append(nn.ZeroPad2d(padding_size))
# No actual padding is performed for "valid" mode, but we will still
# compute the output size (input for the next layer).
else:
out_size = valid_padding(in_size, kernel_size, strides)
layer = nn.Conv2d(
in_depth, out_depth, kernel_size, strides, bias=cnn_use_bias
)
# Initialize CNN layer kernel if necessary.
if cnn_kernel_initializer:
cnn_kernel_initializer(
layer.weight, **cnn_kernel_initializer_config or {}
)
# Initialize CNN layer bias if necessary.
if cnn_bias_initializer:
cnn_bias_initializer(layer.bias, **cnn_bias_initializer_config or {})
layers.append(layer)
# Layernorm.
if cnn_use_layernorm:
# We use an epsilon of 0.001 here to mimic the TF default behavior.
layers.append(LayerNorm1D(out_depth, eps=0.001))
# Activation.
if cnn_activation is not None:
layers.append(cnn_activation())
in_size = out_size
in_depth = out_depth
# Create the CNN.
self.cnn = nn.Sequential(*layers)
def forward(self, inputs):
# Permute b/c data comes in as channels_last ([B, dim, dim, channels]) ->
# Convert to `channels_first` for torch:
inputs = inputs.permute(0, 3, 1, 2)
out = self.cnn(inputs)
# Permute back to `channels_last`.
return out.permute(0, 2, 3, 1)
|
TorchCNN
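A usage sketch that follows the docstring's own Atari example above: four stride-2 layers reduce an (84, 84, 3) image to (6, 6, 128). The import path mirrors the record's file location; the batch size is arbitrary.

import torch
from ray.rllib.core.models.torch.primitives import TorchCNN

cnn = TorchCNN(
    input_dims=[84, 84, 3],
    cnn_filter_specifiers=[[16, 4, 2], [32, 4, 2], [64, 4, 2], [128, 4, 2]],
    cnn_activation="relu",
)

# Inputs are channels_last; forward() permutes to channels_first and back internally.
batch = torch.rand(2, 84, 84, 3)
out = cnn(batch)
print(out.shape)  # expected per the docstring: torch.Size([2, 6, 6, 128])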
|
python
|
pytorch__pytorch
|
torch/export/unflatten.py
|
{
"start": 9398,
"end": 10018
}
|
class ____(abc.ABC):
"""
Adapts input arguments with ``input_spec`` to align ``target_spec``.
"""
@abc.abstractmethod
def adapt(
self,
target_spec: pytree.TreeSpec,
input_spec: pytree.TreeSpec,
input_args: list[Any],
metadata: Optional[dict[str, Any]] = None,
obj: Optional[Any] = None,
) -> list[Any]:
"""NOTE: This adapter may mutate given ``input_args_with_path``."""
...
def get_flat_arg_paths(self) -> list[str]:
"""Returns a list of paths that are used to access the flat args."""
return []
|
FlatArgsAdapter
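A minimal concrete adapter sketch against the abstract interface above; it only handles the trivial case where the two specs already match, and the import paths are assumptions based on the record's file location.

from typing import Any, Optional

import torch.utils._pytree as pytree
from torch.export.unflatten import FlatArgsAdapter

class IdentityArgsAdapter(FlatArgsAdapter):
    def adapt(
        self,
        target_spec: pytree.TreeSpec,
        input_spec: pytree.TreeSpec,
        input_args: list[Any],
        metadata: Optional[dict[str, Any]] = None,
        obj: Optional[Any] = None,
    ) -> list[Any]:
        # Only the trivial case is handled: the input spec already lines up.
        if input_spec != target_spec:
            raise ValueError("This sketch only adapts inputs whose spec already matches.")
        return list(input_args)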
|
python
|
python-openxml__python-docx
|
tests/image/test_tiff.py
|
{
"start": 9382,
"end": 12183
}
|
class ____:
def it_constructs_the_right_class_for_a_given_ifd_entry(self, fixture):
stream_rdr, offset, entry_cls_, ifd_entry_ = fixture
ifd_entry = _IfdEntryFactory(stream_rdr, offset)
entry_cls_.from_stream.assert_called_once_with(stream_rdr, offset)
assert ifd_entry is ifd_entry_
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
(b"\x66\x66\x00\x01", "BYTE"),
(b"\x66\x66\x00\x02", "ASCII"),
(b"\x66\x66\x00\x03", "SHORT"),
(b"\x66\x66\x00\x04", "LONG"),
(b"\x66\x66\x00\x05", "RATIONAL"),
(b"\x66\x66\x00\x06", "CUSTOM"),
]
)
def fixture(
self,
request,
ifd_entry_,
_IfdEntry_,
_AsciiIfdEntry_,
_ShortIfdEntry_,
_LongIfdEntry_,
_RationalIfdEntry_,
):
bytes_, entry_type = request.param
entry_cls_ = {
"BYTE": _IfdEntry_,
"ASCII": _AsciiIfdEntry_,
"SHORT": _ShortIfdEntry_,
"LONG": _LongIfdEntry_,
"RATIONAL": _RationalIfdEntry_,
"CUSTOM": _IfdEntry_,
}[entry_type]
stream_rdr = StreamReader(io.BytesIO(bytes_), BIG_ENDIAN)
offset = 0
return stream_rdr, offset, entry_cls_, ifd_entry_
@pytest.fixture
def ifd_entry_(self, request):
return instance_mock(request, _IfdEntry)
@pytest.fixture
def _IfdEntry_(self, request, ifd_entry_):
_IfdEntry_ = class_mock(request, "docx.image.tiff._IfdEntry")
_IfdEntry_.from_stream.return_value = ifd_entry_
return _IfdEntry_
@pytest.fixture
def _AsciiIfdEntry_(self, request, ifd_entry_):
_AsciiIfdEntry_ = class_mock(request, "docx.image.tiff._AsciiIfdEntry")
_AsciiIfdEntry_.from_stream.return_value = ifd_entry_
return _AsciiIfdEntry_
@pytest.fixture
def _ShortIfdEntry_(self, request, ifd_entry_):
_ShortIfdEntry_ = class_mock(request, "docx.image.tiff._ShortIfdEntry")
_ShortIfdEntry_.from_stream.return_value = ifd_entry_
return _ShortIfdEntry_
@pytest.fixture
def _LongIfdEntry_(self, request, ifd_entry_):
_LongIfdEntry_ = class_mock(request, "docx.image.tiff._LongIfdEntry")
_LongIfdEntry_.from_stream.return_value = ifd_entry_
return _LongIfdEntry_
@pytest.fixture
def _RationalIfdEntry_(self, request, ifd_entry_):
_RationalIfdEntry_ = class_mock(request, "docx.image.tiff._RationalIfdEntry")
_RationalIfdEntry_.from_stream.return_value = ifd_entry_
return _RationalIfdEntry_
@pytest.fixture
def offset_(self, request):
return instance_mock(request, int)
|
Describe_IfdEntryFactory
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/instrumentation/events/embedding.py
|
{
"start": 156,
"end": 547
}
|
class ____(BaseEvent):
"""
EmbeddingStartEvent.
Args:
model_dict (dict): Model dictionary containing details about the embedding model.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
model_dict: dict
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "EmbeddingStartEvent"
|
EmbeddingStartEvent
|
python
|
coleifer__peewee
|
tests/schema.py
|
{
"start": 27964,
"end": 28104
}
|
class ____(TestModel):
content = TextField()
timestamp = TimestampField()
status = IntegerField()
flags = IntegerField()
|
NoteX
|
python
|
PrefectHQ__prefect
|
tests/server/orchestration/api/test_block_documents.py
|
{
"start": 10523,
"end": 11069
}
|
class ____:
async def test_read_missing_block_document(self, client):
response = await client.get(f"/block_documents/{uuid4()}")
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_read_nonsense_block_document(self, client):
"""Regression test for an issue we observed in Cloud where a client made
requests for /block_documents/null"""
response = await client.get("/block_documents/not-even")
assert response.status_code == status.HTTP_404_NOT_FOUND
|
TestReadBlockDocument
|
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 236231,
"end": 237437
}
|
class ____(TokenConverter):
"""Converter to return the matched tokens as a list - useful for
returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
The optional ``aslist`` argument when set to True will return the
parsed tokens as a Python list instead of a pyparsing ParseResults.
Example:
.. doctest::
>>> ident = Word(alphas)
>>> num = Word(nums)
>>> term = ident | num
>>> func = ident + Opt(DelimitedList(term))
>>> print(func.parse_string("fn a, b, 100"))
['fn', 'a', 'b', '100']
>>> func = ident + Group(Opt(DelimitedList(term)))
>>> print(func.parse_string("fn a, b, 100"))
['fn', ['a', 'b', '100']]
"""
def __init__(self, expr: ParserElement, aslist: bool = False) -> None:
super().__init__(expr)
self.saveAsList = True
self._asPythonList = aslist
def postParse(self, instring, loc, tokenlist):
if self._asPythonList:
return ParseResults.List(
tokenlist.as_list()
if isinstance(tokenlist, ParseResults)
else list(tokenlist)
)
return [tokenlist]
|
Group
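A short sketch of the aslist option documented above: with aslist=True the grouped tokens come back as a plain Python list instead of a nested ParseResults. The grammar reuses the docstring's own example.

from pyparsing import DelimitedList, Group, Opt, Word, alphas, nums

ident = Word(alphas)
num = Word(nums)
term = ident | num

grouped = ident + Group(Opt(DelimitedList(term)))
listed = ident + Group(Opt(DelimitedList(term)), aslist=True)

print(grouped.parse_string("fn a, b, 100"))    # ['fn', ['a', 'b', '100']]
print(listed.parse_string("fn a, b, 100")[1])  # ['a', 'b', '100'] as a plain Python list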
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/opengl/items/GLTextItem.py
|
{
"start": 155,
"end": 4329
}
|
class ____(GLGraphicsItem):
"""Draws text in 3D."""
def __init__(self, parentItem=None, **kwds):
"""All keyword arguments are passed to setData()"""
super().__init__(parentItem=parentItem)
glopts = kwds.pop('glOptions', 'additive')
self.setGLOptions(glopts)
self.pos = np.array([0.0, 0.0, 0.0])
self.color = QtCore.Qt.GlobalColor.white
self.text = ''
self.font = QtGui.QFont('Helvetica', 16)
self.alignment = QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignBottom
self.setData(**kwds)
def setData(self, **kwds):
"""
Update the data displayed by this item. All arguments are optional;
for example it is allowed to update text while leaving colors unchanged, etc.
==================== ==================================================
**Arguments:**
------------------------------------------------------------------------
pos (3,) array of floats specifying text location.
color QColor or array of ints [R,G,B] or [R,G,B,A]. (Default: Qt.white)
text String to display.
font QFont (Default: QFont('Helvetica', 16))
alignment QtCore.Qt.AlignmentFlag (Default: QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignBottom)
==================== ==================================================
"""
args = ['pos', 'color', 'text', 'font', 'alignment']
for k in kwds.keys():
if k not in args:
raise ValueError('Invalid keyword argument: %s (allowed arguments are %s)' % (k, str(args)))
for arg in args:
if arg in kwds:
value = kwds[arg]
if arg == 'pos':
if isinstance(value, np.ndarray):
if value.shape != (3,):
raise ValueError('"pos.shape" must be (3,).')
elif isinstance(value, (tuple, list)):
if len(value) != 3:
raise ValueError('"len(pos)" must be 3.')
elif arg == 'color':
value = fn.mkColor(value)
elif arg == 'font':
if isinstance(value, QtGui.QFont) is False:
raise TypeError('"font" must be QFont.')
setattr(self, arg, value)
self.update()
def paint(self):
if len(self.text) < 1:
return
self.setupGLState()
project = self.compute_projection()
vec3 = QtGui.QVector3D(*self.pos)
text_pos = self.align_text(project.map(vec3).toPointF())
painter = QtGui.QPainter(self.view())
painter.setPen(self.color)
painter.setFont(self.font)
painter.setRenderHints(QtGui.QPainter.RenderHint.Antialiasing | QtGui.QPainter.RenderHint.TextAntialiasing)
painter.drawText(text_pos, self.text)
painter.end()
def compute_projection(self):
# note that QRectF.bottom() != QRect.bottom()
rect = QtCore.QRectF(self.view().rect())
ndc_to_viewport = QtGui.QMatrix4x4()
ndc_to_viewport.viewport(rect.left(), rect.bottom(), rect.width(), -rect.height())
return ndc_to_viewport * self.mvpMatrix()
def align_text(self, pos):
"""
Aligns the text at the given position according to the given alignment.
"""
font_metrics = QtGui.QFontMetrics(self.font)
rect = font_metrics.tightBoundingRect(self.text)
width = rect.width()
height = rect.height()
dx = dy = 0.0
if self.alignment & QtCore.Qt.AlignmentFlag.AlignRight:
dx = width
if self.alignment & QtCore.Qt.AlignmentFlag.AlignHCenter:
dx = width / 2.0
if self.alignment & QtCore.Qt.AlignmentFlag.AlignTop:
dy = height
if self.alignment & QtCore.Qt.AlignmentFlag.AlignVCenter:
dy = height / 2.0
pos.setX(pos.x() - dx)
pos.setY(pos.y() + dy)
return pos
|
GLTextItem
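A small usage sketch driven by the setData() arguments documented above; the GLViewWidget scene setup is standard pyqtgraph boilerplate and is assumed rather than taken from the record.

import pyqtgraph as pg
import pyqtgraph.opengl as gl

app = pg.mkQApp("GLTextItem sketch")
view = gl.GLViewWidget()
view.show()

label = gl.GLTextItem(pos=(1.0, 0.0, 0.0), text="hello 3D", color=(255, 255, 0))
view.addItem(label)

# Later calls may update only some fields, e.g. move the label without touching color:
label.setData(pos=(0.0, 1.0, 0.0))

if __name__ == "__main__":
    pg.exec()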
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_shape_base_.py
|
{
"start": 11202,
"end": 16885
}
|
class ____(TestCase):
def test_integer_0_split(self):
a = np.arange(10)
assert_raises(ValueError, array_split, a, 0)
def test_integer_split(self):
a = np.arange(10)
res = array_split(a, 1)
desired = [np.arange(10)]
compare_results(res, desired)
res = array_split(a, 2)
desired = [np.arange(5), np.arange(5, 10)]
compare_results(res, desired)
res = array_split(a, 3)
desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
compare_results(res, desired)
res = array_split(a, 4)
desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), np.arange(8, 10)]
compare_results(res, desired)
res = array_split(a, 5)
desired = [
np.arange(2),
np.arange(2, 4),
np.arange(4, 6),
np.arange(6, 8),
np.arange(8, 10),
]
compare_results(res, desired)
res = array_split(a, 6)
desired = [
np.arange(2),
np.arange(2, 4),
np.arange(4, 6),
np.arange(6, 8),
np.arange(8, 9),
np.arange(9, 10),
]
compare_results(res, desired)
res = array_split(a, 7)
desired = [
np.arange(2),
np.arange(2, 4),
np.arange(4, 6),
np.arange(6, 7),
np.arange(7, 8),
np.arange(8, 9),
np.arange(9, 10),
]
compare_results(res, desired)
res = array_split(a, 8)
desired = [
np.arange(2),
np.arange(2, 4),
np.arange(4, 5),
np.arange(5, 6),
np.arange(6, 7),
np.arange(7, 8),
np.arange(8, 9),
np.arange(9, 10),
]
compare_results(res, desired)
res = array_split(a, 9)
desired = [
np.arange(2),
np.arange(2, 3),
np.arange(3, 4),
np.arange(4, 5),
np.arange(5, 6),
np.arange(6, 7),
np.arange(7, 8),
np.arange(8, 9),
np.arange(9, 10),
]
compare_results(res, desired)
res = array_split(a, 10)
desired = [
np.arange(1),
np.arange(1, 2),
np.arange(2, 3),
np.arange(3, 4),
np.arange(4, 5),
np.arange(5, 6),
np.arange(6, 7),
np.arange(7, 8),
np.arange(8, 9),
np.arange(9, 10),
]
compare_results(res, desired)
res = array_split(a, 11)
desired = [
np.arange(1),
np.arange(1, 2),
np.arange(2, 3),
np.arange(3, 4),
np.arange(4, 5),
np.arange(5, 6),
np.arange(6, 7),
np.arange(7, 8),
np.arange(8, 9),
np.arange(9, 10),
np.array([]),
]
compare_results(res, desired)
def test_integer_split_2D_rows(self):
a = np.array([np.arange(10), np.arange(10)])
res = array_split(a, 3, axis=0)
tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), np.zeros((0, 10))]
compare_results(res, tgt)
assert_(a.dtype.type is res[-1].dtype.type)
# Same thing for manual splits:
res = array_split(a, [0, 1], axis=0)
tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), np.array([np.arange(10)])]
compare_results(res, tgt)
assert_(a.dtype.type is res[-1].dtype.type)
def test_integer_split_2D_cols(self):
a = np.array([np.arange(10), np.arange(10)])
res = array_split(a, 3, axis=-1)
desired = [
np.array([np.arange(4), np.arange(4)]),
np.array([np.arange(4, 7), np.arange(4, 7)]),
np.array([np.arange(7, 10), np.arange(7, 10)]),
]
compare_results(res, desired)
def test_integer_split_2D_default(self):
"""This will fail if we change default axis"""
a = np.array([np.arange(10), np.arange(10)])
res = array_split(a, 3)
tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), np.zeros((0, 10))]
compare_results(res, tgt)
assert_(a.dtype.type is res[-1].dtype.type)
# perhaps should check higher dimensions
@skipif(not IS_64BIT, reason="Needs 64bit platform")
def test_integer_split_2D_rows_greater_max_int32(self):
a = np.broadcast_to([0], (1 << 32, 2))
res = array_split(a, 4)
chunk = np.broadcast_to([0], (1 << 30, 2))
tgt = [chunk] * 4
for i in range(len(tgt)):
assert_equal(res[i].shape, tgt[i].shape)
def test_index_split_simple(self):
a = np.arange(10)
indices = [1, 5, 7]
res = array_split(a, indices, axis=-1)
desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), np.arange(7, 10)]
compare_results(res, desired)
def test_index_split_low_bound(self):
a = np.arange(10)
indices = [0, 5, 7]
res = array_split(a, indices, axis=-1)
desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), np.arange(7, 10)]
compare_results(res, desired)
def test_index_split_high_bound(self):
a = np.arange(10)
indices = [0, 5, 7, 10, 12]
res = array_split(a, indices, axis=-1)
desired = [
np.array([]),
np.arange(0, 5),
np.arange(5, 7),
np.arange(7, 10),
np.array([]),
np.array([]),
]
compare_results(res, desired)
|
TestArraySplit
|
python
|
apache__airflow
|
airflow-ctl/tests/airflow_ctl/ctl/commands/test_variable_command.py
|
{
"start": 1145,
"end": 4377
}
|
class ____:
key = "key"
value = "value"
description = "description"
export_file_name = "exported_json.json"
parser = cli_parser.get_parser()
variable_collection_response = VariableCollectionResponse(
variables=[
VariableResponse(
key=key,
value=value,
description=description,
is_encrypted=False,
),
],
total_entries=1,
)
bulk_response_success = BulkResponse(
create=BulkActionResponse(success=[key], errors=[]), update=None, delete=None
)
bulk_response_error = BulkResponse(
create=BulkActionResponse(
success=[],
errors=[
{"error": f"The variables with these keys: {{'{key}'}} already exist.", "status_code": 409}
],
),
update=None,
delete=None,
)
def test_import_success(self, api_client_maker, tmp_path, monkeypatch):
api_client = api_client_maker(
path="/api/v2/variables",
response_json=self.bulk_response_success.model_dump(),
expected_http_status_code=200,
kind=ClientKind.CLI,
)
monkeypatch.chdir(tmp_path)
expected_json_path = tmp_path / self.export_file_name
variable_file = {
self.key: self.value,
}
expected_json_path.write_text(json.dumps(variable_file))
response = variable_command.import_(
self.parser.parse_args(["variables", "import", expected_json_path.as_posix()]),
api_client=api_client,
)
assert response == [self.key]
def test_import_error(self, api_client_maker, tmp_path, monkeypatch):
api_client = api_client_maker(
path="/api/v2/variables",
response_json=self.bulk_response_error.model_dump(),
expected_http_status_code=200,
kind=ClientKind.CLI,
)
monkeypatch.chdir(tmp_path)
expected_json_path = tmp_path / self.export_file_name
variable_file = {
self.key: self.value,
}
expected_json_path.write_text(json.dumps(variable_file))
with pytest.raises(SystemExit):
variable_command.import_(
self.parser.parse_args(["variables", "import", expected_json_path.as_posix()]),
api_client=api_client,
)
def test_export(self, api_client_maker, tmp_path, monkeypatch):
api_client = api_client_maker(
path="/api/v2/variables",
response_json=self.variable_collection_response.model_dump(),
expected_http_status_code=200,
kind=ClientKind.CLI,
)
monkeypatch.chdir(tmp_path)
expected_json_path = (tmp_path / self.export_file_name).as_posix()
variable_command.export(
self.parser.parse_args(["variables", "export", expected_json_path]),
api_client=api_client,
)
assert os.path.exists(tmp_path / self.export_file_name)
with open(expected_json_path) as f:
assert json.load(f) == {self.key: {"description": self.description, "value": self.value}}
|
TestCliVariableCommands
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/hyperparameter_tuning_job.py
|
{
"start": 19430,
"end": 21935
}
|
class ____(GoogleCloudBaseOperator):
"""
Deletes a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job_id: Required. The name of the HyperparameterTuningJob resource to be
deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
template_fields = ("region", "project_id", "hyperparameter_tuning_job_id", "impersonation_chain")
def __init__(
self,
*,
hyperparameter_tuning_job_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.hyperparameter_tuning_job_id = hyperparameter_tuning_job_id
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = HyperparameterTuningJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting Hyperparameter Tuning job: %s", self.hyperparameter_tuning_job_id)
operation = hook.delete_hyperparameter_tuning_job(
region=self.region,
project_id=self.project_id,
hyperparameter_tuning_job=self.hyperparameter_tuning_job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Hyperparameter Tuning job was deleted.")
except NotFound:
self.log.info(
"The Hyperparameter Tuning Job ID %s does not exist.", self.hyperparameter_tuning_job_id
)
|
DeleteHyperparameterTuningJobOperator
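A hypothetical task definition using the operator above inside an Airflow DAG; the project, region, and job id are placeholders, and the import path mirrors the record's file.

from airflow.providers.google.cloud.operators.vertex_ai.hyperparameter_tuning_job import (
    DeleteHyperparameterTuningJobOperator,
)

delete_hp_tuning_job = DeleteHyperparameterTuningJobOperator(
    task_id="delete_hp_tuning_job",
    project_id="my-gcp-project",
    region="us-central1",
    hyperparameter_tuning_job_id="1234567890",
    gcp_conn_id="google_cloud_default",
)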
|