| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
python
|
doocs__leetcode
|
solution/0800-0899/0835.Image Overlap/Solution.py
|
{
"start": 0,
"end": 463
}
|
class ____:
def largestOverlap(self, img1: List[List[int]], img2: List[List[int]]) -> int:
n = len(img1)
cnt = Counter()
for i in range(n):
for j in range(n):
if img1[i][j]:
for h in range(n):
for k in range(n):
if img2[h][k]:
cnt[(i - h, j - k)] += 1
return max(cnt.values()) if cnt else 0
|
Solution
|
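A minimal, self-contained sketch of how the row above is exercised, assuming the masked class resolves to its target name `Solution` (per the `target` column); the grids and expected output come from the standard LeetCode 835 example.

```python
from collections import Counter
from typing import List

class Solution:
    def largestOverlap(self, img1: List[List[int]], img2: List[List[int]]) -> int:
        n = len(img1)
        cnt = Counter()
        for i in range(n):
            for j in range(n):
                if img1[i][j]:
                    for h in range(n):
                        for k in range(n):
                            if img2[h][k]:
                                # count each (row shift, col shift) that aligns a pair of 1s
                                cnt[(i - h, j - k)] += 1
        # the best translation is the one aligning the most 1s
        return max(cnt.values()) if cnt else 0

print(Solution().largestOverlap([[1, 1, 0], [0, 1, 0], [0, 1, 0]],
                                [[0, 0, 0], [0, 1, 1], [0, 0, 1]]))  # 3
```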
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/views/device.py
|
{
"start": 3191,
"end": 4703
}
|
class ____(LoginRequiredMixin, FormView):
"""
The view where the user is instructed (by the device) to come to in order to
enter the user code. More details in this section of the RFC:
https://datatracker.ietf.org/doc/html/rfc8628#section-3.3
Note: it's common to see in other implementations of this RFC that only ask the
user to sign in after they input the user code but since the user has to be signed
in regardless, to approve the device login we're making the decision here, for
simplicity, to require being logged in up front.
"""
template_name = "oauth2_provider/device/user_code.html"
form_class = DeviceGrantForm
def get_success_url(self):
return reverse(
"oauth2_provider:device-confirm",
kwargs={
"client_id": self.device_grant.client_id,
"user_code": self.device_grant.user_code,
},
)
def form_valid(self, form):
"""
Sets the device_grant on the instance so that it can be accessed
in get_success_url. It comes in handy when users want to overwrite
get_success_url, redirecting to the URL with the URL params pointing
to the current device.
"""
device_grant: DeviceGrant = form.cleaned_data["device_grant"]
device_grant.user = self.request.user
device_grant.save(update_fields=["user"])
self.device_grant = device_grant
return super().form_valid(form)
|
DeviceUserCodeView
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-the-digits-that-divide-a-number.py
|
{
"start": 39,
"end": 312
}
|
class ____(object):
def countDigits(self, num):
"""
:type num: int
:rtype: int
"""
result = 0
curr = num
while curr:
result += int(num%(curr%10) == 0)
curr //= 10
return result
|
Solution
|
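A runnable sketch of the digit-divisor count above, again assuming the masked class restores to `Solution`; the two calls use the problem's classic examples.

```python
class Solution(object):
    def countDigits(self, num):
        result = 0
        curr = num
        while curr:
            # curr % 10 is the current (non-zero) digit; count it if it divides num
            result += int(num % (curr % 10) == 0)
            curr //= 10
        return result

print(Solution().countDigits(121))   # 2  (both 1s divide 121; 2 does not)
print(Solution().countDigits(1248))  # 4  (every digit divides 1248)
```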
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_dataplex.py
|
{
"start": 13442,
"end": 14420
}
|
class ____:
@mock.patch(HOOK_STR)
@mock.patch(DATASCANJOB_STR)
def test_execute(self, mock_data_scan_job, hook_mock):
op = DataplexRunDataProfileScanOperator(
task_id="execute_data_scan",
project_id=PROJECT_ID,
region=REGION,
data_scan_id=DATA_SCAN_ID,
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.run_data_scan.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
data_scan_id=DATA_SCAN_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
|
TestDataplexRunDataProfileScanOperator
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-uniprot/llama_index/readers/uniprot/base.py
|
{
"start": 871,
"end": 15279
}
|
class ____(BaseReader):
"""
UniProt reader for LlamaIndex.
Reads UniProt Swiss-Prot format files and converts them into LlamaIndex Documents.
Each record is converted into a document with structured text and metadata.
Args:
include_fields (Optional[Set[str]]): Set of fields to include in the output.
Defaults to all fields.
max_records (Optional[int]): Maximum number of records to parse.
If None, parse all records.
"""
# Mapping of field names to their two-letter codes in UniProt format
FIELD_CODES = {
"id": "ID",
"accession": "AC",
"description": "DE",
"gene_names": "GN",
"organism": "OS",
"comments": "CC",
"keywords": "KW",
"features": "FT",
"sequence_length": "SQ",
"sequence_mw": "SQ",
"taxonomy": "OC",
"taxonomy_id": "OX",
"citations": "RN",
"cross_references": "DR",
}
def __init__(
self,
include_fields: Optional[Set[str]] = None,
max_records: Optional[int] = None,
) -> None:
"""Initialize with arguments."""
super().__init__()
self.include_fields = include_fields or {
"id",
"accession",
"description",
"gene_names",
"organism",
"comments",
"keywords",
"sequence_length",
"sequence_mw",
"taxonomy",
"taxonomy_id",
"citations",
"cross_references",
}
self.max_records = max_records
# Field codes we need to parse
self.include_field_codes = {
code
for field_name, code in self.FIELD_CODES.items()
if field_name in self.include_fields
}
def load_data(
self, input_file: str, extra_info: Optional[Dict] = {}
) -> List[Document]:
"""Load data from the input file."""
documents = []
record_count = 0
for record_lines in self._read_records(input_file):
if self.max_records is not None and record_count >= self.max_records:
break
record = self._parse_record(record_lines)
if record:
document = self._record_to_document(record)
document.metadata.update(extra_info)
documents.append(document)
record_count += 1
return documents
def lazy_load_data(
self, input_file: str, extra_info: Optional[Dict] = {}
) -> Generator[Document, None, None]:
"""
Load data from the input file lazily, yielding one document at a time.
This method is memory efficient as it processes one record at a time instead of
loading all records into memory at once. It's particularly useful for large UniProt files.
Args:
input_file (str): Path to the UniProt file
extra_info (Optional[Dict]): Additional metadata to add to each document
Yields:
Document: One document at a time
"""
record_count = 0
for record_lines in self._read_records(input_file):
if self.max_records is not None and record_count >= self.max_records:
break
record = self._parse_record(record_lines)
if record:
document = self._record_to_document(record)
document.metadata.update(extra_info)
yield document
record_count += 1
def _parse_record(self, lines: List[str]) -> Optional[UniProtRecord]:
"""Parse a single UniProt record from lines."""
if not lines:
return None
record = UniProtRecord(
id="",
accession=[],
description="",
gene_names=[],
organism="",
comments=[],
keywords=[],
features=[],
sequence_length=0,
sequence_mw=0,
dates=[],
taxonomy=[],
taxonomy_id={},
cross_references=[],
citations=[],
)
current_field = None
for line in lines:
if not line.strip():
continue
if line.startswith("//"):
break
field = line[:2]
if field not in self.include_field_codes and current_field != "citations":
continue
value = line[5:].strip().rstrip(";")
if field != "RA":
# Remove trailing period
# Do not remove trailing period from authors names
value = value.rstrip(".")
if field == "ID":
record.id = value.split()[0]
current_field = "id"
elif field == "AC":
record.accession = [acc.strip() for acc in value.split(";")]
current_field = "accession"
elif field == "DE":
record.description = value
current_field = "description"
elif field == "GN":
record.gene_names = [name.strip() for name in value.split(";")]
current_field = "gene_names"
elif field == "OS":
record.organism = value
current_field = "organism"
elif field == "CC":
if value.startswith("-!-"):
record.comments.append(value[4:])
elif value.startswith("---"):
# Skip separator lines
continue
elif any(word in value.lower() for word in ["copyright", "license"]):
# Skip standard UniProt footer comments
continue
else:
record.comments.append(value)
current_field = "comments"
elif field == "KW":
# Handle multiple KW lines by extending the list
record.keywords.extend([kw.strip() for kw in value.split(";")])
current_field = "keywords"
elif field == "FT":
if value:
feature_parts = value.split()
if len(feature_parts) >= 2:
record.features.append(
{
"type": feature_parts[0],
"location": feature_parts[1],
"description": " ".join(feature_parts[2:])
if len(feature_parts) > 2
else "",
}
)
current_field = "features"
elif field == "SQ":
if "SEQUENCE" in value:
parts = value.split(";")
record.sequence_length = int(parts[0].split()[1])
record.sequence_mw = int(parts[1].split()[0])
current_field = "sequence"
elif field == "OC":
record.taxonomy.extend(value.split("; "))
elif field == "OX":
# Parse taxonomy database qualifier and code
# Format: OX Taxonomy_database_Qualifier=Taxonomic code;
parts = value.split("=")
if len(parts) == 2:
record.taxonomy_id = {"database": parts[0], "code": parts[1]}
elif field == "RN":
# Start a new citation block
current_citation = {
"number": value.strip("[]"),
"position": [],
"comment": [],
"cross_references": [],
"authors": "",
"title": "",
"location": [],
}
record.citations.append(current_citation)
current_field = "citations"
elif field == "RP" and current_field == "citations":
current_citation["position"].append(value)
elif field == "RC" and current_field == "citations":
current_citation["comment"].append(value)
elif field == "RX" and current_field == "citations":
current_citation["cross_references"].append(value)
elif field == "RA" and current_field == "citations":
# Concatenate author lines with space
current_citation["authors"] = (
current_citation["authors"] + " " + value
).strip()
elif field == "RT" and current_field == "citations":
# Concatenate title lines with space and remove quotes
title = (current_citation["title"] + " " + value).strip()
current_citation["title"] = title.strip('"')
elif field == "RL" and current_field == "citations":
current_citation["location"].append(value)
elif field == "DR":
# Parse database cross-references
# Format: DR RESOURCE_ABBREVIATION; RESOURCE_IDENTIFIER; OPTIONAL_INFORMATION_1[; OPTIONAL_INFORMATION_2][; OPTIONAL_INFORMATION_3].
parts = value.split("; ")
if len(parts) >= 2:
record.cross_references.append(
{
"abbrev": parts[0],
"id": parts[1],
"info": parts[2:],
}
)
current_field = "cross_references"
return record
def _record_to_document(self, record: UniProtRecord) -> Document:
"""Convert a UniProt record to a LlamaIndex Document."""
text_parts = []
if "id" in self.include_fields:
text_parts.append(f"Protein ID: {record.id}")
if "accession" in self.include_fields:
text_parts.append(f"Accession numbers: {', '.join(record.accession)}")
if "description" in self.include_fields:
text_parts.append(f"Description: {record.description}")
if "gene_names" in self.include_fields:
text_parts.append(f"Gene names: {', '.join(record.gene_names)}")
if "organism" in self.include_fields:
text_parts.append(f"Organism: {record.organism}")
if "comments" in self.include_fields:
text_parts.append("Comments:")
text_parts.extend(f"- {comment}" for comment in record.comments)
if "keywords" in self.include_fields:
text_parts.append(f"Keywords: {', '.join(record.keywords)}")
if "features" in self.include_fields:
text_parts.append("Features:")
text_parts.extend(
f"- {feature['type']} ({feature['location']}): {feature['description']}"
for feature in record.features
)
if "sequence_length" in self.include_fields:
text_parts.append(f"Sequence length: {record.sequence_length} AA")
if "sequence_mw" in self.include_fields:
text_parts.append(f"Molecular weight: {record.sequence_mw} Da")
if "taxonomy" in self.include_fields:
# Clean up taxonomy by removing empty entries and joining with proper hierarchy
clean_taxonomy = [t for t in record.taxonomy if t]
text_parts.append("Taxonomy:")
text_parts.append(" " + " > ".join(clean_taxonomy))
if "taxonomy_id" in self.include_fields and record.taxonomy_id:
text_parts.append(
f"Taxonomy ID: {record.taxonomy_id['database']} {record.taxonomy_id['code']}"
)
if "cross_references" in self.include_fields:
text_parts.append("Cross-references:")
for ref in record.cross_references:
text_parts.append(
f"- {ref['abbrev']}: {ref['id']}" + (f" - {'; '.join(ref['info'])}")
)
if "citations" in self.include_fields and record.citations:
text_parts.append("Citations:")
for citation in record.citations:
text_parts.append(f"Reference {citation['number']}:")
if citation["position"]:
text_parts.append(" Position: " + " ".join(citation["position"]))
if citation["title"]:
text_parts.append(" Title: " + citation["title"])
if citation["authors"]:
text_parts.append(" Authors: " + citation["authors"])
if citation["location"]:
text_parts.append(" Location: " + " ".join(citation["location"]))
if citation["comment"]:
text_parts.append(" Comments: " + " ".join(citation["comment"]))
if citation["cross_references"]:
text_parts.append(
" Cross-references: " + " ".join(citation["cross_references"])
)
metadata = {
"id": record.id,
}
return Document(text="\n".join(text_parts), metadata=metadata)
def _read_records(self, file_path: str) -> Generator[List[str], None, None]:
"""Read UniProt records from file."""
current_record = []
with open(file_path, encoding="utf-8") as f:
for line in f:
if line.startswith("//"):
if current_record:
yield current_record
current_record = []
else:
current_record.append(line)
if current_record:
yield current_record
def count_records(self, file_path: str) -> int:
"""
Count the total number of protein records in the UniProt database file.
Uses grep to efficiently count lines starting with "//" which is much faster
than reading the file line by line.
Args:
file_path (str): Path to the UniProt database file
Returns:
int: Total number of protein records in the file
"""
count = 0
with open(file_path, encoding="utf-8") as f:
for line in f:
if line.startswith("//"):
count += 1
return count
|
UniProtReader
|
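A hedged usage sketch for the reader above, based only on the constructor, `load_data`, and `lazy_load_data` signatures shown in the row. The import path is inferred from the package layout in the `path` column and the input filename is hypothetical.

```python
from llama_index.readers.uniprot import UniProtReader  # assumed import path

reader = UniProtReader(
    include_fields={"id", "description", "organism", "sequence_length"},
    max_records=10,  # parse only the first 10 records
)

# Eager loading: returns a list of Documents
docs = reader.load_data("uniprot_sprot.dat")  # hypothetical Swiss-Prot file

# Lazy loading: yields one Document at a time, useful for very large files
for doc in reader.lazy_load_data("uniprot_sprot.dat"):
    print(doc.metadata["id"], doc.text.splitlines()[0])
```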
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/resource_requirement.py
|
{
"start": 7267,
"end": 10062
}
|
class ____(ResourceKeyRequirement):
key: str # pyright: ignore[reportIncompatibleMethodOverride]
source_key: Optional[str]
def describe_requirement(self) -> str:
source_descriptor = f" by resource with key '{self.source_key}'" if self.source_key else ""
return f"resource with key '{self.key}' required{source_descriptor}"
def ensure_requirements_satisfied(
resource_defs: Mapping[str, "ResourceDefinition"],
requirements: Sequence[ResourceRequirement],
) -> None:
for requirement in requirements:
requirement.ensure_satisfied(resource_defs)
def get_resource_key_conflicts(
resource_defs: Mapping[str, "ResourceDefinition"],
other_resource_defs: Mapping[str, "ResourceDefinition"],
) -> AbstractSet[str]:
overlapping_keys = set(resource_defs.keys()).intersection(set(other_resource_defs.keys()))
overlapping_keys = {key for key in overlapping_keys if key != DEFAULT_IO_MANAGER_KEY}
return overlapping_keys
def merge_resource_defs(
old_resource_defs: Mapping[str, "ResourceDefinition"],
resource_defs_to_merge_in: Mapping[str, "ResourceDefinition"],
requires_resources: "AssetsDefinition",
) -> Mapping[str, "ResourceDefinition"]:
from dagster._core.execution.resources_init import get_transitive_required_resource_keys
overlapping_keys = get_resource_key_conflicts(old_resource_defs, resource_defs_to_merge_in)
if overlapping_keys:
overlapping_keys_str = ", ".join(sorted(list(overlapping_keys)))
raise DagsterInvalidInvocationError(
f"{requires_resources} has conflicting resource "
"definitions with provided resources for the following keys: "
f"{overlapping_keys_str}. Either remove the existing "
"resources from the asset or change the resource keys so that "
"they don't overlap."
)
merged_resource_defs = merge_dicts(resource_defs_to_merge_in, old_resource_defs)
# Ensure top-level resource requirements are met - except for
# io_manager, since that is a default it can be resolved later.
requirements = [
*requires_resources.get_resource_requirements(),
*[
req
for key, resource in merged_resource_defs.items()
for req in resource.get_resource_requirements(source_key=key)
],
]
ensure_requirements_satisfied(merged_resource_defs, requirements)
# Get all transitive resource dependencies from other resources.
relevant_keys = get_transitive_required_resource_keys(
requires_resources.required_resource_keys, merged_resource_defs
)
return {
key: resource_def
for key, resource_def in merged_resource_defs.items()
if key in relevant_keys
}
|
ResourceDependencyRequirement
|
python
|
getsentry__sentry
|
tests/sentry/features/test_flagpole_context.py
|
{
"start": 5956,
"end": 8651
}
|
class ____(TestCase):
def test_without_user_passed(self) -> None:
context_data = project_context_transformer(SentryContextData())
assert context_data == {}
def test_with_invalid_user_passed(self) -> None:
with pytest.raises(InvalidContextDataException):
user_context_transformer(SentryContextData(actor=123)) # type: ignore[arg-type]
with pytest.raises(InvalidContextDataException):
user_context_transformer(SentryContextData(actor=self.create_organization()))
def test_with_valid_user(self) -> None:
user = self.create_user(email="foobar@example.com")
# Create a new, unverified email to ensure we don't list it
self.create_useremail(user=user, email="unverified_email@example.com")
context_data = user_context_transformer(SentryContextData(actor=user))
assert context_data == {
"user_email": "foobar@example.com",
"user_domain": "example.com",
"user_id": user.id,
"user_is-superuser": False,
"user_is-staff": False,
}
def test_with_only_unverified_emails(self) -> None:
user = self.create_user(email="foobar@example.com")
user_email = UserEmail.objects.filter(user_id=user.id).get()
user_email.is_verified = False
user_email.save()
context_data = user_context_transformer(SentryContextData(actor=user))
assert context_data == {
"user_id": user.id,
"user_is-superuser": False,
"user_is-staff": False,
}
def test_with_super_user_and_staff(self) -> None:
user = self.create_user(email="super_user_admin_person@sentry.io", is_superuser=True)
context_data = user_context_transformer(SentryContextData(actor=user))
assert context_data == {
"user_email": "super_user_admin_person@sentry.io",
"user_domain": "sentry.io",
"user_id": user.id,
"user_is-superuser": True,
"user_is-staff": False,
}
user.is_staff = True
user.is_superuser = False
user.save()
context_data = user_context_transformer(SentryContextData(actor=user))
assert context_data == {
"user_email": "super_user_admin_person@sentry.io",
"user_domain": "sentry.io",
"user_id": user.id,
"user_is-superuser": False,
"user_is-staff": True,
}
def test_with_anonymous_user(self) -> None:
user = AnonymousUser()
context_data = user_context_transformer(SentryContextData(actor=user))
assert context_data == {}
|
TestUserContextTransformer
|
python
|
huggingface__transformers
|
src/transformers/models/phi3/modular_phi3.py
|
{
"start": 9111,
"end": 9187
}
|
class ____(MistralPreTrainedModel):
_version = "0.0.5"
|
Phi3PreTrainedModel
|
python
|
django-guardian__django-guardian
|
example_project_custom_group/core/migrations/0001_initial.py
|
{
"start": 251,
"end": 4901
}
|
class ____(migrations.Migration):
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="CustomGroup",
fields=[
(
"group_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="auth.group",
),
),
("label", models.CharField(max_length=120)),
],
bases=("auth.group", guardian.mixins.GuardianGroupMixin),
managers=[
("objects", django.contrib.auth.models.GroupManager()),
],
),
migrations.CreateModel(
name="CustomUser",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("password", models.CharField(max_length=128, verbose_name="password")),
("last_login", models.DateTimeField(blank=True, null=True, verbose_name="last login")),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={"unique": "A user with that username already exists."},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name="username",
),
),
("first_name", models.CharField(blank=True, max_length=150, verbose_name="first name")),
("last_name", models.CharField(blank=True, max_length=150, verbose_name="last name")),
("email", models.EmailField(blank=True, max_length=254, verbose_name="email address")),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
("date_joined", models.DateTimeField(default=django.utils.timezone.now, verbose_name="date joined")),
("birth_date", models.DateField(blank=True, null=True)),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="core.CustomGroup",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"abstract": False,
},
bases=(models.Model, guardian.mixins.GuardianUserMixin),
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
]
|
Migration
|
python
|
neetcode-gh__leetcode
|
python/2616-minimize-the-maximum-difference-of-pairs.py
|
{
"start": 0,
"end": 619
}
|
class ____:
def minimizeMax(self, nums: List[int], p: int) -> int:
nums.sort()
def checkPair(mid):
count, i = 0, 0
while i < len(nums) - 1:
if nums[i + 1] - nums[i] <= mid:
count += 1
i += 2
else:
i += 1
return count >= p
left, right = 0, nums[-1] - nums[0]
while left < right:
mid = (left + right) // 2
if checkPair(mid):
right = mid
else:
left = mid + 1
return left
|
Solution
|
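A self-contained sketch (class name restored from the `target` column) running the binary-search-on-the-answer above against the standard example, where the minimum possible maximum pair difference is 1.

```python
from typing import List

class Solution:
    def minimizeMax(self, nums: List[int], p: int) -> int:
        nums.sort()

        def checkPair(mid):
            # greedily count adjacent pairs whose difference is <= mid
            count, i = 0, 0
            while i < len(nums) - 1:
                if nums[i + 1] - nums[i] <= mid:
                    count += 1
                    i += 2
                else:
                    i += 1
            return count >= p

        left, right = 0, nums[-1] - nums[0]
        while left < right:
            mid = (left + right) // 2
            if checkPair(mid):
                right = mid
            else:
                left = mid + 1
        return left

print(Solution().minimizeMax([10, 1, 2, 7, 1, 3], p=2))  # 1
```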
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 73326,
"end": 73369
}
|
class ____(NamedTuple):
val: Any
|
ConstRecipe
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/guides/components/using-template-variables/component.py
|
{
"start": 61,
"end": 349
}
|
class ____(dg.Component):
@staticmethod
@dg.template_var
def database_url() -> str:
return "postgresql://localhost:5432/mydb"
@staticmethod
@dg.template_var
def get_table_name() -> Callable:
return lambda prefix: f"{prefix}_processed_data"
|
MyComponent
|
python
|
huggingface__transformers
|
tests/models/prophetnet/test_modeling_prophetnet.py
|
{
"start": 30424,
"end": 40770
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (ProphetNetModel, ProphetNetForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": ProphetNetModel,
"summarization": ProphetNetForConditionalGeneration,
"text-generation": ProphetNetForCausalLM,
"text2text-generation": ProphetNetForConditionalGeneration,
"translation": ProphetNetForConditionalGeneration,
}
if is_torch_available()
else {}
)
test_resize_embeddings = False
is_encoder_decoder = True
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `ProphetNetConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def setUp(self):
self.model_tester = ProphetNetModelTester(self)
self.config_tester = ConfigTester(self, config_class=ProphetNetConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_lm_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_lm_head(*config_and_inputs)
def test_only_decoder_causal_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm_decoder(*config_and_inputs)
@unittest.skip(reason="The init scheme changes, this is weird but now failing.")
def test_fast_integration(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_fast_integration(*config_and_inputs)
def test_shift_labels_via_shift_left(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)
@unittest.skip(reason="Flaky test with no simple resolution. TODO Fix me @patrickvonplaten")
def test_decoder_model_generate(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_generate_with_past_key_value_states(*config_and_inputs)
def test_encoder_decoder_model_generate(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_generate_with_past_key_value_states(*config_and_inputs)
def test_attn_mask_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_model_with_attn_mask(*config_and_inputs)
def test_config_save(self):
config = self.model_tester.prepare_config_and_inputs()[0]
config.add_cross_attention = False
with tempfile.TemporaryDirectory() as tmp_dirname:
config.save_pretrained(tmp_dirname)
config = ProphetNetConfig.from_pretrained(tmp_dirname)
self.assertFalse(config.add_cross_attention)
def test_causal_lm_from_pretrained(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_causal_lm_from_pretrained(*config_and_inputs)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
# methods overwrite method in `test_modeling_common.py`
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
correct_outlen = 7
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
(self.model_tester.ngram + 1) * decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
@require_torch
|
ProphetNetModelTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-glassflow/destination_glassflow/destination.py
|
{
"start": 979,
"end": 4836
}
|
class ____(Destination):
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
streams = {s.stream.name for s in configured_catalog.streams}
connection = create_source_connection(config)
for message in input_messages:
if message.type == Type.STATE:
# Emitting a state message means all records that came before it
# have already been published.
yield message
elif message.type == Type.RECORD:
record = message.record
if record.stream not in streams:
# Message contains record from a stream that is not in the catalog. Skip it!
logger.debug(f"Stream {record.stream} was not present in configured streams, skipping")
continue
connection.publish(
{
"stream": record.stream,
"namespace": record.namespace,
"emitted_at": record.emitted_at,
"data": record.data,
}
)
else:
logger.info(f"Message type {message.type} not supported, skipping")
continue
def check(self, logger: Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
connection = create_source_connection(config)
try:
connection.validate_credentials()
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except errors.PipelineAccessTokenInvalidError:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"The pipeline access token is not valid")
except Exception as e:
logger.error(f"Failed to create connection. Error: {e}")
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
|
DestinationGlassflow
|
python
|
aio-libs__aiohttp
|
aiohttp/client.py
|
{
"start": 3825,
"end": 4755
}
|
class ____(TypedDict, total=False):
params: Query
data: Any
json: Any
cookies: LooseCookies | None
headers: LooseHeaders | None
skip_auto_headers: Iterable[str] | None
auth: BasicAuth | None
allow_redirects: bool
max_redirects: int
compress: str | bool
chunked: bool | None
expect100: bool
raise_for_status: None | bool | Callable[[ClientResponse], Awaitable[None]]
read_until_eof: bool
proxy: StrOrURL | None
proxy_auth: BasicAuth | None
timeout: "ClientTimeout | _SENTINEL | None"
ssl: SSLContext | bool | Fingerprint
server_hostname: str | None
proxy_headers: LooseHeaders | None
trace_request_ctx: Mapping[str, Any] | None
read_bufsize: int | None
auto_decompress: bool | None
max_line_size: int | None
max_field_size: int | None
middlewares: Sequence[ClientMiddlewareType] | None
@frozen_dataclass_decorator
|
_RequestOptions
|
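A minimal sketch, not aiohttp's actual wiring, of how a `total=False` TypedDict like the one above can type `**kwargs` via `typing.Unpack` (PEP 692). The `_Options` class and `request` function are hypothetical stand-ins; this requires Python 3.12+ (or `Unpack` from `typing_extensions` on older versions).

```python
from typing import Any, TypedDict, Unpack

class _Options(TypedDict, total=False):  # hypothetical stand-in for _RequestOptions
    allow_redirects: bool
    max_redirects: int

def request(url: str, **kwargs: Unpack[_Options]) -> dict[str, Any]:
    # A type checker only accepts keyword arguments declared in _Options.
    return {"url": url, **kwargs}

request("https://example.com", allow_redirects=False, max_redirects=5)
```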
python
|
pytorch__pytorch
|
test/distributed/pipelining/model_registry.py
|
{
"start": 2031,
"end": 3798
}
|
class ____(torch.nn.Module):
DEFAULT_DHID = 512
DEFAULT_BATCH_SIZE = 256
def __init__(self, d_hid: int = DEFAULT_DHID, splits=2):
assert splits <= 8
super().__init__()
self.splits = splits
self.mm_param0 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param1 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin0 = torch.nn.Linear(d_hid, d_hid)
self.lin1 = torch.nn.Linear(d_hid, d_hid)
self.lin2 = torch.nn.Linear(d_hid, d_hid)
self.lin3 = torch.nn.Linear(d_hid, d_hid)
self.lin4 = torch.nn.Linear(d_hid, d_hid)
self.lin5 = torch.nn.Linear(d_hid, d_hid)
self.lin6 = torch.nn.Linear(d_hid, d_hid)
self.lin7 = torch.nn.Linear(d_hid, d_hid)
def forward(self, x, y=torch.zeros(DEFAULT_BATCH_SIZE, DEFAULT_DHID)):
x = torch.mm(x, self.mm_param0)
x = x + y
x = self.lin0(x)
x = torch.relu(x)
pipe_split()
x = torch.mm(x, self.mm_param1)
x = self.lin1(x)
x = torch.relu(x)
if self.splits > 2:
pipe_split()
x = self.lin2(x)
x = torch.relu(x)
if self.splits > 3:
pipe_split()
x = self.lin3(x)
x = torch.relu(x)
if self.splits > 4:
pipe_split()
x = self.lin4(x)
x = torch.relu(x)
if self.splits > 5:
pipe_split()
x = self.lin5(x)
x = torch.relu(x)
if self.splits > 6:
pipe_split()
x = self.lin6(x)
x = torch.relu(x)
if self.splits > 7:
pipe_split()
x = self.lin7(x)
x = torch.relu(x)
return x
|
ModelWithKwargs
|
python
|
explosion__spaCy
|
spacy/lang/fi/__init__.py
|
{
"start": 282,
"end": 536
}
|
class ____(BaseDefaults):
infixes = TOKENIZER_INFIXES
suffixes = TOKENIZER_SUFFIXES
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
syntax_iterators = SYNTAX_ITERATORS
|
FinnishDefaults
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/prompts_activity.py
|
{
"start": 1336,
"end": 5125
}
|
class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
"PUT": ApiPublishStatus.UNKNOWN,
}
def get(self, request: Request, **kwargs) -> Response:
"""Return feature prompt status if dismissed or in snoozed period"""
if not request.user.is_authenticated:
return Response(status=400)
features = request.GET.getlist("feature")
if len(features) == 0:
return Response({"details": "No feature specified"}, status=400)
conditions: Q | None = None
for feature in features:
if not prompt_config.has(feature):
return Response({"detail": "Invalid feature name " + feature}, status=400)
required_fields = prompt_config.required_fields(feature)
for field in required_fields:
if field not in request.GET:
return Response({"detail": 'Missing required field "%s"' % field}, status=400)
filters = {k: request.GET.get(k) for k in required_fields}
condition = Q(feature=feature, **filters)
conditions = condition if conditions is None else (conditions | condition)
result_qs = PromptsActivity.objects.filter(conditions, user_id=request.user.id)
featuredata = {k.feature: k.data for k in result_qs}
if len(features) == 1:
result = result_qs.first()
data = None if result is None else result.data
return Response({"data": data, "features": featuredata})
else:
return Response({"features": featuredata})
def put(self, request: Request, **kwargs):
serializer = PromptsActivitySerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
serialized = serializer.validated_data
feature = serialized["feature"]
status = serialized["status"]
required_fields = prompt_config.required_fields(feature)
fields = {k: request.data.get(k) for k in required_fields}
if any(elem is None for elem in fields.values()):
return Response({"detail": "Missing required field"}, status=400)
# if project_id or organization_id in required fields make sure they exist
# if NOT in required fields, insert dummy value so dups aren't recorded
if "project_id" in required_fields:
if not Project.objects.filter(id=fields["project_id"]).exists():
return Response({"detail": "Project no longer exists"}, status=400)
else:
fields["project_id"] = 0
if (
"organization_id" in required_fields
and fields["organization_id"] == request.organization.id
):
if not Organization.objects.filter(id=fields["organization_id"]).exists():
return Response({"detail": "Organization no longer exists"}, status=400)
else:
return Response({"detail": "Organization missing or mismatched"}, status=400)
data: dict[str, Any] = {}
now = calendar.timegm(timezone.now().utctimetuple())
if status == "snoozed":
data["snoozed_ts"] = now
elif status == "dismissed":
data["dismissed_ts"] = now
elif status == "visible":
data["snoozed_ts"] = None
data["dismissed_ts"] = None
try:
with transaction.atomic(router.db_for_write(PromptsActivity)):
PromptsActivity.objects.create_or_update(
feature=feature, user_id=request.user.id, values={"data": data}, **fields
)
except IntegrityError:
pass
return HttpResponse(status=201)
|
PromptsActivityEndpoint
|
python
|
zostera__django-bootstrap4
|
tests/forms.py
|
{
"start": 399,
"end": 556
}
|
class ____(forms.Form):
subject = forms.CharField(
max_length=100,
help_text="my_help_text",
required=True,
)
|
CharFieldTestForm
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_persistent_volume_spec.py
|
{
"start": 383,
"end": 31908
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'access_modes': 'list[str]',
'aws_elastic_block_store': 'V1AWSElasticBlockStoreVolumeSource',
'azure_disk': 'V1AzureDiskVolumeSource',
'azure_file': 'V1AzureFilePersistentVolumeSource',
'capacity': 'dict(str, str)',
'cephfs': 'V1CephFSPersistentVolumeSource',
'cinder': 'V1CinderPersistentVolumeSource',
'claim_ref': 'V1ObjectReference',
'csi': 'V1CSIPersistentVolumeSource',
'fc': 'V1FCVolumeSource',
'flex_volume': 'V1FlexPersistentVolumeSource',
'flocker': 'V1FlockerVolumeSource',
'gce_persistent_disk': 'V1GCEPersistentDiskVolumeSource',
'glusterfs': 'V1GlusterfsPersistentVolumeSource',
'host_path': 'V1HostPathVolumeSource',
'iscsi': 'V1ISCSIPersistentVolumeSource',
'local': 'V1LocalVolumeSource',
'mount_options': 'list[str]',
'nfs': 'V1NFSVolumeSource',
'node_affinity': 'V1VolumeNodeAffinity',
'persistent_volume_reclaim_policy': 'str',
'photon_persistent_disk': 'V1PhotonPersistentDiskVolumeSource',
'portworx_volume': 'V1PortworxVolumeSource',
'quobyte': 'V1QuobyteVolumeSource',
'rbd': 'V1RBDPersistentVolumeSource',
'scale_io': 'V1ScaleIOPersistentVolumeSource',
'storage_class_name': 'str',
'storageos': 'V1StorageOSPersistentVolumeSource',
'volume_attributes_class_name': 'str',
'volume_mode': 'str',
'vsphere_volume': 'V1VsphereVirtualDiskVolumeSource'
}
attribute_map = {
'access_modes': 'accessModes',
'aws_elastic_block_store': 'awsElasticBlockStore',
'azure_disk': 'azureDisk',
'azure_file': 'azureFile',
'capacity': 'capacity',
'cephfs': 'cephfs',
'cinder': 'cinder',
'claim_ref': 'claimRef',
'csi': 'csi',
'fc': 'fc',
'flex_volume': 'flexVolume',
'flocker': 'flocker',
'gce_persistent_disk': 'gcePersistentDisk',
'glusterfs': 'glusterfs',
'host_path': 'hostPath',
'iscsi': 'iscsi',
'local': 'local',
'mount_options': 'mountOptions',
'nfs': 'nfs',
'node_affinity': 'nodeAffinity',
'persistent_volume_reclaim_policy': 'persistentVolumeReclaimPolicy',
'photon_persistent_disk': 'photonPersistentDisk',
'portworx_volume': 'portworxVolume',
'quobyte': 'quobyte',
'rbd': 'rbd',
'scale_io': 'scaleIO',
'storage_class_name': 'storageClassName',
'storageos': 'storageos',
'volume_attributes_class_name': 'volumeAttributesClassName',
'volume_mode': 'volumeMode',
'vsphere_volume': 'vsphereVolume'
}
def __init__(self, access_modes=None, aws_elastic_block_store=None, azure_disk=None, azure_file=None, capacity=None, cephfs=None, cinder=None, claim_ref=None, csi=None, fc=None, flex_volume=None, flocker=None, gce_persistent_disk=None, glusterfs=None, host_path=None, iscsi=None, local=None, mount_options=None, nfs=None, node_affinity=None, persistent_volume_reclaim_policy=None, photon_persistent_disk=None, portworx_volume=None, quobyte=None, rbd=None, scale_io=None, storage_class_name=None, storageos=None, volume_attributes_class_name=None, volume_mode=None, vsphere_volume=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolumeSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_modes = None
self._aws_elastic_block_store = None
self._azure_disk = None
self._azure_file = None
self._capacity = None
self._cephfs = None
self._cinder = None
self._claim_ref = None
self._csi = None
self._fc = None
self._flex_volume = None
self._flocker = None
self._gce_persistent_disk = None
self._glusterfs = None
self._host_path = None
self._iscsi = None
self._local = None
self._mount_options = None
self._nfs = None
self._node_affinity = None
self._persistent_volume_reclaim_policy = None
self._photon_persistent_disk = None
self._portworx_volume = None
self._quobyte = None
self._rbd = None
self._scale_io = None
self._storage_class_name = None
self._storageos = None
self._volume_attributes_class_name = None
self._volume_mode = None
self._vsphere_volume = None
self.discriminator = None
if access_modes is not None:
self.access_modes = access_modes
if aws_elastic_block_store is not None:
self.aws_elastic_block_store = aws_elastic_block_store
if azure_disk is not None:
self.azure_disk = azure_disk
if azure_file is not None:
self.azure_file = azure_file
if capacity is not None:
self.capacity = capacity
if cephfs is not None:
self.cephfs = cephfs
if cinder is not None:
self.cinder = cinder
if claim_ref is not None:
self.claim_ref = claim_ref
if csi is not None:
self.csi = csi
if fc is not None:
self.fc = fc
if flex_volume is not None:
self.flex_volume = flex_volume
if flocker is not None:
self.flocker = flocker
if gce_persistent_disk is not None:
self.gce_persistent_disk = gce_persistent_disk
if glusterfs is not None:
self.glusterfs = glusterfs
if host_path is not None:
self.host_path = host_path
if iscsi is not None:
self.iscsi = iscsi
if local is not None:
self.local = local
if mount_options is not None:
self.mount_options = mount_options
if nfs is not None:
self.nfs = nfs
if node_affinity is not None:
self.node_affinity = node_affinity
if persistent_volume_reclaim_policy is not None:
self.persistent_volume_reclaim_policy = persistent_volume_reclaim_policy
if photon_persistent_disk is not None:
self.photon_persistent_disk = photon_persistent_disk
if portworx_volume is not None:
self.portworx_volume = portworx_volume
if quobyte is not None:
self.quobyte = quobyte
if rbd is not None:
self.rbd = rbd
if scale_io is not None:
self.scale_io = scale_io
if storage_class_name is not None:
self.storage_class_name = storage_class_name
if storageos is not None:
self.storageos = storageos
if volume_attributes_class_name is not None:
self.volume_attributes_class_name = volume_attributes_class_name
if volume_mode is not None:
self.volume_mode = volume_mode
if vsphere_volume is not None:
self.vsphere_volume = vsphere_volume
@property
def access_modes(self):
"""Gets the access_modes of this V1PersistentVolumeSpec. # noqa: E501
accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes # noqa: E501
:return: The access_modes of this V1PersistentVolumeSpec. # noqa: E501
:rtype: list[str]
"""
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
"""Sets the access_modes of this V1PersistentVolumeSpec.
accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes # noqa: E501
:param access_modes: The access_modes of this V1PersistentVolumeSpec. # noqa: E501
:type: list[str]
"""
self._access_modes = access_modes
@property
def aws_elastic_block_store(self):
"""Gets the aws_elastic_block_store of this V1PersistentVolumeSpec. # noqa: E501
:return: The aws_elastic_block_store of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1AWSElasticBlockStoreVolumeSource
"""
return self._aws_elastic_block_store
@aws_elastic_block_store.setter
def aws_elastic_block_store(self, aws_elastic_block_store):
"""Sets the aws_elastic_block_store of this V1PersistentVolumeSpec.
:param aws_elastic_block_store: The aws_elastic_block_store of this V1PersistentVolumeSpec. # noqa: E501
:type: V1AWSElasticBlockStoreVolumeSource
"""
self._aws_elastic_block_store = aws_elastic_block_store
@property
def azure_disk(self):
"""Gets the azure_disk of this V1PersistentVolumeSpec. # noqa: E501
:return: The azure_disk of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1AzureDiskVolumeSource
"""
return self._azure_disk
@azure_disk.setter
def azure_disk(self, azure_disk):
"""Sets the azure_disk of this V1PersistentVolumeSpec.
:param azure_disk: The azure_disk of this V1PersistentVolumeSpec. # noqa: E501
:type: V1AzureDiskVolumeSource
"""
self._azure_disk = azure_disk
@property
def azure_file(self):
"""Gets the azure_file of this V1PersistentVolumeSpec. # noqa: E501
:return: The azure_file of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1AzureFilePersistentVolumeSource
"""
return self._azure_file
@azure_file.setter
def azure_file(self, azure_file):
"""Sets the azure_file of this V1PersistentVolumeSpec.
:param azure_file: The azure_file of this V1PersistentVolumeSpec. # noqa: E501
:type: V1AzureFilePersistentVolumeSource
"""
self._azure_file = azure_file
@property
def capacity(self):
"""Gets the capacity of this V1PersistentVolumeSpec. # noqa: E501
capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity # noqa: E501
:return: The capacity of this V1PersistentVolumeSpec. # noqa: E501
:rtype: dict(str, str)
"""
return self._capacity
@capacity.setter
def capacity(self, capacity):
"""Sets the capacity of this V1PersistentVolumeSpec.
capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity # noqa: E501
:param capacity: The capacity of this V1PersistentVolumeSpec. # noqa: E501
:type: dict(str, str)
"""
self._capacity = capacity
@property
def cephfs(self):
"""Gets the cephfs of this V1PersistentVolumeSpec. # noqa: E501
:return: The cephfs of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1CephFSPersistentVolumeSource
"""
return self._cephfs
@cephfs.setter
def cephfs(self, cephfs):
"""Sets the cephfs of this V1PersistentVolumeSpec.
:param cephfs: The cephfs of this V1PersistentVolumeSpec. # noqa: E501
:type: V1CephFSPersistentVolumeSource
"""
self._cephfs = cephfs
@property
def cinder(self):
"""Gets the cinder of this V1PersistentVolumeSpec. # noqa: E501
:return: The cinder of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1CinderPersistentVolumeSource
"""
return self._cinder
@cinder.setter
def cinder(self, cinder):
"""Sets the cinder of this V1PersistentVolumeSpec.
:param cinder: The cinder of this V1PersistentVolumeSpec. # noqa: E501
:type: V1CinderPersistentVolumeSource
"""
self._cinder = cinder
@property
def claim_ref(self):
"""Gets the claim_ref of this V1PersistentVolumeSpec. # noqa: E501
:return: The claim_ref of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1ObjectReference
"""
return self._claim_ref
@claim_ref.setter
def claim_ref(self, claim_ref):
"""Sets the claim_ref of this V1PersistentVolumeSpec.
:param claim_ref: The claim_ref of this V1PersistentVolumeSpec. # noqa: E501
:type: V1ObjectReference
"""
self._claim_ref = claim_ref
@property
def csi(self):
"""Gets the csi of this V1PersistentVolumeSpec. # noqa: E501
:return: The csi of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1CSIPersistentVolumeSource
"""
return self._csi
@csi.setter
def csi(self, csi):
"""Sets the csi of this V1PersistentVolumeSpec.
:param csi: The csi of this V1PersistentVolumeSpec. # noqa: E501
:type: V1CSIPersistentVolumeSource
"""
self._csi = csi
@property
def fc(self):
"""Gets the fc of this V1PersistentVolumeSpec. # noqa: E501
:return: The fc of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1FCVolumeSource
"""
return self._fc
@fc.setter
def fc(self, fc):
"""Sets the fc of this V1PersistentVolumeSpec.
:param fc: The fc of this V1PersistentVolumeSpec. # noqa: E501
:type: V1FCVolumeSource
"""
self._fc = fc
@property
def flex_volume(self):
"""Gets the flex_volume of this V1PersistentVolumeSpec. # noqa: E501
:return: The flex_volume of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1FlexPersistentVolumeSource
"""
return self._flex_volume
@flex_volume.setter
def flex_volume(self, flex_volume):
"""Sets the flex_volume of this V1PersistentVolumeSpec.
:param flex_volume: The flex_volume of this V1PersistentVolumeSpec. # noqa: E501
:type: V1FlexPersistentVolumeSource
"""
self._flex_volume = flex_volume
@property
def flocker(self):
"""Gets the flocker of this V1PersistentVolumeSpec. # noqa: E501
:return: The flocker of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1FlockerVolumeSource
"""
return self._flocker
@flocker.setter
def flocker(self, flocker):
"""Sets the flocker of this V1PersistentVolumeSpec.
:param flocker: The flocker of this V1PersistentVolumeSpec. # noqa: E501
:type: V1FlockerVolumeSource
"""
self._flocker = flocker
@property
def gce_persistent_disk(self):
"""Gets the gce_persistent_disk of this V1PersistentVolumeSpec. # noqa: E501
:return: The gce_persistent_disk of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1GCEPersistentDiskVolumeSource
"""
return self._gce_persistent_disk
@gce_persistent_disk.setter
def gce_persistent_disk(self, gce_persistent_disk):
"""Sets the gce_persistent_disk of this V1PersistentVolumeSpec.
:param gce_persistent_disk: The gce_persistent_disk of this V1PersistentVolumeSpec. # noqa: E501
:type: V1GCEPersistentDiskVolumeSource
"""
self._gce_persistent_disk = gce_persistent_disk
@property
def glusterfs(self):
"""Gets the glusterfs of this V1PersistentVolumeSpec. # noqa: E501
:return: The glusterfs of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1GlusterfsPersistentVolumeSource
"""
return self._glusterfs
@glusterfs.setter
def glusterfs(self, glusterfs):
"""Sets the glusterfs of this V1PersistentVolumeSpec.
:param glusterfs: The glusterfs of this V1PersistentVolumeSpec. # noqa: E501
:type: V1GlusterfsPersistentVolumeSource
"""
self._glusterfs = glusterfs
@property
def host_path(self):
"""Gets the host_path of this V1PersistentVolumeSpec. # noqa: E501
:return: The host_path of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1HostPathVolumeSource
"""
return self._host_path
@host_path.setter
def host_path(self, host_path):
"""Sets the host_path of this V1PersistentVolumeSpec.
:param host_path: The host_path of this V1PersistentVolumeSpec. # noqa: E501
:type: V1HostPathVolumeSource
"""
self._host_path = host_path
@property
def iscsi(self):
"""Gets the iscsi of this V1PersistentVolumeSpec. # noqa: E501
:return: The iscsi of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1ISCSIPersistentVolumeSource
"""
return self._iscsi
@iscsi.setter
def iscsi(self, iscsi):
"""Sets the iscsi of this V1PersistentVolumeSpec.
:param iscsi: The iscsi of this V1PersistentVolumeSpec. # noqa: E501
:type: V1ISCSIPersistentVolumeSource
"""
self._iscsi = iscsi
@property
def local(self):
"""Gets the local of this V1PersistentVolumeSpec. # noqa: E501
:return: The local of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1LocalVolumeSource
"""
return self._local
@local.setter
def local(self, local):
"""Sets the local of this V1PersistentVolumeSpec.
:param local: The local of this V1PersistentVolumeSpec. # noqa: E501
:type: V1LocalVolumeSource
"""
self._local = local
@property
def mount_options(self):
"""Gets the mount_options of this V1PersistentVolumeSpec. # noqa: E501
mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options # noqa: E501
:return: The mount_options of this V1PersistentVolumeSpec. # noqa: E501
:rtype: list[str]
"""
return self._mount_options
@mount_options.setter
def mount_options(self, mount_options):
"""Sets the mount_options of this V1PersistentVolumeSpec.
mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options # noqa: E501
:param mount_options: The mount_options of this V1PersistentVolumeSpec. # noqa: E501
:type: list[str]
"""
self._mount_options = mount_options
@property
def nfs(self):
"""Gets the nfs of this V1PersistentVolumeSpec. # noqa: E501
:return: The nfs of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1NFSVolumeSource
"""
return self._nfs
@nfs.setter
def nfs(self, nfs):
"""Sets the nfs of this V1PersistentVolumeSpec.
:param nfs: The nfs of this V1PersistentVolumeSpec. # noqa: E501
:type: V1NFSVolumeSource
"""
self._nfs = nfs
@property
def node_affinity(self):
"""Gets the node_affinity of this V1PersistentVolumeSpec. # noqa: E501
:return: The node_affinity of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1VolumeNodeAffinity
"""
return self._node_affinity
@node_affinity.setter
def node_affinity(self, node_affinity):
"""Sets the node_affinity of this V1PersistentVolumeSpec.
:param node_affinity: The node_affinity of this V1PersistentVolumeSpec. # noqa: E501
:type: V1VolumeNodeAffinity
"""
self._node_affinity = node_affinity
@property
def persistent_volume_reclaim_policy(self):
"""Gets the persistent_volume_reclaim_policy of this V1PersistentVolumeSpec. # noqa: E501
persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming # noqa: E501
:return: The persistent_volume_reclaim_policy of this V1PersistentVolumeSpec. # noqa: E501
:rtype: str
"""
return self._persistent_volume_reclaim_policy
@persistent_volume_reclaim_policy.setter
def persistent_volume_reclaim_policy(self, persistent_volume_reclaim_policy):
"""Sets the persistent_volume_reclaim_policy of this V1PersistentVolumeSpec.
persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming # noqa: E501
:param persistent_volume_reclaim_policy: The persistent_volume_reclaim_policy of this V1PersistentVolumeSpec. # noqa: E501
:type: str
"""
self._persistent_volume_reclaim_policy = persistent_volume_reclaim_policy
@property
def photon_persistent_disk(self):
"""Gets the photon_persistent_disk of this V1PersistentVolumeSpec. # noqa: E501
:return: The photon_persistent_disk of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1PhotonPersistentDiskVolumeSource
"""
return self._photon_persistent_disk
@photon_persistent_disk.setter
def photon_persistent_disk(self, photon_persistent_disk):
"""Sets the photon_persistent_disk of this V1PersistentVolumeSpec.
:param photon_persistent_disk: The photon_persistent_disk of this V1PersistentVolumeSpec. # noqa: E501
:type: V1PhotonPersistentDiskVolumeSource
"""
self._photon_persistent_disk = photon_persistent_disk
@property
def portworx_volume(self):
"""Gets the portworx_volume of this V1PersistentVolumeSpec. # noqa: E501
:return: The portworx_volume of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1PortworxVolumeSource
"""
return self._portworx_volume
@portworx_volume.setter
def portworx_volume(self, portworx_volume):
"""Sets the portworx_volume of this V1PersistentVolumeSpec.
:param portworx_volume: The portworx_volume of this V1PersistentVolumeSpec. # noqa: E501
:type: V1PortworxVolumeSource
"""
self._portworx_volume = portworx_volume
@property
def quobyte(self):
"""Gets the quobyte of this V1PersistentVolumeSpec. # noqa: E501
:return: The quobyte of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1QuobyteVolumeSource
"""
return self._quobyte
@quobyte.setter
def quobyte(self, quobyte):
"""Sets the quobyte of this V1PersistentVolumeSpec.
:param quobyte: The quobyte of this V1PersistentVolumeSpec. # noqa: E501
:type: V1QuobyteVolumeSource
"""
self._quobyte = quobyte
@property
def rbd(self):
"""Gets the rbd of this V1PersistentVolumeSpec. # noqa: E501
:return: The rbd of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1RBDPersistentVolumeSource
"""
return self._rbd
@rbd.setter
def rbd(self, rbd):
"""Sets the rbd of this V1PersistentVolumeSpec.
:param rbd: The rbd of this V1PersistentVolumeSpec. # noqa: E501
:type: V1RBDPersistentVolumeSource
"""
self._rbd = rbd
@property
def scale_io(self):
"""Gets the scale_io of this V1PersistentVolumeSpec. # noqa: E501
:return: The scale_io of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1ScaleIOPersistentVolumeSource
"""
return self._scale_io
@scale_io.setter
def scale_io(self, scale_io):
"""Sets the scale_io of this V1PersistentVolumeSpec.
:param scale_io: The scale_io of this V1PersistentVolumeSpec. # noqa: E501
:type: V1ScaleIOPersistentVolumeSource
"""
self._scale_io = scale_io
@property
def storage_class_name(self):
"""Gets the storage_class_name of this V1PersistentVolumeSpec. # noqa: E501
storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass. # noqa: E501
:return: The storage_class_name of this V1PersistentVolumeSpec. # noqa: E501
:rtype: str
"""
return self._storage_class_name
@storage_class_name.setter
def storage_class_name(self, storage_class_name):
"""Sets the storage_class_name of this V1PersistentVolumeSpec.
storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass. # noqa: E501
:param storage_class_name: The storage_class_name of this V1PersistentVolumeSpec. # noqa: E501
:type: str
"""
self._storage_class_name = storage_class_name
@property
def storageos(self):
"""Gets the storageos of this V1PersistentVolumeSpec. # noqa: E501
:return: The storageos of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1StorageOSPersistentVolumeSource
"""
return self._storageos
@storageos.setter
def storageos(self, storageos):
"""Sets the storageos of this V1PersistentVolumeSpec.
:param storageos: The storageos of this V1PersistentVolumeSpec. # noqa: E501
:type: V1StorageOSPersistentVolumeSource
"""
self._storageos = storageos
@property
def volume_attributes_class_name(self):
"""Gets the volume_attributes_class_name of this V1PersistentVolumeSpec. # noqa: E501
Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. # noqa: E501
:return: The volume_attributes_class_name of this V1PersistentVolumeSpec. # noqa: E501
:rtype: str
"""
return self._volume_attributes_class_name
@volume_attributes_class_name.setter
def volume_attributes_class_name(self, volume_attributes_class_name):
"""Sets the volume_attributes_class_name of this V1PersistentVolumeSpec.
Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. # noqa: E501
:param volume_attributes_class_name: The volume_attributes_class_name of this V1PersistentVolumeSpec. # noqa: E501
:type: str
"""
self._volume_attributes_class_name = volume_attributes_class_name
@property
def volume_mode(self):
"""Gets the volume_mode of this V1PersistentVolumeSpec. # noqa: E501
volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. # noqa: E501
:return: The volume_mode of this V1PersistentVolumeSpec. # noqa: E501
:rtype: str
"""
return self._volume_mode
@volume_mode.setter
def volume_mode(self, volume_mode):
"""Sets the volume_mode of this V1PersistentVolumeSpec.
volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. # noqa: E501
:param volume_mode: The volume_mode of this V1PersistentVolumeSpec. # noqa: E501
:type: str
"""
self._volume_mode = volume_mode
@property
def vsphere_volume(self):
"""Gets the vsphere_volume of this V1PersistentVolumeSpec. # noqa: E501
:return: The vsphere_volume of this V1PersistentVolumeSpec. # noqa: E501
:rtype: V1VsphereVirtualDiskVolumeSource
"""
return self._vsphere_volume
@vsphere_volume.setter
def vsphere_volume(self, vsphere_volume):
"""Sets the vsphere_volume of this V1PersistentVolumeSpec.
:param vsphere_volume: The vsphere_volume of this V1PersistentVolumeSpec. # noqa: E501
:type: V1VsphereVirtualDiskVolumeSource
"""
self._vsphere_volume = vsphere_volume
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeSpec):
return True
return self.to_dict() != other.to_dict()
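As a quick illustration of how the generated setters and `to_dict()` above compose, here is a hedged sketch; the field values and the `kubernetes.client` import path are illustrative assumptions rather than part of this excerpt:

```python
from kubernetes import client

# Illustrative values only; a real PV spec would also set capacity and access modes.
spec = client.V1PersistentVolumeSpec(
    host_path=client.V1HostPathVolumeSource(path="/mnt/data"),
    storage_class_name="manual",
    persistent_volume_reclaim_policy="Retain",
    mount_options=["ro"],
    volume_mode="Filesystem",
)
# to_dict() recurses into nested models such as the host_path source.
print(spec.to_dict()["host_path"])   # {'path': '/mnt/data', 'type': None}
```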
|
V1PersistentVolumeSpec
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_mutable.py
|
{
"start": 2552,
"end": 2741
}
|
class ____(Point):
@classmethod
def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
return value
@dataclasses.dataclass
|
MyPoint
|
python
|
huggingface__transformers
|
src/transformers/models/gemma3/modeling_gemma3.py
|
{
"start": 45886,
"end": 55030
}
|
class ____(Gemma3PreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
"^language_model.model": "model.language_model",
"^vision_tower": "model.vision_tower",
"^multi_modal_projector": "model.multi_modal_projector",
"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
# we are filtering the logits/labels so we shouldn't divide the loss based on num_items_in_batch
# Fix: https://github.com/huggingface/transformers/issues/40564
accepts_loss_kwargs = False
def __init__(self, config: Gemma3Config):
super().__init__(config)
self.model = Gemma3Model(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_image_features(self, pixel_values):
return self.model.get_image_features(pixel_values)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
token_type_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**lm_kwargs,
) -> Union[tuple, Gemma3CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Gemma3ForConditionalGeneration
>>> model = Gemma3ForConditionalGeneration.from_pretrained("google/gemma-3-4b-it")
>>> processor = AutoProcessor.from_pretrained("google/gemma-3-4b-it")
>>> messages = [
... {
... "role": "system",
... "content": [
... {"type": "text", "text": "You are a helpful assistant."}
... ]
... },
... {
... "role": "user", "content": [
... {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
... {"type": "text", "text": "Where is the cat standing?"},
... ]
... },
... ]
>>> inputs = processor.apply_chat_template(
... messages,
... tokenize=True,
... return_dict=True,
... return_tensors="pt",
... add_generation_prompt=True
... )
>>> # Generate
>>> generate_ids = model.generate(**inputs)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"user\nYou are a helpful assistant.\n\n\n\n\n\nWhere is the cat standing?\nmodel\nBased on the image, the cat is standing in a snowy area, likely outdoors. It appears to"
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
**lm_kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
# Upcast to float if we need to compute the loss to avoid potential precision issues
logits = logits.float()
shift_logits = logits[..., :-1, :]
shift_labels = labels[..., 1:]
if attention_mask is not None:
# we use the input attention mask to shift the logits and labels, because it is 2D.
# we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous()
shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous()
else:
shift_logits = shift_logits.contiguous()
shift_labels = shift_labels.contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss()
flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
flat_labels = shift_labels.view(-1).to(shift_logits.device)
loss = loss_fct(flat_logits, flat_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return Gemma3CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
pixel_values=None,
attention_mask=None,
token_type_ids=None,
use_cache=True,
logits_to_keep=None,
labels=None,
**kwargs,
):
# Overwritten -- custom `position_ids` and `pixel_values` handling
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
cache_position=cache_position,
use_cache=use_cache,
logits_to_keep=logits_to_keep,
token_type_ids=token_type_ids,
**kwargs,
)
# If we're in the cached decoding stage, pixel values should be None because the input ids no longer contain the special image token.
# Otherwise pixel values need to be passed to the model. NOTE: use_cache=False always needs pixel_values.
if cache_position[0] == 0:
model_inputs["pixel_values"] = pixel_values
return model_inputs
@staticmethod
def create_masks_for_generate(
config: PreTrainedConfig,
input_embeds: torch.Tensor,
attention_mask: Optional[torch.Tensor],
cache_position: torch.Tensor,
past_key_values: Optional[Cache],
position_ids: Optional[torch.Tensor],
token_type_ids: Optional[torch.Tensor] = None,
**kwargs,
) -> dict:
# Uses the overwritten `create_masks_for_generate` with `token_type_ids` masking
return create_causal_mask_mapping(
config,
input_embeds,
attention_mask,
cache_position,
past_key_values,
position_ids,
token_type_ids,
pixel_values=kwargs.get("pixel_values"),
**{k: v for k, v in kwargs.items() if k != "pixel_values"},
)
|
Gemma3ForConditionalGeneration
|
python
|
cherrypy__cherrypy
|
cherrypy/_cpconfig.py
|
{
"start": 4982,
"end": 5931
}
|
class ____(reprconf.Config):
"""The 'global' configuration data for the entire CherryPy process."""
def update(self, config):
"""Update self from a dict, file or filename."""
_if_filename_register_autoreload(config)
super(Config, self).update(config)
def _apply(self, config):
"""Update self from a dict."""
if isinstance(config.get('global'), dict):
if len(config) > 1:
cherrypy.checker.global_config_contained_paths = True
config = config['global']
if 'tools.staticdir.dir' in config:
config['tools.staticdir.section'] = 'global'
super(Config, self)._apply(config)
@staticmethod
def __call__(**kwargs):
"""Decorate for page handlers to set _cp_config."""
def tool_decorator(f):
_Vars(f).setdefault('_cp_config', {}).update(kwargs)
return f
return tool_decorator
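A minimal sketch of the decorator form provided by `__call__` above; the handler and the tool settings are illustrative assumptions:

```python
import cherrypy

class Root:
    @cherrypy.expose
    @cherrypy.config(**{"tools.gzip.on": True})   # stores the kwargs in index._cp_config
    def index(self):
        return "hello"
```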
|
Config
|
python
|
walkccc__LeetCode
|
solutions/1713. Minimum Operations to Make a Subsequence/1713.py
|
{
"start": 0,
"end": 678
}
|
class ____:
def minOperations(self, target: list[int], arr: list[int]) -> int:
indices = []
numToIndex = {num: i for i, num in enumerate(target)}
for a in arr:
if a in numToIndex:
indices.append(numToIndex[a])
return len(target) - self._lengthOfLIS(indices)
# Same as 300. Longest Increasing Subsequence
def _lengthOfLIS(self, nums: list[int]) -> int:
# tails[i] := the minimum tail of all the increasing subsequences having
# length i + 1
tails = []
for num in nums:
if not tails or num > tails[-1]:
tails.append(num)
else:
tails[bisect.bisect_left(tails, num)] = num
return len(tails)
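A small worked example of the reduction above (run with `bisect` imported alongside the class, as the excerpt assumes):

```python
sol = Solution()
# target values map to indices 0..5; arr maps to [1, 0, 5, 4, 2, 0, 3],
# whose longest increasing subsequence has length 3, so 6 - 3 = 3 operations.
print(sol.minOperations([6, 4, 8, 1, 3, 2], [4, 7, 6, 2, 3, 8, 6, 1]))  # 3
print(sol.minOperations([5, 1, 3], [9, 4, 2, 3, 4]))                    # 2
```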
|
Solution
|
python
|
mlflow__mlflow
|
mlflow/models/rag_signatures.py
|
{
"start": 1028,
"end": 1383
}
|
class ____:
index: int = 0
message: Message = field(
default_factory=lambda: Message(
role="assistant",
content="MLflow is an open source platform for the machine learning lifecycle.",
)
)
finish_reason: str = "stop"
@deprecated("mlflow.types.llm.ChatCompletionChunk")
@dataclass
|
ChainCompletionChoice
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_slots/SLOT002.py
|
{
"start": 634,
"end": 712
}
|
class ____(namedtuple("foo", ["str", "int"]), object):
pass
|
UnusualButStillBad
|
python
|
numba__numba
|
numba/core/typing/builtins.py
|
{
"start": 9046,
"end": 9981
}
|
class ____(ConcreteTemplate):
# For bitshifts, only the first operand's signedness matters
# to choose the operation's signedness (the second operand
# should always be positive but will generally be considered
# signed anyway, since it's often a constant integer).
# (also, see issue #1995 for right-shifts)
# The RHS type is fixed to 64-bit signed/unsigned ints.
# The implementation will always cast the operands to the width of the
# result type, which is the widest between the LHS type and (u)intp.
cases = [signature(max(op, types.intp), op, op2)
for op in sorted(types.signed_domain)
for op2 in [types.uint64, types.int64]]
cases += [signature(max(op, types.uintp), op, op2)
for op in sorted(types.unsigned_domain)
for op2 in [types.uint64, types.int64]]
unsafe_casting = False
@infer_global(operator.lshift)
|
BitwiseShiftOperation
|
python
|
sphinx-doc__sphinx
|
sphinx/util/logging.py
|
{
"start": 2465,
"end": 2912
}
|
class ____(logging.LogRecord):
"""Log record class supporting location"""
prefix = ''
location: Any = None
def getMessage(self) -> str:
message = super().getMessage()
location = getattr(self, 'location', None)
if location:
message = f'{location}: {self.prefix}{message}'
elif self.prefix not in message:
message = self.prefix + message
return message
|
SphinxLogRecord
|
python
|
getsentry__sentry
|
src/sentry/identity/services/identity/impl.py
|
{
"start": 656,
"end": 4780
}
|
class ____(IdentityService):
def get_provider(
self,
*,
provider_id: int | None = None,
provider_type: str | None = None,
provider_ext_id: str | None = None,
) -> RpcIdentityProvider | None:
from sentry.users.models.identity import IdentityProvider
# If an id is provided, use that -- otherwise, use the type and external_id
idp_kwargs: Any = (
{"id": provider_id}
if provider_id
else {"type": provider_type, "external_id": provider_ext_id}
)
idp = IdentityProvider.objects.filter(**idp_kwargs).first()
return serialize_identity_provider(idp) if idp else None
def get_identities(self, *, filter: IdentityFilterArgs) -> list[RpcIdentity]:
return self._FQ.get_many(filter=filter)
def get_identity(self, *, filter: IdentityFilterArgs) -> RpcIdentity | None:
identities = self.get_identities(filter=filter)
if len(identities) == 0:
return None
return identities[0]
def get_user_identities_by_provider_type(
self,
*,
user_id: int,
provider_type: str,
exclude_matching_external_ids: bool = False,
) -> list[RpcIdentity]:
from django.db.models import F
from sentry.users.models.identity import Identity
identities = Identity.objects.filter(user=user_id, idp__type=provider_type)
if exclude_matching_external_ids:
# For Microsoft Teams integration, initially we create rows in the
# identity table with the external_id as a team_id instead of the user_id.
# We need to exclude rows where this is NOT updated to the user_id later.
identities = identities.exclude(external_id=F("idp__external_id"))
return [serialize_identity(identity) for identity in identities]
def delete_identities(self, user_id: int, organization_id: int) -> None:
"""
Deletes the set of identities associated with a user and organization context.
"""
for ai in AuthIdentity.objects.filter(
user_id=user_id, auth_provider__organization_id=organization_id
):
ai.delete()
def update_data(self, *, identity_id: int, data: Any) -> RpcIdentity | None:
identity: Identity | None = Identity.objects.filter(id=identity_id).first()
if identity is None:
return None
identity.update(data=data)
return serialize_identity(identity)
class _IdentityFilterQuery(
FilterQueryDatabaseImpl[Identity, IdentityFilterArgs, RpcIdentity, None]
):
def apply_filters(
self, query: QuerySet[Identity], filters: IdentityFilterArgs
) -> QuerySet[Identity]:
if "id" in filters:
query = query.filter(id=filters["id"])
if "user_id" in filters:
query = query.filter(user_id=filters["user_id"])
if "identity_ext_id" in filters:
query = query.filter(external_id=filters["identity_ext_id"])
if "provider_id" in filters:
query = query.filter(idp_id=filters["provider_id"])
if "provider_ext_id" in filters:
query = query.filter(idp__external_id=filters["provider_ext_id"])
if "provider_type" in filters:
query = query.filter(idp__type=filters["provider_type"])
return query
def base_query(self, select_related: bool = True) -> QuerySet[Identity]:
return Identity.objects.all()
def filter_arg_validator(self) -> Callable[[IdentityFilterArgs], str | None]:
return self._filter_has_any_key_validator(*IdentityFilterArgs.__annotations__.keys())
def serialize_api(self, serializer: None) -> Serializer:
raise NotImplementedError("API Serialization not supported for IdentityService")
def serialize_rpc(self, identity: Identity) -> RpcIdentity:
return serialize_identity(identity=identity)
_FQ = _IdentityFilterQuery()
|
DatabaseBackedIdentityService
|
python
|
google__jax
|
jax/_src/interpreters/batching.py
|
{
"start": 17517,
"end": 18737
}
|
class ____:
name : Any
size : Any
# Only one of spmd_axis_name and explicit_mesh_axis is set.
spmd_name : Any
# short for private `_explicit_mesh_axis`. The public property is called
# `.explicit_mesh_axis`
_ema: tuple[Any, ...] | None
@property
def explicit_mesh_axis(self):
assert self._ema is None or isinstance(self._ema, tuple)
if self._ema is None:
return None
cur_mesh = mesh_lib.get_abstract_mesh()
if cur_mesh.empty:
return self._ema
ema0_type = cur_mesh._name_to_type[self._ema[0]]
assert all(cur_mesh._name_to_type[e] == ema0_type for e in self._ema)
if ema0_type != mesh_lib.AxisType.Explicit:
return None
return self._ema
def __repr__(self):
return (f'AxisData(name={self.name}, size={self.size},'
f' spmd_name={self.spmd_name},'
f' explicit_mesh_axis={self.explicit_mesh_axis})')
__str__ = __repr__
def get_sharding_for_vmap(axis_data, orig_sharding, axis):
val = axis_data.explicit_mesh_axis
# TODO(yashkatariya): Preserve unreduced here using
# `orig_sharding.spec.update`
new_spec = P(*tuple_insert(orig_sharding.spec, axis, val))
return NamedSharding(orig_sharding.mesh, new_spec)
|
AxisData
|
python
|
walkccc__LeetCode
|
solutions/1429. First Unique Number/1429.py
|
{
"start": 0,
"end": 532
}
|
class ____:
def __init__(self, nums: list[int]):
self.seen = set()
self.unique = {}
for num in nums:
self.add(num)
def showFirstUnique(self) -> int:
return next(iter(self.unique), -1)
def add(self, value: int) -> None:
if value not in self.seen:
self.seen.add(value)
self.unique[value] = 1
elif value in self.unique:
# We have added this value before, and this is the second time we're
# adding it. So, erase the value from `unique`.
self.unique.pop(value)
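A short usage sketch matching the behaviour described in the comments above:

```python
q = FirstUnique([2, 3, 5])
print(q.showFirstUnique())  # 2
q.add(5)                    # 5 is now a repeat, but 2 is still unique
print(q.showFirstUnique())  # 2
q.add(2)                    # 2 becomes a repeat
print(q.showFirstUnique())  # 3
q.add(3)                    # nothing unique is left
print(q.showFirstUnique())  # -1
```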
|
FirstUnique
|
python
|
walkccc__LeetCode
|
solutions/2531. Make Number of Distinct Characters Equal/2531.py
|
{
"start": 0,
"end": 854
}
|
class ____:
def isItPossible(self, word1: str, word2: str) -> bool:
count1 = collections.Counter(word1)
count2 = collections.Counter(word2)
distinct1 = len(count1)
distinct2 = len(count2)
for a in count1:
for b in count2:
if a == b:
# Swapping the same letters won't change the number of distinct
# letters in each string, so just check if `distinct1 == distinct2`.
if distinct1 == distinct2:
return True
continue
# The calculation is meaningful only when a != b
# Swap a in word1 with b in word2.
distinctAfterSwap1 = distinct1 - (count1[a] == 1) + (count1[b] == 0)
distinctAfterSwap2 = distinct2 - (count2[b] == 1) + (count2[a] == 0)
if distinctAfterSwap1 == distinctAfterSwap2:
return True
return False
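Two quick checks of the swap logic above (these assume `collections` is imported next to the class, as the excerpt implies):

```python
sol = Solution()
print(sol.isItPossible("ac", "b"))      # False: no single swap equalizes the distinct counts
print(sol.isItPossible("abcc", "aab"))  # True: swap 'c' from word1 with 'a' from word2
```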
|
Solution
|
python
|
numba__llvmlite
|
llvmlite/tests/test_binding.py
|
{
"start": 36194,
"end": 41831
}
|
class ____(object):
"""
Mixin for ExecutionEngine tests.
"""
def get_sum(self, ee, func_name="sum"):
ee.finalize_object()
cfptr = ee.get_function_address(func_name)
self.assertTrue(cfptr)
return CFUNCTYPE(c_int, c_int, c_int)(cfptr)
def test_run_code(self):
mod = self.module()
with self.jit(mod) as ee:
cfunc = self.get_sum(ee)
res = cfunc(2, -5)
self.assertEqual(-3, res)
def test_close(self):
ee = self.jit(self.module())
ee.close()
ee.close()
with self.assertRaises(ctypes.ArgumentError):
ee.finalize_object()
def test_with(self):
ee = self.jit(self.module())
with ee:
pass
with self.assertRaises(RuntimeError):
with ee:
pass
with self.assertRaises(ctypes.ArgumentError):
ee.finalize_object()
def test_module_lifetime(self):
mod = self.module()
ee = self.jit(mod)
ee.close()
mod.close()
def test_module_lifetime2(self):
mod = self.module()
ee = self.jit(mod)
mod.close()
ee.close()
def test_add_module(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
with self.assertRaises(KeyError):
ee.add_module(mod)
self.assertFalse(mod.closed)
ee.close()
self.assertTrue(mod.closed)
def test_add_module_lifetime(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
mod.close()
ee.close()
def test_add_module_lifetime2(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
ee.close()
mod.close()
def test_remove_module(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
ee.remove_module(mod)
with self.assertRaises(KeyError):
ee.remove_module(mod)
self.assertFalse(mod.closed)
ee.close()
self.assertFalse(mod.closed)
def test_target_data(self):
mod = self.module()
ee = self.jit(mod)
td = ee.target_data
# A singleton is returned
self.assertIs(ee.target_data, td)
str(td)
del mod, ee
str(td)
def test_target_data_abi_enquiries(self):
mod = self.module()
ee = self.jit(mod)
td = ee.target_data
gv_i32 = mod.get_global_variable("glob")
gv_i8 = mod.get_global_variable("glob_b")
gv_struct = mod.get_global_variable("glob_struct")
# A global is a pointer, it has the ABI size of a pointer
pointer_size = 4 if sys.maxsize < 2 ** 32 else 8
for g in (gv_i32, gv_i8, gv_struct):
self.assertEqual(td.get_abi_size(g.type), pointer_size)
self.assertEqual(td.get_abi_size(gv_i32.global_value_type), 4)
self.assertEqual(td.get_abi_alignment(gv_i32.global_value_type), 4)
self.assertEqual(td.get_abi_size(gv_i8.global_value_type), 1)
self.assertIn(td.get_abi_alignment(gv_i8.global_value_type), (1, 2, 4))
self.assertEqual(td.get_abi_size(gv_struct.global_value_type), 24)
self.assertIn(td.get_abi_alignment(gv_struct.global_value_type), (4, 8))
def test_object_cache_notify(self):
notifies = []
def notify(mod, buf):
notifies.append((mod, buf))
mod = self.module()
ee = self.jit(mod)
ee.set_object_cache(notify)
self.assertEqual(len(notifies), 0)
cfunc = self.get_sum(ee)
cfunc(2, -5)
self.assertEqual(len(notifies), 1)
# The right module object was found
self.assertIs(notifies[0][0], mod)
self.assertIsInstance(notifies[0][1], bytes)
notifies[:] = []
mod2 = self.module(asm_mul)
ee.add_module(mod2)
cfunc = self.get_sum(ee, "mul")
self.assertEqual(len(notifies), 1)
# The right module object was found
self.assertIs(notifies[0][0], mod2)
self.assertIsInstance(notifies[0][1], bytes)
def test_object_cache_getbuffer(self):
notifies = []
getbuffers = []
def notify(mod, buf):
notifies.append((mod, buf))
def getbuffer(mod):
getbuffers.append(mod)
mod = self.module()
ee = self.jit(mod)
ee.set_object_cache(notify, getbuffer)
# First return None from getbuffer(): the object is compiled normally
self.assertEqual(len(notifies), 0)
self.assertEqual(len(getbuffers), 0)
cfunc = self.get_sum(ee)
self.assertEqual(len(notifies), 1)
self.assertEqual(len(getbuffers), 1)
self.assertIs(getbuffers[0], mod)
sum_buffer = notifies[0][1]
# Create a new EE, and use getbuffer() to return the previously

# compiled object.
def getbuffer_successful(mod):
getbuffers.append(mod)
return sum_buffer
notifies[:] = []
getbuffers[:] = []
# Use another source module to make sure it is ignored
mod = self.module(asm_mul)
ee = self.jit(mod)
ee.set_object_cache(notify, getbuffer_successful)
self.assertEqual(len(notifies), 0)
self.assertEqual(len(getbuffers), 0)
cfunc = self.get_sum(ee)
self.assertEqual(cfunc(2, -5), -3)
self.assertEqual(len(notifies), 0)
self.assertEqual(len(getbuffers), 1)
|
JITTestMixin
|
python
|
google__pytype
|
pytype/matcher.py
|
{
"start": 2057,
"end": 2312
}
|
class ____:
"""A correct type/actual value match."""
view: _ViewType
subst: _SubstType
@classmethod
def default(cls):
return cls(datatypes.AccessTrackingDict(), datatypes.HashableDict())
@dataclasses.dataclass(eq=True, frozen=True)
|
GoodMatch
|
python
|
pypa__setuptools
|
setuptools/_distutils/command/install_egg_info.py
|
{
"start": 279,
"end": 2868
}
|
class ____(Command):
"""Install an .egg-info file for the package"""
description = "Install package's PKG-INFO metadata as an .egg-info file"
user_options: ClassVar[list[tuple[str, str, str]]] = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
@property
def basename(self):
"""
Allow basename to be overridden by child class.
Ref pypa/distutils#2.
"""
name = to_filename(safe_name(self.distribution.get_name()))
version = to_filename(safe_version(self.distribution.get_version()))
return f"{name}-{version}-py{sys.version_info.major}.{sys.version_info.minor}.egg-info"
def finalize_options(self):
self.set_undefined_options('install_lib', ('install_dir', 'install_dir'))
self.target = os.path.join(self.install_dir, self.basename)
self.outputs = [self.target]
def run(self):
target = self.target
if os.path.isdir(target) and not os.path.islink(target):
dir_util.remove_tree(target, dry_run=self.dry_run)
elif os.path.exists(target):
self.execute(os.unlink, (self.target,), "Removing " + target)
elif not os.path.isdir(self.install_dir):
self.execute(
os.makedirs, (self.install_dir,), "Creating " + self.install_dir
)
log.info("Writing %s", target)
if not self.dry_run:
with open(target, 'w', encoding='UTF-8') as f:
self.distribution.metadata.write_pkg_file(f)
def get_outputs(self):
return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
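Putting the three helpers together reproduces the `.egg-info` basename built by the `basename` property above; the project name, version, and the hard-coded `py3.12` suffix are made up for illustration:

```python
name = to_filename(safe_name("my project"))      # 'my project' -> 'my-project' -> 'my_project'
version = to_filename(safe_version("1.0 beta"))  # '1.0 beta' -> '1.0.beta'
print(f"{name}-{version}-py3.12.egg-info")       # my_project-1.0.beta-py3.12.egg-info
```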
|
install_egg_info
|
python
|
Netflix__metaflow
|
metaflow/_vendor/importlib_metadata/__init__.py
|
{
"start": 1257,
"end": 2827
}
|
class ____:
"""
A simple entry point config parser for performance
>>> for item in Sectioned.read(Sectioned._sample):
... print(item)
Pair(name='sec1', value='# comments ignored')
Pair(name='sec1', value='a = 1')
Pair(name='sec1', value='b = 2')
Pair(name='sec2', value='a = 2')
>>> res = Sectioned.section_pairs(Sectioned._sample)
>>> item = next(res)
>>> item.name
'sec1'
>>> item.value
Pair(name='a', value='1')
>>> item = next(res)
>>> item.value
Pair(name='b', value='2')
>>> item = next(res)
>>> item.name
'sec2'
>>> item.value
Pair(name='a', value='2')
>>> list(res)
[]
"""
_sample = textwrap.dedent(
"""
[sec1]
# comments ignored
a = 1
b = 2
[sec2]
a = 2
"""
).lstrip()
@classmethod
def section_pairs(cls, text):
return (
section._replace(value=Pair.parse(section.value))
for section in cls.read(text, filter_=cls.valid)
if section.name is not None
)
@staticmethod
def read(text, filter_=None):
lines = filter(filter_, map(str.strip, text.splitlines()))
name = None
for value in lines:
section_match = value.startswith('[') and value.endswith(']')
if section_match:
name = value.strip('[]')
continue
yield Pair(name, value)
@staticmethod
def valid(line):
return line and not line.startswith('#')
|
Sectioned
|
python
|
arrow-py__arrow
|
arrow/locales.py
|
{
"start": 47504,
"end": 47573
}
|
class ____(GermanBaseLocale, Locale):
names = ["de-ch"]
|
SwissLocale
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/exceptions.py
|
{
"start": 11363,
"end": 12644
}
|
class ____(DiagnosticPipError, InstallationError):
"""A subprocess call failed."""
reference = "subprocess-exited-with-error"
def __init__(
self,
*,
command_description: str,
exit_code: int,
output_lines: Optional[List[str]],
) -> None:
if output_lines is None:
output_prompt = Text("See above for output.")
else:
output_prompt = (
Text.from_markup(f"[red][{len(output_lines)} lines of output][/]\n")
+ Text("".join(output_lines))
+ Text.from_markup(R"[red]\[end of output][/]")
)
super().__init__(
message=(
f"[green]{escape(command_description)}[/] did not run successfully.\n"
f"exit code: {exit_code}"
),
context=output_prompt,
hint_stmt=None,
note_stmt=(
"This error originates from a subprocess, and is likely not a "
"problem with pip."
),
)
self.command_description = command_description
self.exit_code = exit_code
def __str__(self) -> str:
return f"{self.command_description} exited with {self.exit_code}"
|
InstallationSubprocessError
|
python
|
bokeh__bokeh
|
src/bokeh/models/annotations/legends.py
|
{
"start": 3337,
"end": 7776
}
|
class ____(Annotation):
''' Abstract base class for color bars.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
location = Either(Enum(HVAlign), Tuple(Float, Float), default="top_right", help="""
The location where the color bar should draw itself. It's either one of
``bokeh.core.enums.Anchor``'s enumerated values, or a ``(x, y)`` tuple
indicating an absolute location in screen coordinates (pixels from the
bottom-left corner).
.. warning::
If the color bar is placed in a side panel, the location will likely
have to be set to `(0, 0)`.
""")
orientation = Either(Enum(Orientation), Auto, default="auto", help="""
Whether the color bar should be oriented vertically or horizontally.
""")
height = Either(Auto, Int, help="""
The height (in pixels) that the color scale should occupy.
""")
width = Either(Auto, Int, help="""
The width (in pixels) that the color scale should occupy.
""")
title = Nullable(TextLike, help="""
The title text to render.
""")
title_props = Include(ScalarTextProps, prefix="title", help="""
The {prop} values for the title text.
""")
title_text_font_size = Override(default="13px")
title_text_font_style = Override(default="italic")
title_standoff = Int(2, help="""
The distance (in pixels) to separate the title from the color bar.
""")
ticker = Either(Instance(Ticker), Auto, default="auto", help="""
A Ticker to use for computing locations of axis components.
""")
formatter = Either(Instance(TickFormatter), Auto, default="auto", help="""
A ``TickFormatter`` to use for formatting the visual appearance of ticks.
""")
major_label_overrides = Dict(Either(Float, String), TextLike, default={}, help="""
Provide explicit tick label values for specific tick locations that
override normal formatting.
""")
major_label_policy = Instance(LabelingPolicy, default=InstanceDefault(NoOverlap), help="""
Allows to filter out labels, e.g. declutter labels to avoid overlap.
""")
margin = Int(30, help="""
Amount of margin (in pixels) around the outside of the color bar.
""")
padding = Int(10, help="""
Amount of padding (in pixels) between the color scale and color bar border.
""")
major_label_props = Include(ScalarTextProps, prefix="major_label", help="""
The {prop} of the major tick labels.
""")
major_label_text_font_size = Override(default="11px")
label_standoff = Int(5, help="""
The distance (in pixels) to separate the tick labels from the color bar.
""")
major_tick_props = Include(ScalarLineProps, prefix="major_tick", help="""
The {prop} of the major ticks.
""")
major_tick_line_color = Override(default="#ffffff")
major_tick_in = Int(default=5, help="""
The distance (in pixels) that major ticks should extend into the
main plot area.
""")
major_tick_out = Int(default=0, help="""
The distance (in pixels) that major ticks should extend out of the
main plot area.
""")
minor_tick_props = Include(ScalarLineProps, prefix="minor_tick", help="""
The {prop} of the minor ticks.
""")
minor_tick_line_color = Override(default=None)
minor_tick_in = Int(default=0, help="""
The distance (in pixels) that minor ticks should extend into the
main plot area.
""")
minor_tick_out = Int(default=0, help="""
The distance (in pixels) that minor ticks should extend out of the
main plot area.
""")
bar_props = Include(ScalarLineProps, prefix="bar", help="""
The {prop} for the color scale bar outline.
""")
bar_line_color = Override(default=None)
border_props = Include(ScalarLineProps, prefix="border", help="""
The {prop} for the color bar border outline.
""")
border_line_color = Override(default=None)
background_fill_props = Include(ScalarFillProps, prefix="background", help="""
The {prop} for the color bar background style.
""")
background_hatch_props = Include(ScalarHatchProps, prefix="background", help="""
The {prop} for the color bar background style.
""")
background_fill_color = Override(default="#ffffff")
background_fill_alpha = Override(default=0.95)
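A hedged sketch of configuring a concrete `ColorBar` (a subclass of this base) with a few of the properties declared above; the mapper, palette, and figure are illustrative choices:

```python
from bokeh.models import ColorBar, LinearColorMapper
from bokeh.plotting import figure

mapper = LinearColorMapper(palette="Viridis256", low=0, high=100)
color_bar = ColorBar(
    color_mapper=mapper,
    title="value",
    title_standoff=4,
    label_standoff=8,
    major_label_text_font_size="10px",
)
p = figure()
p.add_layout(color_bar, "right")   # placed in a side panel
```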
|
BaseColorBar
|
python
|
django__django
|
tests/migrations/test_migrations_clashing_prefix/a.py
|
{
"start": 35,
"end": 83
}
|
class ____(migrations.Migration):
pass
|
Migration
|
python
|
GoogleCloudPlatform__python-docs-samples
|
kubernetes_engine/django_tutorial/polls/migrations/0001_initial.py
|
{
"start": 763,
"end": 1940
}
|
class ____(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(
verbose_name=b'date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='polls.Question'),
),
]
|
Migration
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/data_test_cases.py
|
{
"start": 2380,
"end": 3231
}
|
class ____(NamedTuple):
# The expected number of rows
expected_rows: int
# The expected number of columns (doesn't include index columns)
expected_cols: int
# The expected data format
expected_data_format: DataFormat
# The expected sequence when the data is converted to a sequence
# If None, the sequence is not checked.
expected_sequence: list[Any]
# The expected command used when the data is written via `st.write`
expected_write_command: Literal[
"markdown", "dataframe", "json", "help", "write_stream"
]
# Whether the data structure is unevaluated and will be truncated
# if it is too large.
is_unevaluated: bool
# The expected return type of the data when it is
# returned from the `st.data_editor` function.
expected_type: type | None = None
@dataclass
|
CaseMetadata
|
python
|
conda__conda
|
conda/exceptions.py
|
{
"start": 8626,
"end": 9488
}
|
class ____(ClobberError):
def __init__(
self,
target_path: PathType,
incompatible_package_dists: Iterable[PackageRecord | str],
context: Context,
):
message = dals(
"""
This transaction has incompatible packages due to a shared path.
packages: %(incompatible_packages)s
path: '%(target_path)s'
"""
)
if context.path_conflict == PathConflict.prevent:
message += (
"If you'd like to proceed anyway, re-run the command with "
"the `--clobber` flag.\n."
)
super().__init__(
message,
context.path_conflict,
target_path=target_path,
incompatible_packages=", ".join(str(d) for d in incompatible_package_dists),
)
|
SharedLinkPathClobberError
|
python
|
openai__openai-python
|
src/openai/types/chat/chat_completion_deleted.py
|
{
"start": 198,
"end": 470
}
|
class ____(BaseModel):
id: str
"""The ID of the chat completion that was deleted."""
deleted: bool
"""Whether the chat completion was deleted."""
object: Literal["chat.completion.deleted"]
"""The type of object being deleted."""
|
ChatCompletionDeleted
|
python
|
hynek__structlog
|
src/structlog/stdlib.py
|
{
"start": 3141,
"end": 4389
}
|
class ____(logging.Logger):
"""
Change the behavior of `logging.Logger.findCaller` to cope with
*structlog*'s extra frames.
"""
def findCaller(
self, stack_info: bool = False, stacklevel: int = 1
) -> tuple[str, int, str, str | None]:
"""
Finds the first caller frame outside of structlog so that the caller
info is populated for wrapping stdlib.
This logger gets set as the default one when using LoggerFactory.
"""
sinfo: str | None
# stdlib logging passes stacklevel=1 from log methods like .warning(),
# but we've already skipped those frames by ignoring "logging", so we
# need to adjust stacklevel down by 1. We need to manually drop
# logging frames, because there are cases where we call logging methods
# from within structlog and the stacklevel offsets don't work anymore.
adjusted_stacklevel = max(0, stacklevel - 1) if stacklevel else None
f, _name = _find_first_app_frame_and_name(
["logging"], stacklevel=adjusted_stacklevel
)
sinfo = _format_stack(f) if stack_info else None
return f.f_code.co_filename, f.f_lineno, f.f_code.co_name, sinfo
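For context, a minimal stdlib-only sketch of how a `logging.Logger` subclass like this takes effect; the subclass name here is a stand-in, not structlog's API:

```python
import logging

class DemoLogger(logging.Logger):   # stand-in for the findCaller-fixing subclass above
    pass

logging.setLoggerClass(DemoLogger)  # loggers created after this use the subclass
log = logging.getLogger("myapp")
print(type(log).__name__)           # DemoLogger
```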
|
_FixedFindCallerLogger
|
python
|
PrefectHQ__prefect
|
src/prefect/server/utilities/database.py
|
{
"start": 5417,
"end": 6658
}
|
class ____(TypeDecorator[uuid.UUID]):
"""
Platform-independent UUID type.
Uses PostgreSQL's UUID type, otherwise uses
CHAR(36), storing as stringified hex values with
hyphens.
"""
impl: type[TypeEngine[Any]] | TypeEngine[Any] = TypeEngine
cache_ok: bool | None = True
def load_dialect_impl(self, dialect: sa.Dialect) -> TypeEngine[Any]:
if dialect.name == "postgresql":
return dialect.type_descriptor(postgresql.UUID())
else:
return dialect.type_descriptor(CHAR(36))
def process_bind_param(
self, value: Optional[Union[str, uuid.UUID]], dialect: sa.Dialect
) -> Optional[str]:
if value is None:
return None
elif dialect.name == "postgresql":
return str(value)
elif isinstance(value, uuid.UUID):
return str(value)
else:
return str(uuid.UUID(value))
def process_result_value(
self, value: Optional[Union[str, uuid.UUID]], dialect: sa.Dialect
) -> Optional[uuid.UUID]:
if value is None:
return value
else:
if not isinstance(value, uuid.UUID):
value = uuid.UUID(value)
return value
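A hedged sketch of declaring a column with this decorator, assuming it runs in the same module as the `UUID` type above; the table and column names are invented:

```python
import uuid
import sqlalchemy as sa

metadata = sa.MetaData()
runs = sa.Table(
    "runs",
    metadata,
    sa.Column("id", UUID(), primary_key=True, default=uuid.uuid4),
    sa.Column("name", sa.String(), nullable=False),
)
# On PostgreSQL the column compiles to UUID; elsewhere it falls back to CHAR(36).
```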
|
UUID
|
python
|
pypa__pip
|
src/pip/_vendor/requests/models.py
|
{
"start": 9506,
"end": 21015
}
|
class ____(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(
self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None,
):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return f"<PreparedRequest [{self.method}]>"
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from pip._vendor import idna
try:
host = idna.encode(host, uts46=True).decode("utf-8")
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode("utf8")
else:
url = str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ":" in url and not url.lower().startswith("http"):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema(
f"Invalid URL {url!r}: No scheme supplied. "
f"Perhaps you meant https://{url}?"
)
if not host:
raise InvalidURL(f"Invalid URL {url!r}: No host supplied")
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL("URL has an invalid label.")
elif host.startswith(("*", ".")):
raise InvalidURL("URL has an invalid label.")
# Carefully reconstruct the network location
netloc = auth or ""
if netloc:
netloc += "@"
netloc += host
if port:
netloc += f":{port}"
# Bare domains aren't valid URLs.
if not path:
path = "/"
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = f"{query}&{enc_params}"
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = "application/json"
try:
body = complexjson.dumps(json, allow_nan=False)
except ValueError as ve:
raise InvalidJSONError(ve, request=self)
if not isinstance(body, bytes):
body = body.encode("utf-8")
is_stream = all(
[
hasattr(data, "__iter__"),
not isinstance(data, (basestring, list, tuple, Mapping)),
]
)
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, "tell", None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except OSError:
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError(
"Streamed bodies and files are mutually exclusive."
)
if length:
self.headers["Content-Length"] = builtin_str(length)
else:
self.headers["Transfer-Encoding"] = "chunked"
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, "read"):
content_type = None
else:
content_type = "application/x-www-form-urlencoded"
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ("content-type" not in self.headers):
self.headers["Content-Type"] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers["Content-Length"] = builtin_str(length)
elif (
self.method not in ("GET", "HEAD")
and self.headers.get("Content-Length") is None
):
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers["Content-Length"] = "0"
def prepare_auth(self, auth, url=""):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers["Cookie"] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
|
PreparedRequest
|
python
|
pytorch__pytorch
|
torch/_functorch/_aot_autograd/runtime_wrappers.py
|
{
"start": 81326,
"end": 82201
}
|
class ____:
"""
Represents a result of AOTDispatch after calling the inner compiler
that can be serialized
"""
compiled_fn: Callable
serialize_fn: Callable
def __init__(self, compiled_fn: Callable, serialize_fn: Callable):
self.compiled_fn = compiled_fn
self.serialize_fn = serialize_fn
# Equivalent to functools.wraps
functools.update_wrapper(
self,
compiled_fn,
assigned=("__doc__", "__annotations__", "__type_params__"),
)
def serialize(self) -> Any:
return self.serialize_fn()
def __call__(self, *args, **kwargs):
return self.compiled_fn(*args, **kwargs)
# This is wrapped in a class just for namespacing purposes
# No need to make it into an actual CompilerWrapper because it doesn't fit the abstract as cleanly
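The `functools.update_wrapper` call above is the key trick; a self-contained sketch of the same pattern with illustrative names:

```python
import functools

def compiled(x, y):
    """Pretend this came from the inner compiler."""
    return x + y

class Wrapper:
    def __init__(self, fn):
        self.fn = fn
        # Copy metadata so the wrapper "looks like" the wrapped callable.
        functools.update_wrapper(self, fn, assigned=("__doc__", "__annotations__"))

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

w = Wrapper(compiled)
print(w(2, 3), w.__doc__)   # 5 Pretend this came from the inner compiler.
```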
|
SerializableCompiledFunction
|
python
|
getsentry__sentry
|
src/sentry/search/events/fields.py
|
{
"start": 44152,
"end": 44940
}
|
class ____(ColumnArg):
# XXX(ahmed): hack to get this to work with crash rate alerts over the sessions dataset until
# we deprecate the logic that is tightly coupled with the events dataset. At which point,
# we will just rely on dataset specific logic and refactor this class out
def normalize(self, value: str, params: ParamsType, combinator: Combinator | None) -> str:
if value in SESSIONS_SNUBA_MAP:
return value
raise InvalidFunctionArgument(f"{value} is not a valid sessions dataset column")
def with_default(default, argument):
argument.has_default = True
argument.get_default = lambda *_: default
return argument
# TODO(snql-migration): Remove these Arg classes in favour for their
# non SnQL specific types
|
SessionColumnArg
|
python
|
gevent__gevent
|
src/gevent/tests/test__socket.py
|
{
"start": 985,
"end": 1364
}
|
class ____(object):
terminal_exc = None
def __init__(self, target):
@wraps(target)
def errors_are_fatal(*args, **kwargs):
try:
return target(*args, **kwargs)
except: # pylint:disable=bare-except
self.terminal_exc = sys.exc_info()
raise
self.target = errors_are_fatal
|
BaseThread
|
python
|
tensorflow__tensorflow
|
tensorflow/tools/common/public_api_test.py
|
{
"start": 841,
"end": 2526
}
|
class ____(googletest.TestCase):
class TestVisitor(object):
def __init__(self):
self.symbols = set()
self.last_parent = None
self.last_children = None
def __call__(self, path, parent, children):
self.symbols.add(path)
self.last_parent = parent
self.last_children = list(children) # Make a copy to preserve state.
def test_call_forward(self):
visitor = self.TestVisitor()
children = [('name1', 'thing1'), ('name2', 'thing2')]
public_api.PublicAPIVisitor(visitor)('test', 'dummy', children)
self.assertEqual(set(['test']), visitor.symbols)
self.assertEqual('dummy', visitor.last_parent)
self.assertEqual([('name1', 'thing1'), ('name2', 'thing2')],
visitor.last_children)
def test_private_child_removal(self):
visitor = self.TestVisitor()
children = [('name1', 'thing1'), ('_name2', 'thing2')]
public_api.PublicAPIVisitor(visitor)('test', 'dummy', children)
# Make sure the private symbols are removed before the visitor is called.
self.assertEqual([('name1', 'thing1')], visitor.last_children)
self.assertEqual([('name1', 'thing1')], children)
def test_no_descent_child_removal(self):
visitor = self.TestVisitor()
children = [('name1', 'thing1'), ('mock', 'thing2')]
public_api.PublicAPIVisitor(visitor)('test', 'dummy', children)
# Make sure not-to-be-descended-into symbols are removed after the visitor
# is called.
self.assertEqual([('name1', 'thing1'), ('mock', 'thing2')],
visitor.last_children)
self.assertEqual([('name1', 'thing1')], children)
if __name__ == '__main__':
googletest.main()
|
PublicApiTest
|
python
|
pytorch__pytorch
|
benchmarks/functional_autograd_benchmark/torchvision_models.py
|
{
"start": 2170,
"end": 3983
}
|
class ____(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at the 3x3 convolution (self.conv2)
# while the original implementation places the stride at the first 1x1 convolution (self.conv1)
# according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
Bottleneck
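A self-contained sketch of the same 1x1 -> 3x3 -> 1x1 channel bottleneck, using plain nn.Conv2d in place of the conv1x1/conv3x3 helpers the excerpt assumes; the residual add and downsample branch are omitted, and the shapes are only illustrative.
import torch
import torch.nn as nn

def bottleneck_stack(inplanes: int, planes: int, expansion: int = 4) -> nn.Sequential:
    """Conv stack of a ResNet bottleneck (no residual connection)."""
    return nn.Sequential(
        nn.Conv2d(inplanes, planes, kernel_size=1, bias=False),            # reduce channels
        nn.BatchNorm2d(planes),
        nn.ReLU(inplace=True),
        nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False),   # 3x3 spatial conv
        nn.BatchNorm2d(planes),
        nn.ReLU(inplace=True),
        nn.Conv2d(planes, planes * expansion, kernel_size=1, bias=False),  # expand channels
        nn.BatchNorm2d(planes * expansion),
    )

x = torch.randn(1, 64, 56, 56)
print(bottleneck_stack(64, 64)(x).shape)  # torch.Size([1, 256, 56, 56])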
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/cli.py
|
{
"start": 9393,
"end": 13848
}
|
class ____(Callback):
"""Saves a LightningCLI config to the log_dir when training starts.
Args:
parser: The parser object used to parse the configuration.
config: The parsed configuration that will be saved.
config_filename: Filename for the config file.
overwrite: Whether to overwrite an existing config file.
multifile: When input is multiple config files, saved config preserves this structure.
save_to_log_dir: Whether to save the config to the log_dir.
Raises:
RuntimeError: If the config file already exists in the directory to avoid overwriting a previous run
"""
def __init__(
self,
parser: LightningArgumentParser,
config: Namespace,
config_filename: str = "config.yaml",
overwrite: bool = False,
multifile: bool = False,
save_to_log_dir: bool = True,
) -> None:
self.parser = parser
self.config = config
self.config_filename = config_filename
self.overwrite = overwrite
self.multifile = multifile
self.save_to_log_dir = save_to_log_dir
self.already_saved = False
if not save_to_log_dir and not is_overridden("save_config", self, SaveConfigCallback):
raise ValueError(
"`save_to_log_dir=False` only makes sense when subclassing SaveConfigCallback to implement "
"`save_config` and it is desired to disable the standard behavior of saving to log_dir."
)
@override
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None:
if self.already_saved:
return
if self.save_to_log_dir:
log_dir = trainer.log_dir # this broadcasts the directory
assert log_dir is not None
config_path = os.path.join(log_dir, self.config_filename)
fs = get_filesystem(log_dir)
if not self.overwrite:
# check if the file exists on rank 0
file_exists = fs.isfile(config_path) if trainer.is_global_zero else False
# broadcast whether to fail to all ranks
file_exists = trainer.strategy.broadcast(file_exists)
if file_exists:
raise RuntimeError(
f"{self.__class__.__name__} expected {config_path} to NOT exist. Aborting to avoid overwriting"
" results of a previous run. You can delete the previous config file,"
" set `LightningCLI(save_config_callback=None)` to disable config saving,"
' or set `LightningCLI(save_config_kwargs={"overwrite": True})` to overwrite the config file.'
)
if trainer.is_global_zero:
# save only on rank zero to avoid race conditions.
# the `log_dir` needs to be created as we rely on the logger to do it usually
# but it hasn't logged anything at this point
fs.makedirs(log_dir, exist_ok=True)
self.parser.save(
self.config, config_path, skip_none=False, overwrite=self.overwrite, multifile=self.multifile
)
if trainer.is_global_zero:
self.save_config(trainer, pl_module, stage)
self.already_saved = True
# broadcast so that all ranks are in sync on future calls to .setup()
self.already_saved = trainer.strategy.broadcast(self.already_saved)
def save_config(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None:
"""Implement to save the config in some other place additional to the standard log_dir.
Example:
def save_config(self, trainer, pl_module, stage):
if isinstance(trainer.logger, Logger):
config = self.parser.dump(self.config, skip_none=False) # Required for proper reproducibility
trainer.logger.log_hyperparams({"config": config})
Note:
This method is only called on rank zero. This allows to implement a custom save config without having to
worry about ranks or race conditions. Since it only runs on rank zero, any collective call will make the
process hang waiting for a broadcast. If you need to make collective calls, implement the setup method
instead.
"""
|
SaveConfigCallback
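A minimal subclass sketch following the pattern shown in the docstring above: skip the log_dir copy and push the dumped config to the experiment logger instead. `MyModel` stands in for the user's LightningModule and is not part of the excerpt.
from lightning.pytorch.cli import LightningCLI, SaveConfigCallback
from lightning.pytorch.loggers import Logger

class LoggerConfigCallback(SaveConfigCallback):
    def save_config(self, trainer, pl_module, stage) -> None:
        # Runs on rank zero only, per the note in save_config's docstring.
        if isinstance(trainer.logger, Logger):
            config = self.parser.dump(self.config, skip_none=False)
            trainer.logger.log_hyperparams({"config": config})

cli = LightningCLI(
    MyModel,  # hypothetical LightningModule
    save_config_callback=LoggerConfigCallback,
    save_config_kwargs={"save_to_log_dir": False},
)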
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/api/client.py
|
{
"start": 35881,
"end": 37483
}
|
class ____(httpx.HTTPStatusError):
def __init__(self, message: str, *, request: httpx.Request, response: httpx.Response):
super().__init__(message, request=request, response=response)
detail: list[RemoteValidationError] | str | dict[str, Any] | None
def __reduce__(self) -> tuple[Any, ...]:
# Needed because https://github.com/encode/httpx/pull/3108 isn't merged yet.
return Exception.__new__, (type(self),) + self.args, self.__dict__
@classmethod
def from_response(cls, response: httpx.Response) -> ServerResponseError | None:
if response.is_success:
return None
# 4xx or 5xx error?
if not (400 <= response.status_code < 600):
return None
if response.headers.get("content-type") != "application/json":
return None
detail: list[RemoteValidationError] | dict[str, Any] | None = None
try:
body = _ErrorBody.model_validate_json(response.read())
if isinstance(body.detail, list):
detail = body.detail
msg = "Remote server returned validation error"
else:
msg = body.detail or "Un-parseable error"
except Exception:
try:
detail = msgspec.json.decode(response.content)
except Exception:
# Fallback to a normal httpx error
return None
msg = "Server returned error"
self = cls(msg, request=response.request, response=response)
self.detail = detail
return self
|
ServerResponseError
|
python
|
realpython__materials
|
python-enum/http_methods.py
|
{
"start": 113,
"end": 204
}
|
class ____(Enum):
GET = 1
POST = 2
PUSH = 3
PATCH = 4
DELETE = 5
|
HTTPMethod
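A quick usage sketch for an Enum with the members listed above: members can be reached by attribute, by value, or by name.
from enum import Enum

class HTTPMethod(Enum):
    GET = 1
    POST = 2
    PUSH = 3
    PATCH = 4
    DELETE = 5

print(HTTPMethod.GET.name, HTTPMethod.GET.value)  # GET 1
print(HTTPMethod(2))                              # HTTPMethod.POST
print(HTTPMethod["PATCH"])                        # HTTPMethod.PATCH
print(list(HTTPMethod))                           # all five members, in definition order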
|
python
|
PyCQA__pylint
|
tests/test_check_parallel.py
|
{
"start": 4673,
"end": 4889
}
|
class ____(SequentialTestChecker):
"""A checker that does not need to consolidate data across run invocations."""
name = "extra-sequential-checker"
test_data = "extra-sequential"
|
ExtraSequentialTestChecker
|
python
|
pytorch__pytorch
|
test/quantization/fx/test_subgraph_rewriter.py
|
{
"start": 708,
"end": 15936
}
|
class ____(JitTestCase):
def test_subgraph_rewriter_preserves_logic(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x) + torch.relu(x)
def comparison(x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
# Replace `pattern` with the same pattern (shouldn't change
# the underlying logic)
subgraph_rewriter.replace_pattern(traced, pattern, pattern)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_with_oneliner_pattern(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x)
def replacement(x):
return torch.relu(x)
def comparison(x):
val = torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_single_pattern_match(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x) + torch.relu(x)
def replacement(x):
return torch.relu(x)
def comparison(x):
val = torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_multiple_pattern_match(self):
class M(torch.nn.Module):
def forward(self, x, w1, w2):
m1 = torch.cat([w1, w2]).sum()
m2 = torch.cat([w1, w2]).sum()
return x + torch.max(m1) + torch.max(m2)
def pattern(w1, w2):
return torch.cat([w1, w2]).sum()
def replacement(w1, w2):
return torch.stack([w1, w2])
def comparison(x, w1, w2):
m1 = torch.stack([w1, w2])
m2 = torch.stack([w1, w2])
return x + torch.max(m1) + torch.max(m2)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
w1 = torch.rand(1, 3)
w2 = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x, w1, w2)
test_outs = traced.forward(x, w1, w2)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_graph_argument_order(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.mm(x, y)
def pattern(x, y):
return torch.mm(x, y)
def comparison(x, y):
return torch.mm(x, y)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
y = torch.randn(4, 5)
subgraph_rewriter.replace_pattern(traced, pattern, pattern)
traced.graph.lint()
ref_outs = comparison_fn(x, y)
test_outs = traced.forward(x, y)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_correct_output_replacement(self):
class M(torch.nn.Module):
def forward(self, x, y):
val = torch.neg(y) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.relu(x)
def replacement(x):
return torch.neg(x)
def comparison(x, y):
val = torch.neg(y) + torch.neg(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x, y)
test_outs = traced.forward(x, y)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_traced_as_callable(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
class Pattern(torch.nn.Module):
def forward(self, x):
return torch.neg(x) + torch.relu(x)
class Replacement(torch.nn.Module):
def forward(self, x):
return torch.sigmoid(x)
def comparison(x):
val = torch.sigmoid(x)
return torch.add(val, val)
traced = symbolic_trace(M())
traced_pattern = symbolic_trace(Pattern())
traced_replacement = symbolic_trace(Replacement())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, traced_pattern, traced_replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_pattern_is_entire_graph(self):
class M(torch.nn.Module):
def forward(self, x):
a = torch.neg(x)
return torch.add(a, a)
def pattern(x):
a = torch.neg(x)
return torch.add(a, a)
def replacement(x):
a = torch.sigmoid(x)
return torch.cat([a, a])
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(replacement)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_pattern_output_pattern_node_can_have_users_that_are_not_matched(self):
class M(torch.nn.Module):
def forward(self, x):
y = torch.relu(x)
return torch.neg(y) - y
def pattern(x):
return torch.relu(x)
def replacement(x):
return torch.sigmoid(x)
def comparison(x):
y = torch.sigmoid(x)
return torch.neg(y) - y
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_internal_pattern_nodes_cannot_have_users_that_are_not_matched(self):
class M(torch.nn.Module):
def forward(self, x, w1, w2, b1, b2):
m0 = torch.cat([w1, w2]) # noqa: F841
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
t0 = torch.addmm(b1, m1, m2.t()) # noqa: F841
t1 = torch.sum(w1, 1)
t2 = torch.addmm(b1, m1, m2.t())
return torch.sum(t1), torch.sum(t2)
def pattern(x, w1, w2, b1, b2):
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
return torch.addmm(b1, m1, m2.t())
def replacement(x, w1, w2, b1, b2):
return torch.cat([x, w1, w2])
traced = symbolic_trace(M())
# Result should be [] since no matches can be found
res = subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
self.assertEqual(res, [])
def test_subgraph_rewriter_placeholder_matching(self):
"""
This tests that a placeholder Node can be matched to a Node with
a different number of input Nodes. In the example below, the
original traced Module looks like this:
opcode target args kwargs
------------- ---------------------------------------------------------- ------------------------ --------
placeholder x () {}
call_function <built-in function add> (x, 3) {}
call_method dequantize (add,) {}
call_function <built-in method sigmoid of type object at 0x7f7c1f440fe0> (dequantize,) {}
call_method to (sigmoid, torch.float16) {}
output output (to,) {}
while the pattern we want to match looks like this:
opcode target args kwargs
------------- ---------------------------------------------------------- ------------------------ --------
placeholder x () {}
call_method dequantize (x,) {}
call_function <built-in method sigmoid of type object at 0x7f7c1f440fe0> (dequantize,) {}
call_method to (sigmoid, torch.float16) {}
output output (to,) {}
Here, we want to be able to match the original graph's
`call_function.add` Node with the pattern graph's
`placeholder.x` Node.
Credit to Jerry Zhang (GitHub: jerryzh168) for this test case
"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.dtype = torch.float16
def forward(self, x):
x += 3
x = x.dequantize()
x = torch.sigmoid(x)
dtype = self.dtype
x = x.to(dtype)
return x
def pattern(x):
x = x.dequantize()
x = torch.sigmoid(x)
x = x.to(torch.float16)
return x
def replacement(x):
return x
def comparison(x):
return x + 3
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_replaces_referenced_submodules(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.submod = torch.nn.ReLU()
def forward(self, x):
x = x + 1
return self.submod(self.sigmoid(x))
class Pattern(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.submod = torch.nn.ReLU()
def forward(self, x):
return self.submod(self.sigmoid(x))
class Replacement(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.id = torch.nn.Identity()
self.submod = torch.nn.ReLU()
def forward(self, x):
return self.submod(self.id(x))
class Comparison(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.id = torch.nn.Identity()
self.submod = torch.nn.ReLU()
def forward(self, x):
x = x + 1
return self.submod(self.id(x))
traced = symbolic_trace(M())
comparison = Comparison()
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, Pattern(), Replacement())
traced.graph.lint()
ref_outs = comparison(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
traced.get_submodule("id")
with self.assertRaisesRegex(AttributeError, "has no attribute"):
traced.get_submodule("sigmoid")
submod = traced.get_submodule("submod")
self.assertEqual(type(submod), torch.nn.ReLU)
def test_subgraph_rewriter_annotations_int(self):
class M1(torch.nn.Module):
def forward(self, x):
y: int = x
return torch.add(x, y)
class M2(torch.nn.Module):
def forward(self, x):
y = annotate(x, int)
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M1())
module = M2()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
for n, m in zip(symbolic_traced.graph.nodes, graph.nodes):
if n.op == 'placeholder':
assert n.type is int
assert m.type is int
def test_subgraph_writer_replace_consecutive_submodules(self):
def f(x):
x = torch.sigmoid(x)
x = torch.sigmoid(x)
return torch.sigmoid(x)
def pattern(x):
return torch.sigmoid(x)
def replacement(x):
return torch.exp(x)
def comparison(x):
x = torch.exp(x)
x = torch.exp(x)
return torch.exp(x)
traced = symbolic_trace(f)
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
|
TestSubgraphRewriter
|
python
|
sympy__sympy
|
sympy/functions/elementary/exponential.py
|
{
"start": 19814,
"end": 36790
}
|
class ____(DefinedFunction):
r"""
The natural logarithm function `\ln(x)` or `\log(x)`.
Explanation
===========
Logarithms are taken with the natural base, `e`. To get
a logarithm of a different base ``b``, use ``log(x, b)``,
which is essentially short-hand for ``log(x)/log(b)``.
``log`` represents the principal branch of the natural
logarithm. As such it has a branch cut along the negative
real axis and returns values having a complex argument in
`(-\pi, \pi]`.
Examples
========
>>> from sympy import log, sqrt, S, I
>>> log(8, 2)
3
>>> log(S(8)/3, 2)
-log(3)/log(2) + 3
>>> log(-1 + I*sqrt(3))
log(2) + 2*I*pi/3
See Also
========
sympy.functions.elementary.exponential.exp
"""
args: tuple[Expr]
_singularities = (S.Zero, S.ComplexInfinity)
def fdiff(self, argindex=1):
"""
Returns the first derivative of the function.
"""
if argindex == 1:
return 1/self.args[0]
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
r"""
Returns `e^x`, the inverse function of `\log(x)`.
"""
return exp
@classmethod
def eval(cls, arg, base=None):
from sympy.calculus import AccumBounds
from sympy.sets.setexpr import SetExpr
arg = sympify(arg)
if base is not None:
base = sympify(base)
if base == 1:
if arg == 1:
return S.NaN
else:
return S.ComplexInfinity
try:
# handle extraction of powers of the base now
# or else expand_log in Mul would have to handle this
n = multiplicity(base, arg)
if n:
return n + log(arg / base**n) / log(base)
else:
return log(arg)/log(base)
except ValueError:
pass
if base is not S.Exp1:
return cls(arg)/cls(base)
else:
return cls(arg)
if arg.is_Number:
if arg.is_zero:
return S.ComplexInfinity
elif arg is S.One:
return S.Zero
elif arg is S.Infinity or arg is S.NegativeInfinity:
return S.Infinity
elif arg is S.NaN:
return S.NaN
elif arg.is_Rational and arg.p == 1:
return -cls(arg.q)
if arg.is_Pow and arg.base is S.Exp1 and arg.exp.is_extended_real:
return arg.exp
if isinstance(arg, exp) and arg.exp.is_extended_real:
return arg.exp
elif isinstance(arg, exp) and arg.exp.is_number:
r_, i_ = match_real_imag(arg.exp)
if i_ and i_.is_comparable:
i_ %= 2*pi
if i_ > pi:
i_ -= 2*pi
return r_ + expand_mul(i_ * I, deep=False)
elif isinstance(arg, exp_polar):
return unpolarify(arg.exp)
elif isinstance(arg, AccumBounds):
if arg.min.is_positive:
return AccumBounds(log(arg.min), log(arg.max))
elif arg.min.is_zero:
return AccumBounds(S.NegativeInfinity, log(arg.max))
else:
return S.NaN
elif isinstance(arg, SetExpr):
return arg._eval_func(cls)
if arg.is_number:
if arg.is_negative:
return pi * I + cls(-arg)
elif arg is S.ComplexInfinity:
return S.ComplexInfinity
elif arg is S.Exp1:
return S.One
if arg.is_zero:
return S.ComplexInfinity
# don't autoexpand Pow or Mul (see the issue 3351):
if not arg.is_Add:
coeff = arg.as_coefficient(I)
if coeff is not None:
if coeff is S.Infinity or coeff is S.NegativeInfinity:
return S.Infinity
elif coeff.is_Rational:
if coeff.is_nonnegative:
return pi * I * S.Half + cls(coeff)
else:
return -pi * I * S.Half + cls(-coeff)
if arg.is_number and arg.is_algebraic:
# Match arg = coeff*(r_ + i_*I) with coeff>0, r_ and i_ real.
coeff, arg_ = arg.as_independent(I, as_Add=False)
if coeff.is_negative:
coeff *= -1
arg_ *= -1
arg_ = expand_mul(arg_, deep=False)
r_, i_ = arg_.as_independent(I, as_Add=True)
i_ = i_.as_coefficient(I)
if coeff.is_real and i_ and i_.is_real and r_.is_real:
if r_.is_zero:
if i_.is_positive:
return pi * I * S.Half + cls(coeff * i_)
elif i_.is_negative:
return -pi * I * S.Half + cls(coeff * -i_)
else:
from sympy.simplify import ratsimp
# Check for arguments involving rational multiples of pi
t = (i_/r_).cancel()
t1 = (-t).cancel()
atan_table = _log_atan_table()
if t in atan_table:
modulus = ratsimp(coeff * Abs(arg_))
if r_.is_positive:
return cls(modulus) + I * atan_table[t]
else:
return cls(modulus) + I * (atan_table[t] - pi)
elif t1 in atan_table:
modulus = ratsimp(coeff * Abs(arg_))
if r_.is_positive:
return cls(modulus) + I * (-atan_table[t1])
else:
return cls(modulus) + I * (pi - atan_table[t1])
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms): # of log(1+x)
r"""
Returns the next term in the Taylor series expansion of `\log(1+x)`.
"""
from sympy.simplify.powsimp import powsimp
if n < 0:
return S.Zero
x = sympify(x)
if n == 0:
return x
if previous_terms:
p = previous_terms[-1]
if p is not None:
return powsimp((-n) * p * x / (n + 1), deep=True, combine='exp')
return (1 - 2*(n % 2)) * x**(n + 1)/(n + 1)
def _eval_expand_log(self, deep=True, **hints):
from sympy.concrete import Sum, Product
force = hints.get('force', False)
factor = hints.get('factor', False)
if (len(self.args) == 2):
return expand_log(self.func(*self.args), deep=deep, force=force)
arg = self.args[0]
if arg.is_Integer:
# remove perfect powers
p = perfect_power(arg)
logarg = None
coeff = 1
if p is not False:
arg, coeff = p
logarg = self.func(arg)
# expand as product of its prime factors if factor=True
if factor:
p = factorint(arg)
if arg not in p.keys():
logarg = sum(n*log(val) for val, n in p.items())
if logarg is not None:
return coeff*logarg
elif arg.is_Rational:
return log(arg.p) - log(arg.q)
elif arg.is_Mul:
expr = []
nonpos = []
for x in arg.args:
if force or x.is_positive or x.is_polar:
a = self.func(x)
if isinstance(a, log):
expr.append(self.func(x)._eval_expand_log(**hints))
else:
expr.append(a)
elif x.is_negative:
a = self.func(-x)
expr.append(a)
nonpos.append(S.NegativeOne)
else:
nonpos.append(x)
return Add(*expr) + log(Mul(*nonpos))
elif arg.is_Pow or isinstance(arg, exp):
if force or (arg.exp.is_extended_real and (arg.base.is_positive or ((arg.exp+1)
.is_positive and (arg.exp-1).is_nonpositive))) or arg.base.is_polar:
b = arg.base
e = arg.exp
a = self.func(b)
if isinstance(a, log):
return unpolarify(e) * a._eval_expand_log(**hints)
else:
return unpolarify(e) * a
elif isinstance(arg, Product):
if force or arg.function.is_positive:
return Sum(log(arg.function), *arg.limits)
return self.func(arg)
def _eval_simplify(self, **kwargs):
from sympy.simplify.simplify import expand_log, simplify, inversecombine
if len(self.args) == 2: # it's unevaluated
return simplify(self.func(*self.args), **kwargs)
expr = self.func(simplify(self.args[0], **kwargs))
if kwargs['inverse']:
expr = inversecombine(expr)
expr = expand_log(expr, deep=True)
return min([expr, self], key=kwargs['measure'])
def as_real_imag(self, deep=True, **hints):
"""
Returns this function as a complex coordinate.
Examples
========
>>> from sympy import I, log
>>> from sympy.abc import x
>>> log(x).as_real_imag()
(log(Abs(x)), arg(x))
>>> log(I).as_real_imag()
(0, pi/2)
>>> log(1 + I).as_real_imag()
(log(sqrt(2)), pi/4)
>>> log(I*x).as_real_imag()
(log(Abs(x)), arg(I*x))
"""
sarg = self.args[0]
if deep:
sarg = self.args[0].expand(deep, **hints)
sarg_abs = Abs(sarg)
if sarg_abs == sarg:
return self, S.Zero
sarg_arg = arg(sarg)
if hints.get('log', False): # Expand the log
hints['complex'] = False
return (log(sarg_abs).expand(deep, **hints), sarg_arg)
else:
return log(sarg_abs), sarg_arg
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
if (self.args[0] - 1).is_zero:
return True
if s.args[0].is_rational and fuzzy_not((self.args[0] - 1).is_zero):
return False
else:
return s.is_rational
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if (self.args[0] - 1).is_zero:
return True
elif fuzzy_not((self.args[0] - 1).is_zero):
if self.args[0].is_algebraic:
return False
else:
return s.is_algebraic
def _eval_is_extended_real(self):
return self.args[0].is_extended_positive
def _eval_is_complex(self):
z = self.args[0]
return fuzzy_and([z.is_complex, fuzzy_not(z.is_zero)])
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_zero:
return False
return arg.is_finite
def _eval_is_extended_positive(self):
return (self.args[0] - 1).is_extended_positive
def _eval_is_zero(self):
return (self.args[0] - 1).is_zero
def _eval_is_extended_nonnegative(self):
return (self.args[0] - 1).is_extended_nonnegative
def _eval_nseries(self, x, n, logx, cdir=0):
# NOTE Please see the comment at the beginning of this file, labelled
# IMPORTANT.
from sympy.series.order import Order
from sympy.simplify.simplify import logcombine
from sympy.core.symbol import Dummy
if self.args[0] == x:
return log(x) if logx is None else logx
arg = self.args[0]
t = Dummy('t', positive=True)
if cdir == 0:
cdir = 1
z = arg.subs(x, cdir*t)
k, l = Wild("k"), Wild("l")
r = z.match(k*t**l)
if r is not None:
k, l = r[k], r[l]
if l != 0 and not l.has(t) and not k.has(t):
r = l*log(x) if logx is None else l*logx
r += log(k) - l*log(cdir) # XXX true regardless of assumptions?
return r
def coeff_exp(term, x):
coeff, exp = S.One, S.Zero
for factor in Mul.make_args(term):
if factor.has(x):
base, exp = factor.as_base_exp()
if base != x:
try:
return term.leadterm(x)
except ValueError:
return term, S.Zero
else:
coeff *= factor
return coeff, exp
# TODO new and probably slow
try:
a, b = z.leadterm(t, logx=logx, cdir=1)
except (ValueError, NotImplementedError, PoleError):
s = z._eval_nseries(t, n=n, logx=logx, cdir=1)
while s.is_Order:
n += 1
s = z._eval_nseries(t, n=n, logx=logx, cdir=1)
try:
a, b = s.removeO().leadterm(t, cdir=1)
except ValueError:
a, b = s.removeO().as_leading_term(t, cdir=1), S.Zero
p = (z/(a*t**b) - 1).cancel()._eval_nseries(t, n=n, logx=logx, cdir=1)
if p.has(exp):
p = logcombine(p)
if isinstance(p, Order):
n = p.getn()
_, d = coeff_exp(p, t)
logx = log(x) if logx is None else logx
if not d.is_positive:
res = log(a) - b*log(cdir) + b*logx
_res = res
logflags = {"deep": True, "log": True, "mul": False, "power_exp": False,
"power_base": False, "multinomial": False, "basic": False, "force": True,
"factor": False}
expr = self.expand(**logflags)
if (not a.could_extract_minus_sign() and
logx.could_extract_minus_sign()):
_res = _res.subs(-logx, -log(x)).expand(**logflags)
else:
_res = _res.subs(logx, log(x)).expand(**logflags)
if _res == expr:
return res
return res + Order(x**n, x)
def mul(d1, d2):
res = {}
for e1, e2 in product(d1, d2):
ex = e1 + e2
if ex < n:
res[ex] = res.get(ex, S.Zero) + d1[e1]*d2[e2]
return res
pterms = {}
for term in Add.make_args(p.removeO()):
co1, e1 = coeff_exp(term, t)
pterms[e1] = pterms.get(e1, S.Zero) + co1
k = S.One
terms = {}
pk = pterms
while k*d < n:
coeff = -S.NegativeOne**k/k
for ex in pk:
terms[ex] = terms.get(ex, S.Zero) + coeff*pk[ex]
pk = mul(pk, pterms)
k += S.One
res = log(a) - b*log(cdir) + b*logx
for ex in terms:
res += terms[ex].cancel()*t**(ex)
if a.is_negative and im(z) != 0:
from sympy.functions.special.delta_functions import Heaviside
for i, term in enumerate(z.lseries(t)):
if not term.is_real or i == 5:
break
if i < 5:
coeff, _ = term.as_coeff_exponent(t)
res += -2*I*pi*Heaviside(-im(coeff), 0)
res = res.subs(t, x/cdir)
return res + Order(x**n, x)
def _eval_as_leading_term(self, x, logx, cdir):
# NOTE
# Refer https://github.com/sympy/sympy/pull/23592 for more information
# on each of the following steps involved in this method.
arg0 = self.args[0].together()
# STEP 1
t = Dummy('t', positive=True)
if cdir == 0:
cdir = 1
z = arg0.subs(x, cdir*t)
# STEP 2
try:
c, e = z.leadterm(t, logx=logx, cdir=1)
except ValueError:
arg = arg0.as_leading_term(x, logx=logx, cdir=cdir)
return log(arg)
if c.has(t):
c = c.subs(t, x/cdir)
if e != 0:
raise PoleError("Cannot expand %s around 0" % (self))
return log(c)
# STEP 3
if c == S.One and e == S.Zero:
return (arg0 - S.One).as_leading_term(x, logx=logx)
# STEP 4
res = log(c) - e*log(cdir)
logx = log(x) if logx is None else logx
res += e*logx
# STEP 5
if c.is_negative and im(z) != 0:
from sympy.functions.special.delta_functions import Heaviside
for i, term in enumerate(z.lseries(t)):
if not term.is_real or i == 5:
break
if i < 5:
coeff, _ = term.as_coeff_exponent(t)
res += -2*I*pi*Heaviside(-im(coeff), 0)
return res
|
log
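A short interactive sketch of the behaviors described in the docstring and eval above: base handling via log(x, b), extraction of integer powers of the base, and principal-branch values for negative and zero arguments.
from sympy import log, S

print(log(8, 2))         # 3
print(log(S(8) / 3, 2))  # -log(3)/log(2) + 3
print(log(-4))           # log(4) + I*pi   (principal branch)
print(log(1))            # 0
print(log(0))            # zoo  (ComplexInfinity)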
|
python
|
mitsuhiko__rye
|
rye-devtools/src/rye_devtools/find_downloads.py
|
{
"start": 606,
"end": 780
}
|
class ____:
version: Version
triple: PlatformTriple
implementation: PythonImplementation
filename: str
url: str
sha256: str | None = None
|
PythonDownload
|
python
|
getsentry__sentry
|
src/sentry/explore/endpoints/explore_saved_query_starred_order.py
|
{
"start": 744,
"end": 1158
}
|
class ____(serializers.Serializer):
query_ids = serializers.ListField(child=serializers.IntegerField(), required=True, min_length=0)
def validate_query_ids(self, query_ids):
if len(query_ids) != len(set(query_ids)):
raise serializers.ValidationError("Single query cannot take up multiple positions")
return query_ids
@region_silo_endpoint
|
ExploreSavedQueryStarredOrderSerializer
|
python
|
dask__dask
|
dask/layers.py
|
{
"start": 10690,
"end": 14789
}
|
class ____(Blockwise):
"""DataFrame-based Blockwise Layer with IO
Parameters
----------
name : str
Name to use for the constructed layer.
columns : str, list or None
Field name(s) to read in as columns in the output.
inputs : list or BlockwiseDep
List of arguments to be passed to ``io_func`` so
that the materialized task to produce partition ``i``
will be: ``(<io_func>, inputs[i])``. Note that each
element of ``inputs`` is typically a tuple of arguments.
io_func : callable
A callable function that takes in a single tuple
of arguments, and outputs a DataFrame partition.
Column projection will be supported for functions
that satisfy the ``DataFrameIOFunction`` protocol.
label : str (optional)
String to use as a prefix in the place-holder collection
name. If nothing is specified (default), "subset-" will
be used.
produces_tasks : bool (optional)
Whether one or more elements of `inputs` is expected to
contain a nested task. This argument is only used for
serialization purposes, and will be deprecated in the
future. Default is False.
creation_info: dict (optional)
Dictionary containing the callable function ('func'),
positional arguments ('args'), and key-word arguments
('kwargs') used to produce the dask collection with
this underlying ``DataFrameIOLayer``.
annotations: dict (optional)
Layer annotations to pass through to Blockwise.
"""
def __init__(
self,
name,
columns,
inputs,
io_func,
label=None,
produces_tasks=False,
creation_info=None,
annotations=None,
):
self.name = name
self._columns = columns
self.inputs = inputs
self.io_func = io_func
self.label = label
self.produces_tasks = produces_tasks
self.annotations = annotations
self.creation_info = creation_info
if not isinstance(inputs, BlockwiseDep):
# Define mapping between key index and "part"
io_arg_map = BlockwiseDepDict(
{(i,): inp for i, inp in enumerate(self.inputs)},
produces_tasks=self.produces_tasks,
)
else:
io_arg_map = inputs
# Use Blockwise initializer
task = Task(self.name, io_func, TaskRef(blockwise_token(0)))
super().__init__(
output=self.name,
output_indices="i",
task=task,
indices=[(io_arg_map, "i")],
numblocks={},
annotations=annotations,
)
@property
def columns(self):
"""Current column projection for this layer"""
return self._columns
def project_columns(self, columns):
"""Produce a column projection for this IO layer.
Given a list of required output columns, this method
returns the projected layer.
"""
from dask.dataframe.io.utils import DataFrameIOFunction
columns = list(columns)
if self.columns is None or set(self.columns).issuperset(columns):
# Apply column projection in IO function.
# Must satisfy `DataFrameIOFunction` protocol
if isinstance(self.io_func, DataFrameIOFunction):
io_func = self.io_func.project_columns(columns)
else:
io_func = self.io_func
layer = DataFrameIOLayer(
(self.label or "subset") + "-" + tokenize(self.name, columns),
columns,
self.inputs,
io_func,
label=self.label,
produces_tasks=self.produces_tasks,
annotations=self.annotations,
)
return layer
else:
# Default behavior
return self
def __repr__(self):
return f"DataFrameIOLayer<name='{self.name}', n_parts={len(self.inputs)}, columns={self.columns}>"
|
DataFrameIOLayer
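A hedged sketch of an IO callable that satisfies the column-projection protocol the docstring refers to: it exposes its current `columns` and returns a narrowed copy from `project_columns`, so the layer can push a column selection down into the reader. The CSV reading and the single-path argument are illustrative, not from the original file.
import pandas as pd
from dask.dataframe.io.utils import DataFrameIOFunction

class CSVPartReader(DataFrameIOFunction):
    """Reads one partition per call; supports column projection."""

    def __init__(self, columns=None):
        self._columns = columns

    @property
    def columns(self):
        return self._columns

    def project_columns(self, columns):
        # Return a new reader restricted to the requested columns.
        return CSVPartReader(columns=columns)

    def __call__(self, path):
        return pd.read_csv(path, usecols=self._columns)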
|
python
|
PyCQA__pylint
|
tests/data/suppliermodule_test.py
|
{
"start": 60,
"end": 204
}
|
class ____:
def get_value(self):
raise NotImplementedError
def set_value(self, value):
raise NotImplementedError
|
Interface
|
python
|
google__jax
|
jax/experimental/pallas/ops/tpu/splash_attention/splash_attention_mask.py
|
{
"start": 785,
"end": 4086
}
|
class ____:
"""A base class for splash attention masks."""
@property
def shape(self) -> tuple[int, ...]:
raise NotImplementedError
def __getitem__(self, idx) -> np.ndarray:
raise NotImplementedError
def __bool__(self) -> bool:
raise NotImplementedError(
'Conversion to bool is unsupported. Could be caused by using logical'
' instead of bitwise operations on masks.'
)
def __or__(self, other: Mask) -> Mask:
if self.shape != other.shape:
raise ValueError(
f'Invalid shape for other: {other.shape}, expected: {self.shape}'
)
return LogicalOr(self, other)
def __and__(self, other: Mask) -> Mask:
if self.shape != other.shape:
raise ValueError(
f'Invalid shape for other: {other.shape}, expected: {self.shape}'
)
return LogicalAnd(self, other)
def make_causal_mask(shape: tuple[int, int], offset: int = 0) -> np.ndarray:
"""Makes a causal attention mask.
Args:
shape: Shape of the 2-dim mask: (q_seq_len, kv_seq_len).
offset: Offset of q start wrt kv. A positive offset shifts the bottom
triangle upward, a negative one shifts it downward. A negative offset
makes the first 'offset' rows of the attention matrix all 0s which leads
to undefined softmax.
Returns:
The causal mask.
"""
q_seq_len, kv_seq_len = shape
q_idx = np.arange(q_seq_len, dtype=np.int32)
kv_idx = np.arange(kv_seq_len, dtype=np.int32)
return (q_idx[:, None] + offset >= kv_idx[None, :]).astype(np.bool_)
def make_local_attention_mask(
shape: tuple[int, int],
window_size: tuple[int | None, int | None],
*,
offset: int = 0,
) -> np.ndarray:
"""Makes a local attention mask."""
q_seq_len, kv_seq_len = shape
q_idx = np.arange(q_seq_len, dtype=np.int32)
kv_idx = np.arange(kv_seq_len, dtype=np.int32)
mask = np.ones((q_seq_len, kv_seq_len), dtype=np.bool_)
left, right = window_size
if left is not None:
mask = mask & (q_idx[:, None] - left + offset <= kv_idx[None, :])
if right is not None:
mask = mask & (q_idx[:, None] + right + offset >= kv_idx[None, :])
return mask.astype(np.bool_)
def make_chunk_attention_mask(
shape: tuple[int, int], chunk_size: int
) -> np.ndarray:
"""Makes a chunked causal attention mask.
Args:
shape: The desired shape of the mask (q_seq_len, kv_seq_len).
chunk_size: The size of the attention chunks.
Returns:
A boolean mask of shape `mask_shape` where True indicates attention is
allowed according to chunked causal rules, and False otherwise.
Raises:
ValueError: If chunk_size is not positive.
"""
if chunk_size <= 0:
raise ValueError('chunk_size must be positive')
q_seq_len, kv_seq_len = shape
q_idx = np.arange(q_seq_len, dtype=np.int32)
kv_idx = np.arange(kv_seq_len, dtype=np.int32)
# chunk mask calculation
same_chunk = (q_idx[:, None] // chunk_size) == (kv_idx[None, :] // chunk_size)
mask = same_chunk & (q_idx[:, None] >= kv_idx[None, :])
return mask
def make_random_mask(
shape: tuple[int, int], sparsity: float, seed: int
) -> np.ndarray:
"""Makes a random attention mask."""
np.random.seed(seed)
return np.random.binomial(n=1, p=1.0 - sparsity, size=shape).astype(np.bool_)
@dataclasses.dataclass
|
Mask
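A tiny worked example of the index comparisons used by the mask helpers above, on a 4x4 case so the patterns are easy to eyeball (window (1, 1) for the local mask, chunk_size=2 for the chunked mask).
import numpy as np

q_idx = np.arange(4)[:, None]
kv_idx = np.arange(4)[None, :]

causal = (q_idx >= kv_idx).astype(int)
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]

local = ((q_idx - 1 <= kv_idx) & (q_idx + 1 >= kv_idx)).astype(int)
# [[1 1 0 0]
#  [1 1 1 0]
#  [0 1 1 1]
#  [0 0 1 1]]

chunked = (((q_idx // 2) == (kv_idx // 2)) & (q_idx >= kv_idx)).astype(int)
# [[1 0 0 0]
#  [1 1 0 0]
#  [0 0 1 0]
#  [0 0 1 1]]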
|
python
|
numba__numba
|
numba/tests/test_typeguard.py
|
{
"start": 329,
"end": 1153
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
import typeguard
# This is a test class invariant but the Numba multiprocesses test
# runner doesn't respect `setUpClass` so just use `setUp`.
# typeguard 3+ uses typeguard.TypeCheckError, 2.x uses TypeError
self._exception_type = getattr(typeguard, 'TypeCheckError', TypeError)
def test_check_args(self):
with self.assertRaises(self._exception_type):
guard_args(float(1.2))
def test_check_ret(self):
with self.assertRaises(self._exception_type):
guard_ret(float(1.2))
def test_check_does_not_work_with_inner_func(self):
def guard(val: int) -> int:
return
guard(float(1.2))
if __name__ == '__main__':
unittest.main()
|
TestTypeGuard
|
python
|
eventlet__eventlet
|
eventlet/green/http/cookiejar.py
|
{
"start": 69155,
"end": 73886
}
|
class ____(FileCookieJar):
"""
The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
"Set-Cookie3" is the format used by the libwww-perl library, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
Additional methods
as_lwp_str(ignore_discard=True, ignore_expires=True)
"""
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
"""Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
"""
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""])
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
with open(filename, "w") as f:
# There really isn't an LWP Cookies 2.0 format, but this indicates
# that there is extra information in here (domain_dot and
# port_spec) while still being compatible with libwww-perl, I hope.
f.write("#LWP-Cookies-2.0\n")
f.write(self.as_lwp_str(ignore_discard, ignore_expires))
def _really_load(self, f, filename, ignore_discard, ignore_expires):
magic = f.readline()
if not self.magic_re.search(magic):
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
"file" % filename)
raise LoadError(msg)
now = time.time()
header = "Set-Cookie3:"
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
"secure", "discard")
value_attrs = ("version",
"port", "path", "domain",
"expires",
"comment", "commenturl")
try:
while 1:
line = f.readline()
if line == "": break
if not line.startswith(header):
continue
line = line[len(header):].strip()
for data in split_header_words([line]):
name, value = data[0]
standard = {}
rest = {}
for k in boolean_attrs:
standard[k] = False
for k, v in data[1:]:
if k is not None:
lc = k.lower()
else:
lc = None
# don't lose case distinction for unknown fields
if (lc in value_attrs) or (lc in boolean_attrs):
k = lc
if k in boolean_attrs:
if v is None: v = True
standard[k] = v
elif k in value_attrs:
standard[k] = v
else:
rest[k] = v
h = standard.get
expires = h("expires")
discard = h("discard")
if expires is not None:
expires = iso2time(expires)
if expires is None:
discard = True
domain = h("domain")
domain_specified = domain.startswith(".")
c = Cookie(h("version"), name, value,
h("port"), h("port_spec"),
domain, domain_specified, h("domain_dot"),
h("path"), h("path_spec"),
h("secure"),
expires,
discard,
h("comment"),
h("commenturl"),
rest)
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except OSError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
(filename, line))
|
LWPCookieJar
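A minimal round-trip sketch with the standard-library LWPCookieJar (the class above is eventlet's green port of the same API): build one cookie, save it in Set-Cookie3 form, and load it back.
from http.cookiejar import Cookie, LWPCookieJar

jar = LWPCookieJar("cookies.lwp")
jar.set_cookie(Cookie(
    version=0, name="session", value="abc123", port=None, port_specified=False,
    domain=".example.com", domain_specified=True, domain_initial_dot=True,
    path="/", path_specified=True, secure=False, expires=None, discard=True,
    comment=None, comment_url=None, rest={},
))
jar.save(ignore_discard=True)   # writes "#LWP-Cookies-2.0" plus one Set-Cookie3 line

reloaded = LWPCookieJar("cookies.lwp")
reloaded.load(ignore_discard=True)
print(len(reloaded))            # 1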
|
python
|
pypa__setuptools
|
setuptools/_distutils/command/bdist_dumb.py
|
{
"start": 461,
"end": 4631
}
|
class ____(Command):
description = "create a \"dumb\" built distribution"
user_options = [
('bdist-dir=', 'd', "temporary directory for creating the distribution"),
(
'plat-name=',
'p',
"platform name to embed in generated filenames "
f"[default: {get_platform()}]",
),
(
'format=',
'f',
"archive format to create (tar, gztar, bztar, xztar, ztar, zip)",
),
(
'keep-temp',
'k',
"keep the pseudo-installation tree around after creating the distribution archive",
),
('dist-dir=', 'd', "directory to put final built distributions in"),
('skip-build', None, "skip rebuilding everything (for testing/debugging)"),
(
'relative',
None,
"build the archive using relative paths [default: false]",
),
(
'owner=',
'u',
"Owner name used when creating a tar file [default: current user]",
),
(
'group=',
'g',
"Group name used when creating a tar file [default: current group]",
),
]
boolean_options: ClassVar[list[str]] = ['keep-temp', 'skip-build', 'relative']
default_format = {'posix': 'gztar', 'nt': 'zip'}
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = False
self.dist_dir = None
self.skip_build = None
self.relative = False
self.owner = None
self.group = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create dumb built distributions "
f"on platform {os.name}"
)
self.set_undefined_options(
'bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
('skip_build', 'skip_build'),
)
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=True)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = False
log.info("installing to %s", self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = f"{self.distribution.get_fullname()}.{self.plat_name}"
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if self.distribution.has_ext_modules() and (
install.install_base != install.install_platbase
):
raise DistutilsPlatformError(
"can't make a dumb built distribution where "
f"base and platbase are different ({install.install_base!r}, {install.install_platbase!r})"
)
else:
archive_root = os.path.join(
self.bdist_dir, ensure_relative(install.install_base)
)
# Make the archive
filename = self.make_archive(
pseudoinstall_root,
self.format,
root_dir=archive_root,
owner=self.owner,
group=self.group,
)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion, filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
|
bdist_dumb
|
python
|
scipy__scipy
|
scipy/io/matlab/_mio4.py
|
{
"start": 15037,
"end": 19563
}
|
class ____:
def __init__(self, file_writer):
self.file_stream = file_writer.file_stream
self.oned_as = file_writer.oned_as
def write_bytes(self, arr):
self.file_stream.write(arr.tobytes(order='F'))
def write_string(self, s):
self.file_stream.write(s)
def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0):
''' Write header for given data options
Parameters
----------
name : str
name of variable
shape : sequence
Shape of array as it will be read in matlab
P : int, optional
code for mat4 data type, one of ``miDOUBLE, miSINGLE, miINT32,
miINT16, miUINT16, miUINT8``
T : int, optional
code for mat4 matrix class, one of ``mxFULL_CLASS, mxCHAR_CLASS,
mxSPARSE_CLASS``
imagf : int, optional
flag indicating complex
'''
header = np.empty((), mdtypes_template['header'])
M = not SYS_LITTLE_ENDIAN
O = 0
header['mopt'] = (M * 1000 +
O * 100 +
P * 10 +
T)
header['mrows'] = shape[0]
header['ncols'] = shape[1]
header['imagf'] = imagf
header['namlen'] = len(name) + 1
self.write_bytes(header)
data = name + '\0'
self.write_string(data.encode('latin1'))
def write(self, arr, name):
''' Write matrix `arr`, with name `name`
Parameters
----------
arr : array_like
array to write
name : str
name in matlab workspace
'''
# we need to catch sparse first, because np.asarray returns an
# object array for scipy.sparse
if scipy.sparse.issparse(arr):
self.write_sparse(arr, name)
return
arr = np.asarray(arr)
dt = arr.dtype
if not dt.isnative:
arr = arr.astype(dt.newbyteorder('='))
dtt = dt.type
if dtt is np.object_:
raise TypeError('Cannot save object arrays in Mat4')
elif dtt is np.void:
raise TypeError('Cannot save void type arrays')
elif dtt in (np.str_, np.bytes_):
self.write_char(arr, name)
return
self.write_numeric(arr, name)
def write_numeric(self, arr, name):
arr = arr_to_2d(arr, self.oned_as)
imagf = arr.dtype.kind == 'c'
try:
P = np_to_mtypes[arr.dtype.str[1:]]
except KeyError:
if imagf:
arr = arr.astype('c128')
else:
arr = arr.astype('f8')
P = miDOUBLE
self.write_header(name,
arr.shape,
P=P,
T=mxFULL_CLASS,
imagf=imagf)
if imagf:
self.write_bytes(arr.real)
self.write_bytes(arr.imag)
else:
self.write_bytes(arr)
def write_char(self, arr, name):
if arr.dtype.type == np.str_ and arr.dtype.itemsize != np.dtype('U1').itemsize:
arr = arr_to_chars(arr)
arr = arr_to_2d(arr, self.oned_as)
dims = arr.shape
self.write_header(
name,
dims,
P=miUINT8,
T=mxCHAR_CLASS)
if arr.dtype.kind == 'U':
# Recode unicode to latin1
n_chars = math.prod(dims)
st_arr = np.ndarray(shape=(),
dtype=arr_dtype_number(arr, n_chars),
buffer=arr)
st = st_arr.item().encode('latin-1')
arr = np.ndarray(shape=dims, dtype='S1', buffer=st)
self.write_bytes(arr)
def write_sparse(self, arr, name):
''' Sparse matrices are 2-D
See docstring for VarReader4.read_sparse_array
'''
A = arr.tocoo() # convert to sparse COO format (ijv)
imagf = A.dtype.kind == 'c'
ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8')
ijv[:-1,0] = A.row
ijv[:-1,1] = A.col
ijv[:-1,0:2] += 1 # 1 based indexing
if imagf:
ijv[:-1,2] = A.data.real
ijv[:-1,3] = A.data.imag
else:
ijv[:-1,2] = A.data
ijv[-1,0:2] = A.shape
self.write_header(
name,
ijv.shape,
P=miDOUBLE,
T=mxSPARSE_CLASS)
self.write_bytes(ijv)
|
VarWriter4
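A worked example of the `mopt` field computed in write_header above, using the MAT v4 constants assumed by this module (miDOUBLE = 0, miUINT8 = 5, mxFULL_CLASS = 0, mxCHAR_CLASS = 1; M is 1 only on big-endian hosts and O is always 0).
def mopt(M, O, P, T):
    return M * 1000 + O * 100 + P * 10 + T

print(mopt(0, 0, 0, 0))  # 0    -> little-endian full matrix of doubles
print(mopt(0, 0, 5, 1))  # 51   -> little-endian uint8 char matrix (see write_char)
print(mopt(1, 0, 0, 0))  # 1000 -> same double matrix written on a big-endian host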
|
python
|
huggingface__transformers
|
src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
|
{
"start": 50936,
"end": 51889
}
|
class ____(nn.Module):
def __init__(self, config: SeamlessM4TConfig, ffn_dim: int):
super().__init__()
self.fc1 = nn.Linear(config.hidden_size, ffn_dim)
self.fc2 = nn.Linear(ffn_dim, config.hidden_size)
self.dropout = nn.Dropout(config.activation_dropout)
self.act = ACT2FN[config.activation_function]
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.fc1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
if (
isinstance(self.fc2.weight, torch.Tensor)
and hidden_states.dtype != self.fc2.weight.dtype
and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
):
hidden_states = hidden_states.to(self.fc2.weight.dtype)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
SeamlessM4TFeedForwardNetwork
|
python
|
sympy__sympy
|
sympy/plotting/series.py
|
{
"start": 61824,
"end": 67507
}
|
class ____(ParametricLineBaseSeries):
"""Representation for a line consisting of two parametric SymPy expressions
over a range."""
is_2Dline = True
def __init__(self, expr_x, expr_y, var_start_end, label="", **kwargs):
super().__init__(**kwargs)
self.expr_x = expr_x if callable(expr_x) else sympify(expr_x)
self.expr_y = expr_y if callable(expr_y) else sympify(expr_y)
self.expr = (self.expr_x, self.expr_y)
self.ranges = [var_start_end]
self._cast = float
self.use_cm = kwargs.get("use_cm", True)
self._set_parametric_line_label(label)
self._post_init()
def __str__(self):
return self._str_helper(
"parametric cartesian line: (%s, %s) for %s over %s" % (
str(self.expr_x),
str(self.expr_y),
str(self.var),
str((self.start, self.end))
))
def _adaptive_sampling(self):
try:
if callable(self.expr_x) and callable(self.expr_y):
f_x = self.expr_x
f_y = self.expr_y
else:
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
x, y, p = self._adaptive_sampling_helper(f_x, f_y)
except Exception as err: # noqa: BLE001
warnings.warn(
"The evaluation with %s failed.\n" % (
"NumPy/SciPy" if not self.modules else self.modules) +
"{}: {}\n".format(type(err).__name__, err) +
"Trying to evaluate the expression with Sympy, but it might "
"be a slow operation."
)
f_x = lambdify([self.var], self.expr_x, "sympy")
f_y = lambdify([self.var], self.expr_y, "sympy")
x, y, p = self._adaptive_sampling_helper(f_x, f_y)
return x, y, p
def _adaptive_sampling_helper(self, f_x, f_y):
"""The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
x_coords = []
y_coords = []
param = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
# Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = _adaptive_eval(f_x, param_new)
ynew = _adaptive_eval(f_y, param_new)
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
x_coords.append(q[0])
y_coords.append(q[1])
param.append(param_p)
# Sample irrespective of whether the line is flat till the
# depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif ((p[0] is None and q[1] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = [_adaptive_eval(f_x, t) for t in param_array]
y_array = [_adaptive_eval(f_y, t) for t in param_array]
if not all(x is None and y is None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
sample(param_array[i], param_array[i], point_a,
point_b, depth + 1)
# Sample further if one of the end points in None (i.e. a complex
# value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
x_coords.append(q[0])
y_coords.append(q[1])
param.append(param_p)
f_start_x = _adaptive_eval(f_x, self.start)
f_start_y = _adaptive_eval(f_y, self.start)
start = [f_start_x, f_start_y]
f_end_x = _adaptive_eval(f_x, self.end)
f_end_y = _adaptive_eval(f_y, self.end)
end = [f_end_x, f_end_y]
x_coords.append(f_start_x)
y_coords.append(f_start_y)
param.append(self.start)
sample(self.start, self.end, start, end, 0)
return x_coords, y_coords, param
### 3D lines
|
Parametric2DLineSeries
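A stripped-down sketch of the adaptive idea used above: recursively split a parameter interval at a jittered midpoint (the 0.45 + 0.1*rand trick guards against aliasing) until the three sampled points are nearly collinear. The perpendicular-distance flatness test stands in for the `flat` helper the excerpt assumes, and complex-value handling is left out.
import numpy as np

def nearly_flat(p, m, q, tol=1e-3):
    # distance from m to the chord p-q, via twice the triangle area over the chord length
    area2 = abs((m[0] - p[0]) * (q[1] - p[1]) - (q[0] - p[0]) * (m[1] - p[1]))
    return area2 <= tol * np.hypot(q[0] - p[0], q[1] - p[1])

def adaptive_sample(fx, fy, t0, t1, depth=0, max_depth=12):
    tm = t0 + (0.45 + 0.1 * np.random.rand()) * (t1 - t0)   # jittered midpoint
    p, m, q = [(fx(t), fy(t)) for t in (t0, tm, t1)]
    if depth >= max_depth or nearly_flat(p, m, q):
        return [p, q]
    left = adaptive_sample(fx, fy, t0, tm, depth + 1, max_depth)
    right = adaptive_sample(fx, fy, tm, t1, depth + 1, max_depth)
    return left + right[1:]                                  # drop the shared midpoint

pts = adaptive_sample(lambda t: t, np.sin, 0.0, 2 * np.pi)
print(len(pts))   # sampling is denser near the peaks of sin(t) than on the straighter stretches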
|
python
|
pyca__cryptography
|
src/cryptography/x509/general_name.py
|
{
"start": 5623,
"end": 6901
}
|
class ____(GeneralName):
def __init__(self, value: _IPAddressTypes) -> None:
if not isinstance(
value,
(
ipaddress.IPv4Address,
ipaddress.IPv6Address,
ipaddress.IPv4Network,
ipaddress.IPv6Network,
),
):
raise TypeError(
"value must be an instance of ipaddress.IPv4Address, "
"ipaddress.IPv6Address, ipaddress.IPv4Network, or "
"ipaddress.IPv6Network"
)
self._value = value
@property
def value(self) -> _IPAddressTypes:
return self._value
def _packed(self) -> bytes:
if isinstance(
self.value, (ipaddress.IPv4Address, ipaddress.IPv6Address)
):
return self.value.packed
else:
return (
self.value.network_address.packed + self.value.netmask.packed
)
def __repr__(self) -> str:
return f"<IPAddress(value={self.value})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, IPAddress):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
|
IPAddress
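A quick sketch of the packing rule implemented by _packed above, using the standard-library ipaddress types: a single address contributes its packed bytes, a network contributes the packed network address followed by the packed netmask.
import ipaddress

addr = ipaddress.IPv4Address("192.0.2.1")
net = ipaddress.IPv4Network("192.0.2.0/24")

print(addr.packed.hex())                                        # c0000201
print((net.network_address.packed + net.netmask.packed).hex())  # c0000200ffffff00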
|
python
|
getsentry__sentry
|
tests/sentry/middleware/test_access_log_middleware.py
|
{
"start": 13173,
"end": 13917
}
|
class ____(LogCaptureAPITestCase):
endpoint = "sentry-api-0-organization-members"
def setUp(self) -> None:
self.login_as(user=self.user)
def test_org_id_populated(self) -> None:
self._caplog.set_level(logging.INFO, logger="sentry")
self.get_success_response(
self.organization.slug,
qs_params={
"project": [-1],
"category": ["error"],
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(quantity)"],
},
)
tested_log = self.get_tested_log(args=[self.organization.slug])
assert tested_log.organization_id == str(self.organization.id)
|
TestOrganizationIdPresentForControl
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_discover_saved_query_detail.py
|
{
"start": 21871,
"end": 23779
}
|
class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.org_without_access = self.create_organization()
self.project_ids = [
self.create_project(organization=self.org).id,
self.create_project(organization=self.org).id,
]
q = {"fields": ["test"], "conditions": [], "limit": 10}
self.query = DiscoverSavedQuery.objects.create(
organization=self.org, created_by_id=self.user.id, name="Test query", query=q
)
self.query.set_projects(self.project_ids)
def url(self, query_id):
return reverse(
"sentry-api-0-discover-saved-query-visit",
kwargs={"organization_id_or_slug": self.org.slug, "query_id": query_id},
)
def test_visit_query(self) -> None:
last_visited = self.query.last_visited
assert last_visited is not None
assert self.query.visits == 1
with self.feature("organizations:discover-query"):
response = self.client.post(self.url(self.query.id))
assert response.status_code == 204
query = DiscoverSavedQuery.objects.get(id=self.query.id)
assert query.visits == 2
assert query.last_visited is not None
assert query.last_visited > last_visited
def test_visit_query_no_access(self) -> None:
last_visited = self.query.last_visited
assert self.query.visits == 1
with self.feature({"organizations:discover-query": False}):
response = self.client.post(self.url(self.query.id))
assert response.status_code == 404
query = DiscoverSavedQuery.objects.get(id=self.query.id)
assert query.visits == 1
assert query.last_visited == last_visited
|
OrganizationDiscoverQueryVisitTest
|
python
|
spyder-ide__spyder
|
spyder/plugins/pythonpath/widgets/pathmanager.py
|
{
"start": 1325,
"end": 1620
}
|
class ____:
MoveTop = 'move_top'
MoveUp = 'move_up'
MoveDown = 'move_down'
MoveToBottom = 'move_to_bottom'
AddPath = 'add_path'
RemovePath = 'remove_path'
ImportPaths = 'import_paths'
ExportPaths = 'export_paths'
Prioritize = 'prioritize'
|
PathManagerToolbuttons
|
python
|
numpy__numpy
|
numpy/_core/tests/test_numeric.py
|
{
"start": 140305,
"end": 142747
}
|
class ____:
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
def test_roll2d(self):
x2 = np.reshape(np.arange(10), (2, 5))
x2r = np.roll(x2, 1)
assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
x2r = np.roll(x2, 1, axis=0)
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, 1, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
# Roll multiple axes at once.
x2r = np.roll(x2, 1, axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (-1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (0, 1), axis=(0, 1))
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, (0, -1), axis=(0, 1))
assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
x2r = np.roll(x2, (1, 1), axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (-1, -1), axis=(0, 1))
assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
# Roll the same axis multiple times.
x2r = np.roll(x2, 1, axis=(0, 0))
assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
x2r = np.roll(x2, 1, axis=(1, 1))
assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
# Roll more than one turn in either direction.
x2r = np.roll(x2, 6, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, -4, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
def test_roll_empty(self):
x = np.array([])
assert_equal(np.roll(x, 1), np.array([]))
def test_roll_unsigned_shift(self):
x = np.arange(4)
shift = np.uint16(2)
assert_equal(np.roll(x, shift), np.roll(x, 2))
shift = np.uint64(2**63 + 2)
assert_equal(np.roll(x, shift), np.roll(x, 2))
def test_roll_big_int(self):
x = np.arange(4)
assert_equal(np.roll(x, 2**100), x)
|
TestRoll
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline_run_stats.py
|
{
"start": 2058,
"end": 2234
}
|
class ____(graphene.Union):
class Meta:
types = (GrapheneRunStatsSnapshot, GraphenePythonError)
name = "RunStatsSnapshotOrError"
|
GrapheneRunStatsSnapshotOrError
|
python
|
walkccc__LeetCode
|
solutions/2908. Minimum Sum of Mountain Triplets I/2908.py
|
{
"start": 0,
"end": 449
}
|
class ____:
# Same as 2908. Minimum Sum of Mountain Triplets I
def minimumSum(self, nums: list[int]) -> int:
ans = math.inf
minPrefix = list(itertools.accumulate(nums, min))
minSuffix = list(itertools.accumulate(reversed(nums), min))[::-1]
for i, num in enumerate(nums):
if num > minPrefix[i] and num > minSuffix[i]:
ans = min(ans, num + minPrefix[i] + minSuffix[i])
return -1 if ans == math.inf else ans
|
Solution
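The extracted span above omits the module-level imports the class relies on (`math` and `itertools`). A self-contained sketch of the same prefix/suffix running-minimum approach, checked against the problem's published examples, looks like this:

import itertools
import math

def minimum_mountain_triplet_sum(nums: list[int]) -> int:
    # Standalone restatement of the approach in the snippet above.
    ans = math.inf
    min_prefix = list(itertools.accumulate(nums, min))
    min_suffix = list(itertools.accumulate(reversed(nums), min))[::-1]
    for i, num in enumerate(nums):
        # num must be strictly larger than some element on each side.
        if num > min_prefix[i] and num > min_suffix[i]:
            ans = min(ans, num + min_prefix[i] + min_suffix[i])
    return -1 if ans == math.inf else ans

assert minimum_mountain_triplet_sum([8, 6, 1, 5, 3]) == 9       # 1 + 5 + 3
assert minimum_mountain_triplet_sum([5, 4, 8, 7, 10, 2]) == 13  # 4 + 7 + 2
assert minimum_mountain_triplet_sum([6, 5, 4, 3, 4, 5]) == -1   # no mountain triplet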
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/noop_compute_log_manager.py
|
{
"start": 497,
"end": 2325
}
|
class ____(ComputeLogManager, ConfigurableClass):
"""When enabled for a Dagster instance, stdout and stderr will not be available for any step."""
def __init__(self, inst_data: Optional[ConfigurableClassData] = None):
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {}
@classmethod
def from_config_value(
cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]
) -> Self:
return cls(inst_data=inst_data, **config_value)
@contextmanager
def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:
yield CapturedLogContext(log_key=log_key)
def is_capture_complete(self, log_key: Sequence[str]):
return True
@contextmanager
def open_log_stream(
self, log_key: Sequence[str], io_type: ComputeIOType
) -> Generator[Optional[IO], None, None]:
yield None
def get_log_data_for_type(
self,
log_key: Sequence[str],
io_type: ComputeIOType,
offset: int,
max_bytes: Optional[int],
) -> tuple[Optional[bytes], int]:
return None, 0
def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:
return CapturedLogMetadata()
def delete_logs(
self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None
):
pass
def subscribe(
self, log_key: Sequence[str], cursor: Optional[str] = None
) -> CapturedLogSubscription:
return CapturedLogSubscription(self, log_key, cursor)
def unsubscribe(self, subscription: CapturedLogSubscription):
pass
|
NoOpComputeLogManager
|
python
|
numpy__numpy
|
tools/swig/test/testFortran.py
|
{
"start": 303,
"end": 1407
}
|
class ____(unittest.TestCase):
def __init__(self, methodName="runTests"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test (type* IN_FARRAY2, int DIM1, int DIM2) typemap
def testSecondElementFortran(self):
"Test Fortran matrix initialized from reshaped NumPy fortranarray"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
second = Fortran.__dict__[self.typeStr + "SecondElement"]
matrix = np.asfortranarray(np.arange(9).reshape(3, 3),
self.typeCode)
self.assertEqual(second(matrix), 3)
def testSecondElementObject(self):
"Test Fortran matrix initialized from nested list fortranarray"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
second = Fortran.__dict__[self.typeStr + "SecondElement"]
matrix = np.asfortranarray([[0, 1, 2], [3, 4, 5], [6, 7, 8]], self.typeCode)
self.assertEqual(second(matrix), 3)
######################################################################
|
FortranTestCase
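The expectation that the "second element" of the 3x3 arange matrix is 3 follows from the column-major layout that np.asfortranarray produces: walking the underlying buffer visits column 0 first, so the element after 0 is 3 rather than 1. A short standalone check of that layout:

import numpy as np

a = np.arange(9).reshape(3, 3)   # row-major (C order) by default
f = np.asfortranarray(a)         # same values, column-major memory layout

assert a.flags["C_CONTIGUOUS"] and not a.flags["F_CONTIGUOUS"]
assert f.flags["F_CONTIGUOUS"] and not f.flags["C_CONTIGUOUS"]
# Memory order of the Fortran copy walks columns first: 0, 3, 6, 1, 4, 7, ...
assert list(f.ravel(order="K")) == [0, 3, 6, 1, 4, 7, 2, 5, 8]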
|
python
|
doocs__leetcode
|
solution/1300-1399/1320.Minimum Distance to Type a Word Using Two Fingers/Solution.py
|
{
"start": 0,
"end": 1133
}
|
class ____:
def minimumDistance(self, word: str) -> int:
def dist(a: int, b: int) -> int:
x1, y1 = divmod(a, 6)
x2, y2 = divmod(b, 6)
return abs(x1 - x2) + abs(y1 - y2)
n = len(word)
f = [[[inf] * 26 for _ in range(26)] for _ in range(n)]
for j in range(26):
f[0][ord(word[0]) - ord('A')][j] = 0
f[0][j][ord(word[0]) - ord('A')] = 0
for i in range(1, n):
a, b = ord(word[i - 1]) - ord('A'), ord(word[i]) - ord('A')
d = dist(a, b)
for j in range(26):
f[i][b][j] = min(f[i][b][j], f[i - 1][a][j] + d)
f[i][j][b] = min(f[i][j][b], f[i - 1][j][a] + d)
if j == a:
for k in range(26):
t = dist(k, b)
f[i][b][j] = min(f[i][b][j], f[i - 1][k][a] + t)
f[i][j][b] = min(f[i][j][b], f[i - 1][a][k] + t)
a = min(f[n - 1][ord(word[-1]) - ord('A')])
b = min(f[n - 1][j][ord(word[-1]) - ord('A')] for j in range(26))
return int(min(a, b))
|
Solution
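The class above uses `inf`, which in the original file comes from a module-level import outside the extracted span (for example `from math import inf`). With that import in place, the problem's published examples give a quick sanity check of the two-finger DP:

from math import inf  # assumed module-level import, outside the extracted span

sol = Solution()
assert sol.minimumDistance("CAKE") == 3   # e.g. finger 1 types C, A (cost 2); finger 2 types K, E (cost 1)
assert sol.minimumDistance("HAPPY") == 6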
|
python
|
pypa__installer
|
tests/test_utils.py
|
{
"start": 6110,
"end": 7693
}
|
class ____:
@pytest.mark.parametrize(
("script", "expected"),
[
pytest.param("", [], id="empty"),
pytest.param(
"""
[foo]
foo = foo.bar
""",
[],
id="unrelated",
),
pytest.param(
"""
[console_scripts]
package = package.__main__:package
""",
[
("package", "package.__main__", "package", "console"),
],
id="cli",
),
pytest.param(
"""
[gui_scripts]
package = package.__main__:package
""",
[
("package", "package.__main__", "package", "gui"),
],
id="gui",
),
pytest.param(
"""
[console_scripts]
magic-cli = magic.cli:main
[gui_scripts]
magic-gui = magic.gui:main
""",
[
("magic-cli", "magic.cli", "main", "console"),
("magic-gui", "magic.gui", "main", "gui"),
],
id="cli-and-gui",
),
],
)
def test_valid(self, script, expected):
iterable = parse_entrypoints(textwrap.dedent(script))
assert list(iterable) == expected, expected
|
TestParseEntryPoints
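The fixture strings above are standard entry_points.txt sections. The expected tuples can be reproduced with a rough stdlib-only parser; this sketch is for orientation only and is not the `parse_entrypoints` implementation from `installer.utils`.

import configparser
import textwrap

def iter_entrypoints(text):
    # Parse [console_scripts] / [gui_scripts] sections into
    # (name, module, attribute, kind) tuples.
    parser = configparser.ConfigParser(delimiters=("=",))
    parser.read_string(text)
    for section in ("console_scripts", "gui_scripts"):
        if parser.has_section(section):
            kind = "console" if section == "console_scripts" else "gui"
            for name, value in parser.items(section):
                module, _, attr = value.partition(":")
                yield name, module.strip(), attr.strip(), kind

script = textwrap.dedent("""
    [console_scripts]
    magic-cli = magic.cli:main
    [gui_scripts]
    magic-gui = magic.gui:main
""")
assert list(iter_entrypoints(script)) == [
    ("magic-cli", "magic.cli", "main", "console"),
    ("magic-gui", "magic.gui", "main", "gui"),
]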
|
python
|
cython__cython
|
docs/examples/userguide/extension_types/extendable_animal.py
|
{
"start": 162,
"end": 293
}
|
class ____(Animal): # Note that we use class, not cdef class
pass
dog = ExtendableAnimal(4)
dog.has_tail = True
|
ExtendableAnimal
|
python
|
getsentry__sentry
|
src/sentry/notifications/notification_action/action_validation.py
|
{
"start": 5012,
"end": 5184
}
|
class ____(TicketingActionValidatorHandler):
provider = Action.Type.GITHUB
@action_validator_registry.register(Action.Type.GITHUB_ENTERPRISE)
|
GithubActionValidatorHandler
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/models.py
|
{
"start": 108675,
"end": 109814
}
|
class ____(Response):
"""
Response of models.make_private endpoint.
:param updated: Number of models updated
:type updated: int
"""
_service = "models"
_action = "make_private"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of models updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePrivateResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
|
MakePrivateResponse
|
python
|
plotly__plotly.py
|
_plotly_utils/basevalidators.py
|
{
"start": 8169,
"end": 12303
}
|
class ____(object):
"""
Base class for all validator classes
"""
def __init__(self, plotly_name, parent_name, role=None, **_):
"""
Construct a validator instance
Parameters
----------
plotly_name : str
Name of the property being validated
parent_name : str
Names of all of the ancestors of this property joined on '.'
characters. e.g.
plotly_name == 'range' and parent_name == 'layout.xaxis'
role : str
The role string for the property as specified in
plot-schema.json
"""
self.parent_name = parent_name
self.plotly_name = plotly_name
self.role = role
self.array_ok = False
def description(self):
"""
Returns a string that describes the values that are acceptable
to the validator
Should start with:
The '{plotly_name}' property is a...
        For consistency, string should have leading 4-space indent
"""
raise NotImplementedError()
def raise_invalid_val(self, v, inds=None):
"""
Helper method to raise an informative exception when an invalid
value is passed to the validate_coerce method.
Parameters
----------
v :
Value that was input to validate_coerce and could not be coerced
inds: list of int or None (default)
Indexes to display after property name. e.g. if self.plotly_name
is 'prop' and inds=[2, 1] then the name in the validation error
            message will be 'prop[2][1]'
Raises
-------
ValueError
"""
name = self.plotly_name
if inds:
for i in inds:
name += "[" + str(i) + "]"
raise ValueError(
"""
Invalid value of type {typ} received for the '{name}' property of {pname}
Received value: {v}
{valid_clr_desc}""".format(
name=name,
pname=self.parent_name,
typ=type_str(v),
v=repr(v),
valid_clr_desc=self.description(),
)
)
def raise_invalid_elements(self, invalid_els):
if invalid_els:
raise ValueError(
"""
Invalid element(s) received for the '{name}' property of {pname}
Invalid elements include: {invalid}
{valid_clr_desc}""".format(
name=self.plotly_name,
pname=self.parent_name,
invalid=invalid_els[:10],
valid_clr_desc=self.description(),
)
)
def validate_coerce(self, v):
"""
Validate whether an input value is compatible with this property,
        and coerce the value to be compatible if possible.
Parameters
----------
v
The input value to be validated
Raises
------
ValueError
if `v` cannot be coerced into a compatible form
Returns
-------
The input `v` in a form that's compatible with this property
"""
raise NotImplementedError()
def present(self, v):
"""
Convert output value of a previous call to `validate_coerce` into a
        form suitable to be returned to the user upon property
access.
Note: The value returned by present must be either immutable or an
instance of BasePlotlyType, otherwise the value could be mutated by
the user and we wouldn't get notified about the change.
Parameters
----------
v
            A value that was the output of a previous call to the
`validate_coerce` method on the same object
Returns
-------
"""
if is_homogeneous_array(v):
# Note: numpy array was already coerced into read-only form so
# we don't need to copy it here.
return v
elif is_simple_array(v):
return tuple(v)
else:
return v
|
BaseValidator
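Concrete validators are expected to fill in `description()` and `validate_coerce()` and to route bad input through `raise_invalid_val()`. A minimal hypothetical subclass of the class above is sketched below; note that `raise_invalid_val` additionally depends on module-level helpers such as `type_str`, which lie outside this excerpt.

class PositiveIntegerValidator(BaseValidator):
    # Hypothetical validator following the contract defined above.
    def description(self):
        return (
            "    The '{name}' property is a positive integer"
            .format(name=self.plotly_name)
        )

    def validate_coerce(self, v):
        # Accept ints and integral floats, reject everything else.
        if isinstance(v, bool) or not isinstance(v, (int, float)):
            self.raise_invalid_val(v)
        if isinstance(v, float) and not v.is_integer():
            self.raise_invalid_val(v)
        v = int(v)
        if v <= 0:
            self.raise_invalid_val(v)
        return v

validator = PositiveIntegerValidator("size", "layout.font")
assert validator.validate_coerce(12.0) == 12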
|
python
|
django__django
|
tests/auth_tests/test_hashers.py
|
{
"start": 1047,
"end": 23838
}
|
class ____(SimpleTestCase):
def test_simple(self):
encoded = make_password("lètmein")
self.assertTrue(encoded.startswith("pbkdf2_sha256$"))
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password("lètmein", encoded))
self.assertFalse(check_password("lètmeinz", encoded))
# Blank passwords
blank_encoded = make_password("")
self.assertTrue(blank_encoded.startswith("pbkdf2_sha256$"))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password("", blank_encoded))
self.assertFalse(check_password(" ", blank_encoded))
async def test_acheck_password(self):
encoded = make_password("lètmein")
self.assertIs(await acheck_password("lètmein", encoded), True)
self.assertIs(await acheck_password("lètmeinz", encoded), False)
# Blank passwords.
blank_encoded = make_password("")
self.assertIs(await acheck_password("", blank_encoded), True)
self.assertIs(await acheck_password(" ", blank_encoded), False)
def test_bytes(self):
encoded = make_password(b"bytes_password")
self.assertTrue(encoded.startswith("pbkdf2_sha256$"))
self.assertIs(is_password_usable(encoded), True)
self.assertIs(check_password(b"bytes_password", encoded), True)
def test_invalid_password(self):
msg = "Password must be a string or bytes, got int."
with self.assertRaisesMessage(TypeError, msg):
make_password(1)
def test_pbkdf2(self):
encoded = make_password("lètmein", "seasalt", "pbkdf2_sha256")
self.assertEqual(
encoded,
"pbkdf2_sha256$1500000$"
"seasalt$P4UiMPVduVWIL/oS1GzH+IofsccjJNM5hUTikBvi5to=",
)
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password("lètmein", encoded))
self.assertFalse(check_password("lètmeinz", encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
# Blank passwords
blank_encoded = make_password("", "seasalt", "pbkdf2_sha256")
self.assertTrue(blank_encoded.startswith("pbkdf2_sha256$"))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password("", blank_encoded))
self.assertFalse(check_password(" ", blank_encoded))
# Salt entropy check.
hasher = get_hasher("pbkdf2_sha256")
encoded_weak_salt = make_password("lètmein", "iodizedsalt", "pbkdf2_sha256")
encoded_strong_salt = make_password("lètmein", hasher.salt(), "pbkdf2_sha256")
self.assertIs(hasher.must_update(encoded_weak_salt), True)
self.assertIs(hasher.must_update(encoded_strong_salt), False)
@override_settings(
PASSWORD_HASHERS=["django.contrib.auth.hashers.MD5PasswordHasher"]
)
def test_md5(self):
encoded = make_password("lètmein", "seasalt", "md5")
self.assertEqual(encoded, "md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3")
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password("lètmein", encoded))
self.assertFalse(check_password("lètmeinz", encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "md5")
# Blank passwords
blank_encoded = make_password("", "seasalt", "md5")
self.assertTrue(blank_encoded.startswith("md5$"))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password("", blank_encoded))
self.assertFalse(check_password(" ", blank_encoded))
# Salt entropy check.
hasher = get_hasher("md5")
encoded_weak_salt = make_password("lètmein", "iodizedsalt", "md5")
encoded_strong_salt = make_password("lètmein", hasher.salt(), "md5")
self.assertIs(hasher.must_update(encoded_weak_salt), True)
self.assertIs(hasher.must_update(encoded_strong_salt), False)
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_sha256(self):
encoded = make_password("lètmein", hasher="bcrypt_sha256")
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith("bcrypt_sha256$"))
self.assertTrue(check_password("lètmein", encoded))
self.assertFalse(check_password("lètmeinz", encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")
# password truncation no longer works
password = (
"VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5"
"JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN"
)
encoded = make_password(password, hasher="bcrypt_sha256")
self.assertTrue(check_password(password, encoded))
self.assertFalse(check_password(password[:72], encoded))
# Blank passwords
blank_encoded = make_password("", hasher="bcrypt_sha256")
self.assertTrue(blank_encoded.startswith("bcrypt_sha256$"))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password("", blank_encoded))
self.assertFalse(check_password(" ", blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
@override_settings(
PASSWORD_HASHERS=["django.contrib.auth.hashers.BCryptPasswordHasher"]
)
def test_bcrypt(self):
encoded = make_password("lètmein", hasher="bcrypt")
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith("bcrypt$"))
self.assertTrue(check_password("lètmein", encoded))
self.assertFalse(check_password("lètmeinz", encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt")
# Blank passwords
blank_encoded = make_password("", hasher="bcrypt")
self.assertTrue(blank_encoded.startswith("bcrypt$"))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password("", blank_encoded))
self.assertFalse(check_password(" ", blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
@override_settings(
PASSWORD_HASHERS=["django.contrib.auth.hashers.BCryptPasswordHasher"]
)
def test_bcrypt_upgrade(self):
hasher = get_hasher("bcrypt")
self.assertEqual("bcrypt", hasher.algorithm)
self.assertNotEqual(hasher.rounds, 4)
old_rounds = hasher.rounds
try:
# Generate a password with 4 rounds.
hasher.rounds = 4
encoded = make_password("letmein", hasher="bcrypt")
rounds = hasher.safe_summary(encoded)["work factor"]
self.assertEqual(rounds, 4)
state = {"upgraded": False}
def setter(password):
state["upgraded"] = True
# No upgrade is triggered.
self.assertTrue(check_password("letmein", encoded, setter, "bcrypt"))
self.assertFalse(state["upgraded"])
# Revert to the old rounds count and ...
hasher.rounds = old_rounds
# ... check if the password would get updated to the new count.
self.assertTrue(check_password("letmein", encoded, setter, "bcrypt"))
self.assertTrue(state["upgraded"])
finally:
hasher.rounds = old_rounds
@skipUnless(bcrypt, "bcrypt not installed")
@override_settings(
PASSWORD_HASHERS=["django.contrib.auth.hashers.BCryptPasswordHasher"]
)
def test_bcrypt_harden_runtime(self):
hasher = get_hasher("bcrypt")
self.assertEqual("bcrypt", hasher.algorithm)
with mock.patch.object(hasher, "rounds", 4):
encoded = make_password("letmein", hasher="bcrypt")
with (
mock.patch.object(hasher, "rounds", 6),
mock.patch.object(hasher, "encode", side_effect=hasher.encode),
):
hasher.harden_runtime("wrong_password", encoded)
# Increasing rounds from 4 to 6 means an increase of 4 in workload,
# therefore hardening should run 3 times to make the timing the
# same (the original encode() call already ran once).
self.assertEqual(hasher.encode.call_count, 3)
# Get the original salt (includes the original workload factor)
algorithm, data = encoded.split("$", 1)
expected_call = (("wrong_password", data[:29].encode()),)
self.assertEqual(hasher.encode.call_args_list, [expected_call] * 3)
def test_unusable(self):
encoded = make_password(None)
self.assertEqual(
len(encoded),
len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH,
)
self.assertFalse(is_password_usable(encoded))
self.assertFalse(check_password(None, encoded))
self.assertFalse(check_password(encoded, encoded))
self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded))
self.assertFalse(check_password("", encoded))
self.assertFalse(check_password("lètmein", encoded))
self.assertFalse(check_password("lètmeinz", encoded))
with self.assertRaisesMessage(ValueError, "Unknown password hashing algorithm"):
identify_hasher(encoded)
# Assert that the unusable passwords actually contain a random part.
# This might fail one day due to a hash collision.
self.assertNotEqual(encoded, make_password(None), "Random password collision?")
def test_unspecified_password(self):
"""
Makes sure specifying no plain password with a valid encoded password
returns `False`.
"""
self.assertFalse(check_password(None, make_password("lètmein")))
def test_bad_algorithm(self):
msg = (
"Unknown password hashing algorithm '%s'. Did you specify it in "
"the PASSWORD_HASHERS setting?"
)
with self.assertRaisesMessage(ValueError, msg % "lolcat"):
make_password("lètmein", hasher="lolcat")
with self.assertRaisesMessage(ValueError, msg % "lolcat"):
identify_hasher("lolcat$salt$hash")
def test_is_password_usable(self):
passwords = ("lètmein_badencoded", "", None)
for password in passwords:
with self.subTest(password=password):
self.assertIs(is_password_usable(password), True)
def test_low_level_pbkdf2(self):
hasher = PBKDF2PasswordHasher()
encoded = hasher.encode("lètmein", "seasalt2")
self.assertEqual(
encoded,
"pbkdf2_sha256$1500000$"
"seasalt2$xWKIh704updzhxL+vMfPbhVsHljK62FyE988AtcoHU4=",
)
self.assertTrue(hasher.verify("lètmein", encoded))
def test_low_level_pbkdf2_sha1(self):
hasher = PBKDF2SHA1PasswordHasher()
encoded = hasher.encode("lètmein", "seasalt2")
self.assertEqual(
encoded, "pbkdf2_sha1$1500000$seasalt2$ep4Ou2hnt2mlvMRsIjUln0Z5MYY="
)
self.assertTrue(hasher.verify("lètmein", encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_salt_check(self):
hasher = BCryptPasswordHasher()
encoded = hasher.encode("lètmein", hasher.salt())
self.assertIs(hasher.must_update(encoded), False)
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcryptsha256_salt_check(self):
hasher = BCryptSHA256PasswordHasher()
encoded = hasher.encode("lètmein", hasher.salt())
self.assertIs(hasher.must_update(encoded), False)
@override_settings(
PASSWORD_HASHERS=[
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.MD5PasswordHasher",
],
)
def test_upgrade(self):
self.assertEqual("pbkdf2_sha256", get_hasher("default").algorithm)
for algo in ("pbkdf2_sha1", "md5"):
with self.subTest(algo=algo):
encoded = make_password("lètmein", hasher=algo)
state = {"upgraded": False}
def setter(password):
state["upgraded"] = True
self.assertTrue(check_password("lètmein", encoded, setter))
self.assertTrue(state["upgraded"])
def test_no_upgrade(self):
encoded = make_password("lètmein")
state = {"upgraded": False}
def setter():
state["upgraded"] = True
self.assertFalse(check_password("WRONG", encoded, setter))
self.assertFalse(state["upgraded"])
@override_settings(
PASSWORD_HASHERS=[
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.MD5PasswordHasher",
],
)
def test_no_upgrade_on_incorrect_pass(self):
self.assertEqual("pbkdf2_sha256", get_hasher("default").algorithm)
for algo in ("pbkdf2_sha1", "md5"):
with self.subTest(algo=algo):
encoded = make_password("lètmein", hasher=algo)
state = {"upgraded": False}
def setter():
state["upgraded"] = True
self.assertFalse(check_password("WRONG", encoded, setter))
self.assertFalse(state["upgraded"])
def test_pbkdf2_upgrade(self):
hasher = get_hasher("default")
self.assertEqual("pbkdf2_sha256", hasher.algorithm)
self.assertNotEqual(hasher.iterations, 1)
old_iterations = hasher.iterations
try:
# Generate a password with 1 iteration.
hasher.iterations = 1
encoded = make_password("letmein")
algo, iterations, salt, hash = encoded.split("$", 3)
self.assertEqual(iterations, "1")
state = {"upgraded": False}
def setter(password):
state["upgraded"] = True
# No upgrade is triggered
self.assertTrue(check_password("letmein", encoded, setter))
self.assertFalse(state["upgraded"])
# Revert to the old iteration count and ...
hasher.iterations = old_iterations
# ... check if the password would get updated to the new iteration
# count.
self.assertTrue(check_password("letmein", encoded, setter))
self.assertTrue(state["upgraded"])
finally:
hasher.iterations = old_iterations
def test_pbkdf2_harden_runtime(self):
hasher = get_hasher("default")
self.assertEqual("pbkdf2_sha256", hasher.algorithm)
with mock.patch.object(hasher, "iterations", 1):
encoded = make_password("letmein")
with (
mock.patch.object(hasher, "iterations", 6),
mock.patch.object(hasher, "encode", side_effect=hasher.encode),
):
hasher.harden_runtime("wrong_password", encoded)
# Encode should get called once ...
self.assertEqual(hasher.encode.call_count, 1)
# ... with the original salt and 5 iterations.
algorithm, iterations, salt, hash = encoded.split("$", 3)
expected_call = (("wrong_password", salt, 5),)
self.assertEqual(hasher.encode.call_args, expected_call)
def test_pbkdf2_upgrade_new_hasher(self):
hasher = get_hasher("default")
self.assertEqual("pbkdf2_sha256", hasher.algorithm)
self.assertNotEqual(hasher.iterations, 1)
state = {"upgraded": False}
def setter(password):
state["upgraded"] = True
with self.settings(
PASSWORD_HASHERS=["auth_tests.test_hashers.PBKDF2SingleIterationHasher"]
):
encoded = make_password("letmein")
algo, iterations, salt, hash = encoded.split("$", 3)
self.assertEqual(iterations, "1")
# No upgrade is triggered
self.assertTrue(check_password("letmein", encoded, setter))
self.assertFalse(state["upgraded"])
# Revert to the old iteration count and check if the password would get
# updated to the new iteration count.
with self.settings(
PASSWORD_HASHERS=[
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"auth_tests.test_hashers.PBKDF2SingleIterationHasher",
]
):
self.assertTrue(check_password("letmein", encoded, setter))
self.assertTrue(state["upgraded"])
def test_check_password_calls_harden_runtime(self):
hasher = get_hasher("default")
encoded = make_password("letmein")
with (
mock.patch.object(hasher, "harden_runtime"),
mock.patch.object(hasher, "must_update", return_value=True),
):
# Correct password supplied, no hardening needed
check_password("letmein", encoded)
self.assertEqual(hasher.harden_runtime.call_count, 0)
# Wrong password supplied, hardening needed
check_password("wrong_password", encoded)
self.assertEqual(hasher.harden_runtime.call_count, 1)
@contextmanager
def assertMakePasswordCalled(self, password, encoded, hasher_side_effect):
hasher = get_hasher("default")
with (
mock.patch(
"django.contrib.auth.hashers.identify_hasher",
side_effect=hasher_side_effect,
) as mock_identify_hasher,
mock.patch(
"django.contrib.auth.hashers.make_password"
) as mock_make_password,
mock.patch(
"django.contrib.auth.hashers.get_random_string",
side_effect=lambda size: "x" * size,
),
mock.patch.object(hasher, "verify"),
):
# Ensure make_password is called to standardize timing.
yield
self.assertEqual(hasher.verify.call_count, 0)
self.assertEqual(mock_identify_hasher.mock_calls, [mock.call(encoded)])
self.assertEqual(
mock_make_password.mock_calls,
[mock.call("x" * UNUSABLE_PASSWORD_SUFFIX_LENGTH)],
)
def test_check_password_calls_make_password_to_fake_runtime(self):
cases = [
(None, None, None), # no plain text password provided
("foo", make_password(password=None), None), # unusable encoded
("letmein", make_password(password="letmein"), ValueError), # valid encoded
]
for password, encoded, hasher_side_effect in cases:
with (
self.subTest(encoded=encoded),
self.assertMakePasswordCalled(password, encoded, hasher_side_effect),
):
check_password(password, encoded)
async def test_acheck_password_calls_make_password_to_fake_runtime(self):
cases = [
(None, None, None), # no plain text password provided
("foo", make_password(password=None), None), # unusable encoded
("letmein", make_password(password="letmein"), ValueError), # valid encoded
]
for password, encoded, hasher_side_effect in cases:
with (
self.subTest(encoded=encoded),
self.assertMakePasswordCalled(password, encoded, hasher_side_effect),
):
await acheck_password(password, encoded)
def test_encode_invalid_salt(self):
hasher_classes = [
MD5PasswordHasher,
PBKDF2PasswordHasher,
PBKDF2SHA1PasswordHasher,
ScryptPasswordHasher,
]
msg = "salt must be provided and cannot contain $."
for hasher_class in hasher_classes:
hasher = hasher_class()
for salt in [None, "", "sea$salt"]:
with self.subTest(hasher_class.__name__, salt=salt):
with self.assertRaisesMessage(ValueError, msg):
hasher.encode("password", salt)
def test_password_and_salt_in_str_and_bytes(self):
hasher_classes = [
MD5PasswordHasher,
PBKDF2PasswordHasher,
PBKDF2SHA1PasswordHasher,
ScryptPasswordHasher,
]
for hasher_class in hasher_classes:
hasher = hasher_class()
with self.subTest(hasher_class.__name__):
passwords = ["password", b"password"]
for password in passwords:
for salt in [hasher.salt(), hasher.salt().encode()]:
encoded = hasher.encode(password, salt)
for password_to_verify in passwords:
self.assertIs(
hasher.verify(password_to_verify, encoded), True
)
@skipUnless(argon2, "argon2-cffi not installed")
def test_password_and_salt_in_str_and_bytes_argon2(self):
hasher = Argon2PasswordHasher()
passwords = ["password", b"password"]
for password in passwords:
for salt in [hasher.salt(), hasher.salt().encode()]:
encoded = hasher.encode(password, salt)
for password_to_verify in passwords:
self.assertIs(hasher.verify(password_to_verify, encoded), True)
@skipUnless(bcrypt, "bcrypt not installed")
def test_password_and_salt_in_str_and_bytes_bcrypt(self):
hasher_classes = [
BCryptPasswordHasher,
BCryptSHA256PasswordHasher,
]
for hasher_class in hasher_classes:
hasher = hasher_class()
with self.subTest(hasher_class.__name__):
passwords = ["password", b"password"]
for password in passwords:
salts = [hasher.salt().decode(), hasher.salt()]
for salt in salts:
encoded = hasher.encode(password, salt)
for password_to_verify in passwords:
self.assertIs(
hasher.verify(password_to_verify, encoded), True
)
def test_encode_password_required(self):
hasher_classes = [
MD5PasswordHasher,
PBKDF2PasswordHasher,
PBKDF2SHA1PasswordHasher,
ScryptPasswordHasher,
]
msg = "password must be provided."
for hasher_class in hasher_classes:
hasher = hasher_class()
with self.subTest(hasher_class.__name__):
with self.assertRaisesMessage(TypeError, msg):
hasher.encode(None, "seasalt")
|
TestUtilsHashPass
|
python
|
explosion__spaCy
|
spacy/lang/nb/__init__.py
|
{
"start": 629,
"end": 1274
}
|
class ____(Language):
lang = "nb"
Defaults = NorwegianDefaults
@Norwegian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "rule",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
scorer: Optional[Callable],
):
return Lemmatizer(
nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
__all__ = ["Norwegian"]
|
Norwegian
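Language subclasses registered this way are normally obtained through spaCy's loaders rather than instantiated directly. A minimal usage sketch, assuming spaCy itself is installed:

import spacy

nlp = spacy.blank("nb")              # resolves to the Norwegian subclass above
doc = nlp("Dette er en setning.")
print([token.text for token in doc])
# A rule lemmatizer could then be added via the factory defined above:
# nlp.add_pipe("lemmatizer", config={"mode": "rule"})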
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/relationship_attributes/back_populates/tutorial001_py310.py
|
{
"start": 279,
"end": 4488
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
team: Team | None = Relationship()
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team = team_preventers
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("Updated hero:", hero_spider_boy)
hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E")
team_wakaland = Team(
name="Wakaland",
headquarters="Wakaland Capital City",
heroes=[hero_black_lion, hero_sure_e],
)
session.add(team_wakaland)
session.commit()
session.refresh(team_wakaland)
print("Team Wakaland:", team_wakaland)
hero_tarantula = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_dr_weird = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_cap = Hero(
name="Captain North America", secret_name="Esteban Rogelios", age=93
)
team_preventers.heroes.append(hero_tarantula)
team_preventers.heroes.append(hero_dr_weird)
team_preventers.heroes.append(hero_cap)
session.add(team_preventers)
session.commit()
session.refresh(hero_tarantula)
session.refresh(hero_dr_weird)
session.refresh(hero_cap)
print("Preventers new hero:", hero_tarantula)
print("Preventers new hero:", hero_dr_weird)
print("Preventers new hero:", hero_cap)
def select_heroes():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Preventers")
result = session.exec(statement)
team_preventers = result.one()
print("Preventers heroes:", team_preventers.heroes)
def update_heroes():
with Session(engine) as session:
hero_spider_boy = session.exec(
select(Hero).where(Hero.name == "Spider-Boy")
).one()
preventers_team = session.exec(
select(Team).where(Team.name == "Preventers")
).one()
print("Hero Spider-Boy:", hero_spider_boy)
print("Preventers Team:", preventers_team)
print("Preventers Team Heroes:", preventers_team.heroes)
hero_spider_boy.team = None
print("Spider-Boy without team:", hero_spider_boy)
print("Preventers Team Heroes again:", preventers_team.heroes)
session.add(hero_spider_boy)
session.commit()
print("After committing")
session.refresh(hero_spider_boy)
print("Spider-Boy after commit:", hero_spider_boy)
print("Preventers Team Heroes after commit:", preventers_team.heroes)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
update_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
getsentry__sentry
|
src/sentry/users/api/endpoints/user_identity_config.py
|
{
"start": 3786,
"end": 5806
}
|
class ____(UserEndpoint):
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (UserAndStaffPermission,)
@staticmethod
def _get_identity(user: User, category: str, identity_id: str) -> UserIdentityConfig | None:
identity_int = int(identity_id)
# This fetches and iterates over all the user's identities.
# If needed, we could optimize to look directly for the one
# object, but we would still need to examine the full set of
# Identity objects in order to correctly set the status.
for identity in get_identities(user):
if identity.category == category and identity.id == identity_int:
return identity
return None
def get(self, request: Request, user: User, category: str, identity_id: str) -> Response:
identity = self._get_identity(user, category, identity_id)
if identity:
return Response(serialize(identity, serializer=UserIdentityConfigSerializer()))
else:
return Response(status=status.HTTP_404_NOT_FOUND)
def delete(self, request: Request, user: User, category: str, identity_id: str) -> Response:
with transaction.atomic(using=router.db_for_write(Identity)):
identity = self._get_identity(user, category, identity_id)
if not identity:
# Returns 404 even if the ID exists but belongs to
# another user. In that case, 403 would also be
# appropriate, but 404 is fine or even preferable.
return Response(status=status.HTTP_404_NOT_FOUND)
if identity.status != Status.CAN_DISCONNECT:
return Response(status=status.HTTP_403_FORBIDDEN)
model_type = identity.get_model_type_for_category()
model_type.objects.get(id=int(identity_id)).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
UserIdentityConfigDetailsEndpoint
|
python
|
gevent__gevent
|
src/gevent/tests/test__socket_dns.py
|
{
"start": 20294,
"end": 21787
}
|
class ____(TestCase):
# certain tests in test_patched_socket.py only work if getaddrinfo('localhost') does not switch
# (e.g. NetworkConnectionAttributesTest.testSourceAddress)
#switch_expected = False
# XXX: The above has been commented out for some time. Apparently this isn't the case
# anymore.
def _normalize_result_getaddrinfo(self, result):
if RESOLVER_NOT_SYSTEM:
# We see that some impls (OS X) return extra results
# like DGRAM that ares does not.
return ()
return super(TestLocalhost, self)._normalize_result_getaddrinfo(result)
NORMALIZE_GHBA_IGNORE_ALIAS = True
if greentest.RUNNING_ON_TRAVIS and greentest.PY2 and RESOLVER_NOT_SYSTEM:
def _normalize_result_gethostbyaddr(self, result):
# Beginning in November 2017 after an upgrade to Travis,
# we started seeing ares return ::1 for localhost, but
# the system resolver is still returning 127.0.0.1 under Python 2
result = super(TestLocalhost, self)._normalize_result_gethostbyaddr(result)
if isinstance(result, tuple):
result = (result[0], result[1], ['127.0.0.1'])
return result
add(
TestLocalhost, 'ip6-localhost',
skip=RESOLVER_DNSPYTHON, # XXX: Fix these.
skip_reason="Can return gaierror(-2)"
)
add(
TestLocalhost, 'localhost',
skip=greentest.RUNNING_ON_TRAVIS,
skip_reason="Can return gaierror(-2)"
)
|
TestLocalhost
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/hooks/test_kubernetes_engine.py
|
{
"start": 5117,
"end": 7255
}
|
class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_default_project_id
):
self.gke_hook = GKEHook(gcp_conn_id="test", location=GKE_ZONE)
self.gke_hook._client = mock.Mock()
@mock.patch(GKE_STRING.format("GKEHook.wait_for_operation"))
def test_delete_cluster(self, wait_mock):
retry_mock, timeout_mock = mock.Mock(), mock.Mock()
client_delete = self.gke_hook._client.delete_cluster = mock.Mock()
self.gke_hook.delete_cluster(
name=CLUSTER_NAME, project_id=TEST_GCP_PROJECT_ID, retry=retry_mock, timeout=timeout_mock
)
client_delete.assert_called_once_with(
name=f"projects/{TEST_GCP_PROJECT_ID}/locations/{GKE_ZONE}/clusters/{CLUSTER_NAME}",
retry=retry_mock,
timeout=timeout_mock,
)
wait_mock.assert_called_once_with(client_delete.return_value, TEST_GCP_PROJECT_ID)
@mock.patch(GKE_STRING.format("GKEHook.log"))
@mock.patch(GKE_STRING.format("GKEHook.wait_for_operation"))
def test_delete_cluster_not_found(self, wait_mock, log_mock):
from google.api_core.exceptions import NotFound
# To force an error
message = "Not Found"
self.gke_hook._client.delete_cluster.side_effect = NotFound(message=message)
self.gke_hook.delete_cluster(name="not-existing", project_id=TEST_GCP_PROJECT_ID)
wait_mock.assert_not_called()
log_mock.info.assert_any_call("Assuming Success: %s", message)
@mock.patch(
BASE_STRING.format("GoogleBaseHook.project_id"),
new_callable=mock.PropertyMock,
return_value=None,
)
@mock.patch(GKE_STRING.format("GKEHook.wait_for_operation"))
def test_delete_cluster_error(self, wait_mock, mock_project_id):
# To force an error
self.gke_hook._client.delete_cluster.side_effect = AirflowException("400")
with pytest.raises(AirflowException):
self.gke_hook.delete_cluster(name="a-cluster")
wait_mock.assert_not_called()
|
TestGKEHookDelete
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/partially_shaped_variables.py
|
{
"start": 905,
"end": 1573
}
|
class ____(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
# CHECK: "tf_saved_model.global_tensor"() <{is_mutable, {{.*}} type = tensor<*xf32>, value = dense<0.000000e+00> : tensor<1xf32>}> {tf_saved_model.exported_names = ["v0"]} : () -> ()
# CHECK: "tf_saved_model.global_tensor"() <{is_mutable, {{.*}} type = tensor<?xf32>, value = dense<[0.000000e+00, 1.000000e+00]> : tensor<2xf32>}> {tf_saved_model.exported_names = ["v1"]} : () -> ()
self.v0 = tf.Variable([0.], shape=tf.TensorShape(None))
self.v1 = tf.Variable([0., 1.], shape=[None])
if __name__ == '__main__':
common.do_test(TestModule, exported_names=[])
|
TestModule
|
python
|
great-expectations__great_expectations
|
tests/datasource/fluent/test_batch.py
|
{
"start": 6389,
"end": 10617
}
|
class ____:
@pytest.fixture
def suite(self) -> ExpectationSuite:
return gx.ExpectationSuite(
name="my-suite",
expectations=[gxe.ExpectColumnValuesToNotBeNull(column="vendor_id", mostly=0.95)],
)
@pytest.mark.filesystem
def test_boolean_validation_result(
self,
pandas_setup: Tuple[AbstractDataContext, Batch],
suite: ExpectationSuite,
):
_, batch = pandas_setup
result = batch.validate(suite, result_format="BOOLEAN_ONLY")
assert result.success
assert len(result.results[0].result) == 0
@pytest.mark.filesystem
def test_summary_validation_result(
self,
pandas_setup: Tuple[AbstractDataContext, Batch],
suite: ExpectationSuite,
):
_, batch = pandas_setup
summary_result = batch.validate(suite, result_format="SUMMARY")
assert summary_result.success
assert len(summary_result.results[0].result) > 0
@pytest.mark.filesystem
def test_complete_validation_result(
self,
pandas_setup: Tuple[AbstractDataContext, Batch],
suite: ExpectationSuite,
):
_, batch = pandas_setup
result = batch.validate(suite, result_format="COMPLETE")
assert result.success
assert "unexpected_index_list" in result.results[0].result
@pytest.mark.filesystem
def test_batch_validate_expectation_does_not_persist_a_batch_definition(
pandas_setup: Tuple[AbstractDataContext, Batch],
):
context, batch = pandas_setup
datasource = context.data_sources.get(DATASOURCE_NAME)
assert isinstance(datasource, Datasource)
asset = datasource.get_asset(ASSET_NAME)
expectation = gxe.ExpectColumnValuesToNotBeNull(
column="vendor_id",
mostly=0.95,
)
result = batch.validate(expectation)
assert result.success
assert len(asset.batch_definitions) == 0
@pytest.mark.filesystem
def test_batch_validate_expectation_suite_does_not_persist_a_batch_definition(
pandas_setup: Tuple[AbstractDataContext, Batch],
):
context, batch = pandas_setup
datasource = context.data_sources.get(DATASOURCE_NAME)
assert isinstance(datasource, Datasource)
asset = datasource.get_asset(ASSET_NAME)
suite = ExpectationSuite(
"suite",
expectations=[
gxe.ExpectColumnValuesToNotBeNull(
column="vendor_id",
mostly=0.95,
)
],
)
result = batch.validate(suite)
assert result.success
assert len(asset.batch_definitions) == 0
@pytest.mark.filesystem
def test_batch_compute_metrics_single_metric_success(
pandas_setup: Tuple[AbstractDataContext, Batch],
):
_, batch = pandas_setup
metric = ColumnValuesNonNull(
column="vendor_id",
)
metric_results = batch.compute_metrics(metric)
assert type(metric_results) is ColumnValuesNonNullResult
@pytest.mark.filesystem
def test_batch_compute_metrics_multiple_metrics_success(
pandas_setup: Tuple[AbstractDataContext, Batch],
):
_, batch = pandas_setup
metric_1 = ColumnValuesNonNull(
column="passenger_count",
)
metric_2 = BatchRowCount()
metrics: list[Metric] = [metric_1, metric_2]
requested_metric_count = len(metrics)
metric_results = batch.compute_metrics(metrics)
assert isinstance(metric_results, list)
assert len(metric_results) == requested_metric_count
assert type(metric_results[0]) is ColumnValuesNonNullResult
assert type(metric_results[1]) is BatchRowCountResult
@pytest.mark.filesystem
def test_batch_compute_metrics_multiple_metrics_error(
pandas_setup: Tuple[AbstractDataContext, Batch],
):
_, batch = pandas_setup
metric_1 = ColumnValuesNonNull(
column="not_a_column",
)
metric_2 = BatchRowCount()
metrics: list[Metric] = [metric_1, metric_2]
requested_metric_count = len(metrics)
metric_results = batch.compute_metrics(metrics)
assert isinstance(metric_results, list)
assert len(metric_results) == requested_metric_count
assert type(metric_results[0]) is MetricErrorResult
assert type(metric_results[1]) is BatchRowCountResult
|
TestBatchValidateExpectationSuite
|
python
|
gevent__gevent
|
src/gevent/tests/test__threading_2.py
|
{
"start": 2889,
"end": 16510
}
|
class ____(unittest.TestCase):
maxDiff = None
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
initial_regex = r'<TestThread\(.*, stopped\)>'
if sys.version_info[:2] < (3, 13):
# prior to 3.13, they distinguished the initial state from
# the stopped state.
initial_regex = r'<TestThread\(.*, initial\)>'
for i in range(NUMTASKS):
t = TestThread("<thread %d>" % i, self, sema, mutex, numrunning)
threads.append(t)
# pylint:disable-next=attribute-defined-outside-init
t.daemon = False # Under PYPY we get daemon by default?
self.assertIsNone(t.ident)
self.assertFalse(t.daemon)
self.assertTrue(re.match(initial_regex, repr(t)),
repr(t))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join(NUMTASKS)
self.assertFalse(t.is_alive(), t.__dict__)
if hasattr(t, 'ident'):
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match(r'<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads,
# as must the repr and str.
t = threading.current_thread()
self.assertFalse(t.ident is None)
str(t)
repr(t)
def f():
t = threading.current_thread()
ident.append(t.ident)
str(t)
repr(t)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print('platform does not support changing thread stack size')
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print('platform does not support changing thread stack size')
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid],
threading._DummyThread)
del threading._active[tid]
        # in gevent, we actually clean up threading._active, but that hasn't happened there yet
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def SKIP_test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print("test_PyThreadState_SetAsyncExc can't import ctypes")
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
id = None
finished = False
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
# pylint:disable-next=attribute-defined-outside-init
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
worker_started.wait()
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*_args, **_kw):
raise thread.error()
if hasattr(threading, '_start_new_thread'):
patcher = Patch.object(threading, '_start_new_thread', new=fail_new_thread)
else:
# 3.13 or later
patcher = Patch.object(threading, '_start_joinable_thread', new=fail_new_thread)
self.addCleanup(patcher.stop)
patcher.start()
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
getattr(ctypes, 'pythonapi') # not available on PyPy
getattr(ctypes.pythonapi, 'PyGILState_Ensure') # not available on PyPy3
except (ImportError, AttributeError):
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
del ctypes # pyflakes fix
import subprocess
rc = subprocess.call([sys.executable, "-W", "ignore", "-c", """if 1:
%s
import ctypes, sys, time
try:
import thread
except ImportError:
import _thread as thread # Py3
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""" % setup_3])
self.assertEqual(rc, 42)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
import subprocess
script = """if 1:
%s
import threading
from time import sleep
def child():
sleep(0.3)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is: %%s.%%s" %% (sleep.__module__, sleep.__name__))
threading.Thread(target=child).start()
raise SystemExit
""" % setup_4
p = subprocess.Popen([sys.executable, "-W", "ignore", "-c", script],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.strip()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
self.assertEqual(
'Woke up, sleep function is: gevent.hub.sleep',
stdout)
# On Python 2, importing pkg_resources tends to result in some 'ImportWarning'
# being printed to stderr about packages missing __init__.py; the -W ignore is...
# ignored.
# self.assertEqual(stderr, "")
@greentest.skipIf(
not(hasattr(sys, 'getcheckinterval')),
"Needs sys.getcheckinterval"
)
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
# get/set checkinterval are deprecated in Python 3,
# and removed in Python 3.9
old_interval = sys.getcheckinterval() # pylint:disable=no-member
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5) # pylint:disable=no-member
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval) # pylint:disable=no-member
if not hasattr(sys, 'pypy_version_info'):
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'_yet_another': self})
self.thread.start()
def _run(self, _other_ref, _yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
@skipDueToHang
|
ThreadTests
|
python
|
charliermarsh__ruff
|
crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py
|
{
"start": 2419,
"end": 2586
}
|
class ____[Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]:
pass
|
TestTypeParams
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 63230,
"end": 63353
}
|
class ____:
xlTypePDF = 0 # from enum XlFixedFormatType
xlTypeXPS = 1 # from enum XlFixedFormatType
|
FixedFormatType
|
python
|
huggingface__transformers
|
src/transformers/models/ernie/modeling_ernie.py
|
{
"start": 31905,
"end": 32585
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = ErnieLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
@auto_docstring(
custom_intro="""
Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
"""
)
|
ErniePreTrainingHeads
|
python
|
python__mypy
|
test-data/unit/plugins/union_method.py
|
{
"start": 224,
"end": 1857
}
|
class ____(Plugin):
def get_method_signature_hook(
self, fullname: str
) -> Callable[[MethodSigContext], CallableType] | None:
if fullname.startswith("__main__.Foo."):
return my_meth_sig_hook
return None
def get_method_hook(self, fullname: str) -> Callable[[MethodContext], Type] | None:
if fullname.startswith("__main__.Bar."):
return my_meth_hook
return None
def _str_to_int(api: CheckerPluginInterface, typ: Type) -> Type:
typ = get_proper_type(typ)
if isinstance(typ, Instance):
if typ.type.fullname == "builtins.str":
return api.named_generic_type("builtins.int", [])
elif typ.args:
return typ.copy_modified(args=[_str_to_int(api, t) for t in typ.args])
return typ
def _float_to_int(api: CheckerPluginInterface, typ: Type) -> Type:
typ = get_proper_type(typ)
if isinstance(typ, Instance):
if typ.type.fullname == "builtins.float":
return api.named_generic_type("builtins.int", [])
elif typ.args:
return typ.copy_modified(args=[_float_to_int(api, t) for t in typ.args])
return typ
def my_meth_sig_hook(ctx: MethodSigContext) -> CallableType:
return ctx.default_signature.copy_modified(
arg_types=[_str_to_int(ctx.api, t) for t in ctx.default_signature.arg_types],
ret_type=_str_to_int(ctx.api, ctx.default_signature.ret_type),
)
def my_meth_hook(ctx: MethodContext) -> Type:
return _float_to_int(ctx.api, ctx.default_return_type)
def plugin(version: str) -> type[MethodPlugin]:
return MethodPlugin
|
MethodPlugin
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/airflow_core/test_pdb_scheduler.py
|
{
"start": 900,
"end": 2705
}
|
class ____:
"""Tests Scheduler PDB."""
def test_should_pass_validation_with_just_pdb_enabled_v1(self):
render_chart(
values={"scheduler": {"podDisruptionBudget": {"enabled": True}}},
show_only=["templates/scheduler/scheduler-poddisruptionbudget.yaml"],
) # checks that no validation exception is raised
def test_should_pass_validation_with_just_pdb_enabled_v1beta1(self):
render_chart(
values={"scheduler": {"podDisruptionBudget": {"enabled": True}}},
show_only=["templates/scheduler/scheduler-poddisruptionbudget.yaml"],
kubernetes_version="1.16.0",
) # checks that no validation exception is raised
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"scheduler": {
"podDisruptionBudget": {"enabled": True},
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/scheduler/scheduler-poddisruptionbudget.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
def test_should_pass_validation_with_pdb_enabled_and_min_available_param(self):
render_chart(
values={
"scheduler": {
"podDisruptionBudget": {
"enabled": True,
"config": {"maxUnavailable": None, "minAvailable": 1},
}
}
},
show_only=["templates/scheduler/scheduler-poddisruptionbudget.yaml"],
) # checks that no validation exception is raised
|
TestSchedulerPdb
|
python
|
kamyu104__LeetCode-Solutions
|
Python/select-cells-in-grid-with-maximum-score.py
|
{
"start": 147,
"end": 2294
}
|
class ____(object):
def maxScore(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
# Template translated from:
# https://github.com/kth-competitive-programming/kactl/blob/main/content/graph/WeightedMatching.h
def hungarian(a): # Time: O(n^2 * m), Space: O(n + m)
if not a:
return 0, []
n, m = len(a)+1, len(a[0])+1
u, v, p, ans = [0]*n, [0]*m, [0]*m, [0]*(n-1)
for i in xrange(1, n):
p[0] = i
j0 = 0 # add "dummy" worker 0
dist, pre = [float("inf")]*m, [-1]*m
done = [False]*(m+1)
while True: # dijkstra
done[j0] = True
i0, j1, delta = p[j0], None, float("inf")
for j in xrange(1, m):
if done[j]:
continue
cur = a[i0-1][j-1]-u[i0]-v[j]
if cur < dist[j]:
dist[j], pre[j] = cur, j0
if dist[j] < delta:
delta, j1 = dist[j], j
for j in xrange(m):
if done[j]:
u[p[j]] += delta
v[j] -= delta
else:
dist[j] -= delta
j0 = j1
if not p[j0]:
break
while j0: # update alternating path
j1 = pre[j0]
p[j0], j0 = p[j1], j1
for j in xrange(1, m):
if p[j]:
ans[p[j]-1] = j-1
return -v[0], ans # min cost
mx = max(x for row in grid for x in row)
adj = [[0]*max(mx, len(grid)) for _ in xrange(len(grid))]
for i, row in enumerate(grid):
for x in row:
adj[i][x-1] = -x
return -hungarian(adj)[0]
# Time: O(r + (n * m) * 2^n), r = max(x for row in grid for x in row)
# Space: O(r + n * m + 2^n)
# dp, bitmasks
|
Solution
|
python
|
pallets__click
|
src/click/types.py
|
{
"start": 641,
"end": 5592
}
|
class ____:
"""Represents the type of a parameter. Validates and converts values
from the command line or Python into the correct type.
To implement a custom type, subclass and implement at least the
following:
- The :attr:`name` class attribute must be set.
- Calling an instance of the type with ``None`` must return
``None``. This is already implemented by default.
- :meth:`convert` must convert string values to the correct type.
- :meth:`convert` must accept values that are already the correct
type.
- It must be able to convert a value if the ``ctx`` and ``param``
arguments are ``None``. This can occur when converting prompt
input.
"""
is_composite: t.ClassVar[bool] = False
arity: t.ClassVar[int] = 1
#: the descriptive name of this type
name: str
#: if a list of this type is expected and the value is pulled from a
#: string environment variable, this is what splits it up. `None`
#: means any whitespace. For all parameters the general rule is that
#: whitespace splits them up. The exception are paths and files which
#: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
#: Windows).
envvar_list_splitter: t.ClassVar[str | None] = None
def to_info_dict(self) -> dict[str, t.Any]:
"""Gather information that could be useful for a tool generating
user-facing documentation.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
.. versionadded:: 8.0
"""
# The class name without the "ParamType" suffix.
param_type = type(self).__name__.partition("ParamType")[0]
param_type = param_type.partition("ParameterType")[0]
# Custom subclasses might not remember to set a name.
if hasattr(self, "name"):
name = self.name
else:
name = param_type
return {"param_type": param_type, "name": name}
def __call__(
self,
value: t.Any,
param: Parameter | None = None,
ctx: Context | None = None,
) -> t.Any:
if value is not None:
return self.convert(value, param, ctx)
def get_metavar(self, param: Parameter, ctx: Context) -> str | None:
"""Returns the metavar default for this param if it provides one."""
def get_missing_message(self, param: Parameter, ctx: Context | None) -> str | None:
"""Optionally might return extra information about a missing
parameter.
.. versionadded:: 2.0
"""
def convert(
self, value: t.Any, param: Parameter | None, ctx: Context | None
) -> t.Any:
"""Convert the value to the correct type. This is not called if
the value is ``None`` (the missing value).
This must accept string values from the command line, as well as
values that are already the correct type. It may also convert
other compatible types.
The ``param`` and ``ctx`` arguments may be ``None`` in certain
situations, such as when converting prompt input.
If the value cannot be converted, call :meth:`fail` with a
descriptive message.
:param value: The value to convert.
:param param: The parameter that is using this type to convert
its value. May be ``None``.
:param ctx: The current context that arrived at this value. May
be ``None``.
"""
return value
def split_envvar_value(self, rv: str) -> cabc.Sequence[str]:
"""Given a value from an environment variable this splits it up
into small chunks depending on the defined envvar list splitter.
If the splitter is set to `None`, which means that whitespace splits,
then leading and trailing whitespace is ignored. Otherwise, leading
and trailing splitters usually lead to empty items being included.
"""
return (rv or "").split(self.envvar_list_splitter)
def fail(
self,
message: str,
param: Parameter | None = None,
ctx: Context | None = None,
) -> t.NoReturn:
"""Helper method to fail with an invalid value message."""
raise BadParameter(message, ctx=ctx, param=param)
def shell_complete(
self, ctx: Context, param: Parameter, incomplete: str
) -> list[CompletionItem]:
"""Return a list of
:class:`~click.shell_completion.CompletionItem` objects for the
incomplete value. Most types do not provide completions, but
some do, and this allows custom types to provide custom
completions as well.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
return []
|
ParamType
|